Merge m-c to fx-team. a=merge
author: Ryan VanderMeulen <ryanvm@gmail.com>
Wed, 09 Jul 2014 16:18:27 -0400
changeset 193276 e9a7523b1abd61e3a1785dc865cc905e52d19e82
parent 193275 04cf8536314c0159e67831c786ed51d85a61b113 (current diff)
parent 193135 fc35681b0a87c921639dbed75045adcbc02a400a (diff)
child 193277 a19387ced68e283a62090a0fa3e7bfcd13c647e8
push id: 1
push user: root
push date: Mon, 20 Oct 2014 17:29:22 +0000
reviewers: merge
milestone: 33.0a1
Merge m-c to fx-team. a=merge
configure.in
widget/gtk/mozgtk/stub/Makefile.in
--- a/b2g/config/emulator-ics/sources.xml
+++ b/b2g/config/emulator-ics/sources.xml
@@ -14,17 +14,17 @@
   <!--original fetch url was git://github.com/apitrace/-->
   <remote fetch="https://git.mozilla.org/external/apitrace" name="apitrace"/>
   <default remote="caf" revision="refs/tags/android-4.0.4_r2.1" sync-j="4"/>
   <!-- Gonk specific things and forks -->
   <project name="platform_build" path="build" remote="b2g" revision="0d616942c300d9fb142483210f1dda9096c9a9fc">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
-  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="c394b7b4205b6f1a6ca44915fc08650f3ad127ec"/>
+  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="4e4e579b4b1e35f863ed43ef6ba840f49bfd761c"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="230f11aff069d90d20fc2dc63b48e9ae3d4bdcd1"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="platform_hardware_ril" path="hardware/ril" remote="b2g" revision="cd88d860656c31c7da7bb310d6a160d0011b0961"/>
   <project name="platform_external_qemu" path="external/qemu" remote="b2g" revision="bf9aaf39dd5a6491925a022db167c460f8207d34"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="dc5ca96695cab87b4c2fcd7c9f046ae3415a70a5"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="ee6e7320bb83409ebd4685fbd87a8ae033704182"/>
   <!-- Stock Android things -->
   <project name="platform/abi/cpp" path="abi/cpp" revision="dd924f92906085b831bf1cbbc7484d3c043d613c"/>
@@ -93,16 +93,17 @@
   <project name="platform/system/bluetooth" path="system/bluetooth" revision="507e46e553586bec971551322f20d066c80a0788"/>
   <project name="platform/system/core" path="system/core" revision="91e5551f88aea5aa64e1b4f8b4b52d7be2b28b64"/>
   <project name="platform/system/extras" path="system/extras" revision="0205c49fedf29620165c6b4e6db3d13739c93396"/>
   <project name="platform/system/media" path="system/media" revision="7f17e3995d1588cfcc309b56525652794b6513ef"/>
   <project name="platform/system/netd" path="system/netd" revision="3d298fde142bee3fc4f07f63f16f2d8ce42339c0"/>
   <project name="platform/system/vold" path="system/vold" revision="919829940468066a32f403980b43f6ebfee5d314"/>
   <!-- Emulator specific things -->
   <project name="android-development" path="development" remote="b2g" revision="9abf0ab68376afae3e1c7beefa3e9cbee2fde202"/>
-  <project name="device_generic_goldfish" path="device/generic/goldfish" remote="b2g" revision="f486d0316aef2b3dea772dfb186e447e7a6a5de5"/>
+  <project name="device_generic_goldfish" path="device/generic/goldfish" remote="b2g" revision="0d5c43228006bae775c4cb57a6d3908484d41718"/>
   <project name="platform/external/iproute2" path="external/iproute2" revision="c66c5716d5335e450f7a7b71ccc6a604fb2f41d2"/>
   <project name="platform/prebuilts/gcc/linux-x86/host/i686-linux-glibc2.7-4.6" path="prebuilts/gcc/linux-x86/host/i686-linux-glibc2.7-4.6" revision="d2685281e2e54ca14d1df304867aa82c37b27162"/>
   <project name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.7-4.6" path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.7-4.6" revision="627f9b20fc518937b93747a7ff1ed4f5ed46e06f"/>
   <project name="platform/prebuilts/tools" path="prebuilts/tools" revision="acba00cdb4596c6dcb61ed06f14cf4ec89623539"/>
   <project name="platform_prebuilts_qemu-kernel" path="prebuilts/qemu-kernel" remote="b2g" revision="02c32feb2fe97037be0ac4dace3a6a5025ac895d"/>
   <project name="android-sdk" path="sdk" remote="b2g" revision="4f46930827957afbce500a4a920755a218bf3155"/>
+  <project name="darwinstreamingserver" path="system/darwinstreamingserver" remote="b2g" revision="cf85968c7f85e0ec36e72c87ceb4837a943b8af6"/>
 </manifest>
--- a/b2g/config/emulator-jb/sources.xml
+++ b/b2g/config/emulator-jb/sources.xml
@@ -12,17 +12,17 @@
   <!--original fetch url was https://git.mozilla.org/releases-->
   <remote fetch="https://git.mozilla.org/releases" name="mozillaorg"/>
   <!-- B2G specific things. -->
   <project name="platform_build" path="build" remote="b2g" revision="cc67f31dc638c0b7edba3cf7e3d87cadf0ed52bf">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
-  <project name="gaia" path="gaia" remote="mozillaorg" revision="c394b7b4205b6f1a6ca44915fc08650f3ad127ec"/>
+  <project name="gaia" path="gaia" remote="mozillaorg" revision="4e4e579b4b1e35f863ed43ef6ba840f49bfd761c"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="230f11aff069d90d20fc2dc63b48e9ae3d4bdcd1"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="dc5ca96695cab87b4c2fcd7c9f046ae3415a70a5"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="ee6e7320bb83409ebd4685fbd87a8ae033704182"/>
   <project name="valgrind" path="external/valgrind" remote="b2g" revision="daa61633c32b9606f58799a3186395fd2bbb8d8c"/>
   <project name="vex" path="external/VEX" remote="b2g" revision="47f031c320888fe9f3e656602588565b52d43010"/>
   <!-- Stock Android things -->
   <project groups="linux" name="platform/prebuilts/clang/linux-x86/3.1" path="prebuilts/clang/linux-x86/3.1" revision="5c45f43419d5582949284eee9cef0c43d866e03b"/>
   <project groups="linux" name="platform/prebuilts/clang/linux-x86/3.2" path="prebuilts/clang/linux-x86/3.2" revision="3748b4168e7bd8d46457d4b6786003bc6a5223ce"/>
@@ -128,9 +128,10 @@
   <project name="device/generic/armv7-a-neon" path="device/generic/armv7-a-neon" revision="3a9a17613cc685aa232432566ad6cc607eab4ec1"/>
   <project name="device_generic_goldfish" path="device/generic/goldfish" remote="b2g" revision="0e31f35a2a77301e91baa8a237aa9e9fa4076084"/>
   <project name="platform/external/libnfc-nci" path="external/libnfc-nci" revision="7d33aaf740bbf6c7c6e9c34a92b371eda311b66b"/>
   <project name="platform_external_qemu" path="external/qemu" remote="b2g" revision="c61e5f15fd62888f2c33d7d542b5b65c38102e8b"/>
   <project name="platform/external/wpa_supplicant_8" path="external/wpa_supplicant_8" revision="0e56e450367cd802241b27164a2979188242b95f"/>
   <project name="platform_hardware_ril" path="hardware/ril" remote="b2g" revision="832f4acaf481a19031e479a40b03d9ce5370ddee"/>
   <project name="platform_system_nfcd" path="system/nfcd" remote="b2g" revision="dd72bacb432efc5135a1f747d00aab91f898bddb"/>
   <project name="android-sdk" path="sdk" remote="b2g" revision="8b1365af38c9a653df97349ee53a3f5d64fd590a"/>
+  <project name="darwinstreamingserver" path="system/darwinstreamingserver" remote="b2g" revision="cf85968c7f85e0ec36e72c87ceb4837a943b8af6"/>
 </manifest>
--- a/b2g/config/emulator-kk/sources.xml
+++ b/b2g/config/emulator-kk/sources.xml
@@ -10,17 +10,17 @@
   <!--original fetch url was git://codeaurora.org/-->
   <remote fetch="https://git.mozilla.org/external/caf" name="caf"/>
   <!--original fetch url was https://git.mozilla.org/releases-->
   <remote fetch="https://git.mozilla.org/releases" name="mozillaorg"/>
   <!-- B2G specific things. -->
   <project name="platform_build" path="build" remote="b2g" revision="276ce45e78b09c4a4ee643646f691d22804754c1">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
-  <project name="gaia" path="gaia" remote="mozillaorg" revision="c394b7b4205b6f1a6ca44915fc08650f3ad127ec"/>
+  <project name="gaia" path="gaia" remote="mozillaorg" revision="4e4e579b4b1e35f863ed43ef6ba840f49bfd761c"/>
   <project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="230f11aff069d90d20fc2dc63b48e9ae3d4bdcd1"/>
   <project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="dc5ca96695cab87b4c2fcd7c9f046ae3415a70a5"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="valgrind" path="external/valgrind" remote="b2g" revision="daa61633c32b9606f58799a3186395fd2bbb8d8c"/>
   <project name="vex" path="external/VEX" remote="b2g" revision="47f031c320888fe9f3e656602588565b52d43010"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="ee6e7320bb83409ebd4685fbd87a8ae033704182"/>
@@ -126,9 +126,10 @@
   <!-- Emulator specific things -->
   <project name="device/generic/armv7-a-neon" path="device/generic/armv7-a-neon" revision="72ffdf71c68a96309212eb13d63560d66db14c9e"/>
   <project name="device_generic_goldfish" path="device/generic/goldfish" remote="b2g" revision="cf3f83a8ef13597b62fb6de7aa0cfaf5dc5de2b5"/>
   <project name="platform_external_qemu" path="external/qemu" remote="b2g" revision="870b4f52ba480da34651c172141d62e7fccd113e"/>
   <project name="platform/external/wpa_supplicant_8" path="external/wpa_supplicant_8" revision="694cecf256122d0cb3b6a1a4efb4b5c7401db223"/>
   <project name="platform_hardware_ril" path="hardware/ril" remote="b2g" revision="effff9b06977ffaf46b45e5737f83a608b2df620"/>
   <project name="platform/development" path="development" revision="5968ff4e13e0d696ad8d972281fc27ae5a12829b"/>
   <project name="android-sdk" path="sdk" remote="b2g" revision="0951179277915335251c5e11d242e4e1a8c2236f"/>
+  <project name="darwinstreamingserver" path="system/darwinstreamingserver" remote="b2g" revision="cf85968c7f85e0ec36e72c87ceb4837a943b8af6"/>
 </manifest>
--- a/b2g/config/emulator/sources.xml
+++ b/b2g/config/emulator/sources.xml
@@ -14,17 +14,17 @@
   <!--original fetch url was git://github.com/apitrace/-->
   <remote fetch="https://git.mozilla.org/external/apitrace" name="apitrace"/>
   <default remote="caf" revision="refs/tags/android-4.0.4_r2.1" sync-j="4"/>
   <!-- Gonk specific things and forks -->
   <project name="platform_build" path="build" remote="b2g" revision="0d616942c300d9fb142483210f1dda9096c9a9fc">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
-  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="c394b7b4205b6f1a6ca44915fc08650f3ad127ec"/>
+  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="4e4e579b4b1e35f863ed43ef6ba840f49bfd761c"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="230f11aff069d90d20fc2dc63b48e9ae3d4bdcd1"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="platform_hardware_ril" path="hardware/ril" remote="b2g" revision="cd88d860656c31c7da7bb310d6a160d0011b0961"/>
   <project name="platform_external_qemu" path="external/qemu" remote="b2g" revision="bf9aaf39dd5a6491925a022db167c460f8207d34"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="dc5ca96695cab87b4c2fcd7c9f046ae3415a70a5"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="ee6e7320bb83409ebd4685fbd87a8ae033704182"/>
   <!-- Stock Android things -->
   <project name="platform/abi/cpp" path="abi/cpp" revision="dd924f92906085b831bf1cbbc7484d3c043d613c"/>
@@ -93,16 +93,17 @@
   <project name="platform/system/bluetooth" path="system/bluetooth" revision="507e46e553586bec971551322f20d066c80a0788"/>
   <project name="platform/system/core" path="system/core" revision="91e5551f88aea5aa64e1b4f8b4b52d7be2b28b64"/>
   <project name="platform/system/extras" path="system/extras" revision="0205c49fedf29620165c6b4e6db3d13739c93396"/>
   <project name="platform/system/media" path="system/media" revision="7f17e3995d1588cfcc309b56525652794b6513ef"/>
   <project name="platform/system/netd" path="system/netd" revision="3d298fde142bee3fc4f07f63f16f2d8ce42339c0"/>
   <project name="platform/system/vold" path="system/vold" revision="919829940468066a32f403980b43f6ebfee5d314"/>
   <!-- Emulator specific things -->
   <project name="android-development" path="development" remote="b2g" revision="9abf0ab68376afae3e1c7beefa3e9cbee2fde202"/>
-  <project name="device_generic_goldfish" path="device/generic/goldfish" remote="b2g" revision="f486d0316aef2b3dea772dfb186e447e7a6a5de5"/>
+  <project name="device_generic_goldfish" path="device/generic/goldfish" remote="b2g" revision="0d5c43228006bae775c4cb57a6d3908484d41718"/>
   <project name="platform/external/iproute2" path="external/iproute2" revision="c66c5716d5335e450f7a7b71ccc6a604fb2f41d2"/>
   <project name="platform/prebuilts/gcc/linux-x86/host/i686-linux-glibc2.7-4.6" path="prebuilts/gcc/linux-x86/host/i686-linux-glibc2.7-4.6" revision="d2685281e2e54ca14d1df304867aa82c37b27162"/>
   <project name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.7-4.6" path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.7-4.6" revision="627f9b20fc518937b93747a7ff1ed4f5ed46e06f"/>
   <project name="platform/prebuilts/tools" path="prebuilts/tools" revision="acba00cdb4596c6dcb61ed06f14cf4ec89623539"/>
   <project name="platform_prebuilts_qemu-kernel" path="prebuilts/qemu-kernel" remote="b2g" revision="02c32feb2fe97037be0ac4dace3a6a5025ac895d"/>
   <project name="android-sdk" path="sdk" remote="b2g" revision="4f46930827957afbce500a4a920755a218bf3155"/>
+  <project name="darwinstreamingserver" path="system/darwinstreamingserver" remote="b2g" revision="cf85968c7f85e0ec36e72c87ceb4837a943b8af6"/>
 </manifest>
--- a/b2g/config/flame/sources.xml
+++ b/b2g/config/flame/sources.xml
@@ -12,17 +12,17 @@
   <!--original fetch url was https://git.mozilla.org/releases-->
   <remote fetch="https://git.mozilla.org/releases" name="mozillaorg"/>
   <!-- B2G specific things. -->
   <project name="platform_build" path="build" remote="b2g" revision="cc67f31dc638c0b7edba3cf7e3d87cadf0ed52bf">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>
   <project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
-  <project name="gaia" path="gaia" remote="mozillaorg" revision="c394b7b4205b6f1a6ca44915fc08650f3ad127ec"/>
+  <project name="gaia" path="gaia" remote="mozillaorg" revision="4e4e579b4b1e35f863ed43ef6ba840f49bfd761c"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="230f11aff069d90d20fc2dc63b48e9ae3d4bdcd1"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="dc5ca96695cab87b4c2fcd7c9f046ae3415a70a5"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="ee6e7320bb83409ebd4685fbd87a8ae033704182"/>
   <project name="valgrind" path="external/valgrind" remote="b2g" revision="daa61633c32b9606f58799a3186395fd2bbb8d8c"/>
   <project name="vex" path="external/VEX" remote="b2g" revision="47f031c320888fe9f3e656602588565b52d43010"/>
   <!-- Stock Android things -->
   <project groups="linux" name="platform/prebuilts/clang/linux-x86/3.1" path="prebuilts/clang/linux-x86/3.1" revision="e95b4ce22c825da44d14299e1190ea39a5260bde"/>
   <project groups="linux" name="platform/prebuilts/clang/linux-x86/3.2" path="prebuilts/clang/linux-x86/3.2" revision="471afab478649078ad7c75ec6b252481a59e19b8"/>
--- a/b2g/config/gaia.json
+++ b/b2g/config/gaia.json
@@ -1,9 +1,9 @@
 {
     "git": {
         "git_revision": "", 
         "remote": "", 
         "branch": ""
     }, 
-    "revision": "8749f2b7d7998019d04618b46d28b5cb9d77aadb", 
+    "revision": "158fb07e2e4939dddee026a33a05d65e38bb0e67", 
     "repo_path": "/integration/gaia-central"
 }
--- a/b2g/config/hamachi/sources.xml
+++ b/b2g/config/hamachi/sources.xml
@@ -12,17 +12,17 @@
   <!--original fetch url was git://github.com/apitrace/-->
   <remote fetch="https://git.mozilla.org/external/apitrace" name="apitrace"/>
   <default remote="caf" revision="b2g/ics_strawberry" sync-j="4"/>
   <!-- Gonk specific things and forks -->
   <project name="platform_build" path="build" remote="b2g" revision="0d616942c300d9fb142483210f1dda9096c9a9fc">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
-  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="c394b7b4205b6f1a6ca44915fc08650f3ad127ec"/>
+  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="4e4e579b4b1e35f863ed43ef6ba840f49bfd761c"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="230f11aff069d90d20fc2dc63b48e9ae3d4bdcd1"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="dc5ca96695cab87b4c2fcd7c9f046ae3415a70a5"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="ee6e7320bb83409ebd4685fbd87a8ae033704182"/>
   <!-- Stock Android things -->
   <project name="platform/abi/cpp" path="abi/cpp" revision="6426040f1be4a844082c9769171ce7f5341a5528"/>
   <project name="platform/bionic" path="bionic" revision="d2eb6c7b6e1bc7643c17df2d9d9bcb1704d0b9ab"/>
--- a/b2g/config/helix/sources.xml
+++ b/b2g/config/helix/sources.xml
@@ -10,17 +10,17 @@
   <!--original fetch url was https://git.mozilla.org/releases-->
   <remote fetch="https://git.mozilla.org/releases" name="mozillaorg"/>
   <default remote="caf" revision="b2g/ics_strawberry" sync-j="4"/>
   <!-- Gonk specific things and forks -->
   <project name="platform_build" path="build" remote="b2g" revision="0d616942c300d9fb142483210f1dda9096c9a9fc">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
-  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="c394b7b4205b6f1a6ca44915fc08650f3ad127ec"/>
+  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="4e4e579b4b1e35f863ed43ef6ba840f49bfd761c"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="230f11aff069d90d20fc2dc63b48e9ae3d4bdcd1"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="dc5ca96695cab87b4c2fcd7c9f046ae3415a70a5"/>
   <project name="gonk-patches" path="patches" remote="b2g" revision="223a2421006e8f5da33f516f6891c87cae86b0f6"/>
   <!-- Stock Android things -->
   <project name="platform/abi/cpp" path="abi/cpp" revision="6426040f1be4a844082c9769171ce7f5341a5528"/>
   <project name="platform/bionic" path="bionic" revision="d2eb6c7b6e1bc7643c17df2d9d9bcb1704d0b9ab"/>
--- a/b2g/config/nexus-4/sources.xml
+++ b/b2g/config/nexus-4/sources.xml
@@ -12,17 +12,17 @@
   <!--original fetch url was https://git.mozilla.org/releases-->
   <remote fetch="https://git.mozilla.org/releases" name="mozillaorg"/>
   <!-- B2G specific things. -->
   <project name="platform_build" path="build" remote="b2g" revision="cc67f31dc638c0b7edba3cf7e3d87cadf0ed52bf">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
-  <project name="gaia" path="gaia" remote="mozillaorg" revision="c394b7b4205b6f1a6ca44915fc08650f3ad127ec"/>
+  <project name="gaia" path="gaia" remote="mozillaorg" revision="4e4e579b4b1e35f863ed43ef6ba840f49bfd761c"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="230f11aff069d90d20fc2dc63b48e9ae3d4bdcd1"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="dc5ca96695cab87b4c2fcd7c9f046ae3415a70a5"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="ee6e7320bb83409ebd4685fbd87a8ae033704182"/>
   <project name="valgrind" path="external/valgrind" remote="b2g" revision="daa61633c32b9606f58799a3186395fd2bbb8d8c"/>
   <project name="vex" path="external/VEX" remote="b2g" revision="47f031c320888fe9f3e656602588565b52d43010"/>
   <!-- Stock Android things -->
   <project groups="linux" name="platform/prebuilts/clang/linux-x86/3.1" path="prebuilts/clang/linux-x86/3.1" revision="5c45f43419d5582949284eee9cef0c43d866e03b"/>
   <project groups="linux" name="platform/prebuilts/clang/linux-x86/3.2" path="prebuilts/clang/linux-x86/3.2" revision="3748b4168e7bd8d46457d4b6786003bc6a5223ce"/>
--- a/b2g/config/wasabi/sources.xml
+++ b/b2g/config/wasabi/sources.xml
@@ -12,17 +12,17 @@
   <!--original fetch url was git://github.com/apitrace/-->
   <remote fetch="https://git.mozilla.org/external/apitrace" name="apitrace"/>
   <default remote="caf" revision="ics_chocolate_rb4.2" sync-j="4"/>
   <!-- Gonk specific things and forks -->
   <project name="platform_build" path="build" remote="b2g" revision="0d616942c300d9fb142483210f1dda9096c9a9fc">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
-  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="c394b7b4205b6f1a6ca44915fc08650f3ad127ec"/>
+  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="4e4e579b4b1e35f863ed43ef6ba840f49bfd761c"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="230f11aff069d90d20fc2dc63b48e9ae3d4bdcd1"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="dc5ca96695cab87b4c2fcd7c9f046ae3415a70a5"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="ee6e7320bb83409ebd4685fbd87a8ae033704182"/>
   <project name="gonk-patches" path="patches" remote="b2g" revision="223a2421006e8f5da33f516f6891c87cae86b0f6"/>
   <!-- Stock Android things -->
   <project name="platform/abi/cpp" path="abi/cpp" revision="6426040f1be4a844082c9769171ce7f5341a5528"/>
--- a/browser/modules/test/browser.ini
+++ b/browser/modules/test/browser.ini
@@ -19,18 +19,18 @@ skip-if = os == "linux" || e10s # Interm
 skip-if = e10s # Bug 941428 - UITour.jsm not e10s friendly
 [browser_UITour3.js]
 skip-if = os == "linux" || e10s # Linux: Bug 986760, Bug 989101; e10s: Bug 941428 - UITour.jsm not e10s friendly
 [browser_UITour_availableTargets.js]
 skip-if = e10s # Bug 941428 - UITour.jsm not e10s friendly
 [browser_UITour_detach_tab.js]
 skip-if = e10s # Bug 941428 - UITour.jsm not e10s friendly
 [browser_UITour_annotation_size_attributes.js]
-skip-if = e10s || os == "win" # Bug 941428 - UITour.jsm not e10s friendly. Intermittent test failures on Windows (bug 1026310 & bug 1032137)
+skip-if = e10s # Bug 941428 - UITour.jsm not e10s friendly
 [browser_UITour_panel_close_annotation.js]
-skip-if = e10s # Bug 941428 - UITour.jsm not e10s friendly
+skip-if = e10s || os == "win" # Bug 941428 - UITour.jsm not e10s friendly. Intermittent test failures on Windows (bug 1026310 & bug 1032137)
 [browser_UITour_registerPageID.js]
 skip-if = e10s # Bug 941428 - UITour.jsm not e10s friendly
 [browser_UITour_sync.js]
 skip-if = e10s # Bug 941428 - UITour.jsm not e10s friendly
 [browser_taskbar_preview.js]
 run-if = os == "win"
 skip-if = e10s # Bug 666808 - AeroPeek support for e10s
--- a/config/config.mk
+++ b/config/config.mk
@@ -59,16 +59,17 @@ endif
   MODULE \
   MSVC_ENABLE_PGO \
   NO_DIST_INSTALL \
   PARALLEL_DIRS \
   PROGRAM \
   RESOURCE_FILES \
   SDK_HEADERS \
   SIMPLE_PROGRAMS \
+  SONAME \
   TEST_DIRS \
   TIERS \
   TOOL_DIRS \
   XPCSHELL_TESTS \
   XPIDL_MODULE \
   $(NULL)
 
 _DEPRECATED_VARIABLES := \
@@ -143,16 +144,17 @@ COMMA = ,
 CHECK_VARS := \
  XPI_NAME \
  LIBRARY_NAME \
  MODULE \
  DEPTH \
  XPI_PKGNAME \
  INSTALL_EXTENSION_ID \
  SHARED_LIBRARY_NAME \
+ SONAME \
  STATIC_LIBRARY_NAME \
  $(NULL)
 
 # checks for internal spaces or trailing spaces in the variable
 # named by $x
 check-variable = $(if $(filter-out 0 1,$(words $($(x))z)),$(error Spaces are not allowed in $(x)))
 
 $(foreach x,$(CHECK_VARS),$(check-variable))
--- a/config/rules.mk
+++ b/config/rules.mk
@@ -172,16 +172,22 @@ IMPORT_LIBRARY		:= $(LIB_PREFIX)$(SHARED
 endif
 
 ifdef MAKE_FRAMEWORK
 SHARED_LIBRARY		:= $(SHARED_LIBRARY_NAME)
 else
 SHARED_LIBRARY		:= $(DLL_PREFIX)$(SHARED_LIBRARY_NAME)$(DLL_SUFFIX)
 endif
 
+ifdef SONAME
+DSO_SONAME			= $(DLL_PREFIX)$(SONAME)$(DLL_SUFFIX)
+else
+DSO_SONAME			= $(notdir $@)
+endif
+
 EMBED_MANIFEST_AT=2
 
 endif # MKSHLIB
 endif # FORCE_SHARED_LIB
 endif # LIBRARY
 
 ifdef FORCE_STATIC_LIB
 ifndef FORCE_SHARED_LIB
--- a/configure.in
+++ b/configure.in
@@ -1307,18 +1307,18 @@ if test "$GNU_CC"; then
     # But on OS X we just use C99 plus GNU extensions, in order to fix
     # bug 917526.
     CFLAGS="$CFLAGS -std=gnu99"
     if test "${OS_ARCH}" != Darwin; then
         CFLAGS="$CFLAGS -fgnu89-inline"
     fi
     # FIXME: Let us build with strict aliasing. bug 414641.
     CFLAGS="$CFLAGS -fno-strict-aliasing"
-    MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-h,$(notdir $@) -o $@'
-    MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-h,$(notdir $@) -o $@'
+    MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-h,$(DSO_SONAME) -o $@'
+    MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-h,$(DSO_SONAME) -o $@'
     WARNINGS_AS_ERRORS='-Werror'
     DSO_CFLAGS=''
     DSO_PIC_CFLAGS='-fPIC'
     ASFLAGS="$ASFLAGS -fPIC"
     AC_MSG_CHECKING([for --noexecstack option to as])
     _SAVE_CFLAGS=$CFLAGS
     CFLAGS="$CFLAGS -Wa,--noexecstack"
     AC_TRY_COMPILE(,,AC_MSG_RESULT([yes])
@@ -1456,18 +1456,18 @@ elif test "$SOLARIS_SUNPRO_CC"; then
     if test "$CPU_ARCH" = "sparc"; then
         # for Sun Studio on Solaris/SPARC
         DSO_PIC_CFLAGS='-xcode=pic32'
     else
         DSO_PIC_CFLAGS='-KPIC'
     fi
     _DEFINES_CFLAGS='$(ACDEFINES) -D_MOZILLA_CONFIG_H_ -DMOZILLA_CLIENT'
 else
-    MKSHLIB='$(LD) $(DSO_LDOPTS) -h $(notdir $@) -o $@'
-    MKCSHLIB='$(LD) $(DSO_LDOPTS) -h $(notdir $@) -o $@'
+    MKSHLIB='$(LD) $(DSO_LDOPTS) -h $(DSO_SONAME) -o $@'
+    MKCSHLIB='$(LD) $(DSO_LDOPTS) -h $(DSO_SONAME) -o $@'
 
     DSO_LDOPTS='-shared'
     if test "$GNU_LD"; then
         # Don't allow undefined symbols in libraries
         DSO_LDOPTS="$DSO_LDOPTS -z defs"
     fi
 
     DSO_CFLAGS=''
@@ -2274,18 +2274,18 @@ ia64*-hpux*)
         DSO_LDOPTS='-shared'
     fi
     # This will fail on a.out systems prior to 1.5.1_ALPHA.
     MKSHLIB_FORCE_ALL='-Wl,--whole-archive'
     MKSHLIB_UNFORCE_ALL='-Wl,--no-whole-archive'
     if test "$LIBRUNPATH"; then
         DSO_LDOPTS="-Wl,-R$LIBRUNPATH $DSO_LDOPTS"
     fi
-    MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-soname,$(notdir $@) -o $@'
-    MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-soname,$(notdir $@)) -o $@'
+    MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-soname,$(DSO_SONAME) -o $@'
+    MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-soname,$(DSO_SONAME) -o $@'
     ;;
 
 *-openbsd*)
     if test "$SO_VERSION"; then
         DLL_SUFFIX=".so.$SO_VERSION"
     else
         DLL_SUFFIX=".so.1.0"
     fi
@@ -2318,18 +2318,18 @@ ia64*-hpux*)
            _SAVE_LDFLAGS=$LDFLAGS
            LDFLAGS="-M /usr/lib/ld/map.noexstk $LDFLAGS"
            AC_TRY_LINK([#include <stdio.h>],
                        [printf("Hello World\n");],
                        ,
                        [LDFLAGS=$_SAVE_LDFLAGS])
        fi
        MOZ_OPTIMIZE_FLAGS="-xO4"
-       MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_FLAGS) $(DSO_LDOPTS) -h $(notdir $@) -o $@'
-       MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_FLAGS) $(DSO_LDOPTS) -h $(notdir $@) -o $@'
+       MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_FLAGS) $(DSO_LDOPTS) -h $(DSO_SONAME) -o $@'
+       MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_FLAGS) $(DSO_LDOPTS) -h $(DSO_SONAME) -o $@'
        MKSHLIB_FORCE_ALL='-z allextract'
        MKSHLIB_UNFORCE_ALL='-z defaultextract'
        DSO_LDOPTS='-G'
        AR_LIST="$AR t"
        AR_EXTRACT="$AR x"
        AR_DELETE="$AR d"
        AR='$(CXX) -xar'
        AR_FLAGS='-o $@'
--- a/content/media/encoder/TrackEncoder.cpp
+++ b/content/media/encoder/TrackEncoder.cpp
@@ -1,49 +1,80 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 #include "TrackEncoder.h"
 #include "AudioChannelFormat.h"
 #include "MediaStreamGraph.h"
+#include "prlog.h"
 #include "VideoUtils.h"
 
 #undef LOG
 #ifdef MOZ_WIDGET_GONK
 #include <android/log.h>
 #define LOG(args...) __android_log_print(ANDROID_LOG_INFO, "MediaEncoder", ## args);
 #else
 #define LOG(args, ...)
 #endif
 
 namespace mozilla {
 
+#ifdef PR_LOGGING
+PRLogModuleInfo* gTrackEncoderLog;
+#define TRACK_LOG(type, msg) PR_LOG(gTrackEncoderLog, type, msg)
+#else
+#define TRACK_LOG(type, msg)
+#endif
+
 static const int DEFAULT_CHANNELS = 1;
 static const int DEFAULT_SAMPLING_RATE = 16000;
 static const int DEFAULT_FRAME_WIDTH = 640;
 static const int DEFAULT_FRAME_HEIGHT = 480;
 static const int DEFAULT_TRACK_RATE = USECS_PER_S;
 
+TrackEncoder::TrackEncoder()
+  : mReentrantMonitor("media.TrackEncoder")
+  , mEncodingComplete(false)
+  , mEosSetInEncoder(false)
+  , mInitialized(false)
+  , mEndOfStream(false)
+  , mCanceled(false)
+#ifdef PR_LOGGING
+  , mAudioInitCounter(0)
+  , mVideoInitCounter(0)
+#endif
+{
+#ifdef PR_LOGGING
+  if (!gTrackEncoderLog) {
+    gTrackEncoderLog = PR_NewLogModule("TrackEncoder");
+  }
+#endif
+}
+
 void
 AudioTrackEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
                                             TrackID aID,
                                             TrackRate aTrackRate,
                                             TrackTicks aTrackOffset,
                                             uint32_t aTrackEvents,
                                             const MediaSegment& aQueuedMedia)
 {
   if (mCanceled) {
     return;
   }
 
   const AudioSegment& audio = static_cast<const AudioSegment&>(aQueuedMedia);
 
   // Check and initialize parameters for codec encoder.
   if (!mInitialized) {
+#ifdef PR_LOGGING
+    mAudioInitCounter++;
+    TRACK_LOG(PR_LOG_DEBUG, ("Init the audio encoder %d times", mAudioInitCounter));
+#endif
     AudioSegment::ChunkIterator iter(const_cast<AudioSegment&>(audio));
     while (!iter.IsEnded()) {
       AudioChunk chunk = *iter;
 
       // The number of channels is determined by the first non-null chunk, and
       // thus the audio encoder is initialized at this time.
       if (!chunk.IsNull()) {
         nsresult rv = Init(chunk.mChannelData.Length(), aTrackRate);
@@ -153,16 +184,20 @@ VideoTrackEncoder::NotifyQueuedTrackChan
   if (mCanceled) {
     return;
   }
 
   const VideoSegment& video = static_cast<const VideoSegment&>(aQueuedMedia);
 
    // Check and initialize parameters for codec encoder.
   if (!mInitialized) {
+#ifdef PR_LOGGING
+    mVideoInitCounter++;
+    TRACK_LOG(PR_LOG_DEBUG, ("Init the video encoder %d times", mVideoInitCounter));
+#endif
     VideoSegment::ChunkIterator iter(const_cast<VideoSegment&>(video));
     while (!iter.IsEnded()) {
       VideoChunk chunk = *iter;
       if (!chunk.IsNull()) {
         gfx::IntSize imgsize = chunk.mFrame.GetImage()->GetSize();
         gfxIntSize intrinsicSize = chunk.mFrame.GetIntrinsicSize();
 #ifdef MOZ_WIDGET_GONK
         // Block the video frames come from video source.
--- a/content/media/encoder/TrackEncoder.h
+++ b/content/media/encoder/TrackEncoder.h
@@ -26,24 +26,17 @@ class MediaStreamGraph;
  * NotifyQueuedTrackChanges is called on subclasses of this class from the
  * MediaStreamGraph thread, and AppendAudioSegment/AppendVideoSegment is then
  * called to store media data in the TrackEncoder. Later on, GetEncodedTrack is
  * called on MediaEncoder's thread to encode and retrieve the encoded data.
  */
 class TrackEncoder
 {
 public:
-  TrackEncoder()
-    : mReentrantMonitor("media.TrackEncoder")
-    , mEncodingComplete(false)
-    , mEosSetInEncoder(false)
-    , mInitialized(false)
-    , mEndOfStream(false)
-    , mCanceled(false)
-  {}
+  TrackEncoder();
 
   virtual ~TrackEncoder() {}
 
   /**
    * Notified by the same callbcak of MediaEncoder when it has received a track
    * change from MediaStreamGraph. Called on the MediaStreamGraph thread.
    */
   virtual void NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, TrackID aID,
@@ -126,16 +119,22 @@ protected:
    */
   bool mEndOfStream;
 
   /**
    * True if a cancellation of encoding is sent from MediaEncoder, protected by
    * mReentrantMonitor.
    */
   bool mCanceled;
+
+#ifdef PR_LOGGING
+  // How many times we have tried to initialize the encoder.
+  uint32_t mAudioInitCounter;
+  uint32_t mVideoInitCounter;
+#endif
 };
 
 class AudioTrackEncoder : public TrackEncoder
 {
 public:
   AudioTrackEncoder()
     : TrackEncoder()
     , mChannels(0)
--- a/content/media/test/test_mediarecorder_record_4ch_audiocontext.html
+++ b/content/media/test/test_mediarecorder_record_4ch_audiocontext.html
@@ -16,16 +16,17 @@ function startTest() {
   for (var i = 0; i < 80920; ++i) {
     for(var j = 0; j < 4; ++j) {
       buffer.getChannelData(j)[i] = Math.sin(1000 * 2 * Math.PI * i / context.sampleRate);
     }
   }
 
   var source = context.createBufferSource();
   source.buffer = buffer;
+  source.loop = true;
   var dest = context.createMediaStreamDestination();
   var stopTriggered = false;
   var onstopTriggered = false;
   dest.channelCount = 4;
   var expectedMimeType = 'audio/ogg';
   var totalBlobSize = 0;
   source.channelCountMode = 'explicit';
   source.connect(dest);
--- a/content/media/test/test_mediarecorder_record_audiocontext.html
+++ b/content/media/test/test_mediarecorder_record_audiocontext.html
@@ -15,16 +15,17 @@ function startTest() {
   var hasonstop = false;
   var buffer = context.createBuffer(1, 80920, context.sampleRate);
   for (var i = 0; i < 80920; ++i) {
     buffer.getChannelData(0)[i] = Math.sin(1000 * 2 * Math.PI * i / context.sampleRate);
   }
 
   var source = context.createBufferSource();
   source.buffer = buffer;
+  source.loop = true;
 
   var dest = context.createMediaStreamDestination();
   source.connect(dest);
   var elem = document.createElement('audio');
   elem.mozSrcObject = dest.stream;
   mMediaStream = dest.stream;
   source.start(0);
   elem.play();
--- a/dom/alarm/AlarmsManager.js
+++ b/dom/alarm/AlarmsManager.js
@@ -56,22 +56,25 @@ AlarmsManager.prototype = {
         isIgnoreTimezone = true;
         break;
 
       default:
         throw Components.results.NS_ERROR_INVALID_ARG;
         break;
     }
 
+    let sandbox = new Cu.Sandbox(Cu.getWebIDLCallerPrincipal());
+    sandbox.data = aData;
+    let data = Cu.evalInSandbox("JSON.stringify(data)", sandbox);
     let request = this.createRequest();
     this._cpmm.sendAsyncMessage("AlarmsManager:Add",
                                 { requestId: this.getRequestId(request),
                                   date: aDate,
                                   ignoreTimezone: isIgnoreTimezone,
-                                  data: aData,
+                                  data: data,
                                   pageURL: this._pageURL,
                                   manifestURL: this._manifestURL });
     return request;
   },
 
   remove: function remove(aId) {
     debug("remove()");
 
@@ -104,23 +107,26 @@ AlarmsManager.prototype = {
       case "AlarmsManager:Add:Return:OK":
         Services.DOMRequest.fireSuccess(request, json.id);
         break;
 
       case "AlarmsManager:GetAll:Return:OK":
         // We don't need to expose everything to the web content.
         let alarms = [];
         json.alarms.forEach(function trimAlarmInfo(aAlarm) {
+          let sandbox = new Cu.Sandbox(this._principal);
+          sandbox.data = aAlarm.data;
+          let data = Cu.evalInSandbox("JSON.parse(data)", sandbox);
           let alarm = { "id": aAlarm.id,
                         "date": aAlarm.date,
                         "respectTimezone": aAlarm.ignoreTimezone ?
                                              "ignoreTimezone" : "honorTimezone",
-                        "data": aAlarm.data };
+                        "data": data };
           alarms.push(alarm);
-        });
+        }.bind(this));
 
         Services.DOMRequest.fireSuccess(request,
                                         Cu.cloneInto(alarms, this._window));
         break;
 
       case "AlarmsManager:Add:Return:KO":
         Services.DOMRequest.fireError(request, json.errorMsg);
         break;
@@ -148,20 +154,21 @@ AlarmsManager.prototype = {
     this.initDOMRequestHelper(aWindow, ["AlarmsManager:Add:Return:OK",
                                         "AlarmsManager:Add:Return:KO",
                                         "AlarmsManager:GetAll:Return:OK",
                                         "AlarmsManager:GetAll:Return:KO"]);
 
     // Get the manifest URL if this is an installed app
     let appsService = Cc["@mozilla.org/AppsService;1"]
                         .getService(Ci.nsIAppsService);
-    let principal = aWindow.document.nodePrincipal;
-    this._pageURL = principal.URI.spec;
-    this._manifestURL = appsService.getManifestURLByLocalId(principal.appId);
     this._window = aWindow;
+    this._principal = this._window.document.nodePrincipal;
+    this._pageURL = this._principal.URI.spec;
+    this._manifestURL =
+      appsService.getManifestURLByLocalId(this._principal.appId);
   },
 
   // Called from DOMRequestIpcHelper.
   uninit: function uninit() {
     debug("uninit()");
   },
 }
 
new file mode 100644
--- /dev/null
+++ b/dom/alarm/test/file_empty.html
@@ -0,0 +1,2 @@
+<!DOCTYPE html>
+<html><head></head><body><span id="text">Nothing to see here</span><iframe name="subframe"></iframe></body></html>
--- a/dom/alarm/test/mochitest.ini
+++ b/dom/alarm/test/mochitest.ini
@@ -1,13 +1,18 @@
 [DEFAULT]
 skip-if = e10s
 
+support-files =
+  file_empty.html
+
 [test_alarm_add_data.html]
 skip-if = (buildapp == 'b2g' && toolkit != 'gonk') #Bug 931116, b2g desktop specific, initial triage
 [test_alarm_add_date.html]
 skip-if = (buildapp == 'b2g' && toolkit != 'gonk') #Bug 931116, b2g desktop specific, initial triage
 [test_alarm_add_respectTimezone.html]
 skip-if = (buildapp == 'b2g' && toolkit != 'gonk') #Bug 931116, b2g desktop specific, initial triage
 [test_alarm_non_permitted_app.html]
 [test_alarm_permitted_app.html]
 [test_alarm_remove.html]
 skip-if = (buildapp == 'b2g' && toolkit != 'gonk') #Bug 931116, b2g desktop specific, initial triage
+[test_bug1015540.html]
+skip-if = (buildapp == 'b2g' && toolkit != 'gonk') #Bug 931116, b2g desktop specific, initial triage
new file mode 100644
--- /dev/null
+++ b/dom/alarm/test/test_bug1015540.html
@@ -0,0 +1,73 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <meta charset="utf-8">
+  <title>Test data Parameter of Alarm API for Bug 1015540</title>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
+  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+</head>
+<body>
+<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1015540">Bug 1015540</a>
+<p id="display"></p>
+<div id="content" style="display: none"></div>
+<pre id="test">
+  <script type="application/javascript">
+
+  "use strict";
+
+  // Verify passing a cross-origin object for the data parameter
+  function testCrossOriginObject() {
+    var tomorrow = new Date();
+    tomorrow.setDate(tomorrow.getDate() + 1);
+
+    var data = document.getElementById('ifr').contentWindow;
+
+    try {
+      navigator.mozAlarms.add(tomorrow, "honorTimezone", data);
+      ok(false, "Adding alarms for cross-origin objects should be prohibited.");
+    } catch (e) {
+      ok(true, "Adding alarms for cross-origin objects is prohibited.");
+    }
+
+    SimpleTest.finish();
+  }
+
+  function startTests() {
+    SpecialPowers.pushPrefEnv({
+      "set": [["dom.mozAlarms.enabled", true]]
+    }, function() {
+      SpecialPowers.addPermission("alarms", true, document);
+      var isAllowedToTest = true;
+
+      if (navigator.appVersion.indexOf("Android") !== -1) {
+        ok(true, "mozAlarms is not allowed on Android for now. " +
+                 "TODO Bug 863557.");
+        isAllowedToTest = false;
+      } else if (SpecialPowers.wrap(document).nodePrincipal.appStatus ==
+                 SpecialPowers.Ci.nsIPrincipal.APP_STATUS_NOT_INSTALLED) {
+        ok(true, "mozAlarms is not allowed for non-installed apps. " +
+                 "TODO Bug 876981.");
+        isAllowedToTest = false;
+      }
+
+      if (isAllowedToTest) {
+        ok(true, "Start to test...");
+        testCrossOriginObject();
+      } else {
+        // A sanity check to make sure we must run tests on Firefox OS (B2G).
+        if (navigator.userAgent.indexOf("Mobile") != -1 &&
+            navigator.appVersion.indexOf("Android") == -1) {
+          ok(false, "Should run the test on Firefox OS (B2G)!");
+        }
+
+        SimpleTest.finish();
+      }
+    });
+  }
+
+  SimpleTest.waitForExplicitFinish();
+  </script>
+</pre>
+<iframe id="ifr" onload="startTests()" src="http://example.org/tests/dom/alarm/test/file_empty.html"></iframe>
+</body>
+</html>
--- a/dom/apps/src/AppsServiceChild.jsm
+++ b/dom/apps/src/AppsServiceChild.jsm
@@ -3,345 +3,76 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 "use strict";
 
 const Cu = Components.utils;
 const Cc = Components.classes;
 const Ci = Components.interfaces;
 
-// This module exposes a subset of the functionalities of the parent DOM
-// Registry to content processes, to be used from the AppsService component.
+// This module exposes a subset of the functionalities of the parent DOM
+// Registry to content processes, to be used from the AppsService component.
 
-this.EXPORTED_SYMBOLS = ["DOMApplicationRegistry", "WrappedManifestCache"];
+this.EXPORTED_SYMBOLS = ["DOMApplicationRegistry"];
 
 Cu.import("resource://gre/modules/AppsUtils.jsm");
 Cu.import("resource://gre/modules/Services.jsm");
 
 function debug(s) {
   //dump("-*- AppsServiceChild.jsm: " + s + "\n");
 }
 
-const APPS_IPC_MSG_NAMES = [
-  "Webapps:AddApp",
-  "Webapps:RemoveApp",
-  "Webapps:UpdateApp",
-  "Webapps:CheckForUpdate:Return:KO",
-  "Webapps:FireEvent",
-  "Webapps:UpdateState"
-];
-
-// A simple cache for the wrapped manifests.
-this.WrappedManifestCache = {
-  _cache: { },
-
-  // Gets an entry from the cache, and populates the cache if needed.
-  get: function mcache_get(aManifestURL, aManifest, aWindow, aInnerWindowID) {
-    if (!aManifest) {
-      return;
-    }
-
-    if (!(aManifestURL in this._cache)) {
-      this._cache[aManifestURL] = { };
-    }
-
-    let winObjs = this._cache[aManifestURL];
-    if (!(aInnerWindowID in winObjs)) {
-      winObjs[aInnerWindowID] = Cu.cloneInto(aManifest, aWindow);
-    }
-
-    return winObjs[aInnerWindowID];
-  },
-
-  // Invalidates an entry in the cache.
-  evict: function mcache_evict(aManifestURL, aInnerWindowID) {
-    debug("Evicting manifest " + aManifestURL + " window ID " +
-          aInnerWindowID);
-    if (aManifestURL in this._cache) {
-      let winObjs = this._cache[aManifestURL];
-      if (aInnerWindowID in winObjs) {
-        delete winObjs[aInnerWindowID];
-      }
-
-      if (Object.keys(winObjs).length == 0) {
-        delete this._cache[aManifestURL];
-      }
-    }
-  },
-
-  observe: function(aSubject, aTopic, aData) {
-    // Clear the cache on memory pressure.
-    this._cache = { };
-    Cu.forceGC();
-  },
-
-  init: function() {
-    Services.obs.addObserver(this, "memory-pressure", false);
-  }
-};
-
-this.WrappedManifestCache.init();
-
-
-// DOMApplicationRegistry keeps a cache containing a list of apps in the device.
-// This information is updated with the data received from the main process and
-// it is queried by the DOM objects to set their state.
-// This module handle all the messages broadcasted from the parent process,
-// including DOM events, which are dispatched to the corresponding DOM objects.
-
 this.DOMApplicationRegistry = {
-  // DOMApps will hold a list of arrays of weak references to
-  // mozIDOMApplication objects indexed by manifest URL.
-  DOMApps: {},
-
-  ready: false,
-  webapps: null,
-
   init: function init() {
+    debug("init");
     this.cpmm = Cc["@mozilla.org/childprocessmessagemanager;1"]
                   .getService(Ci.nsISyncMessageSender);
 
-    APPS_IPC_MSG_NAMES.forEach((function(aMsgName) {
+    ["Webapps:AddApp", "Webapps:RemoveApp"].forEach((function(aMsgName) {
       this.cpmm.addMessageListener(aMsgName, this);
     }).bind(this));
 
-    this.cpmm.sendAsyncMessage("Webapps:RegisterForMessages", {
-      messages: APPS_IPC_MSG_NAMES
-    });
+    // We need to prime the cache with the list of apps.
+    // XXX should we do this async and block callers if it's not yet there?
+    this.webapps = this.cpmm.sendSyncMessage("Webapps:GetList", { })[0];
 
-    // We need to prime the cache with the list of apps.
-    let list = this.cpmm.sendSyncMessage("Webapps:GetList", { })[0];
-    this.webapps = list.webapps;
     // We need a fast mapping from localId -> app, so we add an index.
-    // We also add the manifest to the app object.
     this.localIdIndex = { };
     for (let id in this.webapps) {
       let app = this.webapps[id];
       this.localIdIndex[app.localId] = app;
-      app.manifest = list.manifests[id];
     }
 
     Services.obs.addObserver(this, "xpcom-shutdown", false);
   },
 
   observe: function(aSubject, aTopic, aData) {
-    // cpmm.addMessageListener causes the DOMApplicationRegistry object to
-    // live forever if we don't clean up properly.
+    // cpmm.addMessageListener causes the DOMApplicationRegistry object to live
+    // forever if we don't clean up properly.
     this.webapps = null;
-    this.DOMApps = null;
-
-    APPS_IPC_MSG_NAMES.forEach((aMsgName) => {
+    ["Webapps:AddApp", "Webapps:RemoveApp"].forEach((function(aMsgName) {
       this.cpmm.removeMessageListener(aMsgName, this);
-    });
-
-    this.cpmm.sendAsyncMessage("Webapps:UnregisterForMessages",
-                               APPS_IPC_MSG_NAMES)
+    }).bind(this));
   },
 
   receiveMessage: function receiveMessage(aMessage) {
     debug("Received " + aMessage.name + " message.");
-    let msg = aMessage.data;
+    let msg = aMessage.json;
     switch (aMessage.name) {
       case "Webapps:AddApp":
         this.webapps[msg.id] = msg.app;
         this.localIdIndex[msg.app.localId] = msg.app;
-        if (msg.manifest) {
-          this.webapps[msg.id].manifest = msg.manifest;
-        }
         break;
       case "Webapps:RemoveApp":
-        delete this.DOMApps[this.webapps[msg.id].manifestURL];
         delete this.localIdIndex[this.webapps[msg.id].localId];
         delete this.webapps[msg.id];
         break;
-      case "Webapps:UpdateApp":
-        let app = this.webapps[msg.oldId];
-        if (!app) {
-          return;
-        }
-
-        if (msg.app) {
-          for (let prop in msg.app) {
-            app[prop] = msg.app[prop];
-          }
-        }
-
-        this.webapps[msg.newId] = app;
-        this.localIdIndex[app.localId] = app;
-        delete this.webapps[msg.oldId];
-
-        let apps = this.DOMApps[msg.app.manifestURL];
-        if (!apps) {
-          return;
-        }
-        for (let i = 0; i < apps.length; i++) {
-          let domApp = apps[i].get();
-          if (!domApp) {
-            apps.splice(i);
-            continue;
-          }
-          domApp._proxy = new Proxy(domApp, {
-            get: function(target, prop) {
-              if (!DOMApplicationRegistry.webapps[msg.newId]) {
-                return;
-              }
-              return DOMApplicationRegistry.webapps[msg.newId][prop];
-            },
-            set: function(target, prop, val) {
-              if (!DOMApplicationRegistry.webapps[msg.newId]) {
-                return;
-              }
-              DOMApplicationRegistry.webapps[msg.newId][prop] = val;
-              return;
-            },
-          });
-        }
-        break;
-      case "Webapps:FireEvent":
-        this._fireEvent(aMessage);
-        break;
-      case "Webapps:UpdateState":
-        this._updateState(msg);
-        break;
-      case "Webapps:CheckForUpdate:Return:KO":
-        let DOMApps = this.DOMApps[msg.manifestURL];
-        if (!DOMApps || !msg.requestID) {
-          return;
-        }
-        DOMApps.forEach((DOMApp) => {
-          let domApp = DOMApp.get();
-          if (domApp && msg.requestID) {
-            domApp._fireRequestResult(aMessage, true /* aIsError */);
-          }
-        });
-        break;
     }
   },
 
-  /**
-   * mozIDOMApplication management
-   */
-
-  // Every time a DOM app is created, we save a weak reference to it that will
-  // be used to dispatch events and fire request results.
-  addDOMApp: function(aApp, aManifestURL, aId) {
-    let weakRef = Cu.getWeakReference(aApp);
-
-    if (!this.DOMApps[aManifestURL]) {
-      this.DOMApps[aManifestURL] = [];
-    }
-
-    let apps = this.DOMApps[aManifestURL];
-
-    // Get rid of dead weak references.
-    for (let i = 0; i < apps.length; i++) {
-      if (!apps[i].get()) {
-        apps.splice(i);
-      }
-    }
-
-    apps.push(weakRef);
-
-    // Each DOM app contains a proxy object used to build their state. We
-    // return the handler for this proxy object with traps to get and set
-    // app properties kept in the DOMApplicationRegistry app cache.
-    return {
-      get: function(target, prop) {
-        if (!DOMApplicationRegistry.webapps[aId]) {
-          return;
-        }
-        return DOMApplicationRegistry.webapps[aId][prop];
-      },
-      set: function(target, prop, val) {
-        if (!DOMApplicationRegistry.webapps[aId]) {
-          return;
-        }
-        DOMApplicationRegistry.webapps[aId][prop] = val;
-        return;
-      },
-    };
-  },
-
-  _fireEvent: function(aMessage) {
-    let msg = aMessage.data;
-    debug("_fireEvent " + JSON.stringify(msg));
-    if (!this.DOMApps || !msg.manifestURL || !msg.eventType) {
-      return;
-    }
-
-    let DOMApps = this.DOMApps[msg.manifestURL];
-    if (!DOMApps) {
-      return;
-    }
-
-    // The parent might ask childs to trigger more than one event in one
-    // shot, so in order to avoid needless IPC we allow an array for the
-    // 'eventType' IPC message field.
-    if (!Array.isArray(msg.eventType)) {
-      msg.eventType = [msg.eventType];
-    }
-
-    DOMApps.forEach((DOMApp) => {
-      let domApp = DOMApp.get();
-      if (!domApp) {
-        return;
-      }
-      msg.eventType.forEach((aEventType) => {
-        if ('on' + aEventType in domApp) {
-          domApp._fireEvent(aEventType);
-        }
-      });
-
-      if (msg.requestID) {
-        aMessage.data.result = msg.manifestURL;
-        domApp._fireRequestResult(aMessage);
-      }
-    });
-  },
-
-  _updateState: function(aMessage) {
-    if (!this.DOMApps || !aMessage.id) {
-      return;
-    }
-
-    let app = this.webapps[aMessage.id];
-    if (!app) {
-      return;
-    }
-
-    if (aMessage.app) {
-      for (let prop in aMessage.app) {
-        app[prop] = aMessage.app[prop];
-      }
-    }
-
-    if ("error" in aMessage) {
-      app.downloadError = aMessage.error;
-    }
-
-    if (aMessage.manifest) {
-      app.manifest = aMessage.manifest;
-      // Evict the wrapped manifest cache for all the affected DOM objects.
-      let DOMApps = this.DOMApps[app.manifestURL];
-      if (!DOMApps) {
-        return;
-      }
-      DOMApps.forEach((DOMApp) => {
-        let domApp = DOMApp.get();
-        if (!domApp) {
-          return;
-        }
-        WrappedManifestCache.evict(app.manifestURL, domApp.innerWindowID);
-      });
-    }
-  },
-
-  /**
-   * nsIAppsService API
-   */
   getAppByManifestURL: function getAppByManifestURL(aManifestURL) {
     debug("getAppByManifestURL " + aManifestURL);
     return AppsUtils.getAppByManifestURL(this.webapps, aManifestURL);
   },
 
   getAppLocalIdByManifestURL: function getAppLocalIdByManifestURL(aManifestURL) {
     debug("getAppLocalIdByManifestURL " + aManifestURL);
     return AppsUtils.getAppLocalIdByManifestURL(this.webapps, aManifestURL);
@@ -353,17 +84,17 @@ this.DOMApplicationRegistry = {
   },
 
   getAppLocalIdByStoreId: function(aStoreId) {
     debug("getAppLocalIdByStoreId:" + aStoreId);
     return AppsUtils.getAppLocalIdByStoreId(this.webapps, aStoreId);
   },
 
   getAppByLocalId: function getAppByLocalId(aLocalId) {
-    debug("getAppByLocalId " + aLocalId + " - ready: " + this.ready);
+    debug("getAppByLocalId " + aLocalId);
     let app = this.localIdIndex[aLocalId];
     if (!app) {
       debug("Ouch, No app!");
       return null;
     }
 
     return new mozIApplication(app);
   },
--- a/dom/apps/src/Webapps.js
+++ b/dom/apps/src/Webapps.js
@@ -7,17 +7,16 @@ const Ci = Components.interfaces;
 const Cu = Components.utils;
 const Cr = Components.results;
 
 Cu.import("resource://gre/modules/XPCOMUtils.jsm");
 Cu.import("resource://gre/modules/Services.jsm");
 Cu.import("resource://gre/modules/DOMRequestHelper.jsm");
 Cu.import("resource://gre/modules/AppsUtils.jsm");
 Cu.import("resource://gre/modules/BrowserElementPromptService.jsm");
-Cu.import("resource://gre/modules/AppsServiceChild.jsm");
 
 XPCOMUtils.defineLazyServiceGetter(this, "cpmm",
                                    "@mozilla.org/childprocessmessagemanager;1",
                                    "nsIMessageSender");
 
 function convertAppsArray(aApps, aWindow) {
   let apps = new aWindow.Array();
   for (let i = 0; i < aApps.length; i++) {
@@ -274,121 +273,134 @@ WebappsRegistry.prototype = {
                                     flags: Ci.nsIClassInfo.DOM_OBJECT,
                                     classDescription: "Webapps Registry"})
 }
 
 /**
   * mozIDOMApplication object
   */
 
+// A simple cache for the wrapped manifests.
+let manifestCache = {
+  _cache: { },
+
+  // Gets an entry from the cache, and populates the cache if needed.
+  get: function mcache_get(aManifestURL, aManifest, aWindow, aInnerWindowID) {
+    if (!(aManifestURL in this._cache)) {
+      this._cache[aManifestURL] = { };
+    }
+
+    let winObjs = this._cache[aManifestURL];
+    if (!(aInnerWindowID in winObjs)) {
+      winObjs[aInnerWindowID] = Cu.cloneInto(aManifest, aWindow);
+    }
+
+    return winObjs[aInnerWindowID];
+  },
+
+  // Invalidates an entry in the cache.
+  evict: function mcache_evict(aManifestURL, aInnerWindowID) {
+    if (aManifestURL in this._cache) {
+      let winObjs = this._cache[aManifestURL];
+      if (aInnerWindowID in winObjs) {
+        delete winObjs[aInnerWindowID];
+      }
+
+      if (Object.keys(winObjs).length == 0) {
+        delete this._cache[aManifestURL];
+      }
+    }
+  },
+
+  observe: function(aSubject, aTopic, aData) {
+    // Clear the cache on memory pressure.
+    this._cache = { };
+  },
+
+  init: function() {
+    Services.obs.addObserver(this, "memory-pressure", false);
+  }
+};
+
 function createApplicationObject(aWindow, aApp) {
-  let app = Cc["@mozilla.org/webapps/application;1"]
-              .createInstance(Ci.mozIDOMApplication);
+  let app = Cc["@mozilla.org/webapps/application;1"].createInstance(Ci.mozIDOMApplication);
   app.wrappedJSObject.init(aWindow, aApp);
   return app;
 }
 
 function WebappsApplication() {
   this.wrappedJSObject = this;
 }
 
 WebappsApplication.prototype = {
   __proto__: DOMRequestIpcHelper.prototype,
 
   init: function(aWindow, aApp) {
-    let proxyHandler = DOMApplicationRegistry.addDOMApp(this,
-                                                        aApp.manifestURL,
-                                                        aApp.id);
-    this._proxy = new Proxy(this, proxyHandler);
-
     this._window = aWindow;
+    let principal = this._window.document.nodePrincipal;
+    this._appStatus = principal.appStatus;
+    this.origin = aApp.origin;
+    this._manifest = aApp.manifest;
+    this._updateManifest = aApp.updateManifest;
+    this.manifestURL = aApp.manifestURL;
+    this.receipts = aApp.receipts;
+    this.installOrigin = aApp.installOrigin;
+    this.installTime = aApp.installTime;
+    this.installState = aApp.installState || "installed";
+    this.removable = aApp.removable;
+    this.lastUpdateCheck = aApp.lastUpdateCheck ? aApp.lastUpdateCheck
+                                                : Date.now();
+    this.updateTime = aApp.updateTime ? aApp.updateTime
+                                      : aApp.installTime;
+    this.progress = NaN;
+    this.downloadAvailable = aApp.downloadAvailable;
+    this.downloading = aApp.downloading;
+    this.readyToApplyDownload = aApp.readyToApplyDownload;
+    this.downloadSize = aApp.downloadSize || 0;
 
     this._onprogress = null;
     this._ondownloadsuccess = null;
     this._ondownloaderror = null;
     this._ondownloadavailable = null;
     this._ondownloadapplied = null;
 
-    this.initDOMRequestHelper(aWindow);
-  },
-
-  get _appStatus() {
-    return this._proxy.appStatus;
-  },
-
-  get downloadAvailable() {
-    return this._proxy.downloadAvailable;
-  },
-
-  get downloading() {
-    return this._proxy.downloading;
-  },
+    this._downloadError = null;
 
-  get downloadSize() {
-    return this._proxy.downloadSize;
-  },
-
-  get installOrigin() {
-    return this._proxy.installOrigin;
-  },
-
-  get installState() {
-    return this._proxy.installState;
-  },
-
-  get installTime() {
-    return this._proxy.installTime;
-  },
+    this.initDOMRequestHelper(aWindow, [
+      { name: "Webapps:CheckForUpdate:Return:KO", weakRef: true },
+      { name: "Webapps:Connect:Return:OK", weakRef: true },
+      { name: "Webapps:Connect:Return:KO", weakRef: true },
+      { name: "Webapps:FireEvent", weakRef: true },
+      { name: "Webapps:GetConnections:Return:OK", weakRef: true },
+      { name: "Webapps:UpdateState", weakRef: true }
+    ]);
 
-  get lastUpdateCheck() {
-    return this._proxy.lastUpdateCheck;
-  },
-
-  get manifestURL() {
-    return this._proxy.manifestURL;
-  },
-
-  get origin() {
-    return this._proxy.origin;
-  },
-
-  get progress() {
-    return this._proxy.progress;
-  },
-
-  get readyToApplyDownload() {
-    return this._proxy.readyToApplyDownload;
-  },
-
-  get receipts() {
-    return this._proxy.receipts;
-  },
-
-  set receipts(aReceipts) {
-    this._proxy.receipts = aReceipts;
-  },
-
-  get removable() {
-    return this._proxy.removable;
-  },
-
-  get updateTime() {
-    return this._proxy.updateTime;
+    cpmm.sendAsyncMessage("Webapps:RegisterForMessages", {
+      messages: ["Webapps:FireEvent",
+                 "Webapps:UpdateState"],
+      app: {
+        id: this.id,
+        manifestURL: this.manifestURL,
+        installState: this.installState,
+        downloading: this.downloading
+      }
+    });
   },
 
   get manifest() {
-    return WrappedManifestCache.get(this.manifestURL,
-                                    this._proxy.manifest,
-                                    this._window,
-                                    this.innerWindowID);
+    return manifestCache.get(this.manifestURL,
+                             this._manifest,
+                             this._window,
+                             this.innerWindowID);
   },
 
   get updateManifest() {
-    return this._proxy.updateManifest ?
-      Cu.cloneInto(this._proxy.updateManifest, this._window) : null;
+    return this.updateManifest =
+      this._updateManifest ? Cu.cloneInto(this._updateManifest, this._window)
+                           : null;
   },
 
   set onprogress(aCallback) {
     this._onprogress = aCallback;
   },
 
   get onprogress() {
     return this._onprogress;
@@ -423,20 +435,20 @@ WebappsApplication.prototype = {
   },
 
   get ondownloadapplied() {
     return this._ondownloadapplied;
   },
 
   get downloadError() {
     // Only return DOMError when we have an error.
-    if (!this._proxy.downloadError) {
+    if (!this._downloadError) {
       return null;
     }
-    return new this._window.DOMError(this._proxy.downloadError);
+    return new this._window.DOMError(this._downloadError);
   },
 
   download: function() {
     cpmm.sendAsyncMessage("Webapps:Download",
                           { manifestURL: this.manifestURL });
   },
 
   cancelDownload: function() {
@@ -468,55 +480,51 @@ WebappsApplication.prototype = {
   },
 
   clearBrowserData: function() {
     let request = this.createRequest();
     let browserChild =
       BrowserElementPromptService.getBrowserElementChildForWindow(this._window);
     if (browserChild) {
       this.addMessageListeners("Webapps:ClearBrowserData:Return");
-      browserChild.messageManager.sendAsyncMessage("Webapps:ClearBrowserData", {
-        manifestURL: this.manifestURL,
-        oid: this._id,
-        requestID: this.getRequestId(request)
-      });
+      browserChild.messageManager.sendAsyncMessage(
+        "Webapps:ClearBrowserData",
+        { manifestURL: this.manifestURL,
+          oid: this._id,
+          requestID: this.getRequestId(request) }
+      );
     } else {
       Services.DOMRequest.fireErrorAsync(request, "NO_CLEARABLE_BROWSER");
     }
     return request;
   },
 
   connect: function(aKeyword, aRules) {
-    this.addMessageListeners(["Webapps:Connect:Return:OK",
-                              "Webapps:Connect:Return:KO"]);
     return this.createPromise(function (aResolve, aReject) {
-      cpmm.sendAsyncMessage("Webapps:Connect", {
-        keyword: aKeyword,
-        rules: aRules,
-        manifestURL: this.manifestURL,
-        outerWindowID: this._id,
-        requestID: this.getPromiseResolverId({
-          resolve: aResolve,
-          reject: aReject
-        })
-      });
+      cpmm.sendAsyncMessage("Webapps:Connect",
+                            { keyword: aKeyword,
+                              rules: aRules,
+                              manifestURL: this.manifestURL,
+                              outerWindowID: this._id,
+                              requestID: this.getPromiseResolverId({
+                                resolve: aResolve,
+                                reject: aReject
+                              })});
     }.bind(this));
   },
 
   getConnections: function() {
-    this.addMessageListeners("Webapps:GetConnections:Return:OK");
     return this.createPromise(function (aResolve, aReject) {
-      cpmm.sendAsyncMessage("Webapps:GetConnections", {
-        manifestURL: this.manifestURL,
-        outerWindowID: this._id,
-        requestID: this.getPromiseResolverId({
-          resolve: aResolve,
-          reject: aReject
-        })
-      });
+      cpmm.sendAsyncMessage("Webapps:GetConnections",
+                            { manifestURL: this.manifestURL,
+                              outerWindowID: this._id,
+                              requestID: this.getPromiseResolverId({
+                                resolve: aResolve,
+                                reject: aReject
+                              })});
     }.bind(this));
   },
 
   addReceipt: function(receipt) {
     let request = this.createRequest();
 
     this.addMessageListeners(["Webapps:AddReceipt:Return:OK",
                               "Webapps:AddReceipt:Return:KO"]);
@@ -555,92 +563,141 @@ WebappsApplication.prototype = {
                                                       oid: this._id,
                                                       requestID: this.getRequestId(request) });
 
     return request;
   },
 
   uninit: function() {
     this._onprogress = null;
-    WrappedManifestCache.evict(this.manifestURL, this.innerWindowID);
+    cpmm.sendAsyncMessage("Webapps:UnregisterForMessages", [
+      "Webapps:FireEvent",
+      "Webapps:UpdateState"
+    ]);
+
+    manifestCache.evict(this.manifestURL, this.innerWindowID);
   },
 
   _fireEvent: function(aName) {
     let handler = this["_on" + aName];
     if (handler) {
       let event = new this._window.MozApplicationEvent(aName, {
         application: this
       });
       try {
         handler.handleEvent(event);
       } catch (ex) {
         dump("Event handler expection " + ex + "\n");
       }
     }
   },
 
-  _fireRequestResult: function(aMessage, aIsError) {
-    let req;
-    let msg = aMessage.data;
-    req = this.takeRequest(msg.requestID);
-    if (!req) {
-      return;
+  _updateState: function(aMsg) {
+    if (aMsg.app) {
+      for (let prop in aMsg.app) {
+        this[prop] = aMsg.app[prop];
+      }
     }
 
-    aIsError ? Services.DOMRequest.fireError(req, msg.error)
-             : Services.DOMRequest.fireSuccess(req, msg.result);
+    // Intentional use of 'in' so we unset the error if this is explicitly null.
+    if ('error' in aMsg) {
+      this._downloadError = aMsg.error;
+    }
+
+    if (aMsg.manifest) {
+      this._manifest = aMsg.manifest;
+      manifestCache.evict(this.manifestURL, this.innerWindowID);
+    }
   },
 
   receiveMessage: function(aMessage) {
     let msg = aMessage.json;
     let req;
     if (aMessage.name == "Webapps:Connect:Return:OK" ||
         aMessage.name == "Webapps:Connect:Return:KO" ||
         aMessage.name == "Webapps:GetConnections:Return:OK") {
       req = this.takePromiseResolver(msg.requestID);
     } else {
       req = this.takeRequest(msg.requestID);
     }
 
-    if (msg.oid !== this._id || !req) {
+    // ondownload* callbacks should be triggered on all app instances
+    if ((msg.oid != this._id || !req) &&
+        aMessage.name !== "Webapps:FireEvent" &&
+        aMessage.name !== "Webapps:UpdateState") {
       return;
     }
 
     switch (aMessage.name) {
       case "Webapps:Launch:Return:KO":
         this.removeMessageListeners(["Webapps:Launch:Return:OK",
                                      "Webapps:Launch:Return:KO"]);
         Services.DOMRequest.fireError(req, "APP_INSTALL_PENDING");
         break;
       case "Webapps:Launch:Return:OK":
         this.removeMessageListeners(["Webapps:Launch:Return:OK",
                                      "Webapps:Launch:Return:KO"]);
         Services.DOMRequest.fireSuccess(req, null);
         break;
+      case "Webapps:CheckForUpdate:Return:KO":
+        Services.DOMRequest.fireError(req, msg.error);
+        break;
+      case "Webapps:FireEvent":
+        if (msg.manifestURL != this.manifestURL) {
+           return;
+        }
+
+        // The parent might ask children to trigger more than one event in one
+        // shot, so in order to avoid needless IPC we allow an array for the
+        // 'eventType' IPC message field.
+        if (!Array.isArray(msg.eventType)) {
+          msg.eventType = [msg.eventType];
+        }
+
+        msg.eventType.forEach((aEventType) => {
+          // If we are in a successful state clear any past errors.
+          if (aEventType === 'downloadapplied' ||
+              aEventType === 'downloadsuccess') {
+            this._downloadError = null;
+          }
+
+          if ("_on" + aEventType in this) {
+            this._fireEvent(aEventType);
+          } else {
+            dump("Unsupported event type " + aEventType + "\n");
+          }
+        });
+
+        if (req) {
+          Services.DOMRequest.fireSuccess(req, this.manifestURL);
+        }
+        break;
+      case "Webapps:UpdateState":
+        if (msg.manifestURL != this.manifestURL) {
+          return;
+        }
+
+        this._updateState(msg);
+        break;
       case "Webapps:ClearBrowserData:Return":
         this.removeMessageListeners(aMessage.name);
         Services.DOMRequest.fireSuccess(req, null);
         break;
       case "Webapps:Connect:Return:OK":
-        this.removeMessageListeners(["Webapps:Connect:Return:OK",
-                                     "Webapps:Connect:Return:KO"]);
         let messagePorts = [];
         msg.messagePortIDs.forEach((aPortID) => {
           let port = new this._window.MozInterAppMessagePort(aPortID);
           messagePorts.push(port);
         });
         req.resolve(messagePorts);
         break;
       case "Webapps:Connect:Return:KO":
-        this.removeMessageListeners(["Webapps:Connect:Return:OK",
-                                     "Webapps:Connect:Return:KO"]);
         req.reject("No connections registered");
         break;
       case "Webapps:GetConnections:Return:OK":
-        this.removeMessageListeners(aMessage.name);
         let connections = [];
         msg.connections.forEach((aConnection) => {
           let connection =
             new this._window.MozInterAppConnection(aConnection.keyword,
                                                    aConnection.pubAppManifestURL,
                                                    aConnection.subAppManifestURL);
           connections.push(connection);
         });
@@ -812,18 +869,22 @@ WebappsApplicationMgmt.prototype = {
           let app = msg.app;
           let event = new this._window.MozApplicationEvent("applicationinstall",
                            { application : createApplicationObject(this._window, app) });
           this._oninstall.handleEvent(event);
         }
         break;
       case "Webapps:Uninstall:Broadcast:Return:OK":
         if (this._onuninstall) {
+          let detail = {
+            manifestURL: msg.manifestURL,
+            origin: msg.origin
+          };
           let event = new this._window.MozApplicationEvent("applicationuninstall",
-                           { application : createApplicationObject(this._window, msg) });
+                           { application : createApplicationObject(this._window, detail) });
           this._onuninstall.handleEvent(event);
         }
         break;
       case "Webapps:Uninstall:Return:OK":
         Services.DOMRequest.fireSuccess(req, msg.origin);
         break;
       case "Webapps:Uninstall:Return:KO":
         Services.DOMRequest.fireError(req, "NOT_INSTALLED");
@@ -842,10 +903,12 @@ WebappsApplicationMgmt.prototype = {
 
   classInfo: XPCOMUtils.generateCI({classID: Components.ID("{8c1bca96-266f-493a-8d57-ec7a95098c15}"),
                                     contractID: "@mozilla.org/webapps/application-mgmt;1",
                                     interfaces: [Ci.mozIDOMApplicationMgmt],
                                     flags: Ci.nsIClassInfo.DOM_OBJECT,
                                     classDescription: "Webapps Application Mgmt"})
 }
 
+manifestCache.init();
+
 this.NSGetFactory = XPCOMUtils.generateNSGetFactory([WebappsRegistry,
                                                      WebappsApplication]);
--- a/dom/apps/src/Webapps.jsm
+++ b/dom/apps/src/Webapps.jsm
@@ -1136,18 +1136,18 @@ this.DOMApplicationRegistry = {
         break;
       case "Webapps:UnregisterForMessages":
         this.removeMessageListener(msg, mm);
         break;
       case "child-process-shutdown":
         this.removeMessageListener(["Webapps:Internal:AllMessages"], mm);
         break;
       case "Webapps:GetList":
-        return this.doGetList();
-        break;
+        this.addMessageListener(["Webapps:AddApp", "Webapps:RemoveApp"], null, mm);
+        return this.webapps;
       case "Webapps:Download":
         this.startDownload(msg.manifestURL);
         break;
       case "Webapps:CancelDownload":
         this.cancelDownload(msg.manifestURL);
         break;
       case "Webapps:CheckForUpdate":
         this.checkForUpdate(msg, mm);
@@ -1240,48 +1240,16 @@ this.DOMApplicationRegistry = {
       } else {
         deferred.resolve();
       }
     });
 
     return deferred.promise;
   },
 
-  /**
-    * Returns the full list of apps and manifests.
-    */
-  doGetList: function() {
-    let tmp = [];
-
-    for (let id in this.webapps) {
-      tmp.push({ id: id });
-    }
-
-    let res = {};
-    let done = false;
-
-    this._readManifests(tmp).then(
-      function(manifests) {
-        manifests.forEach((item) => {
-          res[item.id] = item.manifest;
-        });
-        done = true;
-      }
-    );
-
-    let thread = Services.tm.currentThread;
-    while (!done) {
-      //debug("before processNextEvent");
-      thread.processNextEvent(/* mayWait */ true);
-      //after("before processNextEvent");
-    }
-    return { webapps: this.webapps, manifests: res };
-  },
-
-
   doLaunch: function (aData, aMm) {
     this.launch(
       aData.manifestURL,
       aData.startPoint,
       aData.timestamp,
       function onsuccess() {
         aMm.sendAsyncMessage("Webapps:Launch:Return:OK", aData);
       },
@@ -1357,17 +1325,17 @@ this.DOMApplicationRegistry = {
     this._saveApps().then(() => {
       this.broadcastMessage("Webapps:UpdateState", {
         app: {
           progress: 0,
           installState: download.previousState,
           downloading: false
         },
         error: error,
-        id: app.id
+        manifestURL: app.manifestURL,
       })
       this.broadcastMessage("Webapps:FireEvent", {
         eventType: "downloaderror",
         manifestURL: app.manifestURL
       });
     });
     AppDownloadManager.remove(aManifestURL);
   },
@@ -1388,17 +1356,17 @@ this.DOMApplicationRegistry = {
       throw new Error("APP_IS_DOWNLOADING");
     }
 
     // If the caller is trying to start a download but we have nothing to
     // download, send an error.
     if (!app.downloadAvailable) {
       this.broadcastMessage("Webapps:UpdateState", {
         error: "NO_DOWNLOAD_AVAILABLE",
-        id: app.id
+        manifestURL: app.manifestURL
       });
       this.broadcastMessage("Webapps:FireEvent", {
         eventType: "downloaderror",
         manifestURL: app.manifestURL
       });
       throw new Error("NO_DOWNLOAD_AVAILABLE");
     }
 
@@ -1436,17 +1404,17 @@ this.DOMApplicationRegistry = {
         debug("No appcache found, sending 'downloaded' for " + aManifestURL);
         app.downloadAvailable = false;
 
         yield this._saveApps();
 
         this.broadcastMessage("Webapps:UpdateState", {
           app: app,
           manifest: jsonManifest,
-          id: app.id
+          manifestURL: aManifestURL
         });
         this.broadcastMessage("Webapps:FireEvent", {
           eventType: "downloadsuccess",
           manifestURL: aManifestURL
         });
       }
 
       return;
@@ -1480,17 +1448,17 @@ this.DOMApplicationRegistry = {
     app.downloadAvailable = false;
     app.readyToApplyDownload = true;
     app.updateTime = Date.now();
 
     yield this._saveApps();
 
     this.broadcastMessage("Webapps:UpdateState", {
       app: app,
-      id: app.id
+      manifestURL: aManifestURL
     });
     this.broadcastMessage("Webapps:FireEvent", {
       eventType: "downloadsuccess",
       manifestURL: aManifestURL
     });
     if (app.installState == "pending") {
       // We restarted a failed download, apply it automatically.
       this.applyDownload(aManifestURL);
@@ -1582,17 +1550,17 @@ this.DOMApplicationRegistry = {
           manifestURL: app.manifestURL },
         true);
     }
     this.updateDataStore(this.webapps[id].localId, app.origin,
                          app.manifestURL, newManifest);
     this.broadcastMessage("Webapps:UpdateState", {
       app: app,
       manifest: newManifest,
-      id: app.id
+      manifestURL: app.manifestURL
     });
     this.broadcastMessage("Webapps:FireEvent", {
       eventType: "downloadapplied",
       manifestURL: app.manifestURL
     });
   }),
 
   startOfflineCacheDownload: function(aManifest, aApp, aProfileDir, aIsUpdate) {
@@ -1621,17 +1589,17 @@ this.DOMApplicationRegistry = {
       DOMApplicationRegistry.broadcastMessage("Webapps:UpdateState", {
         // Clear any previous errors.
         error: null,
         app: {
           downloading: true,
           installState: aApp.installState,
           progress: 0
         },
-        id: aApp.id
+        manifestURL: aApp.manifestURL
       });
       let cacheUpdate = updateSvc.scheduleAppUpdate(
         appcacheURI, docURI, aApp.localId, false, aProfileDir);
 
       // We save the download details for potential further usage like
       // cancelling it.
       let download = {
         cacheUpdate: cacheUpdate,
@@ -1671,17 +1639,16 @@ this.DOMApplicationRegistry = {
       this.notifyAppsRegistryReady();
     }
   },
 
   checkForUpdate: function(aData, aMm) {
     debug("checkForUpdate for " + aData.manifestURL);
 
     function sendError(aError) {
-      debug("checkForUpdate error " + aError);
       aData.error = aError;
       aMm.sendAsyncMessage("Webapps:CheckForUpdate:Return:KO", aData);
     }
 
     let id = this._appIdForManifestURL(aData.manifestURL);
     let app = this.webapps[id];
 
     // We cannot update an app that does not exists.
@@ -1701,67 +1668,71 @@ this.DOMApplicationRegistry = {
       sendError("APP_IS_DOWNLOADING");
       return;
     }
 
     // If the app is packaged and its manifestURL has an app:// scheme,
     // then we can't have an update.
     if (app.origin.startsWith("app://") &&
         app.manifestURL.startsWith("app://")) {
-      sendError("NOT_UPDATABLE");
+      aData.error = "NOT_UPDATABLE";
+      aMm.sendAsyncMessage("Webapps:CheckForUpdate:Return:KO", aData);
       return;
     }
 
     // For non-removable hosted apps that lives in the core apps dir we
     // only check the appcache because we can't modify the manifest even
     // if it has changed.
     let onlyCheckAppCache = false;
 
 #ifdef MOZ_WIDGET_GONK
     let appDir = FileUtils.getDir("coreAppsDir", ["webapps"], false);
     onlyCheckAppCache = (app.basePath == appDir.path);
 #endif
 
     if (onlyCheckAppCache) {
       // Bail out for packaged apps.
       if (app.origin.startsWith("app://")) {
-        sendError("NOT_UPDATABLE");
+        aData.error = "NOT_UPDATABLE";
+        aMm.sendAsyncMessage("Webapps:CheckForUpdate:Return:KO", aData);
         return;
       }
 
       // We need the manifest to check if we have an appcache.
       this._readManifests([{ id: id }]).then((aResult) => {
         let manifest = aResult[0].manifest;
         if (!manifest.appcache_path) {
-          sendError("NOT_UPDATABLE");
+          aData.error = "NOT_UPDATABLE";
+          aMm.sendAsyncMessage("Webapps:CheckForUpdate:Return:KO", aData);
           return;
         }
 
         debug("Checking only appcache for " + aData.manifestURL);
         // Check if the appcache is updatable, and send "downloadavailable" or
         // "downloadapplied".
         let updateObserver = {
           observe: function(aSubject, aTopic, aObsData) {
             debug("onlyCheckAppCache updateSvc.checkForUpdate return for " +
                   app.manifestURL + " - event is " + aTopic);
             if (aTopic == "offline-cache-update-available") {
               app.downloadAvailable = true;
               this._saveApps().then(() => {
                 this.broadcastMessage("Webapps:UpdateState", {
                   app: app,
-                  id: app.id
+                  manifestURL: app.manifestURL
                 });
                 this.broadcastMessage("Webapps:FireEvent", {
                   eventType: "downloadavailable",
                   manifestURL: app.manifestURL,
                   requestID: aData.requestID
                 });
               });
             } else {
-              sendError("NOT_UPDATABLE");
+              aData.error = "NOT_UPDATABLE";
+              aMm.sendAsyncMessage("Webapps:CheckForUpdate:Return:KO", aData);
             }
           }
         };
         let helper = new ManifestHelper(manifest, aData.manifestURL);
         debug("onlyCheckAppCache - launch updateSvc.checkForUpdate for " +
               helper.fullAppcachePath());
         updateSvc.checkForUpdate(Services.io.newURI(helper.fullAppcachePath(), null, null),
                                  app.localId, false, updateObserver);
@@ -1811,17 +1782,17 @@ this.DOMApplicationRegistry = {
             } else {
               this._saveApps().then(() => {
                 // Like if we got a 304, just send a 'downloadapplied'
                 // or downloadavailable event.
                 let eventType = app.downloadAvailable ? "downloadavailable"
                                                       : "downloadapplied";
                 aMm.sendAsyncMessage("Webapps:UpdateState", {
                   app: app,
-                  id: app.id
+                  manifestURL: app.manifestURL
                 });
                 aMm.sendAsyncMessage("Webapps:FireEvent", {
                   eventType: eventType,
                   manifestURL: app.manifestURL,
                   requestID: aData.requestID
                 });
               });
             }
@@ -1838,17 +1809,17 @@ this.DOMApplicationRegistry = {
           app.lastCheckedUpdate = Date.now();
           this._saveApps().then(() => {
             // If the app is a packaged app, we just send a 'downloadapplied'
             // or downloadavailable event.
             let eventType = app.downloadAvailable ? "downloadavailable"
                                                   : "downloadapplied";
             aMm.sendAsyncMessage("Webapps:UpdateState", {
               app: app,
-              id: app.id
+              manifestURL: app.manifestURL
             });
             aMm.sendAsyncMessage("Webapps:FireEvent", {
               eventType: eventType,
               manifestURL: app.manifestURL,
               requestID: aData.requestID
             });
           });
         } else {
@@ -1947,17 +1918,17 @@ this.DOMApplicationRegistry = {
     // event.
     aApp.downloadAvailable = true;
     aApp.downloadSize = manifest.size;
     aApp.updateManifest = aNewManifest;
     yield this._saveApps();
 
     this.broadcastMessage("Webapps:UpdateState", {
       app: aApp,
-      id: aApp.id
+      manifestURL: aApp.manifestURL
     });
     this.broadcastMessage("Webapps:FireEvent", {
       eventType: "downloadavailable",
       manifestURL: aApp.manifestURL,
       requestID: aData.requestID
     });
   }),
 
@@ -2013,17 +1984,17 @@ this.DOMApplicationRegistry = {
     // Update the registry.
     this.webapps[aId] = aApp;
     yield this._saveApps();
 
     if (!manifest.appcache_path) {
       this.broadcastMessage("Webapps:UpdateState", {
         app: aApp,
         manifest: aApp.manifest,
-        id: aApp.id
+        manifestURL: aApp.manifestURL
       });
       this.broadcastMessage("Webapps:FireEvent", {
         eventType: "downloadapplied",
         manifestURL: aApp.manifestURL,
         requestID: aData.requestID
       });
     } else {
       // Check if the appcache is updatable, and send "downloadavailable" or
@@ -2047,17 +2018,17 @@ this.DOMApplicationRegistry = {
                                                   : "downloadapplied";
 
       aApp.downloadAvailable = (eventType == "downloadavailable");
       yield this._saveApps();
 
       this.broadcastMessage("Webapps:UpdateState", {
         app: aApp,
         manifest: aApp.manifest,
-        id: aApp.id
+        manifestURL: aApp.manifestURL
       });
       this.broadcastMessage("Webapps:FireEvent", {
         eventType: eventType,
         manifestURL: aApp.manifestURL,
         requestID: aData.requestID
       });
     }
 
@@ -2474,18 +2445,17 @@ this.DOMApplicationRegistry = {
 
     // Store the manifest and the updateManifest.
     this._writeManifestFile(app.id, false, aManifest);
     if (aUpdateManifest) {
       this._writeManifestFile(app.id, true, aUpdateManifest);
     }
 
     this._saveApps().then(() => {
-      this.broadcastMessage("Webapps:AddApp",
-                            { id: app.id, app: app, manifest: aManifest });
+      this.broadcastMessage("Webapps:AddApp", { id: app.id, app: app });
     });
   }),
 
   confirmInstall: Task.async(function*(aData, aProfileDir, aInstallSuccessCallback) {
     debug("confirmInstall");
 
     let origin = Services.io.newURI(aData.app.origin, null, null);
     let id = this._appIdForManifestURL(aData.app.manifestURL);
@@ -2575,18 +2545,16 @@ this.DOMApplicationRegistry = {
       };
     }
 
     // We notify about the successful installation via mgmt.oninstall and the
     // corresponding DOMRequest.onsuccess event as soon as the app is properly
     // saved in the registry.
     yield this._saveApps();
 
-    aData.isPackage ? appObject.updateManifest = jsonManifest :
-                      appObject.manifest = jsonManifest;
     this.broadcastMessage("Webapps:AddApp", { id: id, app: appObject });
 
     // The presence of a requestID means that we have a page to update.
     if (aData.isPackage && aData.apkInstall && !aData.requestID) {
       // Skip directly to onInstallSuccessAck, since there isn't
       // a WebappsRegistry to receive Webapps:Install:Return:OK and respond
       // Webapps:Install:Return:Ack when an app is being auto-installed.
       this.onInstallSuccessAck(app.manifestURL);
@@ -2653,18 +2621,17 @@ this.DOMApplicationRegistry = {
     yield this._saveApps();
 
     this.updateAppHandlers(null, aManifest, aNewApp);
     // Clear the manifest cache in case it holds the update manifest.
     if (aId in this._manifestCache) {
       delete this._manifestCache[aId];
     }
 
-    this.broadcastMessage("Webapps:AddApp",
-                          { id: aId, app: aNewApp, manifest: aManifest });
+    this.broadcastMessage("Webapps:AddApp", { id: aId, app: aNewApp });
     Services.obs.notifyObservers(null, "webapps-installed",
       JSON.stringify({ manifestURL: aNewApp.manifestURL }));
 
     if (supportUseCurrentProfile()) {
       // Update the permissions for this app.
       PermissionsInstaller.installPermissions({
         manifest: aManifest,
         origin: aNewApp.origin,
@@ -2788,21 +2755,24 @@ this.DOMApplicationRegistry = {
 
       debug("About to download " + fullPackagePath);
 
       let requestChannel = this._getRequestChannel(fullPackagePath,
                                                    isLocalFileInstall,
                                                    oldApp,
                                                    aNewApp);
 
-      AppDownloadManager.add(aNewApp.manifestURL, {
-        channel: requestChannel,
-        appId: id,
-        previousState: aIsUpdate ? "installed" : "pending"
-      });
+      AppDownloadManager.add(
+        aNewApp.manifestURL,
+        {
+          channel: requestChannel,
+          appId: id,
+          previousState: aIsUpdate ? "installed" : "pending"
+        }
+      );
 
       // We set the 'downloading' flag to true right before starting the fetch.
       oldApp.downloading = true;
 
       // We determine the app's 'installState' according to its previous
       // state. Cancelled download should remain as 'pending'. Successfully
       // installed apps should morph to 'updating'.
       oldApp.installState = aIsUpdate ? "updating" : "pending";
@@ -2812,32 +2782,32 @@ this.DOMApplicationRegistry = {
 
       // Save the current state of the app to handle cases where we may be
       // retrying a past download.
       yield DOMApplicationRegistry._saveApps();
       DOMApplicationRegistry.broadcastMessage("Webapps:UpdateState", {
         // Clear any previous download errors.
         error: null,
         app: oldApp,
-        id: id
+        manifestURL: aNewApp.manifestURL
       });
 
       let zipFile = yield this._getPackage(requestChannel, id, oldApp, aNewApp);
       let hash = yield this._computeFileHash(zipFile.path);
 
       let responseStatus = requestChannel.responseStatus;
       let oldPackage = (responseStatus == 304 || hash == oldApp.packageHash);
 
       if (oldPackage) {
         debug("package's etag or hash unchanged; sending 'applied' event");
         // The package's Etag or hash has not changed.
         // We send an "applied" event right away so code awaiting that event
         // can proceed to access the app.  We also throw an error to alert
         // the caller that the package wasn't downloaded.
-        this._sendAppliedEvent(oldApp);
+        this._sendAppliedEvent(aNewApp, oldApp, id);
         throw new Error("PACKAGE_UNCHANGED");
       }
 
       let newManifest = yield this._openAndReadPackage(zipFile, oldApp, aNewApp,
               isLocalFileInstall, aIsUpdate, aManifest, requestChannel, hash);
 
       AppDownloadManager.remove(aNewApp.manifestURL);
 
@@ -2967,17 +2937,17 @@ this.DOMApplicationRegistry = {
     return requestChannel;
   },
 
   _sendDownloadProgressEvent: function(aNewApp, aProgress) {
     this.broadcastMessage("Webapps:UpdateState", {
       app: {
         progress: aProgress
       },
-      id: aNewApp.id
+      manifestURL: aNewApp.manifestURL
     });
     this.broadcastMessage("Webapps:FireEvent", {
       eventType: "progress",
       manifestURL: aNewApp.manifestURL
     });
   },
 
   _getPackage: function(aRequestChannel, aId, aOldApp, aNewApp) {
@@ -3084,53 +3054,56 @@ this.DOMApplicationRegistry = {
   /**
    * Send an "applied" event right away for the package being installed.
    *
    * XXX We use this to exit the app update process early when the downloaded
    * package is identical to the last one we installed.  Presumably we do
    * something similar after updating the app, and we could refactor both cases
    * to use the same code to send the "applied" event.
    *
-   * @param aApp {Object} app data
+   * @param aNewApp {Object} the new app data
+   * @param aOldApp {Object} the currently stored app data
+   * @param aId {String} the unique id of the app
    */
-  _sendAppliedEvent: function(aApp) {
-    aApp.downloading = false;
-    aApp.downloadAvailable = false;
-    aApp.downloadSize = 0;
-    aApp.installState = "installed";
-    aApp.readyToApplyDownload = false;
-    if (aApp.staged && aApp.staged.manifestHash) {
+  _sendAppliedEvent: function(aNewApp, aOldApp, aId) {
+    aOldApp.downloading = false;
+    aOldApp.downloadAvailable = false;
+    aOldApp.downloadSize = 0;
+    aOldApp.installState = "installed";
+    aOldApp.readyToApplyDownload = false;
+    if (aOldApp.staged && aOldApp.staged.manifestHash) {
       // If we're here then the manifest has changed but the package
       // hasn't. Let's clear this, so we don't keep offering
       // a bogus update to the user
-      aApp.manifestHash = aApp.staged.manifestHash;
-      aApp.etag = aApp.staged.etag || aApp.etag;
-      aApp.staged = {};
-     // Move the staged update manifest to a non staged one.
+      aOldApp.manifestHash = aOldApp.staged.manifestHash;
+      aOldApp.etag = aOldApp.staged.etag || aOldApp.etag;
+      aOldApp.staged = {};
+
+      // Move the staged update manifest to a non-staged one.
       try {
-        let staged = this._getAppDir(aApp.id);
+        let staged = this._getAppDir(aId);
         staged.append("staged-update.webapp");
         staged.moveTo(staged.parent, "update.webapp");
       } catch (ex) {
         // We don't really mind much if this fails.
       }
     }
 
     // Save the updated registry, and cleanup the tmp directory.
     this._saveApps().then(() => {
       this.broadcastMessage("Webapps:UpdateState", {
-        app: aApp,
-        id: aApp.id
+        app: aOldApp,
+        manifestURL: aNewApp.manifestURL
       });
       this.broadcastMessage("Webapps:FireEvent", {
-        manifestURL: aApp.manifestURL,
+        manifestURL: aNewApp.manifestURL,
         eventType: ["downloadsuccess", "downloadapplied"]
       });
     });
-    let file = FileUtils.getFile("TmpD", ["webapps", aApp.id], false);
+    let file = FileUtils.getFile("TmpD", ["webapps", aId], false);
     if (file && file.exists()) {
       file.remove(true);
     }
   },
 
   _openAndReadPackage: function(aZipFile, aOldApp, aNewApp, aIsLocalFileInstall,
                                 aIsUpdate, aManifest, aRequestChannel, aHash) {
     return Task.spawn((function*() {
@@ -3425,20 +3398,19 @@ this.DOMApplicationRegistry = {
         delete this.webapps[oldId];
         // Rename the directories where the files are installed.
         [DIRECTORY_NAME, "TmpD"].forEach(function(aDir) {
           let parent = FileUtils.getDir(aDir, ["webapps"], true, true);
           let dir = FileUtils.getDir(aDir, ["webapps", oldId], true, true);
           dir.moveTo(parent, newId);
         });
         // Signals that we need to swap the old id with the new app.
-        this.broadcastMessage("Webapps:UpdateApp", { oldId: oldId,
-                                                     newId: newId,
-                                                     app: aOldApp });
-
+        this.broadcastMessage("Webapps:RemoveApp", { id: oldId });
+        this.broadcastMessage("Webapps:AddApp", { id: newId,
+                                                  app: aOldApp });
       }
     }
   },
 
   _getIds: function(aIsSigned, aZipReader, aConverter, aNewApp, aOldApp,
                     aIsUpdate) {
     // Get ids.json if the file is signed
     if (aIsSigned) {
@@ -3531,17 +3503,17 @@ this.DOMApplicationRegistry = {
     if (aOldApp.staged) {
       delete aOldApp.staged;
     }
 
     this._saveApps().then(() => {
       this.broadcastMessage("Webapps:UpdateState", {
         app: aOldApp,
         error: aError,
-        id: aNewApp.id
+        manifestURL: aNewApp.manifestURL
       });
       this.broadcastMessage("Webapps:FireEvent", {
         eventType: "downloaderror",
         manifestURL:  aNewApp.manifestURL
       });
     });
     AppDownloadManager.remove(aNewApp.manifestURL);
 
@@ -4083,17 +4055,17 @@ let AppcacheObserver = function(aApp) {
 };
 
 AppcacheObserver.prototype = {
   // nsIOfflineCacheUpdateObserver implementation
   _sendProgressEvent: function() {
     let app = this.app;
     DOMApplicationRegistry.broadcastMessage("Webapps:UpdateState", {
       app: app,
-      id: app.id
+      manifestURL: app.manifestURL
     });
     DOMApplicationRegistry.broadcastMessage("Webapps:FireEvent", {
       eventType: "progress",
       manifestURL: app.manifestURL
     });
   },
 
   updateStateChanged: function appObs_Update(aUpdate, aState) {
@@ -4115,17 +4087,17 @@ AppcacheObserver.prototype = {
         return;
       }
 
       app.updateTime = Date.now();
       app.downloading = false;
       app.downloadAvailable = false;
       DOMApplicationRegistry.broadcastMessage("Webapps:UpdateState", {
         app: app,
-        id: app.id
+        manifestURL: app.manifestURL
       });
       DOMApplicationRegistry.broadcastMessage("Webapps:FireEvent", {
         eventType: ["downloadsuccess", "downloadapplied"],
         manifestURL: app.manifestURL
       });
     }
 
     let setError = function appObs_setError(aError) {
@@ -4138,17 +4110,17 @@ AppcacheObserver.prototype = {
       if (app.isCanceling) {
         delete app.isCanceling;
         return;
       }
 
       DOMApplicationRegistry.broadcastMessage("Webapps:UpdateState", {
         app: app,
         error: aError,
-        id: app.id
+        manifestURL: app.manifestURL
       });
       DOMApplicationRegistry.broadcastMessage("Webapps:FireEvent", {
         eventType: "downloaderror",
         manifestURL: app.manifestURL
       });
     }
 
     switch (aState) {
--- a/dom/apps/tests/test_packaged_app_common.js
+++ b/dom/apps/tests/test_packaged_app_common.js
@@ -93,17 +93,16 @@ var PackagedTestHelper = (function Packa
       ok(false, "Got unexpected " + evt.target.error.name);
       finish();
     };
 
     navigator.mozApps.mgmt.oninstall = function(evt) {
       var aApp = evt.application;
       aApp.ondownloaderror = function(evt) {
         var error = aApp.downloadError.name;
-        ok(true, "Got downloaderror " + error);
         if (error == aExpectedError) {
           ok(true, "Got expected " + aExpectedError);
           var expected = {
             name: aName,
             manifestURL: aMiniManifestURL,
             installOrigin: gInstallOrigin,
             progress: 0,
             installState: "pending",
--- a/dom/apps/tests/test_packaged_app_update.html
+++ b/dom/apps/tests/test_packaged_app_update.html
@@ -74,25 +74,25 @@ function checkLastAppState(aMiniManifest
 
 function updateApp(aExpectedReady, aPreviousVersion, aNextVersion) {
   var lApp = PackagedTestHelper.gApp;
 
   var ondownloadappliedhandler =
     checkLastAppState.bind(PackagedTestHelper, miniManifestURL, false, false,
                            aNextVersion, PackagedTestHelper.next);
 
-  var ondownloadsuccesshandler =
-    checkLastAppState.bind(undefined, miniManifestURL,
-                           aExpectedReady, false, aPreviousVersion,
-                           function() {
-      navigator.mozApps.mgmt.applyDownload(lApp);
-  });
+    var ondownloadsuccesshandler =
+      checkLastAppState.bind(undefined, miniManifestURL,
+                             aExpectedReady, false, aPreviousVersion,
+                             function() {
+        navigator.mozApps.mgmt.applyDownload(lApp);
+    });
 
-  checkForUpdate(true, ondownloadsuccesshandler, ondownloadappliedhandler,
-                 null, true);
+    checkForUpdate(true, ondownloadsuccesshandler, ondownloadappliedhandler, null,
+                   true);
 
 }
 
 var initialPermissionState = {
   "geolocation": "prompt",
   "audio-capture": "prompt",
   "video-capture": "prompt",
   "test-permission-read": "prompt",
@@ -249,17 +249,17 @@ var steps = [
   },
   function() {
     info("== TEST == Update packaged app - Updating a pending app");
     miniManifestURL = PackagedTestHelper.gSJS +
                       "?getManifest=true" +
                       "&appName=arandomname" +
                       "&appToFail1";
     PackagedTestHelper.checkAppDownloadError(miniManifestURL,
-                                            "MANIFEST_MISMATCH", 1, false, true,
+                                            "MANIFEST_MISMATCH", 2, false, true,
                                              "arandomname",
                                              function () {
       checkForUpdate(false, null, null, null, false,
                      function (request) {
         if (request.error.name === "PENDING_APP_NOT_UPDATABLE") {
           info("Got expected PENDING_APP_NOT_UPDATEABLE");
         } else {
           ok(false, "Got unexpected " + request.error.name);
--- a/dom/apps/tests/test_receipt_operations.html
+++ b/dom/apps/tests/test_receipt_operations.html
@@ -238,9 +238,9 @@ function runTest() {
   ok(true, "App uninstalled");
 }
 
 addLoadEvent(go);
 
 </script>
 </pre>
 </body>
-</html>
+</html>
\ No newline at end of file
--- a/dom/cellbroadcast/tests/marionette/test_cellbroadcast_gsm.js
+++ b/dom/cellbroadcast/tests/marionette/test_cellbroadcast_gsm.js
@@ -325,21 +325,70 @@ function testReceiving_GSM_ServiceCatego
     is(aMessage.cdmaServiceCategory, 0, "aMessage.cdmaServiceCategory");
   };
 
   let pdu = buildHexStr(0, CB_MESSAGE_SIZE_GSM * 2);
   return sendMultipleRawCbsToEmulatorAndWait([pdu])
     .then((aMessage) => verifyCBMessage(aMessage));
 }
 
+function testReceiving_GSM_PaddingCharacters() {
+  log("Test receiving GSM Cell Broadcast - Padding Characters <CR>");
+
+  let promise = Promise.resolve();
+
+  let testContents = [
+    { pdu:
+        // CB PDU with GSM 7bit encoded text of
+        // "The quick brown fox jumps over the lazy dog
+        //  \r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r
+        //  \r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r
+        //  \r\r\r\r\r\r\r\r"
+        "C0020001011154741914AFA7C76B9058" +
+        "FEBEBB41E6371EA4AEB7E173D0DB5E96" +
+        "83E8E832881DD6E741E4F7B9D168341A" +
+        "8D46A3D168341A8D46A3D168341A8D46" +
+        "A3D168341A8D46A3D168341A8D46A3D1" +
+        "68341A8D46A3D100",
+      text:
+        "The quick brown fox jumps over the lazy dog"
+    },
+    { pdu:
+        // CB PDU with UCS2 encoded text of
+        // "The quick brown fox jumps over\r\r\r\r\r\r\r\r\r\r\r"
+        "C0020001481100540068006500200071" +
+        "007500690063006b002000620072006f" +
+        "0077006e00200066006f00780020006a" +
+        "0075006d007000730020006f00760065" +
+        "0072000D000D000D000D000D000D000D" +
+        "000D000D000D000D",
+      text:
+        "The quick brown fox jumps over"
+    }
+  ];
+
+  let verifyCBMessage = (aMessage, aText) => {
+    is(aMessage.body, aText, "aMessage.body");
+  };
+
+  testContents.forEach(function(aTestContent) {
+    promise = promise
+      .then(() => sendMultipleRawCbsToEmulatorAndWait([aTestContent.pdu]))
+      .then((aMessage) => verifyCBMessage(aMessage, aTestContent.text));
+  });
+
+  return promise;
+}
+
 startTestCommon(function testCaseMain() {
   return testReceiving_GSM_MessageAttributes()
     .then(() => testReceiving_GSM_GeographicalScope())
     .then(() => testReceiving_GSM_MessageCode())
     .then(() => testReceiving_GSM_MessageId())
     .then(() => testReceiving_GSM_Language_and_Body())
     .then(() => testReceiving_GSM_Timestamp())
     .then(() => testReceiving_GSM_WarningType())
     .then(() => testReceiving_GSM_EmergencyUserAlert())
     .then(() => testReceiving_GSM_Popup())
     .then(() => testReceiving_GSM_Multipart())
-    .then(() => testReceiving_GSM_ServiceCategory());
+    .then(() => testReceiving_GSM_ServiceCategory())
+    .then(() => testReceiving_GSM_PaddingCharacters());
 });
--- a/dom/ipc/TabChild.cpp
+++ b/dom/ipc/TabChild.cpp
@@ -759,16 +759,21 @@ TabChild::Observe(nsISupports *aSubject,
       nsCOMPtr<nsIDocument> doc(GetDocument());
 
       if (SameCOMIdentity(subject, doc)) {
         nsCOMPtr<nsIDOMWindowUtils> utils(GetDOMWindowUtils());
         utils->SetIsFirstPaint(true);
 
         mContentDocumentIsDisplayed = true;
 
+        // Reset CSS viewport and zoom to default on new page, then
+        // calculate them properly using the actual metadata from the
+        // page.
+        SetCSSViewport(kDefaultViewportSize);
+
         // In some cases before-first-paint gets called before
         // RecvUpdateDimensions is called and therefore before we have an
         // mInnerSize value set. In such cases defer initializing the viewport
         // until we we get an inner size.
         if (HasValidInnerSize()) {
           InitializeRootMetrics();
           utils->SetResolution(mLastRootMetrics.mResolution.scale,
                                mLastRootMetrics.mResolution.scale);
--- a/dom/system/gonk/ril_worker.js
+++ b/dom/system/gonk/ril_worker.js
@@ -8501,16 +8501,34 @@ GsmPDUHelperObject.prototype = {
           msg.language = this.readSeptetsToString.call(bufAdapter, 2, 0,
                                                        PDU_NL_IDENTIFIER_DEFAULT,
                                                        PDU_NL_IDENTIFIER_DEFAULT);
           length -= 2;
         }
         msg.body = this.readUCS2String.call(bufAdapter, length);
         break;
     }
+
+    // According to 9.3.19 CBS-Message-Information-Page in TS 23.041:
+    // "
+    //  This parameter is of a fixed length of 82 octets and carries up to and
+    //  including 82 octets of user information. Where the user information is
+    //  less than 82 octets, the remaining octets must be filled with padding.
+    // "
+    // According to 6.2.1.1 GSM 7 bit Default Alphabet and 6.2.3 UCS2 in
+    // TS 23.038, the padding character is <CR>.
+    if (!msg.body) {
+      return;
+    }
+    for (let i = msg.body.length - 1; i >= 0; i--) {
+      if (msg.body.charAt(i) !== '\r') {
+        msg.body = msg.body.substring(0, i + 1);
+        break;
+      }
+    }
   },
 
   /**
    * Read Cell GSM/ETWS/UMTS Broadcast Message.
    *
    * @param pduLength
    *        total length of the incoming PDU in octets.
    */
--- a/gfx/layers/client/SimpleTextureClientPool.cpp
+++ b/gfx/layers/client/SimpleTextureClientPool.cpp
@@ -59,17 +59,16 @@ SimpleTextureClientPool::SimpleTextureCl
 
 TemporaryRef<TextureClient>
 SimpleTextureClientPool::GetTextureClient(bool aAutoRecycle)
 {
   // Try to fetch a client from the pool
   RefPtr<TextureClient> textureClient;
   if (mAvailableTextureClients.size()) {
     textureClient = mAvailableTextureClients.top();
-    textureClient->WaitForBufferOwnership();
     mAvailableTextureClients.pop();
     RECYCLE_LOG("%s Skip allocate (%i left), returning %p\n", (mFormat == SurfaceFormat::B8G8R8A8?"poolA":"poolX"), mAvailableTextureClients.size(), textureClient.get());
 
   } else {
     // No unused clients in the pool, create one
     if (gfxPrefs::ForceShmemTiles()) {
       textureClient = TextureClient::CreateBufferTextureClient(mSurfaceAllocator,
         mFormat, TextureFlags::IMMEDIATE_UPLOAD | TextureFlags::RECYCLE, gfx::BackendType::NONE);
--- a/gfx/layers/client/TextureClientPool.cpp
+++ b/gfx/layers/client/TextureClientPool.cpp
@@ -39,17 +39,16 @@ TemporaryRef<TextureClient>
 TextureClientPool::GetTextureClient()
 {
   mOutstandingClients++;
 
   // Try to fetch a client from the pool
   RefPtr<TextureClient> textureClient;
   if (mTextureClients.size()) {
     textureClient = mTextureClients.top();
-    textureClient->WaitForBufferOwnership();
     mTextureClients.pop();
     return textureClient;
   }
 
   // We're increasing the number of outstanding TextureClients without reusing a
   // client, we may need to free a deferred-return TextureClient.
   ShrinkToMaximumSize();
 
--- a/gfx/layers/d3d11/CompositorD3D11.h
+++ b/gfx/layers/d3d11/CompositorD3D11.h
@@ -137,16 +137,18 @@ public:
   virtual LayersBackend GetBackendType() const MOZ_OVERRIDE {
     return LayersBackend::LAYERS_D3D11;
   }
 
   virtual nsIWidget* GetWidget() const MOZ_OVERRIDE { return mWidget; }
 
   ID3D11Device* GetDevice() { return mDevice; }
 
+  ID3D11DeviceContext* GetDC() { return mContext; }
+
 private:
   // ensure mSize is up to date with respect to mWidget
   void EnsureSize();
   void VerifyBufferSize();
   void UpdateRenderTarget();
   bool CreateShaders();
   void UpdateConstantBuffers();
   void SetSamplerForFilter(gfx::Filter aFilter);
--- a/gfx/layers/d3d11/TextureD3D11.cpp
+++ b/gfx/layers/d3d11/TextureD3D11.cpp
@@ -100,31 +100,38 @@ DataTextureSourceD3D11::~DataTextureSour
 
 
 template<typename T> // ID3D10Texture2D or ID3D11Texture2D
 static bool LockD3DTexture(T* aTexture)
 {
   MOZ_ASSERT(aTexture);
   RefPtr<IDXGIKeyedMutex> mutex;
   aTexture->QueryInterface((IDXGIKeyedMutex**)byRef(mutex));
-  if (!mutex) {
-    return false;
+  // Textures created by the DXVA decoders don't have a mutex for synchronization
+  if (mutex) {
+    HRESULT hr = mutex->AcquireSync(0, INFINITE);
+    if (FAILED(hr)) {
+      NS_WARNING("Failed to lock the texture");
+      return false;
+    }
   }
-  mutex->AcquireSync(0, INFINITE);
   return true;
 }
 
 template<typename T> // ID3D10Texture2D or ID3D11Texture2D
 static void UnlockD3DTexture(T* aTexture)
 {
   MOZ_ASSERT(aTexture);
   RefPtr<IDXGIKeyedMutex> mutex;
   aTexture->QueryInterface((IDXGIKeyedMutex**)byRef(mutex));
   if (mutex) {
-    mutex->ReleaseSync(0);
+    HRESULT hr = mutex->ReleaseSync(0);
+    if (FAILED(hr)) {
+      NS_WARNING("Failed to unlock the texture");
+    }
   }
 }
 
 TemporaryRef<TextureHost>
 CreateTextureHostD3D11(const SurfaceDescriptor& aDesc,
                        ISurfaceAllocator* aDeallocator,
                        TextureFlags aFlags)
 {
@@ -359,71 +366,109 @@ DXGITextureHostD3D11::GetTextureSources(
   return mTextureSource.get();
 }
 
 bool
 DataTextureSourceD3D11::Update(DataSourceSurface* aSurface,
                                nsIntRegion* aDestRegion,
                                IntPoint* aSrcOffset)
 {
-  // Right now we only support full surface update. If aDestRegion is provided,
-  // It will be ignored. Incremental update with a source offset is only used
-  // on Mac so it is not clear that we ever will need to support it for D3D.
+  // Incremental update with a source offset is only used on Mac so it is not
+  // clear that we ever will need to support it for D3D.
   MOZ_ASSERT(!aSrcOffset);
   MOZ_ASSERT(aSurface);
 
+  HRESULT hr;
+
   if (!mCompositor || !mCompositor->GetDevice()) {
     return false;
   }
 
   uint32_t bpp = BytesPerPixel(aSurface->GetFormat());
   DXGI_FORMAT dxgiFormat = SurfaceFormatToDXGIFormat(aSurface->GetFormat());
 
   mSize = aSurface->GetSize();
   mFormat = aSurface->GetFormat();
 
-  CD3D11_TEXTURE2D_DESC desc(dxgiFormat, mSize.width, mSize.height,
-                             1, 1, D3D11_BIND_SHADER_RESOURCE,
-                             D3D11_USAGE_IMMUTABLE);
+  CD3D11_TEXTURE2D_DESC desc(dxgiFormat, mSize.width, mSize.height, 1, 1);
 
   int32_t maxSize = mCompositor->GetMaxTextureSize();
   if ((mSize.width <= maxSize && mSize.height <= maxSize) ||
       (mFlags & TextureFlags::DISALLOW_BIGIMAGE)) {
-    D3D11_SUBRESOURCE_DATA initData;
-    initData.pSysMem = aSurface->GetData();
-    initData.SysMemPitch = aSurface->Stride();
+
+    if (mTexture) {
+      D3D11_TEXTURE2D_DESC currentDesc;
+      mTexture->GetDesc(&currentDesc);
+
+      // Make sure there's no size mismatch, if there is, recreate.
+      if (currentDesc.Width != mSize.width || currentDesc.Height != mSize.height ||
+          currentDesc.Format != dxgiFormat) {
+        mTexture = nullptr;
+        // Make sure we upload the whole surface.
+        aDestRegion = nullptr;
+      }
+    }
+
+    if (!mTexture) {
+      hr = mCompositor->GetDevice()->CreateTexture2D(&desc, nullptr, byRef(mTexture));
+      mIsTiled = false;
+      if (FAILED(hr) || !mTexture) {
+        Reset();
+        return false;
+      }
+    }
 
-    mCompositor->GetDevice()->CreateTexture2D(&desc, &initData, byRef(mTexture));
-    mIsTiled = false;
-    if (!mTexture) {
-      Reset();
-      return false;
+    DataSourceSurface::MappedSurface map;
+    aSurface->Map(DataSourceSurface::MapType::READ, &map);
+
+    if (aDestRegion) {
+      nsIntRegionRectIterator iter(*aDestRegion);
+      const nsIntRect *iterRect;
+      while ((iterRect = iter.Next())) {
+        D3D11_BOX box;
+        box.front = 0;
+        box.back = 1;
+        box.left = iterRect->x;
+        box.top = iterRect->y;
+        box.right = iterRect->XMost();
+        box.bottom = iterRect->YMost();
+
+        void* data = map.mData + map.mStride * iterRect->y + BytesPerPixel(aSurface->GetFormat()) * iterRect->x;
+
+        mCompositor->GetDC()->UpdateSubresource(mTexture, 0, &box, data, map.mStride, map.mStride * mSize.height);
+      }
+    } else {
+      mCompositor->GetDC()->UpdateSubresource(mTexture, 0, nullptr, aSurface->GetData(),
+                                              aSurface->Stride(), aSurface->Stride() * mSize.height);
     }
+
+    aSurface->Unmap();
   } else {
     mIsTiled = true;
     uint32_t tileCount = GetRequiredTilesD3D11(mSize.width, maxSize) *
                          GetRequiredTilesD3D11(mSize.height, maxSize);
 
     mTileTextures.resize(tileCount);
     mTexture = nullptr;
 
     for (uint32_t i = 0; i < tileCount; i++) {
       IntRect tileRect = GetTileRect(i);
 
       desc.Width = tileRect.width;
       desc.Height = tileRect.height;
+      desc.Usage = D3D11_USAGE_IMMUTABLE;
 
       D3D11_SUBRESOURCE_DATA initData;
       initData.pSysMem = aSurface->GetData() +
                          tileRect.y * aSurface->Stride() +
                          tileRect.x * bpp;
       initData.SysMemPitch = aSurface->Stride();
 
-      mCompositor->GetDevice()->CreateTexture2D(&desc, &initData, byRef(mTileTextures[i]));
-      if (!mTileTextures[i]) {
+      hr = mCompositor->GetDevice()->CreateTexture2D(&desc, &initData, byRef(mTileTextures[i]));
+      if (FAILED(hr) || !mTileTextures[i]) {
         Reset();
         return false;
       }
     }
   }
   return true;
 }
 
--- a/gfx/layers/opengl/GrallocTextureHost.cpp
+++ b/gfx/layers/opengl/GrallocTextureHost.cpp
@@ -254,16 +254,21 @@ GrallocTextureSourceOGL::SetCompositable
   mCompositableBackendData = aBackendData;
 
   gl()->MakeCurrent();
   GLuint tex = GetGLTexture();
   GLuint textureTarget = GetTextureTarget();
 
   gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
   gl()->fBindTexture(textureTarget, tex);
+
+  // Setup texure parameters at the first binding.
+  gl()->fTexParameteri(textureTarget, LOCAL_GL_TEXTURE_WRAP_T, GetWrapMode());
+  gl()->fTexParameteri(textureTarget, LOCAL_GL_TEXTURE_WRAP_S, GetWrapMode());
+
   // create new EGLImage
   mEGLImage = EGLImageCreateFromNativeBuffer(gl(), mGraphicBuffer->getNativeBuffer());
   BindEGLImage();
   mNeedsReset = false;
 }
 
 gfx::IntSize
 GrallocTextureSourceOGL::GetSize() const
--- a/js/src/builtin/TypedObject.js
+++ b/js/src/builtin/TypedObject.js
@@ -41,17 +41,17 @@
     UnsafeGetReservedSlot(obj, JS_BUFVIEW_SLOT_OWNER)
 #define TYPEDOBJ_LENGTH(obj) \
     TO_INT32(UnsafeGetReservedSlot(obj, JS_BUFVIEW_SLOT_LENGTH))
 
 #define HAS_PROPERTY(obj, prop) \
     callFunction(std_Object_hasOwnProperty, obj, prop)
 
 function TypedObjectTypeDescr(typedObj) {
-  return TYPROTO_DESCR(typedObj.__proto__);
+  return TYPROTO_DESCR(std_Object_getPrototypeOf(typedObj));
 }
 
 ///////////////////////////////////////////////////////////////////////////
 // Getting values
 //
 // The methods in this section read from the memory pointed at
 // by `this` and produce JS values. This process is called *reification*
 // in the spec.
--- a/js/src/builtin/Utilities.js
+++ b/js/src/builtin/Utilities.js
@@ -58,16 +58,17 @@ var std_Math_min = Math.min;
 var std_Math_abs = Math.abs;
 var std_Math_imul = Math.imul;
 var std_Math_log2 = Math.log2;
 var std_Number_valueOf = Number.prototype.valueOf;
 var std_Number_POSITIVE_INFINITY = Number.POSITIVE_INFINITY;
 var std_Object_create = Object.create;
 var std_Object_getOwnPropertyNames = Object.getOwnPropertyNames;
 var std_Object_hasOwnProperty = Object.prototype.hasOwnProperty;
+var std_Object_getPrototypeOf = Object.getPrototypeOf;
 var std_RegExp_test = RegExp.prototype.test;
 var std_String_fromCharCode = String.fromCharCode;
 var std_String_charCodeAt = String.prototype.charCodeAt;
 var std_String_indexOf = String.prototype.indexOf;
 var std_String_lastIndexOf = String.prototype.lastIndexOf;
 var std_String_match = String.prototype.match;
 var std_String_replace = String.prototype.replace;
 var std_String_split = String.prototype.split;
--- a/js/src/configure.in
+++ b/js/src/configure.in
@@ -1094,18 +1094,18 @@ if test "$GNU_CC"; then
     # Per bug 719659 comment 2, some of the headers on ancient build machines
     # may require gnu89 inline semantics.  But otherwise, we use C99.
     # But on OS X we just use C99 plus GNU extensions, in order to fix
     # bug 917526.
     CFLAGS="$CFLAGS -std=gnu99"
     if test "${OS_ARCH}" != Darwin; then
         CFLAGS="$CFLAGS -fgnu89-inline"
     fi
-    MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-h,$(notdir $@) -o $@'
-    MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-h,$(notdir $@) -o $@'
+    MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-h,$(DSO_SONAME) -o $@'
+    MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-h,$(DSO_SONAME) -o $@'
     DSO_LDOPTS='-shared'
     if test "$GCC_USE_GNU_LD"; then
         # Some tools like ASan use a runtime library that is only
         # linked against executables, so we must allow undefined
         # symbols for shared objects in some cases.
         if test -z "$MOZ_NO_WLZDEFS"; then
             # Don't allow undefined symbols in libraries
             DSO_LDOPTS="$DSO_LDOPTS -Wl,-z,defs"
@@ -1187,18 +1187,18 @@ elif test "$SOLARIS_SUNPRO_CC"; then
     if test "$CPU_ARCH" = "sparc"; then
         # for Sun Studio on Solaris/SPARC
         DSO_PIC_CFLAGS='-xcode=pic32'
     else
         DSO_PIC_CFLAGS='-KPIC'
     fi
     _DEFINES_CFLAGS='$(ACDEFINES) -D_JS_CONFDEFS_H_ -DMOZILLA_CLIENT'
 else
-    MKSHLIB='$(LD) $(DSO_LDOPTS) -h $(notdir $@) -o $@'
-    MKCSHLIB='$(LD) $(DSO_LDOPTS) -h $(notdir $@) -o $@'
+    MKSHLIB='$(LD) $(DSO_LDOPTS) -h $(DSO_SONAME) -o $@'
+    MKCSHLIB='$(LD) $(DSO_LDOPTS) -h $(DSO_SONAME) -o $@'
 
     DSO_LDOPTS='-shared'
     if test "$GNU_LD"; then
         # Don't allow undefined symbols in libraries
         DSO_LDOPTS="$DSO_LDOPTS -z defs"
     fi
 
     DSO_CFLAGS=''
@@ -1794,18 +1794,18 @@ ia64*-hpux*)
     	DSO_LDOPTS='-shared'
     fi
     # This will fail on a.out systems prior to 1.5.1_ALPHA.
     MKSHLIB_FORCE_ALL='-Wl,--whole-archive'
     MKSHLIB_UNFORCE_ALL='-Wl,--no-whole-archive'
     if test "$LIBRUNPATH"; then
 	DSO_LDOPTS="-Wl,-R$LIBRUNPATH $DSO_LDOPTS"
     fi
-    MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-soname,$(notdir $@) -o $@'
-    MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-soname,$(notdir $@) -o $@'
+    MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-soname,$(DSO_SONAME) -o $@'
+    MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-soname,$(DSO_SONAME) -o $@'
     ;;
 
 *-openbsd*)
     DLL_SUFFIX=".so.1.0"
     DSO_CFLAGS=''
     DSO_PIC_CFLAGS='-fPIC'
     DSO_LDOPTS='-shared -fPIC'
     if test "$LIBRUNPATH"; then
@@ -1835,18 +1835,18 @@ ia64*-hpux*)
            _SAVE_LDFLAGS=$LDFLAGS
            LDFLAGS="-M /usr/lib/ld/map.noexstk $LDFLAGS"
            AC_TRY_LINK([#include <stdio.h>],
                        [printf("Hello World\n");],
                        ,
                        [LDFLAGS=$_SAVE_LDFLAGS])
        fi
        MOZ_OPTIMIZE_FLAGS="-xO4"
-       MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_FLAGS) $(DSO_LDOPTS) -h $(notdir $@) -o $@'
-       MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_FLAGS) $(DSO_LDOPTS) -h $(notdir $@) -o $@'
+       MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_FLAGS) $(DSO_LDOPTS) -h $(DSO_SONAME) -o $@'
+       MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_FLAGS) $(DSO_LDOPTS) -h $(DSO_SONAME) -o $@'
        MKSHLIB_FORCE_ALL='-z allextract'
        MKSHLIB_UNFORCE_ALL='-z defaultextract'
        DSO_LDOPTS='-G'
        AR_LIST="$AR t"
        AR_EXTRACT="$AR x"
        AR_DELETE="$AR d"
        AR='$(CXX) -xar'
        AR_FLAGS='-o $@'
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -316,19 +316,18 @@ class GCRuntime
 
     bool shouldCleanUpEverything() { return cleanUpEverything; }
 
     bool areGrayBitsValid() { return grayBitsValid; }
     void setGrayBitsInvalid() { grayBitsValid = false; }
 
     bool isGcNeeded() { return isNeeded; }
 
-    double computeHeapGrowthFactor(size_t lastBytes) const;
-    size_t computeTriggerBytes(double growthFactor, size_t lastBytes,
-                               JSGCInvocationKind gckind) const;
+    double computeHeapGrowthFactor(size_t lastBytes);
+    size_t computeTriggerBytes(double growthFactor, size_t lastBytes, JSGCInvocationKind gckind);
     size_t allocationThreshold() { return allocThreshold; }
 
     JSGCMode gcMode() const { return mode; }
     void setGCMode(JSGCMode m) {
         mode = m;
         marker.setGCMode(mode);
     }
 
@@ -389,17 +388,17 @@ class GCRuntime
     void findZoneGroups();
     bool findZoneEdgesForWeakMaps();
     void getNextZoneGroup();
     void endMarkingZoneGroup();
     void beginSweepingZoneGroup();
     bool releaseObservedTypes();
     void endSweepingZoneGroup();
     bool sweepPhase(SliceBudget &sliceBudget);
-    void endSweepPhase(bool lastGC);
+    void endSweepPhase(JSGCInvocationKind gckind, bool lastGC);
     void sweepZones(FreeOp *fop, bool lastGC);
     void decommitArenasFromAvailableList(Chunk **availableListHeadp);
     void decommitArenas();
     void expireChunksAndArenas(bool shouldShrink);
     void sweepBackgroundThings(bool onBackgroundThread);
     void assertBackgroundSweepingFinished();
 
     void computeNonIncrementalMarkingForValidation();
@@ -509,19 +508,16 @@ class GCRuntime
     uint64_t              startNumber;
 
     /* Whether the currently running GC can finish in multiple slices. */
     bool                  isIncremental;
 
     /* Whether all compartments are being collected in first GC slice. */
     bool                  isFull;
 
-    /* The kind of the last collection. */
-    JSGCInvocationKind    lastKind;
-
     /* The reason that an interrupt-triggered GC should be called. */
     JS::gcreason::Reason  triggerReason;
 
     /*
      * If this is 0, all cross-compartment proxies must be registered in the
      * wrapper map. This checking must be disabled temporarily while creating
      * new wrappers. When non-zero, this records the recursion depth of wrapper
      * creation.
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -26,17 +26,16 @@ JS::Zone::Zone(JSRuntime *rt)
     allocator(this),
     types(this),
     compartments(),
     gcGrayRoots(),
     gcHeapGrowthFactor(3.0),
     gcMallocBytes(0),
     gcMallocGCTriggered(false),
     gcBytes(0),
-    gcBytesAfterGC(0),
     gcTriggerBytes(0),
     data(nullptr),
     isSystem(false),
     usedByExclusiveThread(false),
     scheduledForDestruction(false),
     maybeAlive(true),
     active(false),
     jitZone_(nullptr),
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -101,16 +101,17 @@ struct Zone : public JS::shadow::Zone,
 
     void discardJitCode(js::FreeOp *fop);
 
     void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
                                 size_t *typePool,
                                 size_t *baselineStubsOptimized);
 
     void setGCLastBytes(size_t lastBytes, js::JSGCInvocationKind gckind);
+    void reduceGCTriggerBytes(size_t amount);
 
     void resetGCMallocBytes();
     void setGCMaxMallocBytes(size_t value);
     void updateMallocCounter(size_t nbytes) {
         // Note: this code may be run from worker threads. We tolerate any
         // thread races when updating gcMallocBytes.
         gcMallocBytes -= ptrdiff_t(nbytes);
         if (MOZ_UNLIKELY(isTooMuchMalloc()))
@@ -242,22 +243,18 @@ struct Zone : public JS::shadow::Zone,
     // This should be a bool, but Atomic only supports 32-bit and pointer-sized
     // types.
     mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> gcMallocGCTriggered;
 
     // Counts the number of bytes allocated in the GC heap for this zone. It is
     // updated by both the main and GC helper threads.
     mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gcBytes;
 
-    // The number of bytes allocated in the GC heap for this zone after the last GC.
-    size_t gcBytesAfterGC;
-
-    // GC trigger threshold for allocations on the GC heap. It is updated by
-    // both the main and GC helper threads.
-    mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gcTriggerBytes;
+    // GC trigger threshold for allocations on the GC heap.
+    size_t gcTriggerBytes;
 
     // Per-zone data for use by an embedder.
     void *data;
 
     bool isSystem;
 
     bool usedByExclusiveThread;
 
--- a/js/src/irregexp/NativeRegExpMacroAssembler.cpp
+++ b/js/src/irregexp/NativeRegExpMacroAssembler.cpp
@@ -450,17 +450,17 @@ NativeRegExpMacroAssembler::GenerateCode
     writePerfSpewerJitCodeProfile(code, "RegExp");
 #endif
 
     for (size_t i = 0; i < labelPatches.length(); i++) {
         LabelPatch &v = labelPatches[i];
         JS_ASSERT(!v.label);
         v.patchOffset.fixup(&masm);
         uintptr_t offset = masm.actualOffset(v.labelOffset);
-        Assembler::patchDataWithValueCheck(CodeLocationLabel(code, v.patchOffset),
+        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, v.patchOffset),
                                            ImmPtr(code->raw() + offset),
                                            ImmPtr(0));
     }
 
     IonSpew(IonSpew_Codegen, "Created RegExp (raw %p length %d)",
             (void *) code->raw(), (int) masm.bytesNeeded());
 
     RegExpCode res;
--- a/js/src/jit/AsmJS.cpp
+++ b/js/src/jit/AsmJS.cpp
@@ -6747,17 +6747,17 @@ GenerateInterruptExit(ModuleCompiler &m,
     masm.branchIfFalseBool(ReturnReg, throwLabel);
 
     // This will restore stack to the address before the call.
     masm.movePtr(s0, StackPointer);
     masm.PopRegsInMask(AllRegsExceptSP);
 
     // Pop resumePC into PC. Clobber HeapReg to make the jump and restore it
     // during jump delay slot.
-    JS_ASSERT(Imm16::isInSignedRange(AsmJSModule::heapGlobalDataOffset()));
+    JS_ASSERT(Imm16::IsInSignedRange(AsmJSModule::heapGlobalDataOffset()));
     masm.pop(HeapReg);
     masm.as_jr(HeapReg);
     masm.loadPtr(Address(GlobalReg, AsmJSModule::heapGlobalDataOffset()), HeapReg);
 #elif defined(JS_CODEGEN_ARM)
     masm.setFramePushed(0);         // set to zero so we can use masm.framePushed() below
     masm.PushRegsInMask(RegisterSet(GeneralRegisterSet(Registers::AllMask & ~(1<<Registers::sp)), FloatRegisterSet(uint32_t(0))));   // save all GP registers,excep sp
 
     // Save both the APSR and FPSCR in non-volatile registers.
--- a/js/src/jit/AsmJSModule.cpp
+++ b/js/src/jit/AsmJSModule.cpp
@@ -369,17 +369,17 @@ AsmJSModule::finish(ExclusiveContext *cx
         while (labelOffset != LabelBase::INVALID_OFFSET) {
             size_t patchAtOffset = masm.labelOffsetToPatchOffset(labelOffset);
             RelativeLink link(RelativeLink::CodeLabel);
             link.patchAtOffset = patchAtOffset;
             link.targetOffset = targetOffset;
             if (!staticLinkData_.relativeLinks.append(link))
                 return false;
 
-            labelOffset = Assembler::extractCodeLabelOffset(code_ + patchAtOffset);
+            labelOffset = Assembler::ExtractCodeLabelOffset(code_ + patchAtOffset);
         }
     }
 
 #if defined(JS_CODEGEN_X86)
     // Global data accesses in x86 need to be patched with the absolute
     // address of the global. Globals are allocated sequentially after the
     // code section so we can just use an RelativeLink.
     for (size_t i = 0; i < masm.numAsmJSGlobalAccesses(); i++) {
@@ -394,17 +394,17 @@ AsmJSModule::finish(ExclusiveContext *cx
 
 #if defined(JS_CODEGEN_MIPS)
     // On MIPS we need to update all the long jumps because they contain an
     // absolute adress.
     for (size_t i = 0; i < masm.numLongJumps(); i++) {
         RelativeLink link(RelativeLink::InstructionImmediate);
         link.patchAtOffset = masm.longJump(i);
         InstImm *inst = (InstImm *)(code_ + masm.longJump(i));
-        link.targetOffset = Assembler::extractLuiOriValue(inst, inst->next()) - (uint32_t)code_;
+        link.targetOffset = Assembler::ExtractLuiOriValue(inst, inst->next()) - (uint32_t)code_;
         if (!staticLinkData_.relativeLinks.append(link))
             return false;
     }
 #endif
 
 #if defined(JS_CODEGEN_X64)
     // Global data accesses on x64 use rip-relative addressing and thus do
     // not need patching after deserialization.
@@ -592,22 +592,22 @@ AsmJSModule::staticallyLink(ExclusiveCon
 
     for (size_t i = 0; i < staticLinkData_.relativeLinks.length(); i++) {
         RelativeLink link = staticLinkData_.relativeLinks[i];
         uint8_t *patchAt = code_ + link.patchAtOffset;
         uint8_t *target = code_ + link.targetOffset;
         if (link.isRawPointerPatch())
             *(uint8_t **)(patchAt) = target;
         else
-            Assembler::patchInstructionImmediate(patchAt, PatchedImmPtr(target));
+            Assembler::PatchInstructionImmediate(patchAt, PatchedImmPtr(target));
     }
 
     for (size_t i = 0; i < staticLinkData_.absoluteLinks.length(); i++) {
         AbsoluteLink link = staticLinkData_.absoluteLinks[i];
-        Assembler::patchDataWithValueCheck(CodeLocationLabel(code_ + link.patchAt.offset()),
+        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code_ + link.patchAt.offset()),
                                            PatchedImmPtr(AddressOf(link.target, cx)),
                                            PatchedImmPtr((void*)-1));
     }
 
     // Initialize global data segment
 
     for (size_t i = 0; i < exits_.length(); i++) {
         exitIndexToGlobalDatum(i).exit = interpExitTrampoline(exits_[i]);
@@ -637,31 +637,31 @@ AsmJSModule::initHeap(Handle<ArrayBuffer
         void *addr = access.patchOffsetAt(code_);
         uint32_t disp = reinterpret_cast<uint32_t>(JSC::X86Assembler::getPointer(addr));
         JS_ASSERT(disp <= INT32_MAX);
         JSC::X86Assembler::setPointer(addr, (void *)(heapOffset + disp));
     }
 #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
     uint32_t heapLength = heap->byteLength();
     for (unsigned i = 0; i < heapAccesses_.length(); i++) {
-        jit::Assembler::updateBoundsCheck(heapLength,
+        jit::Assembler::UpdateBoundsCheck(heapLength,
                                           (jit::Instruction*)(heapAccesses_[i].offset() + code_));
     }
 #endif
 }
 
 void
 AsmJSModule::restoreToInitialState(ArrayBufferObject *maybePrevBuffer, ExclusiveContext *cx)
 {
 #ifdef DEBUG
-    // Put the absolute links back to -1 so patchDataWithValueCheck assertions
+    // Put the absolute links back to -1 so PatchDataWithValueCheck assertions
     // in staticallyLink are valid.
     for (size_t i = 0; i < staticLinkData_.absoluteLinks.length(); i++) {
         AbsoluteLink link = staticLinkData_.absoluteLinks[i];
-        Assembler::patchDataWithValueCheck(CodeLocationLabel(code_ + link.patchAt.offset()),
+        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code_ + link.patchAt.offset()),
                                            PatchedImmPtr((void*)-1),
                                            PatchedImmPtr(AddressOf(link.target, cx)));
     }
 #endif
 
     if (maybePrevBuffer) {
 #if defined(JS_CODEGEN_X86)
         // Subtract out the base-pointer added by AsmJSModule::initHeap.
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -213,17 +213,17 @@ BaselineCompiler::compile()
     baselineScript->adoptFallbackStubs(&stubSpace_);
 
     // Patch IC loads using IC entries
     for (size_t i = 0; i < icLoadLabels_.length(); i++) {
         CodeOffsetLabel label = icLoadLabels_[i].label;
         label.fixup(&masm);
         size_t icEntry = icLoadLabels_[i].icEntry;
         ICEntry *entryAddr = &(baselineScript->icEntry(icEntry));
-        Assembler::patchDataWithValueCheck(CodeLocationLabel(code, label),
+        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label),
                                            ImmPtr(entryAddr),
                                            ImmPtr((void*)-1));
     }
 
     if (modifiesArguments_)
         baselineScript->setModifiesArguments();
 
     // All barriers are emitted off-by-default, toggle them on if needed.
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -6718,17 +6718,17 @@ CodeGenerator::link(JSContext *cx, types
 
     // In parallel execution mode, when we first compile a script, we
     // don't know that its potential callees are compiled, so set a
     // flag warning that the callees may not be fully compiled.
     if (!callTargets.empty())
         ionScript->setHasUncompiledCallTarget();
 
     invalidateEpilogueData_.fixup(&masm);
-    Assembler::patchDataWithValueCheck(CodeLocationLabel(code, invalidateEpilogueData_),
+    Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, invalidateEpilogueData_),
                                        ImmPtr(ionScript),
                                        ImmPtr((void*)-1));
 
     IonSpew(IonSpew_Codegen, "Created IonScript %p (raw %p)",
             (void *) ionScript, (void *) code->raw());
 
     ionScript->setInvalidationEpilogueDataOffset(invalidateEpilogueData_.offset());
     ionScript->setOsrPc(gen->info().osrPc());
@@ -6740,17 +6740,17 @@ CodeGenerator::link(JSContext *cx, types
 
 #if defined(JS_ION_PERF)
     if (PerfEnabled())
         perfSpewer_.writeProfile(script, code, masm);
 #endif
 
     for (size_t i = 0; i < ionScriptLabels_.length(); i++) {
         ionScriptLabels_[i].fixup(&masm);
-        Assembler::patchDataWithValueCheck(CodeLocationLabel(code, ionScriptLabels_[i]),
+        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, ionScriptLabels_[i]),
                                            ImmPtr(ionScript),
                                            ImmPtr((void*)-1));
     }
 
     // for generating inline caches during the execution.
     if (runtimeData_.length())
         ionScript->copyRuntimeData(&runtimeData_[0]);
     if (cacheList_.length())
@@ -6778,24 +6778,24 @@ CodeGenerator::link(JSContext *cx, types
         ionScript->copyCallTargetEntries(callTargets.begin());
     if (patchableBackedges_.length() > 0)
         ionScript->copyPatchableBackedges(cx, code, patchableBackedges_.begin());
 
 #ifdef JS_TRACE_LOGGING
     TraceLogger *logger = TraceLoggerForMainThread(cx->runtime());
     for (uint32_t i = 0; i < patchableTraceLoggers_.length(); i++) {
         patchableTraceLoggers_[i].fixup(&masm);
-        Assembler::patchDataWithValueCheck(CodeLocationLabel(code, patchableTraceLoggers_[i]),
+        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, patchableTraceLoggers_[i]),
                                            ImmPtr(logger),
                                            ImmPtr(nullptr));
     }
     uint32_t scriptId = TraceLogCreateTextId(logger, script);
     for (uint32_t i = 0; i < patchableTLScripts_.length(); i++) {
         patchableTLScripts_[i].fixup(&masm);
-        Assembler::patchDataWithValueCheck(CodeLocationLabel(code, patchableTLScripts_[i]),
+        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, patchableTLScripts_[i]),
                                            ImmPtr((void *) uintptr_t(scriptId)),
                                            ImmPtr((void *)0));
     }
 #endif
 
     switch (executionMode) {
       case SequentialExecution:
         // The correct state for prebarriers is unknown until the end of compilation,
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -2616,24 +2616,24 @@ InvalidateActivation(FreeOp *fop, uint8_
         // IonScript pointer embedded into the invalidation epilogue)
         // where the safepointed call instruction used to be. We rely on
         // the call sequence causing the safepoint being >= the size of
         // a uint32, which is checked during safepoint index
         // construction.
         CodeLocationLabel dataLabelToMunge(it.returnAddressToFp());
         ptrdiff_t delta = ionScript->invalidateEpilogueDataOffset() -
                           (it.returnAddressToFp() - ionCode->raw());
-        Assembler::patchWrite_Imm32(dataLabelToMunge, Imm32(delta));
+        Assembler::PatchWrite_Imm32(dataLabelToMunge, Imm32(delta));
 
         CodeLocationLabel osiPatchPoint = SafepointReader::InvalidationPatchPoint(ionScript, si);
         CodeLocationLabel invalidateEpilogue(ionCode, CodeOffsetLabel(ionScript->invalidateEpilogueOffset()));
 
         IonSpew(IonSpew_Invalidate, "   ! Invalidate ionScript %p (ref %u) -> patching osipoint %p",
                 ionScript, ionScript->refcount(), (void *) osiPatchPoint.raw());
-        Assembler::patchWrite_NearCall(osiPatchPoint, invalidateEpilogue);
+        Assembler::PatchWrite_NearCall(osiPatchPoint, invalidateEpilogue);
     }
 
     IonSpew(IonSpew_Invalidate, "END invalidating activation");
 }
 
 void
 jit::StopAllOffThreadCompilations(JSCompartment *comp)
 {
--- a/js/src/jit/IonCaches.cpp
+++ b/js/src/jit/IonCaches.cpp
@@ -241,17 +241,17 @@ class IonCache::StubAttacher
         rejoinOffset_.fixup(&masm);
         CodeLocationJump rejoinJump(code, rejoinOffset_);
         PatchJump(rejoinJump, rejoinLabel_);
     }
 
     void patchStubCodePointer(MacroAssembler &masm, JitCode *code) {
         if (hasStubCodePatchOffset_) {
             stubCodePatchOffset_.fixup(&masm);
-            Assembler::patchDataWithValueCheck(CodeLocationLabel(code, stubCodePatchOffset_),
+            Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, stubCodePatchOffset_),
                                                ImmPtr(code), STUB_ADDR);
         }
     }
 
     virtual void patchNextStubJump(MacroAssembler &masm, JitCode *code) = 0;
 };
 
 const ImmPtr IonCache::StubAttacher::STUB_ADDR = ImmPtr((void*)0xdeadc0de);
@@ -368,17 +368,17 @@ DispatchIonCache::bindInitialJump(MacroA
 void
 DispatchIonCache::updateBaseAddress(JitCode *code, MacroAssembler &masm)
 {
     // The address of firstStub_ should be pointer aligned.
     JS_ASSERT(uintptr_t(&firstStub_) % sizeof(uintptr_t) == 0);
 
     IonCache::updateBaseAddress(code, masm);
     dispatchLabel_.fixup(&masm);
-    Assembler::patchDataWithValueCheck(CodeLocationLabel(code, dispatchLabel_),
+    Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, dispatchLabel_),
                                        ImmPtr(&firstStub_),
                                        ImmPtr((void*)-1));
     firstStub_ = fallbackLabel_.raw();
     rejoinLabel_.repoint(code, &masm);
 }
 
 void
 IonCache::attachStub(MacroAssembler &masm, StubAttacher &attacher, Handle<JitCode *> code)
--- a/js/src/jit/IonCaches.h
+++ b/js/src/jit/IonCaches.h
@@ -370,17 +370,17 @@ class RepatchIonCache : public IonCache
     static const size_t REJOIN_LABEL_OFFSET = 0;
 #endif
 
     CodeLocationLabel rejoinLabel() const {
         uint8_t *ptr = initialJump_.raw();
 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
         uint32_t i = 0;
         while (i < REJOIN_LABEL_OFFSET)
-            ptr = Assembler::nextInstruction(ptr, &i);
+            ptr = Assembler::NextInstruction(ptr, &i);
 #endif
         return CodeLocationLabel(ptr);
     }
 
   public:
     RepatchIonCache()
       : initialJump_(),
         lastJump_()
--- a/js/src/jit/IonFrames.cpp
+++ b/js/src/jit/IonFrames.cpp
@@ -150,17 +150,17 @@ JitFrameIterator::checkInvalidation(IonS
         invalidated = !script->hasIonScript() ||
             !script->ionScript()->containsReturnAddress(returnAddr);
     }
     if (!invalidated)
         return false;
 
     int32_t invalidationDataOffset = ((int32_t *) returnAddr)[-1];
     uint8_t *ionScriptDataOffset = returnAddr + invalidationDataOffset;
-    IonScript *ionScript = (IonScript *) Assembler::getPointer(ionScriptDataOffset);
+    IonScript *ionScript = (IonScript *) Assembler::GetPointer(ionScriptDataOffset);
     JS_ASSERT(ionScript->containsReturnAddress(returnAddr));
     *ionScriptOut = ionScript;
     return true;
 }
 
 CalleeToken
 JitFrameIterator::calleeToken() const
 {
@@ -1421,17 +1421,17 @@ OsiIndex::fixUpOffset(MacroAssembler &ma
 }
 
 uint32_t
 OsiIndex::returnPointDisplacement() const
 {
     // In general, pointer arithmetic on code is bad, but in this case,
     // getting the return address from a call instruction, stepping over pools
     // would be wrong.
-    return callPointDisplacement_ + Assembler::patchWrite_NearCallSize();
+    return callPointDisplacement_ + Assembler::PatchWrite_NearCallSize();
 }
 
 SnapshotIterator::SnapshotIterator(IonScript *ionScript, SnapshotOffset snapshotOffset,
                                    IonJSFrameLayout *fp, const MachineState &machine)
   : snapshot_(ionScript->snapshots(),
               snapshotOffset,
               ionScript->snapshotsRVATableSize(),
               ionScript->snapshotsListSize()),
--- a/js/src/jit/IonMacroAssembler.h
+++ b/js/src/jit/IonMacroAssembler.h
@@ -897,17 +897,17 @@ class MacroAssembler : public MacroAssem
 
     void link(JitCode *code) {
         JS_ASSERT(!oom());
         // If this code can transition to C++ code and witness a GC, then we need to store
         // the JitCode onto the stack in order to GC it correctly.  exitCodePatch should
         // be unset if the code never needed to push its JitCode*.
         if (hasEnteredExitFrame()) {
             exitCodePatch_.fixup(this);
-            patchDataWithValueCheck(CodeLocationLabel(code, exitCodePatch_),
+            PatchDataWithValueCheck(CodeLocationLabel(code, exitCodePatch_),
                                     ImmPtr(code),
                                     ImmPtr((void*)-1));
         }
 
     }
 
     // Generates code used to complete a bailout.
     void generateBailoutTail(Register scratch, Register bailoutInfo);
--- a/js/src/jit/LIR.h
+++ b/js/src/jit/LIR.h
@@ -1423,17 +1423,17 @@ class LSafepoint : public TempObject
     }
     void setOffset(uint32_t offset) {
         safepointOffset_ = offset;
     }
     uint32_t osiReturnPointOffset() const {
         // In general, pointer arithmetic on code is bad, but in this case,
         // getting the return address from a call instruction, stepping over pools
         // would be wrong.
-        return osiCallPointOffset_ + Assembler::patchWrite_NearCallSize();
+        return osiCallPointOffset_ + Assembler::PatchWrite_NearCallSize();
     }
     uint32_t osiCallPointOffset() const {
         return osiCallPointOffset_;
     }
     void setOsiCallPointOffset(uint32_t osiCallPointOffset) {
         JS_ASSERT(!osiCallPointOffset_);
         osiCallPointOffset_ = osiCallPointOffset;
     }
--- a/js/src/jit/Safepoints.cpp
+++ b/js/src/jit/Safepoints.cpp
@@ -392,17 +392,17 @@ SafepointReader::SafepointReader(IonScri
     allFloatSpills_ = FloatRegisterSet(ReadFloatRegisterMask(stream_));
 
     advanceFromGcRegs();
 }
 
 uint32_t
 SafepointReader::osiReturnPointOffset() const
 {
-    return osiCallPointOffset_ + Assembler::patchWrite_NearCallSize();
+    return osiCallPointOffset_ + Assembler::PatchWrite_NearCallSize();
 }
 
 CodeLocationLabel
 SafepointReader::InvalidationPatchPoint(IonScript *script, const SafepointIndex *si)
 {
     SafepointReader reader(script, si);
 
     return CodeLocationLabel(script->method(), CodeOffsetLabel(reader.osiCallPointOffset()));
--- a/js/src/jit/arm/Architecture-arm.cpp
+++ b/js/src/jit/arm/Architecture-arm.cpp
@@ -155,19 +155,19 @@ uint32_t GetARMFlags()
         Elf32_auxv_t aux;
         while (read(fd, &aux, sizeof(Elf32_auxv_t))) {
             if (aux.a_type == AT_HWCAP) {
                 close(fd);
                 flags = aux.a_un.a_val;
                 isSet = true;
 #if defined(__ARM_ARCH_7__) || defined (__ARM_ARCH_7A__)
                 // This should really be detected at runtime, but /proc/*/auxv
-                // doesn't seem to carry the ISA.  We could look in
-                // /proc/cpuinfo as well, but the chances that it will be
-                // different from this are low.
+                // doesn't seem to carry the ISA. We could look in /proc/cpuinfo
+                // as well, but the chances that it will be different from this
+                // are low.
                 flags |= HWCAP_ARMv7;
 #endif
                 return flags;
             }
         }
         close(fd);
     }
 
@@ -309,24 +309,24 @@ FloatRegisters::FromName(const char *nam
 }
 
 FloatRegisterSet
 VFPRegister::ReduceSetForPush(const FloatRegisterSet &s)
 {
     FloatRegisterSet mod;
     for (TypedRegisterIterator<FloatRegister> iter(s); iter.more(); iter++) {
         if ((*iter).isSingle()) {
-            // add in just this float
+            // Add in just this float.
             mod.addUnchecked(*iter);
         } else if ((*iter).id() < 16) {
-            // a double with an overlay, add in both floats
+            // A double with an overlay, add in both floats.
             mod.addUnchecked((*iter).singleOverlay(0));
             mod.addUnchecked((*iter).singleOverlay(1));
         } else {
-            // add in the lone double in the range 16-31
+            // Add in the lone double in the range 16-31.
             mod.addUnchecked(*iter);
         }
     }
     return mod;
 }
 
 uint32_t
 VFPRegister::GetSizeInBytes(const FloatRegisterSet &s)
--- a/js/src/jit/arm/Architecture-arm.h
+++ b/js/src/jit/arm/Architecture-arm.h
@@ -9,17 +9,18 @@
 
 #include "mozilla/MathAlgorithms.h"
 
 #include <limits.h>
 #include <stdint.h>
 
 #include "js/Utility.h"
 
-// gcc appears to use __ARM_PCS_VFP to denote that the target is a hard-float target.
+// Gcc appears to use __ARM_PCS_VFP to denote that the target is a hard-float
+// target.
 #if defined(__ARM_PCS_VFP)
 #define JS_CODEGEN_ARM_HARDFP
 #endif
 
 namespace js {
 namespace jit {
 
 // In bytes: slots needed for potential memory->memory move spills.
@@ -33,21 +34,21 @@ static const uint32_t ION_FRAME_SLACK_SI
 static const int32_t NUNBOX32_TYPE_OFFSET    = 4;
 static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
 
 static const uint32_t ShadowStackSpace = 0;
 ////
 // These offsets are related to bailouts.
 ////
 
-// Size of each bailout table entry. On arm, this is presently
-// a single call (which is wrong!). the call clobbers lr.
-// For now, I've dealt with this by ensuring that we never allocate to lr.
-// it should probably be 8 bytes, a mov of an immediate into r12 (not
-// allocated presently, or ever) followed by a branch to the apropriate code.
+// Size of each bailout table entry. On arm, this is presently a single call
+// (which is wrong!). The call clobbers lr.
+// For now, I've dealt with this by ensuring that we never allocate to lr. It
+// should probably be 8 bytes, a mov of an immediate into r12 (not allocated
+// presently, or ever) followed by a branch to the apropriate code.
 static const uint32_t BAILOUT_TABLE_ENTRY_SIZE    = 4;
 
 class Registers
 {
   public:
     enum RegisterID {
         r0 = 0,
         r1,
@@ -134,17 +135,17 @@ class Registers
     // Registers returned from a JS -> JS call.
     static const uint32_t JSCallMask =
         (1 << Registers::r2) |
         (1 << Registers::r3);
 
     // Registers returned from a JS -> C call.
     static const uint32_t CallMask =
         (1 << Registers::r0) |
-        (1 << Registers::r1);  // used for double-size returns
+        (1 << Registers::r1);  // Used for double-size returns.
 
     static const uint32_t AllocatableMask = AllMask & ~NonAllocatableMask;
     typedef uint32_t SetType;
     static uint32_t SetSize(SetType x) {
         static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
         return mozilla::CountPopulation32(x);
     }
 };
@@ -235,38 +236,36 @@ class FloatRegisters
 };
 
 template <typename T>
 class TypedRegisterSet;
 
 class VFPRegister
 {
   public:
-    // What type of data is being stored in this register?
-    // UInt / Int are specifically for vcvt, where we need
-    // to know how the data is supposed to be converted.
+    // What type of data is being stored in this register? UInt / Int are
+    // specifically for vcvt, where we need to know how the data is supposed to
+    // be converted.
     enum RegType {
         Single = 0x0,
         Double = 0x1,
         UInt   = 0x2,
         Int    = 0x3
     };
 
     typedef FloatRegisters Codes;
     typedef Codes::Code Code;
 
   protected:
     RegType kind : 2;
-    // ARM doesn't have more than 32 registers...
-    // don't take more bits than we'll need.
-    // Presently, I don't have plans to address the upper
-    // and lower halves of the double registers seprately, so
-    // 5 bits should suffice.  If I do decide to address them seprately
-    // (vmov, I'm looking at you), I will likely specify it as a separate
-    // field.
+    // ARM doesn't have more than 32 registers. Don't take more bits than we'll
+    // need. Presently, we don't have plans to address the upper and lower
+    // halves of the double registers seprately, so 5 bits should suffice. If we
+    // do decide to address them seprately (vmov, I'm looking at you), we will
+    // likely specify it as a separate field.
   public:
     Code code_ : 5;
   protected:
     bool _isInvalid : 1;
     bool _isMissing : 1;
 
   public:
     MOZ_CONSTEXPR VFPRegister(uint32_t r, RegType k)
@@ -302,17 +301,17 @@ class VFPRegister
     VFPRegister doubleOverlay(unsigned int which = 0) const;
     VFPRegister singleOverlay(unsigned int which = 0) const;
     VFPRegister sintOverlay(unsigned int which = 0) const;
     VFPRegister uintOverlay(unsigned int which = 0) const;
 
     struct VFPRegIndexSplit;
     VFPRegIndexSplit encode();
 
-    // for serializing values
+    // For serializing values.
     struct VFPRegIndexSplit {
         const uint32_t block : 4;
         const uint32_t bit : 1;
 
       private:
         friend VFPRegIndexSplit js::jit::VFPRegister::encode();
 
         VFPRegIndexSplit(uint32_t block_, uint32_t bit_)
@@ -320,18 +319,18 @@ class VFPRegister
         {
             JS_ASSERT(block == block_);
             JS_ASSERT(bit == bit_);
         }
     };
 
     Code code() const {
         JS_ASSERT(!_isInvalid && !_isMissing);
-        // this should only be used in areas where we only have doubles
-        // and singles.
+        // This should only be used in areas where we only have doubles and
+        // singles.
         JS_ASSERT(isFloat());
         return Code(code_);
     }
     uint32_t id() const {
         return code_;
     }
     static VFPRegister FromCode(uint32_t i) {
         uint32_t code = i & 31;
@@ -386,18 +385,18 @@ class VFPRegister
             return 1;
         }
         // s1 has 0 other aligned aliases, 1 total.
         // s0 has 1 other aligned aliase, 2 total.
         return 2 - (code_ & 1);
     }
     // |   d0    |
     // | s0 | s1 |
-    // if we've stored s0 and s1 in memory, we also want to say that d0
-    // is stored there, but it is only stored at the location where it is aligned
+    // If we've stored s0 and s1 in memory, we also want to say that d0 is
+    // stored there, but it is only stored at the location where it is aligned
     // e.g. at s0, not s1.
     void alignedAliased(uint32_t aliasIdx, VFPRegister *ret) {
         if (aliasIdx == 0) {
             *ret = *this;
             return;
         }
         JS_ASSERT(aliasIdx == 1);
         if (isDouble()) {
@@ -419,47 +418,47 @@ class VFPRegister
     }
     static TypedRegisterSet<VFPRegister> ReduceSetForPush(const TypedRegisterSet<VFPRegister> &s);
     static uint32_t GetSizeInBytes(const TypedRegisterSet<VFPRegister> &s);
     static uint32_t GetPushSizeInBytes(const TypedRegisterSet<VFPRegister> &s);
     uint32_t getRegisterDumpOffsetInBytes();
 
 };
 
-// The only floating point register set that we work with
-// are the VFP Registers
+// The only floating point register set that we work with are the VFP Registers.
 typedef VFPRegister FloatRegister;
 
 uint32_t GetARMFlags();
 bool HasMOVWT();
 bool HasVFPv3();
 bool HasVFP();
 bool Has32DP();
 bool HasIDIV();
 
-// Arm/D32 has double registers that can NOT be treated as float32
-// and this requires some dances in lowering.
+// Arm/D32 has double registers that can NOT be treated as float32 and this
+// requires some dances in lowering.
 inline bool
 hasUnaliasedDouble()
 {
     return Has32DP();
 }
 
-// On ARM, Dn aliases both S2n and S2n+1, so if you need to convert a float32
-// to a double as a temporary, you need a temporary double register.
+// On ARM, Dn aliases both S2n and S2n+1, so if you need to convert a float32 to
+// a double as a temporary, you need a temporary double register.
 inline bool
 hasMultiAlias()
 {
     return true;
 }
 
 bool ParseARMHwCapFlags(const char *armHwCap);
 
-// If the simulator is used then the ABI choice is dynamic.  Otherwise the ABI is static
-// and useHardFpABI is inlined so that unused branches can be optimized away.
+// If the simulator is used then the ABI choice is dynamic. Otherwise the ABI is
+// static and useHardFpABI is inlined so that unused branches can be optimized
+// away.
 #if defined(JS_ARM_SIMULATOR)
 bool UseHardFpABI();
 #else
 static inline bool UseHardFpABI()
 {
 #if defined(JS_CODEGEN_ARM_HARDFP)
     return true;
 #else
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -17,18 +17,18 @@
 #include "jit/arm/MacroAssembler-arm.h"
 #include "jit/JitCompartment.h"
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::CountLeadingZeroes32;
 
-// Note this is used for inter-AsmJS calls and may pass arguments and results
-// in floating point registers even if the system ABI does not.
+// Note this is used for inter-AsmJS calls and may pass arguments and results in
+// floating point registers even if the system ABI does not.
 ABIArgGenerator::ABIArgGenerator() :
     intRegIndex_(0),
     floatRegIndex_(0),
     stackOffset_(0),
     current_()
 {}
 
 ABIArg
@@ -61,18 +61,18 @@ ABIArgGenerator::next(MIRType type)
         MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
     }
 
     return current_;
 }
 const Register ABIArgGenerator::NonArgReturnVolatileReg0 = r4;
 const Register ABIArgGenerator::NonArgReturnVolatileReg1 = r5;
 
-// Encode a standard register when it is being used as src1, the dest, and
-// an extra register. These should never be called with an InvalidReg.
+// Encode a standard register when it is being used as src1, the dest, and an
+// extra register. These should never be called with an InvalidReg.
 uint32_t
 js::jit::RT(Register r)
 {
     JS_ASSERT((r.code() & ~0xf) == 0);
     return r.code() << 12;
 }
 
 uint32_t
@@ -91,18 +91,18 @@ js::jit::RD(Register r)
 
 uint32_t
 js::jit::RM(Register r)
 {
     JS_ASSERT((r.code() & ~0xf) == 0);
     return r.code() << 8;
 }
 
-// Encode a standard register when it is being used as src1, the dest, and
-// an extra register.  For these, an InvalidReg is used to indicate a optional
+// Encode a standard register when it is being used as src1, the dest, and an
+// extra register. For these, an InvalidReg is used to indicate a optional
 // register that has been omitted.
 uint32_t
 js::jit::maybeRT(Register r)
 {
     if (r == InvalidReg)
         return 0;
 
     JS_ASSERT((r.code() & ~0xf) == 0);
@@ -127,233 +127,233 @@ js::jit::maybeRD(Register r)
 
     JS_ASSERT((r.code() & ~0xf) == 0);
     return r.code() << 12;
 }
 
 Register
 js::jit::toRD(Instruction &i)
 {
-    return Register::FromCode((i.encode()>>12) & 0xf);
+    return Register::FromCode((i.encode() >> 12) & 0xf);
 }
 Register
 js::jit::toR(Instruction &i)
 {
     return Register::FromCode(i.encode() & 0xf);
 }
 
 Register
 js::jit::toRM(Instruction &i)
 {
-    return Register::FromCode((i.encode()>>8) & 0xf);
+    return Register::FromCode((i.encode() >> 8) & 0xf);
 }
 
 Register
 js::jit::toRN(Instruction &i)
 {
-    return Register::FromCode((i.encode()>>16) & 0xf);
+    return Register::FromCode((i.encode() >> 16) & 0xf);
 }
 
 uint32_t
 js::jit::VD(VFPRegister vr)
 {
     if (vr.isMissing())
         return 0;
 
-    //bits 15,14,13,12, 22
+    // Bits 15,14,13,12, 22.
     VFPRegister::VFPRegIndexSplit s = vr.encode();
     return s.bit << 22 | s.block << 12;
 }
 uint32_t
 js::jit::VN(VFPRegister vr)
 {
     if (vr.isMissing())
         return 0;
 
-    // bits 19,18,17,16, 7
+    // Bits 19,18,17,16, 7.
     VFPRegister::VFPRegIndexSplit s = vr.encode();
     return s.bit << 7 | s.block << 16;
 }
 uint32_t
 js::jit::VM(VFPRegister vr)
 {
     if (vr.isMissing())
         return 0;
 
-    // bits 5, 3,2,1,0
+    // Bits 5, 3,2,1,0.
     VFPRegister::VFPRegIndexSplit s = vr.encode();
     return s.bit << 5 | s.block;
 }
 
 VFPRegister::VFPRegIndexSplit
 jit::VFPRegister::encode()
 {
     JS_ASSERT(!_isInvalid);
 
     switch (kind) {
       case Double:
         return VFPRegIndexSplit(code_ & 0xf , code_ >> 4);
       case Single:
         return VFPRegIndexSplit(code_ >> 1, code_ & 1);
       default:
-        // vfp register treated as an integer, NOT a gpr
+        // VFP register treated as an integer, NOT a gpr.
         return VFPRegIndexSplit(code_ >> 1, code_ & 1);
     }
 }
 
 bool
-InstDTR::isTHIS(const Instruction &i)
+InstDTR::IsTHIS(const Instruction &i)
 {
     return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
 }
 
 InstDTR *
-InstDTR::asTHIS(const Instruction &i)
+InstDTR::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
+    if (IsTHIS(i))
         return (InstDTR*)&i;
     return nullptr;
 }
 
 bool
-InstLDR::isTHIS(const Instruction &i)
+InstLDR::IsTHIS(const Instruction &i)
 {
     return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
 }
 
 InstLDR *
-InstLDR::asTHIS(const Instruction &i)
+InstLDR::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
+    if (IsTHIS(i))
         return (InstLDR*)&i;
     return nullptr;
 }
 
 InstNOP *
-InstNOP::asTHIS(Instruction &i)
+InstNOP::AsTHIS(Instruction &i)
 {
-    if (isTHIS(i))
-        return (InstNOP*) (&i);
+    if (IsTHIS(i))
+        return (InstNOP*)&i;
     return nullptr;
 }
 
 bool
-InstNOP::isTHIS(const Instruction &i)
+InstNOP::IsTHIS(const Instruction &i)
 {
     return (i.encode() & 0x0fffffff) == NopInst;
 }
 
 bool
-InstBranchReg::isTHIS(const Instruction &i)
+InstBranchReg::IsTHIS(const Instruction &i)
 {
-    return InstBXReg::isTHIS(i) || InstBLXReg::isTHIS(i);
+    return InstBXReg::IsTHIS(i) || InstBLXReg::IsTHIS(i);
 }
 
 InstBranchReg *
-InstBranchReg::asTHIS(const Instruction &i)
+InstBranchReg::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
+    if (IsTHIS(i))
         return (InstBranchReg*)&i;
     return nullptr;
 }
 void
 InstBranchReg::extractDest(Register *dest)
 {
     *dest = toR(*this);
 }
 bool
 InstBranchReg::checkDest(Register dest)
 {
     return dest == toR(*this);
 }
 
 bool
-InstBranchImm::isTHIS(const Instruction &i)
+InstBranchImm::IsTHIS(const Instruction &i)
 {
-    return InstBImm::isTHIS(i) || InstBLImm::isTHIS(i);
+    return InstBImm::IsTHIS(i) || InstBLImm::IsTHIS(i);
 }
 
 InstBranchImm *
-InstBranchImm::asTHIS(const Instruction &i)
+InstBranchImm::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
+    if (IsTHIS(i))
         return (InstBranchImm*)&i;
     return nullptr;
 }
 
 void
 InstBranchImm::extractImm(BOffImm *dest)
 {
     *dest = BOffImm(*this);
 }
 
 bool
-InstBXReg::isTHIS(const Instruction &i)
+InstBXReg::IsTHIS(const Instruction &i)
 {
     return (i.encode() & IsBRegMask) == IsBX;
 }
 
 InstBXReg *
-InstBXReg::asTHIS(const Instruction &i)
+InstBXReg::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
+    if (IsTHIS(i))
         return (InstBXReg*)&i;
     return nullptr;
 }
 
 bool
-InstBLXReg::isTHIS(const Instruction &i)
+InstBLXReg::IsTHIS(const Instruction &i)
 {
     return (i.encode() & IsBRegMask) == IsBLX;
 
 }
 InstBLXReg *
-InstBLXReg::asTHIS(const Instruction &i)
+InstBLXReg::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
+    if (IsTHIS(i))
         return (InstBLXReg*)&i;
     return nullptr;
 }
 
 bool
-InstBImm::isTHIS(const Instruction &i)
+InstBImm::IsTHIS(const Instruction &i)
 {
     return (i.encode () & IsBImmMask) == IsB;
 }
 InstBImm *
-InstBImm::asTHIS(const Instruction &i)
+InstBImm::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
+    if (IsTHIS(i))
         return (InstBImm*)&i;
     return nullptr;
 }
 
 bool
-InstBLImm::isTHIS(const Instruction &i)
+InstBLImm::IsTHIS(const Instruction &i)
 {
     return (i.encode () & IsBImmMask) == IsBL;
 
 }
 InstBLImm *
-InstBLImm::asTHIS(Instruction &i)
+InstBLImm::AsTHIS(Instruction &i)
 {
-    if (isTHIS(i))
+    if (IsTHIS(i))
         return (InstBLImm*)&i;
     return nullptr;
 }
 
 bool
-InstMovWT::isTHIS(Instruction &i)
+InstMovWT::IsTHIS(Instruction &i)
 {
-    return  InstMovW::isTHIS(i) || InstMovT::isTHIS(i);
+    return  InstMovW::IsTHIS(i) || InstMovT::IsTHIS(i);
 }
 InstMovWT *
-InstMovWT::asTHIS(Instruction &i)
+InstMovWT::AsTHIS(Instruction &i)
 {
-    if (isTHIS(i))
+    if (IsTHIS(i))
         return (InstMovWT*)&i;
     return nullptr;
 }
 
 void
 InstMovWT::extractImm(Imm16 *imm)
 {
     *imm = Imm16(*this);
@@ -371,51 +371,51 @@ InstMovWT::extractDest(Register *dest)
 }
 bool
 InstMovWT::checkDest(Register dest)
 {
     return dest == toRD(*this);
 }
 
 bool
-InstMovW::isTHIS(const Instruction &i)
+InstMovW::IsTHIS(const Instruction &i)
 {
     return (i.encode() & IsWTMask) == IsW;
 }
 
 InstMovW *
-InstMovW::asTHIS(const Instruction &i)
+InstMovW::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
-        return (InstMovW*) (&i);
+    if (IsTHIS(i))
+        return (InstMovW*)&i;
     return nullptr;
 }
 InstMovT *
-InstMovT::asTHIS(const Instruction &i)
+InstMovT::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
-        return (InstMovT*) (&i);
+    if (IsTHIS(i))
+        return (InstMovT*)&i;
     return nullptr;
 }
 
 bool
-InstMovT::isTHIS(const Instruction &i)
+InstMovT::IsTHIS(const Instruction &i)
 {
     return (i.encode() & IsWTMask) == IsT;
 }
 
 InstALU *
-InstALU::asTHIS(const Instruction &i)
+InstALU::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
-        return (InstALU*) (&i);
+    if (IsTHIS(i))
+        return (InstALU*)&i;
     return nullptr;
 }
 bool
-InstALU::isTHIS(const Instruction &i)
+InstALU::IsTHIS(const Instruction &i)
 {
     return (i.encode() & ALUMask) == 0;
 }
 void
 InstALU::extractOp(ALUOp *ret)
 {
     *ret = ALUOp(encode() & (0xf << 21));
 }
@@ -448,41 +448,41 @@ InstALU::checkOp1(Register rn)
 }
 Operand2
 InstALU::extractOp2()
 {
     return Operand2(encode());
 }
 
 InstCMP *
-InstCMP::asTHIS(const Instruction &i)
+InstCMP::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
-        return (InstCMP*) (&i);
+    if (IsTHIS(i))
+        return (InstCMP*)&i;
     return nullptr;
 }
 
 bool
-InstCMP::isTHIS(const Instruction &i)
+InstCMP::IsTHIS(const Instruction &i)
 {
-    return InstALU::isTHIS(i) && InstALU::asTHIS(i)->checkDest(r0) && InstALU::asTHIS(i)->checkOp(op_cmp);
+    return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkDest(r0) && InstALU::AsTHIS(i)->checkOp(OpCmp);
 }
 
 InstMOV *
-InstMOV::asTHIS(const Instruction &i)
+InstMOV::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
-        return (InstMOV*) (&i);
+    if (IsTHIS(i))
+        return (InstMOV*)&i;
     return nullptr;
 }
 
 bool
-InstMOV::isTHIS(const Instruction &i)
+InstMOV::IsTHIS(const Instruction &i)
 {
-    return InstALU::isTHIS(i) && InstALU::asTHIS(i)->checkOp1(r0) && InstALU::asTHIS(i)->checkOp(op_mov);
+    return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkOp1(r0) && InstALU::AsTHIS(i)->checkOp(OpMov);
 }
 
 Op2Reg
 Operand2::toOp2Reg() {
     return *(Op2Reg*)this;
 }
 O2RegImmShift
 Op2Reg::toO2RegImmShift() {
@@ -509,32 +509,34 @@ Imm16::Imm16(uint32_t imm)
 
 Imm16::Imm16()
   : invalid(0xfff)
 { }
 
 void
 jit::PatchJump(CodeLocationJump &jump_, CodeLocationLabel label)
 {
-    // We need to determine if this jump can fit into the standard 24+2 bit address
-    // or if we need a larger branch (or just need to use our pool entry)
+    // We need to determine if this jump can fit into the standard 24+2 bit
+    // address or if we need a larger branch (or just need to use our pool
+    // entry).
     Instruction *jump = (Instruction*)jump_.raw();
     // jumpWithPatch() returns the offset of the jump and never a pool or nop.
     Assembler::Condition c;
     jump->extractCond(&c);
     JS_ASSERT(jump->is<InstBranchImm>() || jump->is<InstLDR>());
 
     int jumpOffset = label.raw() - jump_.raw();
-    if (BOffImm::isInRange(jumpOffset)) {
-        // This instruction started off as a branch, and will remain one
-        Assembler::retargetNearBranch(jump, jumpOffset, c);
+    if (BOffImm::IsInRange(jumpOffset)) {
+        // This instruction started off as a branch, and will remain one.
+        Assembler::RetargetNearBranch(jump, jumpOffset, c);
     } else {
-        // This instruction started off as a branch, but now needs to be demoted to an ldr.
+        // This instruction started off as a branch, but now needs to be demoted
+        // to an ldr.
         uint8_t **slot = reinterpret_cast<uint8_t**>(jump_.jumpTableEntry());
-        Assembler::retargetFarBranch(jump, slot, label.raw(), c);
+        Assembler::RetargetFarBranch(jump, slot, label.raw(), c);
     }
 }
 
 void
 Assembler::finish()
 {
     flush();
     JS_ASSERT(!isFinished);
@@ -596,17 +598,17 @@ BufferOffset
 Assembler::actualOffset(BufferOffset off_) const
 {
     return BufferOffset(off_.getOffset() + m_buffer.poolSizeBefore(off_.getOffset()));
 }
 
 class RelocationIterator
 {
     CompactBufferReader reader_;
-    // offset in bytes
+    // Offset in bytes.
     uint32_t offset_;
 
   public:
     RelocationIterator(CompactBufferReader &reader)
       : reader_(reader)
     { }
 
     bool read() {
@@ -618,43 +620,43 @@ class RelocationIterator
 
     uint32_t offset() const {
         return offset_;
     }
 };
 
 template<class Iter>
 const uint32_t *
-Assembler::getCF32Target(Iter *iter)
+Assembler::GetCF32Target(Iter *iter)
 {
     Instruction *inst1 = iter->cur();
     Instruction *inst2 = iter->next();
     Instruction *inst3 = iter->next();
     Instruction *inst4 = iter->next();
 
     if (inst1->is<InstBranchImm>()) {
-        // see if we have a simple case, b #offset
+        // See if we have a simple case, b #offset.
         BOffImm imm;
         InstBranchImm *jumpB = inst1->as<InstBranchImm>();
         jumpB->extractImm(&imm);
         return imm.getDest(inst1)->raw();
     }
 
     if (inst1->is<InstMovW>() && inst2->is<InstMovT>() &&
         (inst3->is<InstNOP>() || inst3->is<InstBranchReg>() || inst4->is<InstBranchReg>()))
     {
-        // see if we have the complex case,
-        // movw r_temp, #imm1
-        // movt r_temp, #imm2
-        // bx r_temp
+        // See if we have the complex case:
+        //  movw r_temp, #imm1
+        //  movt r_temp, #imm2
+        //  bx r_temp
         // OR
-        // movw r_temp, #imm1
-        // movt r_temp, #imm2
-        // str pc, [sp]
-        // bx r_temp
+        //  movw r_temp, #imm1
+        //  movt r_temp, #imm2
+        //  str pc, [sp]
+        //  bx r_temp
 
         Imm16 targ_bot;
         Imm16 targ_top;
         Register temp;
 
         // Extract both the temp register and the bottom immediate.
         InstMovW *bottom = inst1->as<InstMovW>();
         bottom->extractImm(&targ_bot);
@@ -664,66 +666,66 @@ Assembler::getCF32Target(Iter *iter)
         InstMovT *top = inst2->as<InstMovT>();
         top->extractImm(&targ_top);
 
         // Make sure they are being loaded into the same register.
         JS_ASSERT(top->checkDest(temp));
 
         // Make sure we're branching to the same register.
 #ifdef DEBUG
-        // A toggled call sometimes has a NOP instead of a branch for the third instruction.
-        // No way to assert that it's valid in that situation.
+        // A toggled call sometimes has a NOP instead of a branch for the third
+        // instruction. No way to assert that it's valid in that situation.
         if (!inst3->is<InstNOP>()) {
             InstBranchReg *realBranch = inst3->is<InstBranchReg>() ? inst3->as<InstBranchReg>()
                                                                    : inst4->as<InstBranchReg>();
             JS_ASSERT(realBranch->checkDest(temp));
         }
 #endif
 
         uint32_t *dest = (uint32_t*) (targ_bot.decode() | (targ_top.decode() << 16));
         return dest;
     }
 
     if (inst1->is<InstLDR>()) {
         InstLDR *load = inst1->as<InstLDR>();
         uint32_t inst = load->encode();
-        // get the address of the instruction as a raw pointer
+        // Get the address of the instruction as a raw pointer.
         char *dataInst = reinterpret_cast<char*>(load);
         IsUp_ iu = IsUp_(inst & IsUp);
         int32_t offset = inst & 0xfff;
         if (iu != IsUp) {
             offset = - offset;
         }
         uint32_t **ptr = (uint32_t **)&dataInst[offset + 8];
         return *ptr;
 
     }
 
     MOZ_ASSUME_UNREACHABLE("unsupported branch relocation");
 }
 
 uintptr_t
-Assembler::getPointer(uint8_t *instPtr)
+Assembler::GetPointer(uint8_t *instPtr)
 {
     InstructionIterator iter((Instruction*)instPtr);
-    uintptr_t ret = (uintptr_t)getPtr32Target(&iter, nullptr, nullptr);
+    uintptr_t ret = (uintptr_t)GetPtr32Target(&iter, nullptr, nullptr);
     return ret;
 }
 
 template<class Iter>
 const uint32_t *
-Assembler::getPtr32Target(Iter *start, Register *dest, RelocStyle *style)
+Assembler::GetPtr32Target(Iter *start, Register *dest, RelocStyle *style)
 {
     Instruction *load1 = start->cur();
     Instruction *load2 = start->next();
 
     if (load1->is<InstMovW>() && load2->is<InstMovT>()) {
-        // see if we have the complex case,
-        // movw r_temp, #imm1
-        // movt r_temp, #imm2
+        // See if we have the complex case:
+        //  movw r_temp, #imm1
+        //  movt r_temp, #imm2
 
         Imm16 targ_bot;
         Imm16 targ_top;
         Register temp;
 
         // Extract both the temp register and the bottom immediate.
         InstMovW *bottom = load1->as<InstMovW>();
         bottom->extractImm(&targ_bot);
@@ -742,36 +744,37 @@ Assembler::getPtr32Target(Iter *start, R
             *style = L_MOVWT;
 
         uint32_t *value = (uint32_t*) (targ_bot.decode() | (targ_top.decode() << 16));
         return value;
     }
     if (load1->is<InstLDR>()) {
         InstLDR *load = load1->as<InstLDR>();
         uint32_t inst = load->encode();
-        // get the address of the instruction as a raw pointer
+        // Get the address of the instruction as a raw pointer.
         char *dataInst = reinterpret_cast<char*>(load);
         IsUp_ iu = IsUp_(inst & IsUp);
         int32_t offset = inst & 0xfff;
         if (iu == IsDown)
             offset = - offset;
         if (dest)
             *dest = toRD(*load);
         if (style)
             *style = L_LDR;
         uint32_t **ptr = (uint32_t **)&dataInst[offset + 8];
         return *ptr;
     }
+
     MOZ_ASSUME_UNREACHABLE("unsupported relocation");
 }
 
 static JitCode *
 CodeFromJump(InstructionIterator *jump)
 {
-    uint8_t *target = (uint8_t *)Assembler::getCF32Target(jump);
+    uint8_t *target = (uint8_t *)Assembler::GetCF32Target(jump);
     return JitCode::FromExecutable(target);
 }
 
 void
 Assembler::TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader)
 {
     RelocationIterator iter(reader);
     while (iter.read()) {
@@ -782,30 +785,30 @@ Assembler::TraceJumpRelocations(JSTracer
 }
 
 static void
 TraceDataRelocations(JSTracer *trc, uint8_t *buffer, CompactBufferReader &reader)
 {
     while (reader.more()) {
         size_t offset = reader.readUnsigned();
         InstructionIterator iter((Instruction*)(buffer + offset));
-        void *ptr = const_cast<uint32_t *>(Assembler::getPtr32Target(&iter));
+        void *ptr = const_cast<uint32_t *>(Assembler::GetPtr32Target(&iter));
         // No barrier needed since these are constants.
         gc::MarkGCThingUnbarriered(trc, reinterpret_cast<void **>(&ptr), "ion-masm-ptr");
     }
 
 }
 static void
 TraceDataRelocations(JSTracer *trc, ARMBuffer *buffer,
                      Vector<BufferOffset, 0, SystemAllocPolicy> *locs)
 {
     for (unsigned int idx = 0; idx < locs->length(); idx++) {
         BufferOffset bo = (*locs)[idx];
         ARMBuffer::AssemblerBufferInstIterator iter(bo, buffer);
-        void *ptr = const_cast<uint32_t *>(Assembler::getPtr32Target(&iter));
+        void *ptr = const_cast<uint32_t *>(Assembler::GetPtr32Target(&iter));
 
         // No barrier needed since these are constants.
         gc::MarkGCThingUnbarriered(trc, reinterpret_cast<void **>(&ptr), "ion-masm-ptr");
     }
 
 }
 void
 Assembler::TraceDataRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader)
@@ -859,18 +862,18 @@ Assembler::processCodeLabels(uint8_t *ra
     }
 }
 
 void
 Assembler::writeCodePointer(AbsoluteLabel *absoluteLabel) {
     JS_ASSERT(!absoluteLabel->bound());
     BufferOffset off = writeInst(LabelBase::INVALID_OFFSET);
 
-    // x86/x64 makes general use of AbsoluteLabel and weaves a linked list of
-    // uses of an AbsoluteLabel through the assembly. ARM only uses labels
+    // The x86/x64 makes general use of AbsoluteLabel and weaves a linked list
+    // of uses of an AbsoluteLabel through the assembly. ARM only uses labels
     // for the case statements of switch jump tables. Thus, for simplicity, we
     // simply treat the AbsoluteLabel as a label and bind it to the offset of
     // the jump table entry that needs to be patched.
     LabelBase *label = absoluteLabel;
     label->bind(off.getOffset());
 }
 
 void
@@ -884,214 +887,213 @@ Assembler::Bind(uint8_t *rawCode, Absolu
 Assembler::Condition
 Assembler::InvertCondition(Condition cond)
 {
     const uint32_t ConditionInversionBit = 0x10000000;
     return Condition(ConditionInversionBit ^ cond);
 }
 
 Imm8::TwoImm8mData
-Imm8::encodeTwoImms(uint32_t imm)
+Imm8::EncodeTwoImms(uint32_t imm)
 {
-    // In the ideal case, we are looking for a number that (in binary) looks like:
-    // 0b((00)*)n_1((00)*)n_2((00)*)
-    //    left  n1   mid  n2
-    // where both n_1 and n_2 fit into 8 bits.
-    // since this is being done with rotates, we also need to handle the case
+    // In the ideal case, we are looking for a number that (in binary) looks
+    // like:
+    //   0b((00)*)n_1((00)*)n_2((00)*)
+    //      left  n1   mid  n2
+    //   where both n_1 and n_2 fit into 8 bits.
+    // Since this is being done with rotates, we also need to handle the case
     // that one of these numbers is in fact split between the left and right
     // sides, in which case the constant will look like:
-    // 0bn_1a((00)*)n_2((00)*)n_1b
-    //   n1a  mid  n2   rgh    n1b
-    // also remember, values are rotated by multiples of two, and left,
-    // mid or right can have length zero
+    //   0bn_1a((00)*)n_2((00)*)n_1b
+    //     n1a  mid  n2   rgh    n1b
+    // Also remember, values are rotated by multiples of two, and left, mid or
+    // right can have length zero.
     uint32_t imm1, imm2;
     int left = CountLeadingZeroes32(imm) & 0x1E;
     uint32_t no_n1 = imm & ~(0xff << (24 - left));
 
-    // not technically needed: this case only happens if we can encode
-    // as a single imm8m.  There is a perfectly reasonable encoding in this
-    // case, but we shouldn't encourage people to do things like this.
+    // Not technically needed: this case only happens if we can encode as a
+    // single imm8m. There is a perfectly reasonable encoding in this case, but
+    // we shouldn't encourage people to do things like this.
     if (no_n1 == 0)
         return TwoImm8mData();
 
     int mid = CountLeadingZeroes32(no_n1) & 0x1E;
     uint32_t no_n2 = no_n1 & ~((0xff << ((24 - mid) & 0x1f)) | 0xff >> ((8 + mid) & 0x1f));
 
     if (no_n2 == 0) {
-        // we hit the easy case, no wraparound.
-        // note: a single constant *may* look like this.
+        // We hit the easy case, no wraparound.
+        // Note: a single constant *may* look like this.
         int imm1shift = left + 8;
         int imm2shift = mid + 8;
         imm1 = (imm >> (32 - imm1shift)) & 0xff;
         if (imm2shift >= 32) {
             imm2shift = 0;
-            // this assert does not always hold
-            //assert((imm & 0xff) == no_n1);
-            // in fact, this would lead to some incredibly subtle bugs.
+            // This assert does not always hold, in fact, this would lead to
+            // some incredibly subtle bugs.
+            // assert((imm & 0xff) == no_n1);
             imm2 = no_n1;
         } else {
             imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
             JS_ASSERT( ((no_n1 >> (32 - imm2shift)) | (no_n1 << imm2shift)) ==
                        imm2);
         }
         JS_ASSERT((imm1shift & 0x1) == 0);
         JS_ASSERT((imm2shift & 0x1) == 0);
         return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
                             datastore::Imm8mData(imm2, imm2shift >> 1));
     }
 
-    // either it wraps, or it does not fit.
-    // if we initially chopped off more than 8 bits, then it won't fit.
+    // Either it wraps, or it does not fit. If we initially chopped off more
+    // than 8 bits, then it won't fit.
     if (left >= 8)
         return TwoImm8mData();
 
     int right = 32 - (CountLeadingZeroes32(no_n2) & 30);
-    // all remaining set bits *must* fit into the lower 8 bits
-    // the right == 8 case should be handled by the previous case.
+    // All remaining set bits *must* fit into the lower 8 bits.
+    // The right == 8 case should be handled by the previous case.
     if (right > 8)
         return TwoImm8mData();
 
-    // make sure the initial bits that we removed for no_n1
-    // fit into the 8-(32-right) leftmost bits
-    if (((imm & (0xff << (24 - left))) << (8-right)) != 0) {
+    // Make sure the initial bits that we removed for no_n1 fit into the
+    // 8-(32-right) leftmost bits.
+    if (((imm & (0xff << (24 - left))) << (8 - right)) != 0) {
         // BUT we may have removed more bits than we needed to for no_n1
-        // 0x04104001 e.g. we can encode 0x104 with a single op, then
-        // 0x04000001 with a second, but we try to encode 0x0410000
-        // and find that we need a second op for 0x4000, and 0x1 cannot
-        // be included in the encoding of 0x04100000
-        no_n1 = imm & ~((0xff >> (8-right)) | (0xff << (24 + right)));
+        // 0x04104001 e.g. we can encode 0x104 with a single op, then 0x04000001
+        // with a second, but we try to encode 0x0410000 and find that we need a
+        // second op for 0x4000, and 0x1 cannot be included in the encoding of
+        // 0x04100000.
+        no_n1 = imm & ~((0xff >> (8 - right)) | (0xff << (24 + right)));
         mid = CountLeadingZeroes32(no_n1) & 30;
-        no_n2 =
-            no_n1  & ~((0xff << ((24 - mid)&31)) | 0xff >> ((8 + mid)&31));
+        no_n2 = no_n1  & ~((0xff << ((24 - mid)&31)) | 0xff >> ((8 + mid)&31));
         if (no_n2 != 0)
             return TwoImm8mData();
     }
 
-    // now assemble all of this information into a two coherent constants
-    // it is a rotate right from the lower 8 bits.
+    // Now assemble all of this information into two coherent constants. It is
+    // a rotate right from the lower 8 bits.
     int imm1shift = 8 - right;
     imm1 = 0xff & ((imm << imm1shift) | (imm >> (32 - imm1shift)));
-    JS_ASSERT ((imm1shift&~0x1e) == 0);
+    JS_ASSERT ((imm1shift & ~0x1e) == 0);
     // left + 8 + mid is the position of the leftmost bit of n_2.
-    // we needed to rotate 0x000000ab right by 8 in order to get
-    // 0xab000000, then shift again by the leftmost bit in order to
-    // get the constant that we care about.
+    // We needed to rotate 0x000000ab right by 8 in order to get 0xab000000,
+    // then shift again by the leftmost bit in order to get the constant that we
+    // care about.
     int imm2shift =  mid + 8;
     imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
     JS_ASSERT((imm1shift & 0x1) == 0);
     JS_ASSERT((imm2shift & 0x1) == 0);
     return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
                         datastore::Imm8mData(imm2, imm2shift >> 1));
 }
 
 ALUOp
 jit::ALUNeg(ALUOp op, Register dest, Imm32 *imm, Register *negDest)
 {
-    // find an alternate ALUOp to get the job done, and use a different imm.
+    // Find an alternate ALUOp to get the job done, and use a different imm.
     *negDest = dest;
     switch (op) {
-      case op_mov:
+      case OpMov:
         *imm = Imm32(~imm->value);
-        return op_mvn;
-      case op_mvn:
+        return OpMvn;
+      case OpMvn:
         *imm = Imm32(~imm->value);
-        return op_mov;
-      case op_and:
+        return OpMov;
+      case OpAnd:
         *imm = Imm32(~imm->value);
-        return op_bic;
-      case op_bic:
+        return OpBic;
+      case OpBic:
         *imm = Imm32(~imm->value);
-        return op_and;
-      case op_add:
+        return OpAnd;
+      case OpAdd:
         *imm = Imm32(-imm->value);
-        return op_sub;
-      case op_sub:
+        return OpSub;
+      case OpSub:
         *imm = Imm32(-imm->value);
-        return op_add;
-      case op_cmp:
+        return OpAdd;
+      case OpCmp:
         *imm = Imm32(-imm->value);
-        return op_cmn;
-      case op_cmn:
+        return OpCmn;
+      case OpCmn:
         *imm = Imm32(-imm->value);
-        return op_cmp;
-      case op_tst:
+        return OpCmp;
+      case OpTst:
         JS_ASSERT(dest == InvalidReg);
         *imm = Imm32(~imm->value);
         *negDest = ScratchRegister;
-        return op_bic;
+        return OpBic;
         // orr has orn on thumb2 only.
       default:
-        return op_invalid;
+        return OpInvalid;
     }
 }
 
 bool
 jit::can_dbl(ALUOp op)
 {
-    // some instructions can't be processed as two separate instructions
-    // such as and, and possibly add (when we're setting ccodes).
-    // there is also some hilarity with *reading* condition codes.
-    // for example, adc dest, src1, 0xfff; (add with carry) can be split up
-    // into adc dest, src1, 0xf00; add dest, dest, 0xff, since "reading" the
-    // condition code increments the result by one conditionally, that only needs
-    // to be done on one of the two instructions.
+    // Some instructions can't be processed as two separate instructions such as
+    // and, and possibly add (when we're setting ccodes). There is also some
+    // hilarity with *reading* condition codes. For example, adc dest, src1,
+    // 0xfff; (add with carry) can be split up into adc dest, src1, 0xf00; add
+    // dest, dest, 0xff, since "reading" the condition code increments the
+    // result by one conditionally, that only needs to be done on one of the two
+    // instructions.
     switch (op) {
-      case op_bic:
-      case op_add:
-      case op_sub:
-      case op_eor:
-      case op_orr:
+      case OpBic:
+      case OpAdd:
+      case OpSub:
+      case OpEor:
+      case OpOrr:
         return true;
       default:
         return false;
     }
 }
 
 bool
 jit::condsAreSafe(ALUOp op) {
-    // Even when we are setting condition codes, sometimes we can
-    // get away with splitting an operation into two.
-    // for example, if our immediate is 0x00ff00ff, and the operation is eors
-    // we can split this in half, since x ^ 0x00ff0000 ^ 0x000000ff should
-    // set all of its condition codes exactly the same as x ^ 0x00ff00ff.
-    // However, if the operation were adds,
-    // we cannot split this in half.  If the source on the add is
-    // 0xfff00ff0, the result sholud be 0xef10ef, but do we set the overflow bit
-    // or not?  Depending on which half is performed first (0x00ff0000
-    // or 0x000000ff) the V bit will be set differently, and *not* updating
-    // the V bit would be wrong.  Theoretically, the following should work
-    // adds r0, r1, 0x00ff0000;
-    // addsvs r0, r1, 0x000000ff;
-    // addvc r0, r1, 0x000000ff;
-    // but this is 3 instructions, and at that point, we might as well use
+    // Even when we are setting condition codes, sometimes we can get away with
+    // splitting an operation into two. For example, if our immediate is
+    // 0x00ff00ff, and the operation is eors we can split this in half, since x
+    // ^ 0x00ff0000 ^ 0x000000ff should set all of its condition codes exactly
+    // the same as x ^ 0x00ff00ff. However, if the operation were adds, we
+    // cannot split this in half. If the source on the add is 0xfff00ff0, the
+    // result should be 0xef10ef, but do we set the overflow bit or not?
+    // Depending on which half is performed first (0x00ff0000 or 0x000000ff) the
+    // V bit will be set differently, and *not* updating the V bit would be
+    // wrong. Theoretically, the following should work:
+    //  adds r0, r1, 0x00ff0000;
+    //  addsvs r0, r1, 0x000000ff;
+    //  addvc r0, r1, 0x000000ff;
+    // But this is 3 instructions, and at that point, we might as well use
     // something else.
     switch(op) {
-      case op_bic:
-      case op_orr:
-      case op_eor:
+      case OpBic:
+      case OpOrr:
+      case OpEor:
         return true;
       default:
         return false;
     }
 }
 
 ALUOp
 jit::getDestVariant(ALUOp op)
 {
-    // all of the compare operations are dest-less variants of a standard
-    // operation.  Given the dest-less variant, return the dest-ful variant.
+    // All of the compare operations are dest-less variants of a standard
+    // operation. Given the dest-less variant, return the dest-ful variant.
     switch (op) {
-      case op_cmp:
-        return op_sub;
-      case op_cmn:
-        return op_add;
-      case op_tst:
-        return op_and;
-      case op_teq:
-        return op_eor;
+      case OpCmp:
+        return OpSub;
+      case OpCmn:
+        return OpAdd;
+      case OpTst:
+        return OpAnd;
+      case OpTeq:
+        return OpEor;
       default:
         return op;
     }
 }
 
 O2RegImmShift
 jit::O2Reg(Register r) {
     return O2RegImmShift(r, LSL, 0);
@@ -1153,17 +1155,17 @@ jit::ror(Register r, Register amt)
 O2RegRegShift
 jit::asr (Register r, Register amt)
 {
     return O2RegRegShift(r, ASR, amt);
 }
 
 static js::jit::DoubleEncoder doubleEncoder;
 
-/* static */ const js::jit::VFPImm js::jit::VFPImm::one(0x3FF00000);
+/* static */ const js::jit::VFPImm js::jit::VFPImm::One(0x3FF00000);
 
 js::jit::VFPImm::VFPImm(uint32_t top)
 {
     data = -1;
     datastore::Imm8VFPImmData tmp;
     if (doubleEncoder.lookup(top, &tmp))
         data = tmp.encode();
 }
@@ -1171,64 +1173,64 @@ js::jit::VFPImm::VFPImm(uint32_t top)
 BOffImm::BOffImm(Instruction &inst)
   : data(inst.encode() & 0x00ffffff)
 {
 }
 
 Instruction *
 BOffImm::getDest(Instruction *src)
 {
-    // TODO: It is probably worthwhile to verify that src is actually a branch
+    // TODO: It is probably worthwhile to verify that src is actually a branch.
     // NOTE: This does not explicitly shift the offset of the destination left by 2,
     // since it is indexing into an array of instruction sized objects.
-    return &src[(((int32_t)data<<8)>>8) + 2];
+    return &src[(((int32_t)data << 8) >> 8) + 2];
 }
 
-//VFPRegister implementation
+// VFPRegister implementation
 VFPRegister
 VFPRegister::doubleOverlay(unsigned int which) const
 {
     JS_ASSERT(!_isInvalid);
     if (kind != Double)
         return VFPRegister(code_ >> 1, Double);
     return *this;
 }
 VFPRegister
 VFPRegister::singleOverlay(unsigned int which) const
 {
     JS_ASSERT(!_isInvalid);
     if (kind == Double) {
-        // There are no corresponding float registers for d16-d31
+        // There are no corresponding float registers for d16-d31.
         JS_ASSERT(code_ < 16);
         JS_ASSERT(which < 2);
         return VFPRegister((code_ << 1) + which, Single);
     }
     JS_ASSERT(which == 0);
     return VFPRegister(code_, Single);
 }
 
 VFPRegister
 VFPRegister::sintOverlay(unsigned int which) const
 {
     JS_ASSERT(!_isInvalid);
     if (kind == Double) {
-        // There are no corresponding float registers for d16-d31
+        // There are no corresponding float registers for d16-d31.
         JS_ASSERT(code_ < 16);
         JS_ASSERT(which < 2);
         return VFPRegister((code_ << 1) + which, Int);
     }
     JS_ASSERT(which == 0);
     return VFPRegister(code_, Int);
 }
 VFPRegister
 VFPRegister::uintOverlay(unsigned int which) const
 {
     JS_ASSERT(!_isInvalid);
     if (kind == Double) {
-        // There are no corresponding float registers for d16-d31
+        // There are no corresponding float registers for d16-d31.
         JS_ASSERT(code_ < 16);
         JS_ASSERT(which < 2);
         return VFPRegister((code_ << 1) + which, UInt);
     }
     JS_ASSERT(which == 0);
     return VFPRegister(code_, UInt);
 }
 
@@ -1257,19 +1259,19 @@ Assembler::oom() const
 }
 
 bool
 Assembler::addCodeLabel(CodeLabel label)
 {
     return codeLabels_.append(label);
 }
 
-// Size of the instruction stream, in bytes.  Including pools. This function expects
-// all pools that need to be placed have been placed.  If they haven't then we
-// need to go an flush the pools :(
+// Size of the instruction stream, in bytes. Including pools. This function
+// expects all pools that need to be placed have been placed. If they haven't
+// then we need to go and flush the pools :(
 size_t
 Assembler::size() const
 {
     return m_buffer.size();
 }
 // Size of the relocation table, in bytes.
 size_t
 Assembler::jumpRelocationTableBytes() const
@@ -1293,33 +1295,33 @@ size_t
 Assembler::bytesNeeded() const
 {
     return size() +
         jumpRelocationTableBytes() +
         dataRelocationTableBytes() +
         preBarrierTableBytes();
 }
 
-// write a blob of binary into the instruction stream
+// Write a blob of binary into the instruction stream.
 BufferOffset
 Assembler::writeInst(uint32_t x, uint32_t *dest)
 {
     if (dest == nullptr)
         return m_buffer.putInt(x);
 
-    writeInstStatic(x, dest);
+    WriteInstStatic(x, dest);
     return BufferOffset();
 }
 BufferOffset
 Assembler::writeBranchInst(uint32_t x)
 {
     return m_buffer.putInt(x, /* markAsBranch = */ true);
 }
 void
-Assembler::writeInstStatic(uint32_t x, uint32_t *dest)
+Assembler::WriteInstStatic(uint32_t x, uint32_t *dest)
 {
     JS_ASSERT(dest != nullptr);
     *dest = x;
 }
 
 BufferOffset
 Assembler::align(int alignment)
 {
@@ -1337,16 +1339,17 @@ Assembler::align(int alignment)
             BufferOffset tmp = as_nop();
             if (!ret.assigned())
                 ret = tmp;
         }
     }
     return ret;
 
 }
+
 BufferOffset
 Assembler::as_nop()
 {
     return writeInst(0xe320f000);
 }
 BufferOffset
 Assembler::as_alu(Register dest, Register src1, Operand2 op2,
                   ALUOp op, SetCond_ sc, Condition c, Instruction *instdest)
@@ -1354,104 +1357,104 @@ Assembler::as_alu(Register dest, Registe
     return writeInst((int)op | (int)sc | (int) c | op2.encode() |
                      ((dest == InvalidReg) ? 0 : RD(dest)) |
                      ((src1 == InvalidReg) ? 0 : RN(src1)), (uint32_t*)instdest);
 }
 
 BufferOffset
 Assembler::as_mov(Register dest, Operand2 op2, SetCond_ sc, Condition c, Instruction *instdest)
 {
-    return as_alu(dest, InvalidReg, op2, op_mov, sc, c, instdest);
+    return as_alu(dest, InvalidReg, op2, OpMov, sc, c, instdest);
 }
 
 BufferOffset
 Assembler::as_mvn(Register dest, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, InvalidReg, op2, op_mvn, sc, c);
+    return as_alu(dest, InvalidReg, op2, OpMvn, sc, c);
 }
 
 // Logical operations.
 BufferOffset
 Assembler::as_and(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_and, sc, c);
+    return as_alu(dest, src1, op2, OpAnd, sc, c);
 }
 BufferOffset
 Assembler::as_bic(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_bic, sc, c);
+    return as_alu(dest, src1, op2, OpBic, sc, c);
 }
 BufferOffset
 Assembler::as_eor(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_eor, sc, c);
+    return as_alu(dest, src1, op2, OpEor, sc, c);
 }
 BufferOffset
 Assembler::as_orr(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_orr, sc, c);
+    return as_alu(dest, src1, op2, OpOrr, sc, c);
 }
 
 // Mathematical operations.
 BufferOffset
 Assembler::as_adc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_adc, sc, c);
+    return as_alu(dest, src1, op2, OpAdc, sc, c);
 }
 BufferOffset
 Assembler::as_add(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_add, sc, c);
+    return as_alu(dest, src1, op2, OpAdd, sc, c);
 }
 BufferOffset
 Assembler::as_sbc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_sbc, sc, c);
+    return as_alu(dest, src1, op2, OpSbc, sc, c);
 }
 BufferOffset
 Assembler::as_sub(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_sub, sc, c);
+    return as_alu(dest, src1, op2, OpSub, sc, c);
 }
 BufferOffset
 Assembler::as_rsb(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_rsb, sc, c);
+    return as_alu(dest, src1, op2, OpRsb, sc, c);
 }
 BufferOffset
 Assembler::as_rsc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_rsc, sc, c);
+    return as_alu(dest, src1, op2, OpRsc, sc, c);
 }
 
 // Test operations.
 BufferOffset
 Assembler::as_cmn(Register src1, Operand2 op2, Condition c)
 {
-    return as_alu(InvalidReg, src1, op2, op_cmn, SetCond, c);
+    return as_alu(InvalidReg, src1, op2, OpCmn, SetCond, c);
 }
 BufferOffset
 Assembler::as_cmp(Register src1, Operand2 op2, Condition c)
 {
-    return as_alu(InvalidReg, src1, op2, op_cmp, SetCond, c);
+    return as_alu(InvalidReg, src1, op2, OpCmp, SetCond, c);
 }
 BufferOffset
 Assembler::as_teq(Register src1, Operand2 op2, Condition c)
 {
-    return as_alu(InvalidReg, src1, op2, op_teq, SetCond, c);
+    return as_alu(InvalidReg, src1, op2, OpTeq, SetCond, c);
 }
 BufferOffset
 Assembler::as_tst(Register src1, Operand2 op2, Condition c)
 {
-    return as_alu(InvalidReg, src1, op2, op_tst, SetCond, c);
+    return as_alu(InvalidReg, src1, op2, OpTst, SetCond, c);
 }
 
-// Not quite ALU worthy, but useful none the less:
-// These also have the isue of these being formatted
-// completly differently from the standard ALU operations.
+// Not quite ALU worthy, but these are useful nonetheless. These also have
+// the issue of being formatted completely differently from the standard ALU
+// operations.
 BufferOffset
 Assembler::as_movw(Register dest, Imm16 imm, Condition c, Instruction *pos)
 {
     JS_ASSERT(HasMOVWT());
     return writeInst(0x03000000 | c | imm.encode() | RD(dest), (uint32_t*)pos);
 }
 BufferOffset
 Assembler::as_movt(Register dest, Imm16 imm, Condition c, Instruction *pos)
@@ -1467,194 +1470,193 @@ Assembler::as_genmul(Register dhi, Regis
                      MULOp op, SetCond_ sc, Condition c)
 {
 
     return writeInst(RN(dhi) | maybeRD(dlo) | RM(rm) | rn.code() | op | sc | c | mull_tag);
 }
 BufferOffset
 Assembler::as_mul(Register dest, Register src1, Register src2, SetCond_ sc, Condition c)
 {
-    return as_genmul(dest, InvalidReg, src1, src2, opm_mul, sc, c);
+    return as_genmul(dest, InvalidReg, src1, src2, OpmMul, sc, c);
 }
 BufferOffset
 Assembler::as_mla(Register dest, Register acc, Register src1, Register src2,
                   SetCond_ sc, Condition c)
 {
-    return as_genmul(dest, acc, src1, src2, opm_mla, sc, c);
+    return as_genmul(dest, acc, src1, src2, OpmMla, sc, c);
 }
 BufferOffset
 Assembler::as_umaal(Register destHI, Register destLO, Register src1, Register src2, Condition c)
 {
-    return as_genmul(destHI, destLO, src1, src2, opm_umaal, NoSetCond, c);
+    return as_genmul(destHI, destLO, src1, src2, OpmUmaal, NoSetCond, c);
 }
 BufferOffset
 Assembler::as_mls(Register dest, Register acc, Register src1, Register src2, Condition c)
 {
-    return as_genmul(dest, acc, src1, src2, opm_mls, NoSetCond, c);
+    return as_genmul(dest, acc, src1, src2, OpmMls, NoSetCond, c);
 }
 
 BufferOffset
 Assembler::as_umull(Register destHI, Register destLO, Register src1, Register src2,
                     SetCond_ sc, Condition c)
 {
-    return as_genmul(destHI, destLO, src1, src2, opm_umull, sc, c);
+    return as_genmul(destHI, destLO, src1, src2, OpmUmull, sc, c);
 }
 
 BufferOffset
 Assembler::as_umlal(Register destHI, Register destLO, Register src1, Register src2,
                     SetCond_ sc, Condition c)
 {
-    return as_genmul(destHI, destLO, src1, src2, opm_umlal, sc, c);
+    return as_genmul(destHI, destLO, src1, src2, OpmUmlal, sc, c);
 }
 
 BufferOffset
 Assembler::as_smull(Register destHI, Register destLO, Register src1, Register src2,
                     SetCond_ sc, Condition c)
 {
-    return as_genmul(destHI, destLO, src1, src2, opm_smull, sc, c);
+    return as_genmul(destHI, destLO, src1, src2, OpmSmull, sc, c);
 }
 
 BufferOffset
 Assembler::as_smlal(Register destHI, Register destLO, Register src1, Register src2,
                     SetCond_ sc, Condition c)
 {
-    return as_genmul(destHI, destLO, src1, src2, opm_smlal, sc, c);
+    return as_genmul(destHI, destLO, src1, src2, OpmSmlal, sc, c);
 }
 
 BufferOffset
 Assembler::as_sdiv(Register rd, Register rn, Register rm, Condition c)
 {
     return writeInst(0x0710f010 | c | RN(rd) | RM(rm) | rn.code());
 }
 
 BufferOffset
 Assembler::as_udiv(Register rd, Register rn, Register rm, Condition c)
 {
     return writeInst(0x0730f010 | c | RN(rd) | RM(rm) | rn.code());
 }
 
-// Data transfer instructions: ldr, str, ldrb, strb.
-// Using an int to differentiate between 8 bits and 32 bits is
-// overkill, but meh
+// Data transfer instructions: ldr, str, ldrb, strb. Using an int to
+// differentiate between 8 bits and 32 bits is overkill, but meh.
 BufferOffset
 Assembler::as_dtr(LoadStore ls, int size, Index mode,
                   Register rt, DTRAddr addr, Condition c, uint32_t *dest)
 {
     JS_ASSERT (mode == Offset ||  (rt != addr.getBase() && pc != addr.getBase()));
     JS_ASSERT(size == 32 || size == 8);
     return writeInst( 0x04000000 | ls | (size == 8 ? 0x00400000 : 0) | mode | c |
                       RT(rt) | addr.encode(), dest);
 
 }
 class PoolHintData {
   public:
     enum LoadType {
-        // set 0 to bogus, since that is the value most likely to be
+        // Make 0 the bogus value, since that is the value most likely to be
         // accidentally left somewhere.
-        poolBOGUS  = 0,
-        poolDTR    = 1,
-        poolBranch = 2,
-        poolVDTR   = 3
+        PoolBOGUS  = 0,
+        PoolDTR    = 1,
+        PoolBranch = 2,
+        PoolVDTR   = 3
     };
 
   private:
-    uint32_t   index    : 16;
-    uint32_t   cond     : 4;
-    LoadType   loadType : 2;
-    uint32_t   destReg  : 5;
-    uint32_t   destType : 1;
+    uint32_t   index_    : 16;
+    uint32_t   cond_     : 4;
+    LoadType   loadType_ : 2;
+    uint32_t   destReg_  : 5;
+    uint32_t   destType_ : 1;
     uint32_t   ONES     : 4;
 
-    static const uint32_t expectedOnes = 0xfu;
+    static const uint32_t ExpectedOnes = 0xfu;
 
   public:
-    void init(uint32_t index_, Assembler::Condition cond_, LoadType lt, Register destReg_) {
-        index = index_;
-        JS_ASSERT(index == index_);
-        cond = cond_ >> 28;
-        JS_ASSERT(cond == cond_ >> 28);
-        loadType = lt;
-        ONES = expectedOnes;
-        destReg = destReg_.code();
-        destType = 0;
+    void init(uint32_t index, Assembler::Condition cond, LoadType lt, Register destReg) {
+        index_ = index;
+        JS_ASSERT(index_ == index);
+        cond_ = cond >> 28;
+        JS_ASSERT(cond_ == cond >> 28);
+        loadType_ = lt;
+        ONES = ExpectedOnes;
+        destReg_ = destReg.code();
+        destType_ = 0;
     }
-    void init(uint32_t index_, Assembler::Condition cond_, LoadType lt, const VFPRegister &destReg_) {
-        JS_ASSERT(destReg_.isFloat());
-        index = index_;
-        JS_ASSERT(index == index_);
-        cond = cond_ >> 28;
-        JS_ASSERT(cond == cond_ >> 28);
-        loadType = lt;
-        ONES = expectedOnes;
-        destReg = destReg_.isDouble() ? destReg_.code() : destReg_.doubleOverlay().code();
-        destType = destReg_.isDouble();
+    void init(uint32_t index, Assembler::Condition cond, LoadType lt, const VFPRegister &destReg) {
+        JS_ASSERT(destReg.isFloat());
+        index_ = index;
+        JS_ASSERT(index_ == index);
+        cond_ = cond >> 28;
+        JS_ASSERT(cond_ == cond >> 28);
+        loadType_ = lt;
+        ONES = ExpectedOnes;
+        destReg_ = destReg.isDouble() ? destReg.code() : destReg.doubleOverlay().code();
+        destType_ = destReg.isDouble();
     }
     Assembler::Condition getCond() {
-        return Assembler::Condition(cond << 28);
+        return Assembler::Condition(cond_ << 28);
     }
 
     Register getReg() {
-        return Register::FromCode(destReg);
+        return Register::FromCode(destReg_);
     }
     VFPRegister getVFPReg() {
-        VFPRegister r = VFPRegister(FloatRegister::FromCode(destReg));
-        return destType ? r : r.singleOverlay();
+        VFPRegister r = VFPRegister(FloatRegister::FromCode(destReg_));
+        return destType_ ? r : r.singleOverlay();
     }
 
     int32_t getIndex() {
-        return index;
+        return index_;
     }
-    void setIndex(uint32_t index_) {
-        JS_ASSERT(ONES == expectedOnes && loadType != poolBOGUS);
-        index = index_;
-        JS_ASSERT(index == index_);
+    void setIndex(uint32_t index) {
+        JS_ASSERT(ONES == ExpectedOnes && loadType_ != PoolBOGUS);
+        index_ = index;
+        JS_ASSERT(index_ == index);
     }
 
     LoadType getLoadType() {
-        // If this *was* a poolBranch, but the branch has already been bound
+        // If this *was* a PoolBranch, but the branch has already been bound
         // then this isn't going to look like a real poolhintdata, but we still
         // want to lie about it so everyone knows it *used* to be a branch.
-        if (ONES != expectedOnes)
-            return PoolHintData::poolBranch;
-        return loadType;
+        if (ONES != ExpectedOnes)
+            return PoolHintData::PoolBranch;
+        return loadType_;
     }
 
     bool isValidPoolHint() {
-        // Most instructions cannot have a condition that is 0xf. Notable exceptions are
-        // blx and the entire NEON instruction set. For the purposes of pool loads, and
-        // possibly patched branches, the possible instructions are ldr and b, neither of
-        // which can have a condition code of 0xf.
-        return ONES == expectedOnes;
+        // Most instructions cannot have a condition that is 0xf. Notable
+        // exceptions are blx and the entire NEON instruction set. For the
+        // purposes of pool loads, and possibly patched branches, the possible
+        // instructions are ldr and b, neither of which can have a condition
+        // code of 0xf.
+        return ONES == ExpectedOnes;
     }
 };
 
 union PoolHintPun {
     PoolHintData phd;
     uint32_t raw;
 };
 
-// Handles all of the other integral data transferring functions:
-// ldrsb, ldrsh, ldrd, etc.
-// size is given in bits.
+// Handles all of the other integral data transferring functions: ldrsb, ldrsh,
+// ldrd, etc. The size is given in bits.
 BufferOffset
 Assembler::as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
                      Register rt, EDtrAddr addr, Condition c, uint32_t *dest)
 {
     int extra_bits2 = 0;
     int extra_bits1 = 0;
     switch(size) {
       case 8:
         JS_ASSERT(IsSigned);
-        JS_ASSERT(ls!=IsStore);
+        JS_ASSERT(ls != IsStore);
         extra_bits1 = 0x1;
         extra_bits2 = 0x2;
         break;
       case 16:
-        //case 32:
-        // doesn't need to be handled-- it is handled by the default ldr/str
+        // 'case 32' doesn't need to be handled; it is handled by the default
+        // ldr/str.
         extra_bits2 = 0x01;
         extra_bits1 = (ls == IsStore) ? 0 : 1;
         if (IsSigned) {
             JS_ASSERT(ls != IsStore);
             extra_bits2 |= 0x2;
         }
         break;
       case 64:
@@ -1675,17 +1677,17 @@ Assembler::as_dtm(LoadStore ls, Register
     return writeInst(0x08000000 | RN(rn) | ls |
                      mode | mask | c | wb);
 }
 
 BufferOffset
 Assembler::as_Imm32Pool(Register dest, uint32_t value, Condition c)
 {
     PoolHintPun php;
-    php.phd.init(0, c, PoolHintData::poolDTR, dest);
+    php.phd.init(0, c, PoolHintData::PoolDTR, dest);
     return m_buffer.insertEntry(4, (uint8_t*)&php.raw, int32Pool, (uint8_t*)&value);
 }
 
 void
 Assembler::as_WritePoolEntry(Instruction *addr, Condition c, uint32_t data)
 {
     JS_ASSERT(addr->is<InstLDR>());
     int32_t offset = addr->encode() & 0xfff;
@@ -1698,146 +1700,135 @@ Assembler::as_WritePoolEntry(Instruction
     addr->extractCond(&orig_cond);
     JS_ASSERT(orig_cond == c);
 }
 
 BufferOffset
 Assembler::as_BranchPool(uint32_t value, RepatchLabel *label, ARMBuffer::PoolEntry *pe, Condition c)
 {
     PoolHintPun php;
-    php.phd.init(0, c, PoolHintData::poolBranch, pc);
+    php.phd.init(0, c, PoolHintData::PoolBranch, pc);
     BufferOffset ret = m_buffer.insertEntry(4, (uint8_t*)&php.raw, int32Pool, (uint8_t*)&value, pe,
                                             /* markAsBranch = */ true);
-    // If this label is already bound, then immediately replace the stub load with
-    // a correct branch.
+    // If this label is already bound, then immediately replace the stub load
+    // with a correct branch.
     if (label->bound()) {
         BufferOffset dest(label);
         as_b(dest.diffB<BOffImm>(ret), c, ret);
     } else {
         label->use(ret.getOffset());
     }
     return ret;
 }
 
 BufferOffset
 Assembler::as_FImm64Pool(VFPRegister dest, double value, Condition c)
 {
     JS_ASSERT(dest.isDouble());
     PoolHintPun php;
-    php.phd.init(0, c, PoolHintData::poolVDTR, dest);
+    php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
     return m_buffer.insertEntry(4, (uint8_t*)&php.raw, doublePool, (uint8_t*)&value);
 }
 
 struct PaddedFloat32
 {
     float value;
     uint32_t padding;
 };
 JS_STATIC_ASSERT(sizeof(PaddedFloat32) == sizeof(double));
 
 BufferOffset
 Assembler::as_FImm32Pool(VFPRegister dest, float value, Condition c)
 {
-    /*
-     * Insert floats into the double pool as they have the same limitations on
-     * immediate offset.  This wastes 4 bytes padding per float.  An alternative
-     * would be to have a separate pool for floats.
-     */
+    // Insert floats into the double pool as they have the same limitations on
+    // immediate offset. This wastes 4 bytes padding per float. An alternative
+    // would be to have a separate pool for floats.
     JS_ASSERT(dest.isSingle());
     PoolHintPun php;
-    php.phd.init(0, c, PoolHintData::poolVDTR, dest);
+    php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
     PaddedFloat32 pf = { value, 0 };
     return m_buffer.insertEntry(4, (uint8_t*)&php.raw, doublePool, (uint8_t*)&pf);
 }
 
 // Pool callbacks stuff:
 void
-Assembler::insertTokenIntoTag(uint32_t instSize, uint8_t *load_, int32_t token)
+Assembler::InsertTokenIntoTag(uint32_t instSize, uint8_t *load_, int32_t token)
 {
     uint32_t *load = (uint32_t*) load_;
     PoolHintPun php;
     php.raw = *load;
     php.phd.setIndex(token);
     *load = php.raw;
 }
-// patchConstantPoolLoad takes the address of the instruction that wants to be patched, and
-//the address of the start of the constant pool, and figures things out from there.
+
+// PatchConstantPoolLoad takes the address of the instruction that wants to be
+// patched, and the address of the start of the constant pool, and figures
+// things out from there.
 bool
-Assembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+Assembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
 {
     PoolHintData data = *(PoolHintData*)loadAddr;
     uint32_t *instAddr = (uint32_t*) loadAddr;
     int offset = (char *)constPoolAddr - (char *)loadAddr;
     switch(data.getLoadType()) {
-      case PoolHintData::poolBOGUS:
+      case PoolHintData::PoolBOGUS:
         MOZ_ASSUME_UNREACHABLE("bogus load type!");
-      case PoolHintData::poolDTR:
-        dummy->as_dtr(IsLoad, 32, Offset, data.getReg(),
+      case PoolHintData::PoolDTR:
+        Dummy->as_dtr(IsLoad, 32, Offset, data.getReg(),
                       DTRAddr(pc, DtrOffImm(offset+4*data.getIndex() - 8)), data.getCond(), instAddr);
         break;
-      case PoolHintData::poolBranch:
-        // Either this used to be a poolBranch, and the label was already bound, so it was
-        // replaced with a real branch, or this may happen in the future.
-        // If this is going to happen in the future, then the actual bits that are written here
-        // don't matter (except the condition code, since that is always preserved across
-        // patchings) but if it does not get bound later,
-        // then we want to make sure this is a load from the pool entry (and the pool entry
-        // should be nullptr so it will crash).
+      case PoolHintData::PoolBranch:
+        // Either this used to be a PoolBranch, and the label was already bound,
+        // so it was replaced with a real branch, or this may happen in the
+        // future. If this is going to happen in the future, then the actual
+        // bits that are written here don't matter (except the condition code,
+        // since that is always preserved across patchings) but if it does not
+        // get bound later, then we want to make sure this is a load from the
+        // pool entry (and the pool entry should be nullptr so it will crash).
         if (data.isValidPoolHint()) {
-            dummy->as_dtr(IsLoad, 32, Offset, pc,
+            Dummy->as_dtr(IsLoad, 32, Offset, pc,
                           DTRAddr(pc, DtrOffImm(offset+4*data.getIndex() - 8)),
                           data.getCond(), instAddr);
         }
         break;
-      case PoolHintData::poolVDTR: {
+      case PoolHintData::PoolVDTR: {
         VFPRegister dest = data.getVFPReg();
         int32_t imm = offset + (8 * data.getIndex()) - 8;
         if (imm < -1023 || imm  > 1023)
             return false;
-        dummy->as_vdtr(IsLoad, dest, VFPAddr(pc, VFPOffImm(imm)), data.getCond(), instAddr);
+        Dummy->as_vdtr(IsLoad, dest, VFPAddr(pc, VFPOffImm(imm)), data.getCond(), instAddr);
         break;
       }
     }
     return true;
 }
 
-uint32_t
-Assembler::placeConstantPoolBarrier(int offset)
-{
-    // BUG: 700526
-    // this is still an active path, however, we do not hit it in the test
-    // suite at all.
-    MOZ_ASSUME_UNREACHABLE("ARMAssembler holdover");
-}
-
 // Control flow stuff:
 
-// bx can *only* branch to a register
-// never to an immediate.
+// bx can *only* branch to a register, never to an immediate.
 BufferOffset
 Assembler::as_bx(Register r, Condition c, bool isPatchable)
 {
-    BufferOffset ret = writeInst(((int) c) | op_bx | r.code());
+    BufferOffset ret = writeInst(((int) c) | OpBx | r.code());
     if (c == Always && !isPatchable)
         m_buffer.markGuard();
     return ret;
 }
 void
-Assembler::writePoolGuard(BufferOffset branch, Instruction *dest, BufferOffset afterPool)
+Assembler::WritePoolGuard(BufferOffset branch, Instruction *dest, BufferOffset afterPool)
 {
     BOffImm off = afterPool.diffB<BOffImm>(branch);
     *dest = InstBImm(off, Always);
 }
 // Branch can branch to an immediate *or* to a register.
-// Branches to immediates are pc relative, branches to registers
-// are absolute
+// Branches to immediates are pc relative, branches to registers are absolute.
 BufferOffset
 Assembler::as_b(BOffImm off, Condition c, bool isPatchable)
 {
-    BufferOffset ret = writeBranchInst(((int)c) | op_b | off.encode());
+    BufferOffset ret = writeBranchInst(((int)c) | OpB | off.encode());
     if (c == Always && !isPatchable)
         m_buffer.markGuard();
     return ret;
 }
 
 BufferOffset
 Assembler::as_b(Label *l, Condition c, bool isPatchable)
 {
@@ -1854,17 +1845,17 @@ Assembler::as_b(Label *l, Condition c, b
     }
 
     int32_t old;
     BufferOffset ret;
     if (l->used()) {
         old = l->offset();
         // This will currently throw an assertion if we couldn't actually
         // encode the offset of the branch.
-        if (!BOffImm::isInRange(old)) {
+        if (!BOffImm::IsInRange(old)) {
             m_buffer.fail_bail();
             return ret;
         }
         ret = as_b(BOffImm(old), c, isPatchable);
     } else {
         old = LabelBase::INVALID_OFFSET;
         BOffImm inv;
         ret = as_b(inv, c, isPatchable);
@@ -1876,32 +1867,32 @@ Assembler::as_b(Label *l, Condition c, b
 BufferOffset
 Assembler::as_b(BOffImm off, Condition c, BufferOffset inst)
 {
     *editSrc(inst) = InstBImm(off, c);
     return inst;
 }
 
 // blx can go to either an immediate or a register.
-// When blx'ing to a register, we change processor state
-// depending on the low bit of the register
-// when blx'ing to an immediate, we *always* change processor state.
+// When blx'ing to a register, we change processor state depending on the low
+// bit of the register; when blx'ing to an immediate, we *always* change
+// processor state.
 
 BufferOffset
 Assembler::as_blx(Register r, Condition c)
 {
-    return writeInst(((int) c) | op_blx | r.code());
+    return writeInst(((int) c) | OpBlx | r.code());
 }
 
 // bl can only branch to an pc-relative immediate offset
 // It cannot change the processor state.
 BufferOffset
 Assembler::as_bl(BOffImm off, Condition c)
 {
-    return writeBranchInst(((int)c) | op_bl | off.encode());
+    return writeBranchInst(((int)c) | OpBl | off.encode());
 }
 
 BufferOffset
 Assembler::as_bl(Label *l, Condition c)
 {
     if (m_buffer.oom()) {
         BufferOffset ret;
         return ret;
@@ -1913,20 +1904,20 @@ Assembler::as_bl(Label *l, Condition c)
         as_bl(BufferOffset(l).diffB<BOffImm>(ret), c, ret);
         return ret;
     }
 
     int32_t old;
     BufferOffset ret;
     // See if the list was empty :(
     if (l->used()) {
-        // This will currently throw an assertion if we couldn't actually
-        // encode the offset of the branch.
+        // This will currently throw an assertion if we couldn't actually encode
+        // the offset of the branch.
         old = l->offset();
-        if (!BOffImm::isInRange(old)) {
+        if (!BOffImm::IsInRange(old)) {
             m_buffer.fail_bail();
             return ret;
         }
         ret = as_bl(BOffImm(old), c);
     } else {
         old = LabelBase::INVALID_OFFSET;
         BOffImm inv;
         ret = as_bl(inv, c);
@@ -1946,73 +1937,74 @@ BufferOffset
 Assembler::as_mrs(Register r, Condition c)
 {
     return writeInst(0x010f0000 | int(c) | RD(r));
 }
 
 BufferOffset
 Assembler::as_msr(Register r, Condition c)
 {
-    // hardcode the 'mask' field to 0b11 for now.  it is bits 18 and 19, which are the two high bits of the 'c' in this constant.
+    // Hardcode the 'mask' field to 0b11 for now. It is bits 18 and 19, which
+    // are the two high bits of the 'c' in this constant.
     JS_ASSERT((r.code() & ~0xf) == 0);
     return writeInst(0x012cf000 | int(c) | r.code());
 }
 
 // VFP instructions!
 enum vfp_tags {
-    vfp_tag   = 0x0C000A00,
-    vfp_arith = 0x02000000
+    VfpTag   = 0x0C000A00,
+    VfpArith = 0x02000000
 };
 BufferOffset
 Assembler::writeVFPInst(vfp_size sz, uint32_t blob, uint32_t *dest)
 {
     JS_ASSERT((sz & blob) == 0);
-    JS_ASSERT((vfp_tag & blob) == 0);
-    return writeInst(vfp_tag | sz | blob, dest);
+    JS_ASSERT((VfpTag & blob) == 0);
+    return writeInst(VfpTag | sz | blob, dest);
 }
 
 // Unityped variants: all registers hold the same (ieee754 single/double)
 // notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
 BufferOffset
 Assembler::as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                   VFPOp op, Condition c)
 {
-    // Make sure we believe that all of our operands are the same kind
+    // Make sure we believe that all of our operands are the same kind.
     JS_ASSERT_IF(!vn.isMissing(), vd.equiv(vn));
     JS_ASSERT_IF(!vm.isMissing(), vd.equiv(vm));
-    vfp_size sz = vd.isDouble() ? isDouble : isSingle;
-    return writeVFPInst(sz, VD(vd) | VN(vn) | VM(vm) | op | vfp_arith | c);
+    vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
+    return writeVFPInst(sz, VD(vd) | VN(vn) | VM(vm) | op | VfpArith | c);
 }
 
 BufferOffset
 Assembler::as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                  Condition c)
 {
-    return as_vfp_float(vd, vn, vm, opv_add, c);
+    return as_vfp_float(vd, vn, vm, OpvAdd, c);
 }
 
 BufferOffset
 Assembler::as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                  Condition c)
 {
-    return as_vfp_float(vd, vn, vm, opv_div, c);
+    return as_vfp_float(vd, vn, vm, OpvDiv, c);
 }
 
 BufferOffset
 Assembler::as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                  Condition c)
 {
-    return as_vfp_float(vd, vn, vm, opv_mul, c);
+    return as_vfp_float(vd, vn, vm, OpvMul, c);
 }
 
 BufferOffset
 Assembler::as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                   Condition c)
 {
-    return as_vfp_float(vd, vn, vm, opv_mul, c);
+    return as_vfp_float(vd, vn, vm, OpvMul, c);
     MOZ_ASSUME_UNREACHABLE("Feature NYI");
 }
 
 BufferOffset
 Assembler::as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                   Condition c)
 {
     MOZ_ASSUME_UNREACHABLE("Feature NYI");
@@ -2024,80 +2016,80 @@ Assembler::as_vnmls(VFPRegister vd, VFPR
 {
     MOZ_ASSUME_UNREACHABLE("Feature NYI");
     return BufferOffset();
 }
 
 BufferOffset
 Assembler::as_vneg(VFPRegister vd, VFPRegister vm, Condition c)
 {
-    return as_vfp_float(vd, NoVFPRegister, vm, opv_neg, c);
+    return as_vfp_float(vd, NoVFPRegister, vm, OpvNeg, c);
 }
 
 BufferOffset
 Assembler::as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c)
 {
-    return as_vfp_float(vd, NoVFPRegister, vm, opv_sqrt, c);
+    return as_vfp_float(vd, NoVFPRegister, vm, OpvSqrt, c);
 }
 
 BufferOffset
 Assembler::as_vabs(VFPRegister vd, VFPRegister vm, Condition c)
 {
-    return as_vfp_float(vd, NoVFPRegister, vm, opv_abs, c);
+    return as_vfp_float(vd, NoVFPRegister, vm, OpvAbs, c);
 }
 
 BufferOffset
 Assembler::as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                  Condition c)
 {
-    return as_vfp_float(vd, vn, vm, opv_sub, c);
+    return as_vfp_float(vd, vn, vm, OpvSub, c);
 }
 
 BufferOffset
 Assembler::as_vcmp(VFPRegister vd, VFPRegister vm,
                  Condition c)
 {
-    return as_vfp_float(vd, NoVFPRegister, vm, opv_cmp, c);
+    return as_vfp_float(vd, NoVFPRegister, vm, OpvCmp, c);
 }
 BufferOffset
 Assembler::as_vcmpz(VFPRegister vd, Condition c)
 {
-    return as_vfp_float(vd, NoVFPRegister, NoVFPRegister, opv_cmpz, c);
+    return as_vfp_float(vd, NoVFPRegister, NoVFPRegister, OpvCmpz, c);
 }
 
 // Specifically, a move between two same sized-registers.
 BufferOffset
 Assembler::as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c)
 {
-    return as_vfp_float(vd, NoVFPRegister, vsrc, opv_mov, c);
+    return as_vfp_float(vd, NoVFPRegister, vsrc, OpvMov, c);
 }
-//xfer between Core and VFP
+// Transfer between Core and VFP.
 
-// Unlike the next function, moving between the core registers and vfp
-// registers can't be *that* properly typed.  Namely, since I don't want to
-// munge the type VFPRegister to also include core registers.  Thus, the core
-// and vfp registers are passed in based on their type, and src/dest is
-// determined by the float2core.
+// Unlike the next function, moving between the core registers and vfp registers
+// can't be *that* properly typed. Namely, since I don't want to munge the type
+// VFPRegister to also include core registers. Thus, the core and vfp registers
+// are passed in based on their type, and src/dest is determined by the
+// float2core.
 
 BufferOffset
 Assembler::as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c,
                     Condition c, int idx)
 {
-    vfp_size sz = isSingle;
+    vfp_size sz = IsSingle;
     if (vm.isDouble()) {
         // Technically, this can be done with a vmov à la ARM ARM under vmov
-        // however, that requires at least an extra bit saying if the
-        // operation should be performed on the lower or upper half of the
-        // double.  Moving a single to/from 2N/2N+1 isn't equivalent,
-        // since there are 32 single registers, and 32 double registers
-        // so there is no way to encode the last 16 double registers.
-        sz = isDouble;
+        // however, that requires at least an extra bit saying if the operation
+        // should be performed on the lower or upper half of the double. Moving
+        // a single to/from 2N/2N+1 isn't equivalent, since there are 32 single
+        // registers, and 32 double registers so there is no way to encode the
+        // last 16 double registers.
+        sz = IsDouble;
         JS_ASSERT(idx == 0 || idx == 1);
-        // If we are transferring a single half of the double
-        // then it must be moving a VFP reg to a core reg.
+        // If we are transferring a single half of the double then it must be
+        // moving a VFP reg to a core reg.
         if (vt2 == InvalidReg)
             JS_ASSERT(f2c == FloatToCore);
         idx = idx << 21;
     } else {
         JS_ASSERT(idx == 0);
     }
 
     if (vt2 == InvalidReg) {
@@ -2105,113 +2097,111 @@ Assembler::as_vxfer(Register vt1, Regist
                             RT(vt1) | maybeRN(vt2) | VN(vm) | idx);
     } else {
         // We are doing a 64 bit transfer.
         return writeVFPInst(sz, DoubleTransfer | f2c | c |
                             RT(vt1) | maybeRN(vt2) | VM(vm) | idx);
     }
 }
 enum vcvt_destFloatness {
-    toInteger = 1 << 18,
-    toFloat  = 0 << 18
+    VcvtToInteger = 1 << 18,
+    VcvtToFloat  = 0 << 18
 };
 enum vcvt_toZero {
-    toZero = 1 << 7, // use the default rounding mode, which rounds truncates
-    toFPSCR = 0 << 7 // use whatever rounding mode the fpscr specifies
+    VcvtToZero = 1 << 7, // Use the default rounding mode, which rounds truncates.
+    VcvtToFPSCR = 0 << 7 // Use whatever rounding mode the fpscr specifies.
 };
 enum vcvt_Signedness {
-    toSigned   = 1 << 16,
-    toUnsigned = 0 << 16,
-    fromSigned   = 1 << 7,
-    fromUnsigned = 0 << 7
+    VcvtToSigned   = 1 << 16,
+    VcvtToUnsigned = 0 << 16,
+    VcvtFromSigned   = 1 << 7,
+    VcvtFromUnsigned = 0 << 7
 };
 
-// our encoding actually allows just the src and the dest (and their types)
-// to uniquely specify the encoding that we are going to use.
+// Our encoding actually allows just the src and the dest (and their types) to
+// uniquely specify the encoding that we are going to use.
 BufferOffset
 Assembler::as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR,
                    Condition c)
 {
-    // Unlike other cases, the source and dest types cannot be the same
+    // Unlike other cases, the source and dest types cannot be the same.
     JS_ASSERT(!vd.equiv(vm));
-    vfp_size sz = isDouble;
+    vfp_size sz = IsDouble;
     if (vd.isFloat() && vm.isFloat()) {
-        // Doing a float -> float conversion
+        // Doing a float -> float conversion.
         if (vm.isSingle())
-            sz = isSingle;
-        return writeVFPInst(sz, c | 0x02B700C0 |
-                            VM(vm) | VD(vd));
+            sz = IsSingle;
+        return writeVFPInst(sz, c | 0x02B700C0 | VM(vm) | VD(vd));
     }
 
     // At least one of the registers should be a float.
     vcvt_destFloatness destFloat;
     vcvt_Signedness opSign;
-    vcvt_toZero doToZero = toFPSCR;
+    vcvt_toZero doToZero = VcvtToFPSCR;
     JS_ASSERT(vd.isFloat() || vm.isFloat());
     if (vd.isSingle() || vm.isSingle()) {
-        sz = isSingle;
+        sz = IsSingle;
     }
     if (vd.isFloat()) {
-        destFloat = toFloat;
-        opSign = (vm.isSInt()) ? fromSigned : fromUnsigned;
+        destFloat = VcvtToFloat;
+        opSign = (vm.isSInt()) ? VcvtFromSigned : VcvtFromUnsigned;
     } else {
-        destFloat = toInteger;
-        opSign = (vd.isSInt()) ? toSigned : toUnsigned;
-        doToZero = useFPSCR ? toFPSCR : toZero;
+        destFloat = VcvtToInteger;
+        opSign = (vd.isSInt()) ? VcvtToSigned : VcvtToUnsigned;
+        doToZero = useFPSCR ? VcvtToFPSCR : VcvtToZero;
     }
     return writeVFPInst(sz, c | 0x02B80040 | VD(vd) | VM(vm) | destFloat | opSign | doToZero);
 }
 
 BufferOffset
 Assembler::as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint, bool toFixed, Condition c)
 {
     JS_ASSERT(vd.isFloat());
     uint32_t sx = 0x1;
-    vfp_size sf = vd.isDouble() ? isDouble : isSingle;
+    vfp_size sf = vd.isDouble() ? IsDouble : IsSingle;
     int32_t imm5 = fixedPoint;
     imm5 = (sx ? 32 : 16) - imm5;
     JS_ASSERT(imm5 >= 0);
     imm5 = imm5 >> 1 | (imm5 & 1) << 5;
     return writeVFPInst(sf, 0x02BA0040 | VD(vd) | toFixed << 18 | sx << 7 |
                         (!isSigned) << 16 | imm5 | c);
 }
 
-// xfer between VFP and memory
+// Transfer between VFP and memory.
 BufferOffset
 Assembler::as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
-                   Condition c /* vfp doesn't have a wb option*/,
+                   Condition c /* vfp doesn't have a wb option */,
                    uint32_t *dest)
 {
-    vfp_size sz = vd.isDouble() ? isDouble : isSingle;
+    vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
     return writeVFPInst(sz, ls | 0x01000000 | addr.encode() | VD(vd) | c, dest);
 }
 
-// VFP's ldm/stm work differently from the standard arm ones.
-// You can only transfer a range
+// VFP's ldm/stm work differently from the standard arm ones. You can only
+// transfer a range.
 
 BufferOffset
 Assembler::as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length,
-                 /*also has update conditions*/Condition c)
+                   /* also has update conditions */ Condition c)
 {
     JS_ASSERT(length <= 16 && length >= 0);
-    vfp_size sz = vd.isDouble() ? isDouble : isSingle;
+    vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
 
     if (vd.isDouble())
         length *= 2;
 
-    return writeVFPInst(sz, dtmLoadStore | RN(rn) | VD(vd) |
-                        length |
+    return writeVFPInst(sz, dtmLoadStore | RN(rn) | VD(vd) | length |
                         dtmMode | dtmUpdate | dtmCond);
 }
 
 BufferOffset
 Assembler::as_vimm(VFPRegister vd, VFPImm imm, Condition c)
 {
     JS_ASSERT(imm.isValid());
-    vfp_size sz = vd.isDouble() ? isDouble : isSingle;
+    vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
     return writeVFPInst(sz,  c | imm.encode() | VD(vd) | 0x02B00000);
 
 }
 BufferOffset
 Assembler::as_vmrs(Register r, Condition c)
 {
     return writeInst(c | 0x0ef10a10 | RT(r));
 }
@@ -2228,30 +2218,29 @@ Assembler::nextLink(BufferOffset b, Buff
     Instruction branch = *editSrc(b);
     JS_ASSERT(branch.is<InstBranchImm>());
 
     BOffImm destOff;
     branch.as<InstBranchImm>()->extractImm(&destOff);
     if (destOff.isInvalid())
         return false;
 
-    // Propagate the next link back to the caller, by
-    // constructing a new BufferOffset into the space they
-    // provided.
+    // Propagate the next link back to the caller, by constructing a new
+    // BufferOffset into the space they provided.
     new (next) BufferOffset(destOff.decode());
     return true;
 }
 
 void
 Assembler::bind(Label *label, BufferOffset boff)
 {
     if (label->used()) {
         bool more;
-        // If our caller didn't give us an explicit target to bind to
-        // then we want to bind to the location of the next instruction
+        // If our caller didn't give us an explicit target to bind to then we
+        // want to bind to the location of the next instruction.
         BufferOffset dest = boff.assigned() ? boff : nextOffset();
         BufferOffset b(label);
         do {
             BufferOffset next;
             more = nextLink(b, &next);
             Instruction branch = *editSrc(b);
             Condition c;
             branch.extractCond(&c);
@@ -2267,18 +2256,18 @@ Assembler::bind(Label *label, BufferOffs
     label->bind(nextOffset().getOffset());
 }
 
 void
 Assembler::bind(RepatchLabel *label)
 {
     BufferOffset dest = nextOffset();
     if (label->used()) {
-        // If the label has a use, then change this use to refer to
-        // the bound label;
+        // If the label has a use, then change this use to refer to the bound
+        // label.
         BufferOffset branchOff(label->offset());
         // Since this was created with a RepatchLabel, the value written in the
         // instruction stream is not branch shaped, it is PoolHintData shaped.
         Instruction *branch = editSrc(branchOff);
         PoolHintPun p;
         p.raw = branch->encode();
         Condition cond;
         if (p.phd.isValidPoolHint())
@@ -2301,75 +2290,69 @@ Assembler::retarget(Label *label, Label 
             // onto target's.
             BufferOffset labelBranchOffset(label);
             BufferOffset next;
 
             // Find the head of the use chain for label.
             while (nextLink(labelBranchOffset, &next))
                 labelBranchOffset = next;
 
-            // Then patch the head of label's use chain to the tail of
-            // target's use chain, prepending the entire use chain of target.
+            // Then patch the head of label's use chain to the tail of target's
+            // use chain, prepending the entire use chain of target.
             Instruction branch = *editSrc(labelBranchOffset);
             Condition c;
             branch.extractCond(&c);
             int32_t prev = target->use(label->offset());
             if (branch.is<InstBImm>())
                 as_b(BOffImm(prev), c, labelBranchOffset);
             else if (branch.is<InstBLImm>())
                 as_bl(BOffImm(prev), c, labelBranchOffset);
             else
                 MOZ_ASSUME_UNREACHABLE("crazy fixup!");
         } else {
-            // The target is unbound and unused.  We can just take the head of
+            // The target is unbound and unused. We can just take the head of
             // the list hanging off of label, and dump that into target.
             DebugOnly<uint32_t> prev = target->use(label->offset());
             JS_ASSERT((int32_t)prev == Label::INVALID_OFFSET);
         }
     }
     label->reset();
 
 }
 
 
 void dbg_break() {}
 static int stopBKPT = -1;
 void
 Assembler::as_bkpt()
 {
-    // This is a count of how many times a breakpoint instruction has been generated.
-    // It is embedded into the instruction for debugging purposes.  gdb will print "bkpt xxx"
-    // when you attempt to dissassemble a breakpoint with the number xxx embedded into it.
-    // If this breakpoint is being hit, then you can run (in gdb)
-    // >b dbg_break
-    // >b main
-    // >commands
-    // >set stopBKPT = xxx
-    // >c
-    // >end
-
-    // which will set a breakpoint on the function dbg_break above
-    // set a scripted breakpoint on main that will set the (otherwise unmodified)
-    // value to the number of the breakpoint, so dbg_break will actuall be called
-    // and finally, when you run the executable, execution will halt when that
-    // breakpoint is generated
+    // This is a count of how many times a breakpoint instruction has been
+    // generated. It is embedded into the instruction for debugging
+    // purposes. Gdb will print "bkpt xxx" when you attempt to dissassemble a
+    // breakpoint with the number xxx embedded into it. If this breakpoint is
+    // being hit, then you can run (in gdb):
+    //  >b dbg_break
+    //  >b main
+    //  >commands
+    //  >set stopBKPT = xxx
+    //  >c
+    //  >end
+    // which will set a breakpoint on the function dbg_break above set a
+    // scripted breakpoint on main that will set the (otherwise unmodified)
+    // value to the number of the breakpoint, so dbg_break will actuall be
+    // called and finally, when you run the executable, execution will halt when
+    // that breakpoint is generated.
     static int hit = 0;
     if (stopBKPT == hit)
         dbg_break();
-    writeInst(0xe1200070 | (hit & 0xf) | ((hit & 0xfff0)<<4));
+    writeInst(0xe1200070 | (hit & 0xf) | ((hit & 0xfff0) << 4));
     hit++;
 }
 
 void
-Assembler::dumpPool()
-{
-    m_buffer.flushPool();
-}
-
-void
 Assembler::flushBuffer()
 {
     m_buffer.flushPool();
 }
 
 void
 Assembler::enterNoPool()
 {
@@ -2389,55 +2372,55 @@ Assembler::getBranchOffset(const Instruc
         return 0;
 
     InstBranchImm *i = i_->as<InstBranchImm>();
     BOffImm dest;
     i->extractImm(&dest);
     return dest.decode();
 }
 void
-Assembler::retargetNearBranch(Instruction *i, int offset, bool final)
+Assembler::RetargetNearBranch(Instruction *i, int offset, bool final)
 {
     Assembler::Condition c;
     i->extractCond(&c);
-    retargetNearBranch(i, offset, c, final);
+    RetargetNearBranch(i, offset, c, final);
 }
 
 void
-Assembler::retargetNearBranch(Instruction *i, int offset, Condition cond, bool final)
+Assembler::RetargetNearBranch(Instruction *i, int offset, Condition cond, bool final)
 {
     // Retargeting calls is totally unsupported!
     JS_ASSERT_IF(i->is<InstBranchImm>(), i->is<InstBImm>() || i->is<InstBLImm>());
     if (i->is<InstBLImm>())
         new (i) InstBLImm(BOffImm(offset), cond);
     else
         new (i) InstBImm(BOffImm(offset), cond);
 
-    // Flush the cache, since an instruction was overwritten
+    // Flush the cache, since an instruction was overwritten.
     if (final)
         AutoFlushICache::flush(uintptr_t(i), 4);
 }
 
 void
-Assembler::retargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Condition cond)
+Assembler::RetargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Condition cond)
 {
     int32_t offset = reinterpret_cast<uint8_t*>(slot) - reinterpret_cast<uint8_t*>(i);
     if (!i->is<InstLDR>()) {
         new (i) InstLDR(Offset, pc, DTRAddr(pc, DtrOffImm(offset - 8)), cond);
         AutoFlushICache::flush(uintptr_t(i), 4);
     }
     *slot = dest;
 
 }
 
 struct PoolHeader : Instruction {
     struct Header
     {
-        // size should take into account the pool header.
-        // size is in units of Instruction (4bytes), not byte
+        // The size should take into account the pool header.
+        // The size is in units of Instruction (4 bytes), not byte.
         uint32_t size : 15;
         bool isNatural : 1;
         uint32_t ONES : 16;
 
         Header(int size_, bool isNatural_)
           : size(size_),
             isNatural(isNatural_),
             ONES(0xffff)
@@ -2464,116 +2447,114 @@ struct PoolHeader : Instruction {
     uint32_t size() const {
         Header tmp(this);
         return tmp.size;
     }
     uint32_t isNatural() const {
         Header tmp(this);
         return tmp.isNatural;
     }
-    static bool isTHIS(const Instruction &i) {
+    static bool IsTHIS(const Instruction &i) {
         return (*i.raw() & 0xffff0000) == 0xffff0000;
     }
-    static const PoolHeader *asTHIS(const Instruction &i) {
-        if (!isTHIS(i))
+    static const PoolHeader *AsTHIS(const Instruction &i) {
+        if (!IsTHIS(i))
             return nullptr;
         return static_cast<const PoolHeader*>(&i);
     }
 };
 
 
 void
-Assembler::writePoolHeader(uint8_t *start, Pool *p, bool isNatural)
+Assembler::WritePoolHeader(uint8_t *start, Pool *p, bool isNatural)
 {
     STATIC_ASSERT(sizeof(PoolHeader) == 4);
     uint8_t *pool = start+4;
-    // go through the usual rigaramarole to get the size of the pool.
+    // Go through the usual rigmarole to get the size of the pool.
     pool = p[0].addPoolSize(pool);
     pool = p[1].addPoolSize(pool);
     pool = p[1].other->addPoolSize(pool);
     pool = p[0].other->addPoolSize(pool);
     uint32_t size = pool - start;
     JS_ASSERT((size & 3) == 0);
     size = size >> 2;
     JS_ASSERT(size < (1 << 15));
     PoolHeader header(size, isNatural);
     *(PoolHeader*)start = header;
 }
 
 
 void
-Assembler::writePoolFooter(uint8_t *start, Pool *p, bool isNatural)
+Assembler::WritePoolFooter(uint8_t *start, Pool *p, bool isNatural)
 {
     return;
 }
 
-// The size of an arbitrary 32-bit call in the instruction stream.
-// On ARM this sequence is |pc = ldr pc - 4; imm32| given that we
-// never reach the imm32.
+// The size of an arbitrary 32-bit call in the instruction stream. On ARM this
+// sequence is |pc = ldr pc - 4; imm32| given that we never reach the imm32.
 uint32_t
-Assembler::patchWrite_NearCallSize()
+Assembler::PatchWrite_NearCallSize()
 {
     return sizeof(uint32_t);
 }
 void
-Assembler::patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
+Assembler::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
 {
     Instruction *inst = (Instruction *) start.raw();
-    // Overwrite whatever instruction used to be here with a call.
-    // Since the destination is in the same function, it will be within range of the 24<<2 byte
-    // bl instruction.
+    // Overwrite whatever instruction used to be here with a call. Since the
+    // destination is in the same function, it will be within range of the
+    // 24 << 2 byte bl instruction.
     uint8_t *dest = toCall.raw();
     new (inst) InstBLImm(BOffImm(dest - (uint8_t*)inst) , Always);
     // Ensure everyone sees the code that was just written into memory.
-
     AutoFlushICache::flush(uintptr_t(inst), 4);
 
 }
 void
-Assembler::patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
+Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                    PatchedImmPtr expectedValue)
 {
     Instruction *ptr = (Instruction *) label.raw();
     InstructionIterator iter(ptr);
     Register dest;
     Assembler::RelocStyle rs;
-    DebugOnly<const uint32_t *> val = getPtr32Target(&iter, &dest, &rs);
+    DebugOnly<const uint32_t *> val = GetPtr32Target(&iter, &dest, &rs);
     JS_ASSERT((uint32_t)(const uint32_t *)val == uint32_t(expectedValue.value));
-    reinterpret_cast<MacroAssemblerARM*>(dummy)->ma_movPatchable(Imm32(int32_t(newValue.value)),
+    reinterpret_cast<MacroAssemblerARM*>(Dummy)->ma_movPatchable(Imm32(int32_t(newValue.value)),
                                                                  dest, Always, rs, ptr);
     // L_LDR won't cause any instructions to be updated.
     if (rs != L_LDR) {
         AutoFlushICache::flush(uintptr_t(ptr), 4);
         AutoFlushICache::flush(uintptr_t(ptr->next()), 4);
     }
 }
 
 void
-Assembler::patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue)
+Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue)
 {
-    patchDataWithValueCheck(label, PatchedImmPtr(newValue.value), PatchedImmPtr(expectedValue.value));
+    PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value), PatchedImmPtr(expectedValue.value));
 }
 
 // This just stomps over memory with 32 bits of raw data. Its purpose is to
 // overwrite the call of JITed code with 32 bits worth of an offset. This will
-// is only meant to function on code that has been invalidated, so it should
-// be totally safe. Since that instruction will never be executed again, a
-// ICache flush should not be necessary
+// is only meant to function on code that has been invalidated, so it should be
+// totally safe. Since that instruction will never be executed again, a ICache
+// flush should not be necessary
 void
-Assembler::patchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
+Assembler::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
     // Raw is going to be the return address.
     uint32_t *raw = (uint32_t*)label.raw();
-    // Overwrite the 4 bytes before the return address, which will
-    // end up being the call instruction.
-    *(raw-1) = imm.value;
+    // Overwrite the 4 bytes before the return address, which will end up being
+    // the call instruction.
+    *(raw - 1) = imm.value;
 }
 
 
 uint8_t *
-Assembler::nextInstruction(uint8_t *inst_, uint32_t *count)
+Assembler::NextInstruction(uint8_t *inst_, uint32_t *count)
 {
     Instruction *inst = reinterpret_cast<Instruction*>(inst_);
     if (count != nullptr)
         *count += sizeof(Instruction);
     return reinterpret_cast<uint8_t*>(inst->next());
 }
 
 static bool
@@ -2581,26 +2562,27 @@ InstIsGuard(Instruction *inst, const Poo
 {
     Assembler::Condition c;
     inst->extractCond(&c);
     if (c != Assembler::Always)
         return false;
     if (!(inst->is<InstBXReg>() || inst->is<InstBImm>()))
         return false;
     // See if the next instruction is a pool header.
-    *ph = (inst+1)->as<const PoolHeader>();
+    *ph = (inst + 1)->as<const PoolHeader>();
     return *ph != nullptr;
 }
 
 static bool
 InstIsBNop(Instruction *inst) {
-    // In some special situations, it is necessary to insert a NOP
-    // into the instruction stream that nobody knows about, since nobody should know about
-    // it, make sure it gets skipped when Instruction::next() is called.
-    // this generates a very specific nop, namely a branch to the next instruction.
+    // In some special situations, it is necessary to insert a NOP into the
+    // instruction stream that nobody knows about, since nobody should know
+    // about it, make sure it gets skipped when Instruction::next() is called.
+    // this generates a very specific nop, namely a branch to the next
+    // instruction.
     Assembler::Condition c;
     inst->extractCond(&c);
     if (c != Assembler::Always)
         return false;
     if (!inst->is<InstBImm>())
         return false;
     InstBImm *b = inst->as<InstBImm>();
     BOffImm offset;
@@ -2616,19 +2598,18 @@ InstIsArtificialGuard(Instruction *inst,
     return !(*ph)->isNatural();
 }
 
 // If the instruction points to a artificial pool guard then skip the pool.
 Instruction *
 Instruction::skipPool()
 {
     const PoolHeader *ph;
-    // If this is a guard, and the next instruction is a header,
-    // always work around the pool. If it isn't a guard, then start
-    // looking ahead.
+    // If this is a guard, and the next instruction is a header, always work
+    // around the pool. If it isn't a guard, then start looking ahead.
     if (InstIsGuard(this, &ph)) {
         // Don't skip a natural guard.
         if (ph->isNatural())
             return this;
         return (this + 1 + ph->size())->skipPool();
     }
     if (InstIsBNop(this))
         return (this + 1)->skipPool();
@@ -2666,18 +2647,18 @@ Instruction::skipPool()
 //    0xdeadbeef
 //    add r4, r4, r4  <= returned value
 
 Instruction *
 Instruction::next()
 {
     Instruction *ret = this+1;
     const PoolHeader *ph;
-    // If this is a guard, and the next instruction is a header, always work around the pool
-    // If it isn't a guard, then start looking ahead.
+    // If this is a guard, and the next instruction is a header, always work
+    // around the pool. If it isn't a guard, then start looking ahead.
     if (InstIsGuard(this, &ph))
         return (ret + ph->size())->skipPool();
     if (InstIsArtificialGuard(ret, &ph))
         return (ret + 1 + ph->size())->skipPool();
     return ret->skipPool();
 }
 
 void
@@ -2721,19 +2702,18 @@ void
 Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
 {
     Instruction *inst = (Instruction *)inst_.raw();
     // Skip a pool with an artificial guard.
     inst = inst->skipPool();
     JS_ASSERT(inst->is<InstMovW>() || inst->is<InstLDR>());
 
     if (inst->is<InstMovW>()) {
-        // If it looks like the start of a movw/movt sequence,
-        // then make sure we have all of it (and advance the iterator
-        // past the full sequence)
+        // If it looks like the start of a movw/movt sequence, then make sure we
+        // have all of it (and advance the iterator past the full sequence).
         inst = inst->next();
         JS_ASSERT(inst->is<InstMovT>());
     }
 
     inst = inst->next();
     JS_ASSERT(inst->is<InstNOP>() || inst->is<InstBLXReg>());
 
     if (enabled == inst->is<InstBLXReg>()) {
@@ -2753,19 +2733,18 @@ size_t
 Assembler::ToggledCallSize(uint8_t *code)
 {
     Instruction *inst = (Instruction *)code;
     // Skip a pool with an artificial guard.
     inst = inst->skipPool();
     JS_ASSERT(inst->is<InstMovW>() || inst->is<InstLDR>());
 
     if (inst->is<InstMovW>()) {
-        // If it looks like the start of a movw/movt sequence,
-        // then make sure we have all of it (and advance the iterator
-        // past the full sequence)
+        // If it looks like the start of a movw/movt sequence, then make sure we
+        // have all of it (and advance the iterator past the full sequence).
         inst = inst->next();
         JS_ASSERT(inst->is<InstMovT>());
     }
 
     inst = inst->next();
     JS_ASSERT(inst->is<InstNOP>() || inst->is<InstBLXReg>());
     return uintptr_t(inst) + 4 - uintptr_t(code);
 }
@@ -2775,41 +2754,42 @@ Assembler::BailoutTableStart(uint8_t *co
 {
     Instruction *inst = (Instruction *)code;
     // Skip a pool with an artificial guard or NOP fill.
     inst = inst->skipPool();
     JS_ASSERT(inst->is<InstBLImm>());
     return (uint8_t *) inst;
 }
 
-void Assembler::updateBoundsCheck(uint32_t heapSize, Instruction *inst)
+void Assembler::UpdateBoundsCheck(uint32_t heapSize, Instruction *inst)
 {
     JS_ASSERT(inst->is<InstCMP>());
     InstCMP *cmp = inst->as<InstCMP>();
 
     Register index;
     cmp->extractOp1(&index);
 
     Operand2 op = cmp->extractOp2();
     JS_ASSERT(op.isImm8());
 
     Imm8 imm8 = Imm8(heapSize);
     JS_ASSERT(!imm8.invalid);
 
-    *inst = InstALU(InvalidReg, index, imm8, op_cmp, SetCond, Always);
-    // NOTE: we don't update the Auto Flush Cache!  this function is currently only called from
-    // within AsmJSModule::patchHeapAccesses, which does that for us.  Don't call this!
+    *inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCond, Always);
+    // NOTE: we don't update the Auto Flush Cache!  this function is currently
+    // only called from within AsmJSModule::patchHeapAccesses, which does that
+    // for us. Don't call this!
 }
 
 InstructionIterator::InstructionIterator(Instruction *i_) : i(i_)
 {
     // Work around pools with an artificial pool guard and around nop-fill.
     i = i->skipPool();
 }
-Assembler *Assembler::dummy = nullptr;
+Assembler *Assembler::Dummy = nullptr;
 
 uint32_t Assembler::NopFill = 0;
 
 uint32_t
 Assembler::GetNopFill()
 {
     static bool isSet = false;
     if (!isSet) {
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -15,22 +15,20 @@
 #include "jit/CompactBuffer.h"
 #include "jit/IonCode.h"
 #include "jit/shared/Assembler-shared.h"
 #include "jit/shared/IonAssemblerBufferWithConstantPools.h"
 
 namespace js {
 namespace jit {
 
-//NOTE: there are duplicates in this list!
-// sometimes we want to specifically refer to the
-// link register as a link register (bl lr is much
-// clearer than bl r14).  HOWEVER, this register can
-// easily be a gpr when it is not busy holding the return
-// address.
+// NOTE: there are duplicates in this list! Sometimes we want to specifically
+// refer to the link register as a link register (bl lr is much clearer than bl
+// r14). HOWEVER, this register can easily be a gpr when it is not busy holding
+// the return address.
 static MOZ_CONSTEXPR_VAR Register r0  = { Registers::r0 };
 static MOZ_CONSTEXPR_VAR Register r1  = { Registers::r1 };
 static MOZ_CONSTEXPR_VAR Register r2  = { Registers::r2 };
 static MOZ_CONSTEXPR_VAR Register r3  = { Registers::r3 };
 static MOZ_CONSTEXPR_VAR Register r4  = { Registers::r4 };
 static MOZ_CONSTEXPR_VAR Register r5  = { Registers::r5 };
 static MOZ_CONSTEXPR_VAR Register r6  = { Registers::r6 };
 static MOZ_CONSTEXPR_VAR Register r7  = { Registers::r7 };
@@ -126,21 +124,20 @@ static MOZ_CONSTEXPR_VAR FloatRegister d
 static MOZ_CONSTEXPR_VAR FloatRegister d9(FloatRegisters::d9);
 static MOZ_CONSTEXPR_VAR FloatRegister d10(FloatRegisters::d10);
 static MOZ_CONSTEXPR_VAR FloatRegister d11(FloatRegisters::d11);
 static MOZ_CONSTEXPR_VAR FloatRegister d12(FloatRegisters::d12);
 static MOZ_CONSTEXPR_VAR FloatRegister d13(FloatRegisters::d13);
 static MOZ_CONSTEXPR_VAR FloatRegister d14(FloatRegisters::d14);
 static MOZ_CONSTEXPR_VAR FloatRegister d15(FloatRegisters::d15);
 
-// For maximal awesomeness, 8 should be sufficent.
-// ldrd/strd (dual-register load/store) operate in a single cycle
-// when the address they are dealing with is 8 byte aligned.
-// Also, the ARM abi wants the stack to be 8 byte aligned at
-// function boundaries.  I'm trying to make sure this is always true.
+// For maximal awesomeness, 8 should be sufficent. ldrd/strd (dual-register
+// load/store) operate in a single cycle when the address they are dealing with
+// is 8 byte aligned. Also, the ARM abi wants the stack to be 8 byte aligned at
+// function boundaries. I'm trying to make sure this is always true.
 static const uint32_t StackAlignment = 8;
 static const uint32_t CodeAlignment = 8;
 static const bool StackKeptAligned = true;
 
 static const Scale ScalePointer = TimesFour;
 
 class Instruction;
 class InstBranchImm;
@@ -159,18 +156,18 @@ Register toRM (Instruction &i);
 Register toRD (Instruction &i);
 Register toR (Instruction &i);
 
 class VFPRegister;
 uint32_t VD(VFPRegister vr);
 uint32_t VN(VFPRegister vr);
 uint32_t VM(VFPRegister vr);
 
-// For being passed into the generic vfp instruction generator when
-// there is an instruction that only takes two registers
+// For being passed into the generic vfp instruction generator when there is an
+// instruction that only takes two registers.
 static MOZ_CONSTEXPR_VAR VFPRegister NoVFPRegister(VFPRegister::Double, 0, false, true);
 
 struct ImmTag : public Imm32
 {
     ImmTag(JSValueTag mask)
       : Imm32(int32_t(mask))
     { }
 };
@@ -179,59 +176,57 @@ struct ImmType : public ImmTag
 {
     ImmType(JSValueType type)
       : ImmTag(JSVAL_TYPE_TO_TAG(type))
     { }
 };
 
 enum Index {
     Offset = 0 << 21 | 1<<24,
-    PreIndex = 1<<21 | 1 << 24,
+    PreIndex = 1 << 21 | 1 << 24,
     PostIndex = 0 << 21 | 0 << 24
-    // The docs were rather unclear on this. it sounds like
-    // 1<<21 | 0 << 24 encodes dtrt
+    // The docs were rather unclear on this. It sounds like
+    // 1 << 21 | 0 << 24 encodes dtrt.
 };
 
 // Seriously, wtf arm
 enum IsImmOp2_ {
     IsImmOp2    = 1 << 25,
     IsNotImmOp2 = 0 << 25
 };
 enum IsImmDTR_ {
     IsImmDTR    = 0 << 25,
     IsNotImmDTR = 1 << 25
 };
-// For the extra memory operations, ldrd, ldrsb, ldrh
+// For the extra memory operations, ldrd, ldrsb, ldrh.
 enum IsImmEDTR_ {
     IsImmEDTR    = 1 << 22,
     IsNotImmEDTR = 0 << 22
 };
 
 
 enum ShiftType {
     LSL = 0, // << 5
     LSR = 1, // << 5
     ASR = 2, // << 5
     ROR = 3, // << 5
     RRX = ROR // RRX is encoded as ROR with a 0 offset.
 };
 
-// The actual codes that get set by instructions
-// and the codes that are checked by the conditions below.
+// The actual codes that get set by instructions and the codes that are checked
+// by the conditions below.
 struct ConditionCodes
 {
     bool Zero : 1;
     bool Overflow : 1;
     bool Carry : 1;
     bool Minus : 1;
 };
 
-// Modes for STM/LDM.
-// Names are the suffixes applied to
-// the instruction.
+// Modes for STM/LDM. Names are the suffixes applied to the instruction.
 enum DTMMode {
     A = 0 << 24, // empty / after
     B = 1 << 24, // full / before
     D = 0 << 23, // decrement
     I = 1 << 23, // increment
     DA = D | A,
     DB = D | B,
     IA = I | A,
@@ -246,145 +241,143 @@ enum DTMWriteBack {
 enum SetCond_ {
     SetCond   = 1 << 20,
     NoSetCond = 0 << 20
 };
 enum LoadStore {
     IsLoad  = 1 << 20,
     IsStore = 0 << 20
 };
-// You almost never want to use this directly.
-// Instead, you wantto pass in a signed constant,
-// and let this bit be implicitly set for you.
-// this is however, necessary if we want a negative index
+// You almost never want to use this directly. Instead, you want to pass in a
+// signed constant, and let this bit be implicitly set for you. This is,
+// however, necessary if we want a negative index.
 enum IsUp_ {
     IsUp   = 1 << 23,
     IsDown = 0 << 23
 };
 enum ALUOp {
-    op_mov = 0xd << 21,
-    op_mvn = 0xf << 21,
-    op_and = 0x0 << 21,
-    op_bic = 0xe << 21,
-    op_eor = 0x1 << 21,
-    op_orr = 0xc << 21,
-    op_adc = 0x5 << 21,
-    op_add = 0x4 << 21,
-    op_sbc = 0x6 << 21,
-    op_sub = 0x2 << 21,
-    op_rsb = 0x3 << 21,
-    op_rsc = 0x7 << 21,
-    op_cmn = 0xb << 21,
-    op_cmp = 0xa << 21,
-    op_teq = 0x9 << 21,
-    op_tst = 0x8 << 21,
-    op_invalid = -1
+    OpMov = 0xd << 21,
+    OpMvn = 0xf << 21,
+    OpAnd = 0x0 << 21,
+    OpBic = 0xe << 21,
+    OpEor = 0x1 << 21,
+    OpOrr = 0xc << 21,
+    OpAdc = 0x5 << 21,
+    OpAdd = 0x4 << 21,
+    OpSbc = 0x6 << 21,
+    OpSub = 0x2 << 21,
+    OpRsb = 0x3 << 21,
+    OpRsc = 0x7 << 21,
+    OpCmn = 0xb << 21,
+    OpCmp = 0xa << 21,
+    OpTeq = 0x9 << 21,
+    OpTst = 0x8 << 21,
+    OpInvalid = -1
 };
 
 
 enum MULOp {
-    opm_mul   = 0 << 21,
-    opm_mla   = 1 << 21,
-    opm_umaal = 2 << 21,
-    opm_mls   = 3 << 21,
-    opm_umull = 4 << 21,
-    opm_umlal = 5 << 21,
-    opm_smull = 6 << 21,
-    opm_smlal = 7 << 21
+    OpmMul   = 0 << 21,
+    OpmMla   = 1 << 21,
+    OpmUmaal = 2 << 21,
+    OpmMls   = 3 << 21,
+    OpmUmull = 4 << 21,
+    OpmUmlal = 5 << 21,
+    OpmSmull = 6 << 21,
+    OpmSmlal = 7 << 21
 };
 enum BranchTag {
-    op_b = 0x0a000000,
-    op_b_mask = 0x0f000000,
-    op_b_dest_mask = 0x00ffffff,
-    op_bl = 0x0b000000,
-    op_blx = 0x012fff30,
-    op_bx  = 0x012fff10
+    OpB = 0x0a000000,
+    OpBMask = 0x0f000000,
+    OpBDestMask = 0x00ffffff,
+    OpBl = 0x0b000000,
+    OpBlx = 0x012fff30,
+    OpBx  = 0x012fff10
 };
 
 // Just like ALUOp, but for the vfp instruction set.
 enum VFPOp {
-    opv_mul  = 0x2 << 20,
-    opv_add  = 0x3 << 20,
-    opv_sub  = 0x3 << 20 | 0x1 << 6,
-    opv_div  = 0x8 << 20,
-    opv_mov  = 0xB << 20 | 0x1 << 6,
-    opv_abs  = 0xB << 20 | 0x3 << 6,
-    opv_neg  = 0xB << 20 | 0x1 << 6 | 0x1 << 16,
-    opv_sqrt = 0xB << 20 | 0x3 << 6 | 0x1 << 16,
-    opv_cmp  = 0xB << 20 | 0x1 << 6 | 0x4 << 16,
-    opv_cmpz  = 0xB << 20 | 0x1 << 6 | 0x5 << 16
+    OpvMul  = 0x2 << 20,
+    OpvAdd  = 0x3 << 20,
+    OpvSub  = 0x3 << 20 | 0x1 << 6,
+    OpvDiv  = 0x8 << 20,
+    OpvMov  = 0xB << 20 | 0x1 << 6,
+    OpvAbs  = 0xB << 20 | 0x3 << 6,
+    OpvNeg  = 0xB << 20 | 0x1 << 6 | 0x1 << 16,
+    OpvSqrt = 0xB << 20 | 0x3 << 6 | 0x1 << 16,
+    OpvCmp  = 0xB << 20 | 0x1 << 6 | 0x4 << 16,
+    OpvCmpz  = 0xB << 20 | 0x1 << 6 | 0x5 << 16
 };
 // Negate the operation, AND negate the immediate that we were passed in.
 ALUOp ALUNeg(ALUOp op, Register dest, Imm32 *imm, Register *negDest);
 bool can_dbl(ALUOp op);
 bool condsAreSafe(ALUOp op);
-// If there is a variant of op that has a dest (think cmp/sub)
-// return that variant of it.
+// If there is a variant of op that has a dest (think cmp/sub) return that
+// variant of it.
 ALUOp getDestVariant(ALUOp op);
 
 static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data);
 static const ValueOperand softfpReturnOperand = ValueOperand(r1, r0);
 // All of these classes exist solely to shuffle data into the various operands.
-// For example Operand2 can be an imm8, a register-shifted-by-a-constant or
-// a register-shifted-by-a-register.  I represent this in C++ by having a
-// base class Operand2, which just stores the 32 bits of data as they will be
-// encoded in the instruction.  You cannot directly create an Operand2
-// since it is tricky, and not entirely sane to do so.  Instead, you create
-// one of its child classes, e.g. Imm8.  Imm8's constructor takes a single
-// integer argument.  Imm8 will verify that its argument can be encoded
-// as an ARM 12 bit imm8, encode it using an Imm8data, and finally call
-// its parent's (Operand2) constructor with the Imm8data.  The Operand2
-// constructor will then call the Imm8data's encode() function to extract
-// the raw bits from it.  In the future, we should be able to extract
-// data from the Operand2 by asking it for its component Imm8data
-// structures.  The reason this is so horribly round-about is I wanted
-// to have Imm8 and RegisterShiftedRegister inherit directly from Operand2
-// but have all of them take up only a single word of storage.
-// I also wanted to avoid passing around raw integers at all
-// since they are error prone.
+// For example Operand2 can be an imm8, a register-shifted-by-a-constant or a
+// register-shifted-by-a-register. We represent this in C++ by having a base
+// class Operand2, which just stores the 32 bits of data as they will be encoded
+// in the instruction. You cannot directly create an Operand2 since it is
+// tricky, and not entirely sane to do so. Instead, you create one of its child
+// classes, e.g. Imm8. Imm8's constructor takes a single integer argument. Imm8
+// will verify that its argument can be encoded as an ARM 12 bit imm8, encode it
+// using an Imm8data, and finally call its parent's (Operand2) constructor with
+// the Imm8data. The Operand2 constructor will then call the Imm8data's encode()
+// function to extract the raw bits from it.
+//
+// In the future, we should be able to extract data from the Operand2 by asking
+// it for its component Imm8data structures. The reason this is so horribly
+// round-about is we wanted to have Imm8 and RegisterShiftedRegister inherit
+// directly from Operand2 but have all of them take up only a single word of
+// storage. We also wanted to avoid passing around raw integers at all since
+// they are error prone.
 class Op2Reg;
 class O2RegImmShift;
 class O2RegRegShift;
 namespace datastore {
 struct Reg
 {
-    // the "second register"
+    // The "second register".
     uint32_t RM : 4;
-    // do we get another register for shifting
+    // Do we get another register for shifting.
     uint32_t RRS : 1;
     ShiftType Type : 2;
-    // I'd like this to be a more sensible encoding, but that would
-    // need to be a struct and that would not pack :(
+    // We'd like this to be a more sensible encoding, but that would need to be
+    // a struct and that would not pack :(
     uint32_t ShiftAmount : 5;
     uint32_t pad : 20;
 
     Reg(uint32_t rm, ShiftType type, uint32_t rsr, uint32_t shiftamount)
       : RM(rm), RRS(rsr), Type(type), ShiftAmount(shiftamount), pad(0)
     { }
 
     uint32_t encode() {
         return RM | RRS << 4 | Type << 5 | ShiftAmount << 7;
     }
     explicit Reg(const Op2Reg &op) {
         memcpy(this, &op, sizeof(*this));
     }
 };
 
-// Op2 has a mode labelled "<imm8m>", which is arm's magical
-// immediate encoding.  Some instructions actually get 8 bits of
-// data, which is called Imm8Data below.  These should have edit
-// distance > 1, but this is how it is for now.
+// Op2 has a mode labelled "<imm8m>", which is arm's magical immediate encoding.
+// Some instructions actually get 8 bits of data, which is called Imm8Data
+// below. These should have edit distance > 1, but this is how it is for now.
 struct Imm8mData
 {
   private:
     uint32_t data : 8;
     uint32_t rot : 4;
-    // Throw in an extra bit that will be 1 if we can't encode this
-    // properly.  if we can encode it properly, a simple "|" will still
-    // suffice to meld it into the instruction.
+    // Throw in an extra bit that will be 1 if we can't encode this properly.
+    // If we can encode it properly, a simple "|" will still suffice to meld it
+    // into the instruction.
     uint32_t buff : 19;
   public:
     uint32_t invalid : 1;
 
     uint32_t encode() {
         JS_ASSERT(!invalid);
         return data | rot << 8;
     };
@@ -408,54 +401,53 @@ struct Imm8Data
     uint32_t imm4L : 4;
     uint32_t pad : 4;
     uint32_t imm4H : 4;
 
   public:
     uint32_t encode() {
         return imm4L | (imm4H << 8);
     };
-    Imm8Data(uint32_t imm) : imm4L(imm&0xf), imm4H(imm>>4) {
+    Imm8Data(uint32_t imm) : imm4L(imm & 0xf), imm4H(imm >> 4) {
         JS_ASSERT(imm <= 0xff);
     }
 };
 
-// VLDR/VSTR take an 8 bit offset, which is implicitly left shifted
-// by 2.
+// VLDR/VSTR take an 8 bit offset, which is implicitly left shifted by 2.
 struct Imm8VFPOffData
 {
   private:
     uint32_t data;
 
   public:
     uint32_t encode() {
         return data;
     };
     Imm8VFPOffData(uint32_t imm) : data (imm) {
         JS_ASSERT((imm & ~(0xff)) == 0);
     }
 };
 
-// ARM can magically encode 256 very special immediates to be moved
-// into a register.
+// ARM can magically encode 256 very special immediates to be moved into a
+// register.
 struct Imm8VFPImmData
 {
   private:
     uint32_t imm4L : 4;
     uint32_t pad : 12;
     uint32_t imm4H : 4;
     int32_t isInvalid : 12;
 
   public:
     Imm8VFPImmData()
       : imm4L(-1U & 0xf), imm4H(-1U & 0xf), isInvalid(-1)
     { }
 
     Imm8VFPImmData(uint32_t imm)
-      : imm4L(imm&0xf), imm4H(imm>>4), isInvalid(0)
+      : imm4L(imm&0xf), imm4H(imm >> 4), isInvalid(0)
     {
         JS_ASSERT(imm <= 0xff);
     }
 
     uint32_t encode() {
         if (isInvalid != 0)
             return -1;
         return imm4L | (imm4H << 16);
@@ -490,17 +482,17 @@ struct RIS
         JS_ASSERT(ShiftAmount == imm);
     }
     explicit RIS(Reg r) : ShiftAmount(r.ShiftAmount) {}
 };
 
 struct RRS
 {
     uint32_t MustZero : 1;
-    // the register that holds the shift amount
+    // The register that holds the shift amount.
     uint32_t RS : 4;
 
     RRS(uint32_t rs)
       : RS(rs)
     {
         JS_ASSERT(rs == RS);
     }
 
@@ -548,63 +540,63 @@ class Operand2
     uint32_t encode() {
         return oper;
     }
 };
 
 class Imm8 : public Operand2
 {
   public:
-    static datastore::Imm8mData encodeImm(uint32_t imm) {
+    static datastore::Imm8mData EncodeImm(uint32_t imm) {
         // mozilla::CountLeadingZeroes32(imm) requires imm != 0.
         if (imm == 0)
             return datastore::Imm8mData(0, 0);
         int left = mozilla::CountLeadingZeroes32(imm) & 30;
         // See if imm is a simple value that can be encoded with a rotate of 0.
         // This is effectively imm <= 0xff, but I assume this can be optimized
-        // more
+        // more.
         if (left >= 24)
             return datastore::Imm8mData(imm, 0);
 
         // Mask out the 8 bits following the first bit that we found, see if we
         // have 0 yet.
         int no_imm = imm & ~(0xff << (24 - left));
         if (no_imm == 0) {
-            return  datastore::Imm8mData(imm >> (24 - left), ((8+left) >> 1));
+            return  datastore::Imm8mData(imm >> (24 - left), ((8 + left) >> 1));
         }
         // Look for the most signifigant bit set, once again.
         int right = 32 - (mozilla::CountLeadingZeroes32(no_imm) & 30);
         // If it is in the bottom 8 bits, there is a chance that this is a
         // wraparound case.
         if (right >= 8)
             return datastore::Imm8mData();
         // Rather than masking out bits and checking for 0, just rotate the
         // immediate that we were passed in, and see if it fits into 8 bits.
         unsigned int mask = imm << (8 - right) | imm >> (24 + right);
         if (mask <= 0xff)
-            return datastore::Imm8mData(mask, (8-right) >> 1);
+            return datastore::Imm8mData(mask, (8 - right) >> 1);
         return datastore::Imm8mData();
     }
-    // pair template?
+    // Pair template?
     struct TwoImm8mData
     {
         datastore::Imm8mData fst, snd;
 
         TwoImm8mData()
           : fst(), snd()
         { }
 
         TwoImm8mData(datastore::Imm8mData _fst, datastore::Imm8mData _snd)
           : fst(_fst), snd(_snd)
         { }
     };
 
-    static TwoImm8mData encodeTwoImms(uint32_t);
+    static TwoImm8mData EncodeTwoImms(uint32_t);
     Imm8(uint32_t imm)
-      : Operand2(encodeImm(imm))
+      : Operand2(EncodeImm(imm))
     { }
 };
 
 class Op2Reg : public Operand2
 {
   public:
     Op2Reg(Register rm, ShiftType type, datastore::RIS shiftImm)
       : Operand2(datastore::Reg(rm.code(), type, 0, shiftImm.encode()))
@@ -667,21 +659,21 @@ O2RegImmShift asr (Register r, int amt);
 O2RegImmShift rol (Register r, int amt);
 O2RegImmShift ror (Register r, int amt);
 
 O2RegRegShift lsl (Register r, Register amt);
 O2RegRegShift lsr (Register r, Register amt);
 O2RegRegShift asr (Register r, Register amt);
 O2RegRegShift ror (Register r, Register amt);
 
-// An offset from a register to be used for ldr/str.  This should include
-// the sign bit, since ARM has "signed-magnitude" offsets.  That is it encodes
-// an unsigned offset, then the instruction specifies if the offset is positive
-// or negative.  The +/- bit is necessary if the instruction set wants to be
-// able to have a negative register offset e.g. ldr pc, [r1,-r2];
+// An offset from a register to be used for ldr/str. This should include the
+// sign bit, since ARM has "signed-magnitude" offsets. That is, it encodes an
+// unsigned offset, then the instruction specifies if the offset is positive or
+// negative. The +/- bit is necessary if the instruction set wants to be able to
+// have a negative register offset e.g. ldr pc, [r1,-r2];
 class DtrOff
 {
     uint32_t data;
 
   protected:
     DtrOff(datastore::Imm12Data immdata, IsUp_ iu)
       : data(immdata.encode() | (uint32_t)IsImmDTR | ((uint32_t)iu))
     { }
@@ -702,17 +694,17 @@ class DtrOffImm : public DtrOff
     {
         JS_ASSERT(mozilla::Abs(imm) < 4096);
     }
 };
 
 class DtrOffReg : public DtrOff
 {
     // These are designed to be called by a constructor of a subclass.
-    // Constructing the necessary RIS/RRS structures are annoying
+    // Constructing the necessary RIS/RRS structures are annoying.
   protected:
     DtrOffReg(Register rn, ShiftType type, datastore::RIS shiftImm, IsUp_ iu = IsUp)
       : DtrOff(datastore::Reg(rn.code(), type, 0, shiftImm.encode()), iu)
     { }
 
     DtrOffReg(Register rn, ShiftType type, datastore::RRS shiftReg, IsUp_ iu = IsUp)
       : DtrOff(datastore::Reg(rn.code(), type, 1, shiftReg.encode()), iu)
     { }
@@ -729,17 +721,17 @@ class DtrRegImmShift : public DtrOffReg
 class DtrRegRegShift : public DtrOffReg
 {
   public:
     DtrRegRegShift(Register rn, ShiftType type, Register rs, IsUp_ iu = IsUp)
       : DtrOffReg(rn, type, datastore::RRS(rs.code()), iu)
     { }
 };
 
-// we will frequently want to bundle a register with its offset so that we have
+// We will frequently want to bundle a register with its offset so that we have
 // an "operand" to a load instruction.
 class DTRAddr
 {
     uint32_t data;
 
   public:
     DTRAddr(Register reg, DtrOff dtr)
       : data(dtr.encode() | (reg.code() << 16))
@@ -784,19 +776,18 @@ class EDtrOffImm : public EDtrOff
   public:
     EDtrOffImm(int32_t imm)
       : EDtrOff(datastore::Imm8Data(mozilla::Abs(imm)), (imm >= 0) ? IsUp : IsDown)
     {
         JS_ASSERT(mozilla::Abs(imm) < 256);
     }
 };
 
-// this is the most-derived class, since the extended data
-// transfer instructions don't support any sort of modifying the
-// "index" operand
+// This is the most-derived class, since the extended data transfer instructions
+// don't support any sort of modifying the "index" operand.
 class EDtrOffReg : public EDtrOff
 {
   public:
     EDtrOffReg(Register rm)
       : EDtrOff(rm)
     { }
 };
 
@@ -858,50 +849,51 @@ class VFPAddr
         return data;
     }
 };
 
 class VFPImm {
     uint32_t data;
 
   public:
-    static const VFPImm one;
+    static const VFPImm One;
 
     VFPImm(uint32_t topWordOfDouble);
 
     uint32_t encode() {
         return data;
     }
     bool isValid() {
         return data != -1U;
     }
 };
 
-// A BOffImm is an immediate that is used for branches. Namely, it is the offset that will
-// be encoded in the branch instruction. This is the only sane way of constructing a branch.
+// A BOffImm is an immediate that is used for branches. Namely, it is the offset
+// that will be encoded in the branch instruction. This is the only sane way of
+// constructing a branch.
 class BOffImm
 {
     uint32_t data;
 
   public:
     uint32_t encode() {
         return data;
     }
     int32_t decode() {
         return ((((int32_t)data) << 8) >> 6) + 8;
     }
 
     explicit BOffImm(int offset)
       : data ((offset - 8) >> 2 & 0x00ffffff)
     {
         JS_ASSERT((offset & 0x3) == 0);
-        if (!isInRange(offset))
+        if (!IsInRange(offset))
             CrashAtUnhandlableOOM("BOffImm");
     }
-    static bool isInRange(int offset)
+    static bool IsInRange(int offset)
     {
         if ((offset - 8) < -33554432)
             return false;
         if ((offset - 8) > 33554428)
             return false;
         return true;
     }
     static const int INVALID = 0x00800000;
@@ -938,25 +930,23 @@ class Imm16
         return lower | upper << 12;
     }
 
     bool isInvalid () {
         return invalid;
     }
 };
 
-/* I would preffer that these do not exist, since there are essentially
-* no instructions that would ever take more than one of these, however,
-* the MIR wants to only have one type of arguments to functions, so bugger.
-*/
+// I would prefer that these do not exist, since there are essentially no
+// instructions that would ever take more than one of these, however, the MIR
+// wants to only have one type of arguments to functions, so bugger.
 class Operand
 {
-    // the encoding of registers is the same for OP2, DTR and EDTR
-    // yet the type system doesn't let us express this, so choices
-    // must be made.
+    // The encoding of registers is the same for OP2, DTR and EDTR yet the type
+    // system doesn't let us express this, so choices must be made.
   public:
     enum Tag_ {
         OP2,
         MEM,
         FOP
     };
 
   private:
@@ -1032,17 +1022,17 @@ void
 PatchJump(CodeLocationJump &jump_, CodeLocationLabel label);
 class InstructionIterator;
 class Assembler;
 typedef js::jit::AssemblerBufferWithConstantPool<1024, 4, Instruction, Assembler, 1> ARMBuffer;
 
 class Assembler : public AssemblerShared
 {
   public:
-    // ARM conditional constants
+    // ARM conditional constants:
     enum ARMCondition {
         EQ = 0x00000000, // Zero
         NE = 0x10000000, // Non-zero
         CS = 0x20000000,
         CC = 0x30000000,
         MI = 0x40000000,
         PL = 0x50000000,
         VS = 0x60000000,
@@ -1089,17 +1079,18 @@ class Assembler : public AssemblerShared
     };
 
     // Bit set when a DoubleCondition does not map to a single ARM condition.
     // The macro assembler has to special-case these conditions, or else
     // ConditionFromDoubleCondition will complain.
     static const int DoubleConditionBitSpecial = 0x1;
 
     enum DoubleCondition {
-        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+        // These conditions will only evaluate to true if the comparison is
+        // ordered - i.e. neither operand is NaN.
         DoubleOrdered = VFP_NotUnordered,
         DoubleEqual = VFP_Equal,
         DoubleNotEqual = VFP_NotEqualOrUnordered | DoubleConditionBitSpecial,
         DoubleGreaterThan = VFP_GreaterThan,
         DoubleGreaterThanOrEqual = VFP_GreaterThanOrEqual,
         DoubleLessThan = VFP_LessThan,
         DoubleLessThanOrEqual = VFP_LessThanOrEqual,
         // If either operand is NaN, these conditions always evaluate to true.
@@ -1115,77 +1106,77 @@ class Assembler : public AssemblerShared
     Condition getCondition(uint32_t inst) {
         return (Condition) (0xf0000000 & inst);
     }
     static inline Condition ConditionFromDoubleCondition(DoubleCondition cond) {
         JS_ASSERT(!(cond & DoubleConditionBitSpecial));
         return static_cast<Condition>(cond);
     }
 
-    // :( this should be protected, but since CodeGenerator
-    // wants to use it, It needs to go out here :(
+    // This should be protected, but since CodeGenerator wants to use it, it
+    // needs to go out here :(
 
     BufferOffset nextOffset() {
         return m_buffer.nextOffset();
     }
 
   protected:
     BufferOffset labelOffset (Label *l) {
         return BufferOffset(l->bound());
     }
 
-    Instruction * editSrc (BufferOffset bo) {
+    Instruction *editSrc (BufferOffset bo) {
         return m_buffer.getInst(bo);
     }
   public:
     void resetCounter();
     uint32_t actualOffset(uint32_t) const;
     uint32_t actualIndex(uint32_t) const;
     static uint8_t *PatchableJumpAddress(JitCode *code, uint32_t index);
     BufferOffset actualOffset(BufferOffset) const;
     static uint32_t NopFill;
     static uint32_t GetNopFill();
   protected:
 
-    // structure for fixing up pc-relative loads/jumps when a the machine code
-    // gets moved (executable copy, gc, etc.)
+    // Structure for fixing up pc-relative loads/jumps when the machine code
+    // gets moved (executable copy, gc, etc.).
     struct RelativePatch
     {
         void *target;
         Relocation::Kind kind;
         RelativePatch(void *target, Relocation::Kind kind)
             : target(target), kind(kind)
         { }
     };
 
-    // TODO: this should actually be a pool-like object
-    //       It is currently a big hack, and probably shouldn't exist
+    // TODO: this should actually be a pool-like object. It is currently a big
+    // hack, and probably shouldn't exist.
     js::Vector<CodeLabel, 0, SystemAllocPolicy> codeLabels_;
     js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
     js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpJumpRelocations_;
     js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpDataRelocations_;
     js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpPreBarriers_;
 
     CompactBufferWriter jumpRelocations_;
     CompactBufferWriter dataRelocations_;
     CompactBufferWriter relocations_;
     CompactBufferWriter preBarriers_;
 
     ARMBuffer m_buffer;
 
-    // There is now a semi-unified interface for instruction generation.
-    // During assembly, there is an active buffer that instructions are
-    // being written into, but later, we may wish to modify instructions
-    // that have already been created.  In order to do this, we call the
-    // same assembly function, but pass it a destination address, which
-    // will be overwritten with a new instruction. In order to do this very
-    // after assembly buffers no longer exist, when calling with a third
-    // dest parameter, a this object is still needed.  dummy always happens
-    // to be null, but we shouldn't be looking at it in any case.
-    static Assembler *dummy;
+    // There is now a semi-unified interface for instruction generation. During
+    // assembly, there is an active buffer that instructions are being written
+    // into, but later, we may wish to modify instructions that have already
+    // been created. In order to do this, we call the same assembly function,
+    // but pass it a destination address, which will be overwritten with a new
+    // instruction. In order to do this even after assembly buffers no longer
+    // exist, when calling with a third dest parameter, a this object is still
+    // needed. Dummy always happens to be null, but we shouldn't be looking at
+    // it in any case.
+    static Assembler *Dummy;
     mozilla::Array<Pool, 4> pools_;
     Pool *int32Pool;
     Pool *doublePool;
 
   public:
     // For the nopFill use a branch to the next instruction: 0xeaffffff.
     Assembler()
       : m_buffer(4, 4, 0, &pools_[0], 8, 0xeaffffff, GetNopFill()),
@@ -1197,23 +1188,23 @@ class Assembler : public AssemblerShared
     {
     }
 
     // We need to wait until an AutoIonContextAlloc is created by the
     // IonMacroAssembler, before allocating any space.
     void initWithAllocator() {
         m_buffer.initWithAllocator();
 
-        // Set up the backwards double region
+        // Set up the backwards double region.
         new (&pools_[2]) Pool (1024, 8, 4, 8, 8, m_buffer.LifoAlloc_, true);
-        // Set up the backwards 32 bit region
+        // Set up the backwards 32 bit region.
         new (&pools_[3]) Pool (4096, 4, 4, 8, 4, m_buffer.LifoAlloc_, true, true);
-        // Set up the forwards double region
+        // Set up the forwards double region.
         new (doublePool) Pool (1024, 8, 4, 8, 8, m_buffer.LifoAlloc_, false, false, &pools_[2]);
-        // Set up the forwards 32 bit region
+        // Set up the forwards 32 bit region.
         new (int32Pool) Pool (4096, 4, 4, 8, 4, m_buffer.LifoAlloc_, false, true, &pools_[3]);
         for (int i = 0; i < 4; i++) {
             if (pools_[i].poolData == nullptr) {
                 m_buffer.fail_oom();
                 return;
             }
         }
     }
@@ -1244,25 +1235,25 @@ class Assembler : public AssemblerShared
     };
 
     enum RelocStyle {
         L_MOVWT,
         L_LDR
     };
 
   public:
-    // Given the start of a Control Flow sequence, grab the value that is finally branched to
-    // given the start of a function that loads an address into a register get the address that
-    // ends up in the register.
+    // Given the start of a control flow sequence, grab the value that is
+    // finally branched to; given the start of a function that loads an address
+    // into a register, get the address that ends up in the register.
     template <class Iter>
-    static const uint32_t * getCF32Target(Iter *iter);
+    static const uint32_t *GetCF32Target(Iter *iter);
 
-    static uintptr_t getPointer(uint8_t *);
+    static uintptr_t GetPointer(uint8_t *);
     template <class Iter>
-    static const uint32_t * getPtr32Target(Iter *iter, Register *dest = nullptr, RelocStyle *rs = nullptr);
+    static const uint32_t *GetPtr32Target(Iter *iter, Register *dest = nullptr, RelocStyle *rs = nullptr);
 
     bool oom() const;
 
     void setPrinter(Sprinter *sp) {
     }
 
   private:
     bool isFinished;
@@ -1286,77 +1277,77 @@ class Assembler : public AssemblerShared
     // Size of the jump relocation table, in bytes.
     size_t jumpRelocationTableBytes() const;
     size_t dataRelocationTableBytes() const;
     size_t preBarrierTableBytes() const;
 
     // Size of the data table, in bytes.
     size_t bytesNeeded() const;
 
-    // Write a blob of binary into the instruction stream *OR*
-    // into a destination address. If dest is nullptr (the default), then the
+    // Write a blob of binary into the instruction stream *OR* into a
+    // destination address. If dest is nullptr (the default), then the
     // instruction gets written into the instruction stream. If dest is not null
     // it is interpreted as a pointer to the location that we want the
     // instruction to be written.
     BufferOffset writeInst(uint32_t x, uint32_t *dest = nullptr);
 
     // As above, but also mark the instruction as a branch.
     BufferOffset writeBranchInst(uint32_t x);
 
     // A static variant for the cases where we don't want to have an assembler
     // object at all. Normally, you would use the dummy (nullptr) object.
-    static void writeInstStatic(uint32_t x, uint32_t *dest);
+    static void WriteInstStatic(uint32_t x, uint32_t *dest);
 
   public:
     void writeCodePointer(AbsoluteLabel *label);
 
     BufferOffset align(int alignment);
     BufferOffset as_nop();
     BufferOffset as_alu(Register dest, Register src1, Operand2 op2,
                 ALUOp op, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = nullptr);
 
     BufferOffset as_mov(Register dest,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = nullptr);
     BufferOffset as_mvn(Register dest, Operand2 op2,
                 SetCond_ sc = NoSetCond, Condition c = Always);
-    // logical operations
+    // Logical operations:
     BufferOffset as_and(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_bic(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_eor(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_orr(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
-    // mathematical operations
+    // Mathematical operations:
     BufferOffset as_adc(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_add(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_sbc(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_sub(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_rsb(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_rsc(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
-    // test operations
+    // Test operations:
     BufferOffset as_cmn(Register src1, Operand2 op2,
                 Condition c = Always);
     BufferOffset as_cmp(Register src1, Operand2 op2,
                 Condition c = Always);
     BufferOffset as_teq(Register src1, Operand2 op2,
                 Condition c = Always);
     BufferOffset as_tst(Register src1, Operand2 op2,
                 Condition c = Always);
 
-    // Not quite ALU worthy, but useful none the less:
-    // These also have the isue of these being formatted
-    // completly differently from the standard ALU operations.
+    // Not quite ALU worthy, but useful nonetheless: these also have the issue
+    // of being formatted completely differently from the standard ALU
+    // operations.
     BufferOffset as_movw(Register dest, Imm16 imm, Condition c = Always, Instruction *pos = nullptr);
     BufferOffset as_movt(Register dest, Imm16 imm, Condition c = Always, Instruction *pos = nullptr);
 
     BufferOffset as_genmul(Register d1, Register d2, Register rm, Register rn,
                    MULOp op, SetCond_ sc, Condition c = Always);
     BufferOffset as_mul(Register dest, Register src1, Register src2,
                 SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_mla(Register dest, Register acc, Register src1, Register src2,
@@ -1373,78 +1364,74 @@ class Assembler : public AssemblerShared
                 SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_smlal(Register dest1, Register dest2, Register src1, Register src2,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
     BufferOffset as_sdiv(Register dest, Register num, Register div, Condition c = Always);
     BufferOffset as_udiv(Register dest, Register num, Register div, Condition c = Always);
 
     // Data transfer instructions: ldr, str, ldrb, strb.
-    // Using an int to differentiate between 8 bits and 32 bits is
-    // overkill, but meh
+    // Using an int to differentiate between 8 bits and 32 bits is overkill.
     BufferOffset as_dtr(LoadStore ls, int size, Index mode,
                 Register rt, DTRAddr addr, Condition c = Always, uint32_t *dest = nullptr);
     // Handles all of the other integral data transferring functions:
-    // ldrsb, ldrsh, ldrd, etc.
-    // size is given in bits.
+    // ldrsb, ldrsh, ldrd, etc. The size is given in bits.
     BufferOffset as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
                    Register rt, EDtrAddr addr, Condition c = Always, uint32_t *dest = nullptr);
 
     BufferOffset as_dtm(LoadStore ls, Register rn, uint32_t mask,
                 DTMMode mode, DTMWriteBack wb, Condition c = Always);
-    //overwrite a pool entry with new data.
+    // Overwrite a pool entry with new data.
     void as_WritePoolEntry(Instruction *addr, Condition c, uint32_t data);
-    // load a 32 bit immediate from a pool into a register
+    // Load a 32 bit immediate from a pool into a register.
     BufferOffset as_Imm32Pool(Register dest, uint32_t value, Condition c = Always);
-    // make a patchable jump that can target the entire 32 bit address space.
+    // Make a patchable jump that can target the entire 32 bit address space.
     BufferOffset as_BranchPool(uint32_t value, RepatchLabel *label, ARMBuffer::PoolEntry *pe = nullptr, Condition c = Always);
 
-    // load a 64 bit floating point immediate from a pool into a register
+    // Load a 64 bit floating point immediate from a pool into a register.
     BufferOffset as_FImm64Pool(VFPRegister dest, double value, Condition c = Always);
-    // load a 32 bit floating point immediate from a pool into a register
+    // Load a 32 bit floating point immediate from a pool into a register.
     BufferOffset as_FImm32Pool(VFPRegister dest, float value, Condition c = Always);
 
     // Control flow stuff:
 
-    // bx can *only* branch to a register
-    // never to an immediate.
+    // bx can *only* branch to a register never to an immediate.
     BufferOffset as_bx(Register r, Condition c = Always, bool isPatchable = false);
 
-    // Branch can branch to an immediate *or* to a register.
-    // Branches to immediates are pc relative, branches to registers
-    // are absolute
+    // Branch can branch to an immediate *or* to a register. Branches to
+    // immediates are pc relative, branches to registers are absolute.
     BufferOffset as_b(BOffImm off, Condition c, bool isPatchable = false);
 
     BufferOffset as_b(Label *l, Condition c = Always, bool isPatchable = false);
     BufferOffset as_b(BOffImm off, Condition c, BufferOffset inst);
 
-    // blx can go to either an immediate or a register.
-    // When blx'ing to a register, we change processor mode
-    // depending on the low bit of the register
-    // when blx'ing to an immediate, we *always* change processor state.
+    // blx can go to either an immediate or a register. When blx'ing to a
+    // register, we change processor mode depending on the low bit of the
+    // register when blx'ing to an immediate, we *always* change processor
+    // state.
     BufferOffset as_blx(Label *l);
 
     BufferOffset as_blx(Register r, Condition c = Always);
     BufferOffset as_bl(BOffImm off, Condition c);
-    // bl can only branch+link to an immediate, never to a register
-    // it never changes processor state
+    // bl can only branch+link to an immediate, never to a register it never
+    // changes processor state.
     BufferOffset as_bl();
     // bl #imm can have a condition code, blx #imm cannot.
     // blx reg can be conditional.
     BufferOffset as_bl(Label *l, Condition c);
     BufferOffset as_bl(BOffImm off, Condition c, BufferOffset inst);
 
     BufferOffset as_mrs(Register r, Condition c = Always);
     BufferOffset as_msr(Register r, Condition c = Always);
     // VFP instructions!
   private:
 
     enum vfp_size {
-        isDouble = 1 << 8,
-        isSingle = 0 << 8
+        IsDouble = 1 << 8,
+        IsSingle = 0 << 8
     };
 
     BufferOffset writeVFPInst(vfp_size sz, uint32_t blob, uint32_t *dest=nullptr);
     // Unityped variants: all registers hold the same (ieee754 single/double)
     // notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
     BufferOffset as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                       VFPOp op, Condition c = Always);
 
@@ -1475,63 +1462,63 @@ class Assembler : public AssemblerShared
 
     BufferOffset as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                  Condition c = Always);
 
     BufferOffset as_vcmp(VFPRegister vd, VFPRegister vm,
                  Condition c = Always);
     BufferOffset as_vcmpz(VFPRegister vd,  Condition c = Always);
 
-    // specifically, a move between two same sized-registers
+    // Specifically, a move between two same sized-registers.
     BufferOffset as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c = Always);
-    /*xfer between Core and VFP*/
+    // Transfer between Core and VFP.
     enum FloatToCore_ {
         FloatToCore = 1 << 20,
         CoreToFloat = 0 << 20
     };
 
   private:
     enum VFPXferSize {
         WordTransfer   = 0x02000010,
         DoubleTransfer = 0x00400010
     };
 
   public:
     // Unlike the next function, moving between the core registers and vfp
-    // registers can't be *that* properly typed.  Namely, since I don't want to
-    // munge the type VFPRegister to also include core registers.  Thus, the core
+    // registers can't be *that* properly typed. Namely, since I don't want to
+    // munge the type VFPRegister to also include core registers. Thus, the core
     // and vfp registers are passed in based on their type, and src/dest is
     // determined by the float2core.
 
     BufferOffset as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c,
                   Condition c = Always, int idx = 0);
 
-    // our encoding actually allows just the src and the dest (and theiyr types)
+    // Our encoding actually allows just the src and the dest (and their types)
     // to uniquely specify the encoding that we are going to use.
     BufferOffset as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR = false,
                          Condition c = Always);
-    // hard coded to a 32 bit fixed width result for now
+    // Hard coded to a 32 bit fixed width result for now.
     BufferOffset as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint, bool toFixed, Condition c = Always);
 
-    /* xfer between VFP and memory*/
+    // Transfer between VFP and memory.
     BufferOffset as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
                  Condition c = Always /* vfp doesn't have a wb option*/,
                  uint32_t *dest = nullptr);
 
-    // VFP's ldm/stm work differently from the standard arm ones.
-    // You can only transfer a range
+    // VFP's ldm/stm work differently from the standard arm ones. You can only
+    // transfer a range.
 
     BufferOffset as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length,
                  /*also has update conditions*/Condition c = Always);
 
     BufferOffset as_vimm(VFPRegister vd, VFPImm imm, Condition c = Always);
 
     BufferOffset as_vmrs(Register r, Condition c = Always);
     BufferOffset as_vmsr(Register r, Condition c = Always);
-    // label operations
+    // Label operations.
     bool nextLink(BufferOffset b, BufferOffset *next);
     void bind(Label *label, BufferOffset boff = BufferOffset());
     void bind(RepatchLabel *label);
     uint32_t currentOffset() {
         return nextOffset().getOffset();
     }
     void retarget(Label *label, Label *target);
     // I'm going to pretend this doesn't exist for now.
@@ -1633,17 +1620,17 @@ class Assembler : public AssemblerShared
         }
         dtmLastReg = rn.code();
     }
     void finishFloatTransfer() {
         JS_ASSERT(dtmActive);
         dtmActive = false;
         JS_ASSERT(dtmLastReg != -1);
         dtmDelta = dtmDelta ? dtmDelta : 1;
-        // fencepost problem.
+        // Fencepost problem.
         int len = dtmDelta * (dtmLastReg - vdtmFirstReg) + 1;
         as_vdtm(dtmLoadStore, dtmBase,
                 VFPRegister(FloatRegister::FromCode(Min(vdtmFirstReg, dtmLastReg))),
                 len, dtmCond);
     }
 
   private:
     int dtmRegBitField;
@@ -1654,210 +1641,208 @@ class Assembler : public AssemblerShared
     DTMWriteBack dtmUpdate;
     DTMMode dtmMode;
     LoadStore dtmLoadStore;
     bool dtmActive;
     Condition dtmCond;
 
   public:
     enum {
-        padForAlign8  = (int)0x00,
-        padForAlign16 = (int)0x0000,
-        padForAlign32 = (int)0xe12fff7f  // 'bkpt 0xffff'
+        PadForAlign8  = (int)0x00,
+        PadForAlign16 = (int)0x0000,
+        PadForAlign32 = (int)0xe12fff7f  // 'bkpt 0xffff'
     };
 
-    // API for speaking with the IonAssemblerBufferWithConstantPools
-    // generate an initial placeholder instruction that we want to later fix up
-    static void insertTokenIntoTag(uint32_t size, uint8_t *load, int32_t token);
-    // take the stub value that was written in before, and write in an actual load
-    // using the index we'd computed previously as well as the address of the pool start.
-    static bool patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
-    // this is a callback for when we have filled a pool, and MUST flush it now.
-    // The pool requires the assembler to place a branch past the pool, and it
-    // calls this function.
-    static uint32_t placeConstantPoolBarrier(int offset);
+    // API for speaking with the IonAssemblerBufferWithConstantPools generate an
+    // initial placeholder instruction that we want to later fix up.
+    static void InsertTokenIntoTag(uint32_t size, uint8_t *load, int32_t token);
+    // Take the stub value that was written in before, and write in an actual
+    // load using the index we'd computed previously as well as the address of
+    // the pool start.
+    static bool PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
     // END API
 
-    // move our entire pool into the instruction stream
-    // This is to force an opportunistic dump of the pool, prefferably when it
-    // is more convenient to do a dump.
+    // Move our entire pool into the instruction stream. This is to force an
+    // opportunistic dump of the pool, prefferably when it is more convenient to
+    // do a dump.
     void dumpPool();
     void flushBuffer();
     void enterNoPool();
     void leaveNoPool();
-    // this should return a BOffImm, but I didn't want to require everyplace that used the
-    // AssemblerBuffer to make that class.
+    // This should return a BOffImm, but we didn't want to require everyplace
+    // that used the AssemblerBuffer to make that class.
     static ptrdiff_t getBranchOffset(const Instruction *i);
-    static void retargetNearBranch(Instruction *i, int offset, Condition cond, bool final = true);
-    static void retargetNearBranch(Instruction *i, int offset, bool final = true);
-    static void retargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Condition cond);
+    static void RetargetNearBranch(Instruction *i, int offset, Condition cond, bool final = true);
+    static void RetargetNearBranch(Instruction *i, int offset, bool final = true);
+    static void RetargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Condition cond);
 
-    static void writePoolHeader(uint8_t *start, Pool *p, bool isNatural);
-    static void writePoolFooter(uint8_t *start, Pool *p, bool isNatural);
-    static void writePoolGuard(BufferOffset branch, Instruction *inst, BufferOffset dest);
+    static void WritePoolHeader(uint8_t *start, Pool *p, bool isNatural);
+    static void WritePoolFooter(uint8_t *start, Pool *p, bool isNatural);
+    static void WritePoolGuard(BufferOffset branch, Instruction *inst, BufferOffset dest);
 
 
-    static uint32_t patchWrite_NearCallSize();
-    static uint32_t nopSize() { return 4; }
-    static void patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
-    static void patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
+    static uint32_t PatchWrite_NearCallSize();
+    static uint32_t NopSize() { return 4; }
+    static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
+    static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                         PatchedImmPtr expectedValue);
-    static void patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+    static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
                                         ImmPtr expectedValue);
-    static void patchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
+    static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
 
-    static void patchInstructionImmediate(uint8_t *code, PatchedImmPtr imm) {
+    static void PatchInstructionImmediate(uint8_t *code, PatchedImmPtr imm) {
         MOZ_ASSUME_UNREACHABLE("Unused.");
     }
 
-    static uint32_t alignDoubleArg(uint32_t offset) {
-        return (offset+1)&~1;
+    static uint32_t AlignDoubleArg(uint32_t offset) {
+        return (offset + 1) & ~1;
     }
-    static uint8_t *nextInstruction(uint8_t *instruction, uint32_t *count = nullptr);
+    static uint8_t *NextInstruction(uint8_t *instruction, uint32_t *count = nullptr);
+
     // Toggle a jmp or cmp emitted by toggledJump().
-
     static void ToggleToJmp(CodeLocationLabel inst_);
     static void ToggleToCmp(CodeLocationLabel inst_);
 
     static uint8_t *BailoutTableStart(uint8_t *code);
 
     static size_t ToggledCallSize(uint8_t *code);
     static void ToggleCall(CodeLocationLabel inst_, bool enabled);
 
-    static void updateBoundsCheck(uint32_t logHeapSize, Instruction *inst);
+    static void UpdateBoundsCheck(uint32_t logHeapSize, Instruction *inst);
     void processCodeLabels(uint8_t *rawCode);
-    static int32_t extractCodeLabelOffset(uint8_t *code) {
+    static int32_t ExtractCodeLabelOffset(uint8_t *code) {
         return *(uintptr_t *)code;
     }
 
     bool bailed() {
         return m_buffer.bail();
     }
 }; // Assembler
 
-// An Instruction is a structure for both encoding and decoding any and all ARM instructions.
-// many classes have not been implemented thusfar.
+// An Instruction is a structure for both encoding and decoding any and all ARM
+// instructions. Many classes have not been implemented thus far.
 class Instruction
 {
     uint32_t data;
 
   protected:
     // This is not for defaulting to always, this is for instructions that
-    // cannot be made conditional, and have the usually invalid 4b1111 cond field
+    // cannot be made conditional, and have the usually invalid 4b1111 cond
+    // field.
     Instruction (uint32_t data_, bool fake = false) : data(data_ | 0xf0000000) {
         JS_ASSERT (fake || ((data_ & 0xf0000000) == 0));
     }
-    // Standard constructor
+    // Standard constructor.
     Instruction (uint32_t data_, Assembler::Condition c) : data(data_ | (uint32_t) c) {
         JS_ASSERT ((data_ & 0xf0000000) == 0);
     }
-    // You should never create an instruction directly.  You should create a
-    // more specific instruction which will eventually call one of these
-    // constructors for you.
+    // You should never create an instruction directly. You should create a more
+    // specific instruction which will eventually call one of these constructors
+    // for you.
   public:
     uint32_t encode() const {
         return data;
     }
-    // Check if this instruction is really a particular case
+    // Check if this instruction is really a particular case.
     template <class C>
-    bool is() const { return C::isTHIS(*this); }
+    bool is() const { return C::IsTHIS(*this); }
 
-    // safely get a more specific variant of this pointer
+    // Safely get a more specific variant of this pointer.
     template <class C>
-    C *as() const { return C::asTHIS(*this); }
+    C *as() const { return C::AsTHIS(*this); }
 
     const Instruction & operator=(const Instruction &src) {
         data = src.data;
         return *this;
     }
-    // Since almost all instructions have condition codes, the condition
-    // code extractor resides in the base class.
+    // Since almost all instructions have condition codes, the condition code
+    // extractor resides in the base class.
     void extractCond(Assembler::Condition *c) {
         if (data >> 28 != 0xf )
             *c = (Assembler::Condition)(data & 0xf0000000);
     }
     // Get the next instruction in the instruction stream.
     // This does neat things like ignoreconstant pools and their guards.
     Instruction *next();
 
     // Skipping pools with artificial guards.
     Instruction *skipPool();
 
-    // Sometimes, an api wants a uint32_t (or a pointer to it) rather than
-    // an instruction.  raw() just coerces this into a pointer to a uint32_t
+    // Sometimes, an api wants a uint32_t (or a pointer to it) rather than an
+    // instruction. raw() just coerces this into a pointer to a uint32_t.
     const uint32_t *raw() const { return &data; }
     uint32_t size() const { return 4; }
 }; // Instruction
 
-// make sure that it is the right size
+// Make sure that it is the right size.
 JS_STATIC_ASSERT(sizeof(Instruction) == 4);
 
-// Data Transfer Instructions
+// Data Transfer Instructions.
 class InstDTR : public Instruction
 {
   public:
     enum IsByte_ {
         IsByte = 0x00400000,
         IsWord = 0x00000000
     };
     static const int IsDTR     = 0x04000000;
     static const int IsDTRMask = 0x0c000000;
 
     // TODO: Replace the initialization with something that is safer.
     InstDTR(LoadStore ls, IsByte_ ib, Index mode, Register rt, DTRAddr addr, Assembler::Condition c)
       : Instruction(ls | ib | mode | RT(rt) | addr.encode() | IsDTR, c)
     { }
 
-    static bool isTHIS(const Instruction &i);
-    static InstDTR *asTHIS(const Instruction &i);
+    static bool IsTHIS(const Instruction &i);
+    static InstDTR *AsTHIS(const Instruction &i);
 
 };
 JS_STATIC_ASSERT(sizeof(InstDTR) == sizeof(Instruction));
 
 class InstLDR : public InstDTR
 {
   public:
     InstLDR(Index mode, Register rt, DTRAddr addr, Assembler::Condition c)
         : InstDTR(IsLoad, IsWord, mode, rt, addr, c)
     { }
-    static bool isTHIS(const Instruction &i);
-    static InstLDR *asTHIS(const Instruction &i);
+    static bool IsTHIS(const Instruction &i);
+    static InstLDR *AsTHIS(const Instruction &i);
 
 };
 JS_STATIC_ASSERT(sizeof(InstDTR) == sizeof(InstLDR));
 
 class InstNOP : public Instruction
 {
   public:
     static const uint32_t NopInst = 0x0320f000;
 
     InstNOP()
       : Instruction(NopInst, Assembler::Always)
     { }
 
-    static bool isTHIS(const Instruction &i);
-    static InstNOP *asTHIS(Instruction &i);
+    static bool IsTHIS(const Instruction &i);
+    static InstNOP *AsTHIS(Instruction &i);
 };
 
 // Branching to a register, or calling a register
 class InstBranchReg : public Instruction
 {
   protected:
     // Don't use BranchTag yourself, use a derived instruction.
     enum BranchTag {
         IsBX  = 0x012fff10,
         IsBLX = 0x012fff30
     };
     static const uint32_t IsBRegMask = 0x0ffffff0;
     InstBranchReg(BranchTag tag, Register rm, Assembler::Condition c)
       : Instruction(tag | rm.code(), c)
     { }
   public:
-    static bool isTHIS (const Instruction &i);
-    static InstBranchReg *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstBranchReg *AsTHIS (const Instruction &i);
     // Get the register that is being branched to
     void extractDest(Register *dest);
     // Make sure we are branching to a pre-known register
     bool checkDest(Register dest);
 };
 JS_STATIC_ASSERT(sizeof(InstBranchReg) == sizeof(Instruction));
 
 // Branching to an immediate offset, or calling an immediate offset
@@ -1870,58 +1855,58 @@ class InstBranchImm : public Instruction
     };
     static const uint32_t IsBImmMask = 0x0f000000;
 
     InstBranchImm(BranchTag tag, BOffImm off, Assembler::Condition c)
       : Instruction(tag | off.encode(), c)
     { }
 
   public:
-    static bool isTHIS (const Instruction &i);
-    static InstBranchImm *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstBranchImm *AsTHIS (const Instruction &i);
     void extractImm(BOffImm *dest);
 };
 JS_STATIC_ASSERT(sizeof(InstBranchImm) == sizeof(Instruction));
 
 // Very specific branching instructions.
 class InstBXReg : public InstBranchReg
 {
   public:
-    static bool isTHIS (const Instruction &i);
-    static InstBXReg *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstBXReg *AsTHIS (const Instruction &i);
 };
 class InstBLXReg : public InstBranchReg
 {
   public:
     InstBLXReg(Register reg, Assembler::Condition c)
       : InstBranchReg(IsBLX, reg, c)
     { }
 
-    static bool isTHIS (const Instruction &i);
-    static InstBLXReg *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstBLXReg *AsTHIS (const Instruction &i);
 };
 class InstBImm : public InstBranchImm
 {
   public:
     InstBImm(BOffImm off, Assembler::Condition c)
       : InstBranchImm(IsB, off, c)
     { }
 
-    static bool isTHIS (const Instruction &i);
-    static InstBImm *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstBImm *AsTHIS (const Instruction &i);
 };
 class InstBLImm : public InstBranchImm
 {
   public:
     InstBLImm(BOffImm off, Assembler::Condition c)
       : InstBranchImm(IsBL, off, c)
     { }
 
-    static bool isTHIS (const Instruction &i);
-    static InstBLImm *asTHIS (Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstBLImm *AsTHIS (Instruction &i);
 };
 
 // Both movw and movt. The layout of both the immediate and the destination
 // register is the same so the code is being shared.
 class InstMovWT : public Instruction
 {
   protected:
     enum WT {
@@ -1935,73 +1920,73 @@ class InstMovWT : public Instruction
     { }
 
   public:
     void extractImm(Imm16 *dest);
     void extractDest(Register *dest);
     bool checkImm(Imm16 dest);
     bool checkDest(Register dest);
 
-    static bool isTHIS (Instruction &i);
-    static InstMovWT *asTHIS (Instruction &i);
+    static bool IsTHIS (Instruction &i);
+    static InstMovWT *AsTHIS (Instruction &i);
 
 };
 JS_STATIC_ASSERT(sizeof(InstMovWT) == sizeof(Instruction));
 
 class InstMovW : public InstMovWT
 {
   public:
     InstMovW (Register rd, Imm16 imm, Assembler::Condition c)
       : InstMovWT(rd, imm, IsW, c)
     { }
 
-    static bool isTHIS (const Instruction &i);
-    static InstMovW *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstMovW *AsTHIS (const Instruction &i);
 };
 
 class InstMovT : public InstMovWT
 {
   public:
     InstMovT (Register rd, Imm16 imm, Assembler::Condition c)
       : InstMovWT(rd, imm, IsT, c)
     { }
-    static bool isTHIS (const Instruction &i);
-    static InstMovT *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstMovT *AsTHIS (const Instruction &i);
 };
 
 class InstALU : public Instruction
 {
     static const int32_t ALUMask = 0xc << 24;
   public:
     InstALU (Register rd, Register rn, Operand2 op2, ALUOp op, SetCond_ sc, Assembler::Condition c)
         : Instruction(maybeRD(rd) | maybeRN(rn) | op2.encode() | op | sc, c)
     { }
-    static bool isTHIS (const Instruction &i);
-    static InstALU *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstALU *AsTHIS (const Instruction &i);
     void extractOp(ALUOp *ret);
     bool checkOp(ALUOp op);
     void extractDest(Register *ret);
     bool checkDest(Register rd);
     void extractOp1(Register *ret);
     bool checkOp1(Register rn);
     Operand2 extractOp2();
 };
 
 class InstCMP : public InstALU
 {
   public:
-    static bool isTHIS (const Instruction &i);
-    static InstCMP *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstCMP *AsTHIS (const Instruction &i);
 };
 
 class InstMOV : public InstALU
 {
   public:
-    static bool isTHIS (const Instruction &i);
-    static InstMOV *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstMOV *AsTHIS (const Instruction &i);
 };
 
 
 class InstructionIterator {
   private:
     Instruction *i;
   public:
     InstructionIterator(Instruction *i_);
@@ -2022,17 +2007,17 @@ GetIntArgReg(uint32_t usedIntArgs, uint3
 {
     if (usedIntArgs >= NumIntArgRegs)
         return false;
     *out = Register::FromCode(usedIntArgs);
     return true;
 }
 
 // Get a register in which we plan to put a quantity that will be used as an
-// integer argument.  This differs from GetIntArgReg in that if we have no more
+// integer argument. This differs from GetIntArgReg in that if we have no more
 // actual argument registers to use we will fall back on using whatever
 // CallTempReg* don't overlap the argument registers, and only fail once those
 // run out too.
 static inline bool
 GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register *out)
 {
     if (GetIntArgReg(usedIntArgs, usedFloatArgs, out))
         return true;
@@ -2098,17 +2083,17 @@ GetFloat32ArgStackDisp(uint32_t usedIntA
 static inline uint32_t
 GetDoubleArgStackDisp(uint32_t usedIntArgs, uint32_t usedFloatArgs, uint32_t *padding)
 {
     JS_ASSERT(UseHardFpABI());
     JS_ASSERT(usedFloatArgs >= NumFloatArgRegs);
     uint32_t intSlots = 0;
     if (usedIntArgs > NumIntArgRegs) {
         intSlots = usedIntArgs - NumIntArgRegs;
-        // update the amount of padding required.
+        // Update the amount of padding required.
         *padding += (*padding + usedIntArgs) % 2;
     }
     uint32_t doubleSlots = usedFloatArgs - NumFloatArgRegs;
     doubleSlots *= 2;
     return (intSlots + doubleSlots + *padding) * sizeof(intptr_t);
 }
 
 #endif
@@ -2119,28 +2104,27 @@ class DoubleEncoder {
     uint32_t rep(bool b, uint32_t count) {
         uint32_t ret = 0;
         for (uint32_t i = 0; i < count; i++)
             ret = (ret << 1) | b;
         return ret;
     }
 
     uint32_t encode(uint8_t value) {
-        //ARM ARM "VFP modified immediate constants"
-        // aBbbbbbb bbcdefgh 000...
-        // we want to return the top 32 bits of the double
-        // the rest are 0.
+        // ARM ARM "VFP modified immediate constants"
+        //  aBbbbbbb bbcdefgh 000...
+        // We want to return the top 32 bits of the double the rest are 0.
         bool a = value >> 7;
         bool b = value >> 6 & 1;
         bool B = !b;
         uint32_t cdefgh = value & 0x3f;
-        return a << 31 |
-            B << 30 |
-            rep(b, 8) << 22 |
-            cdefgh << 16;
+        return         a << 31 |
+                       B << 30 |
+               rep(b, 8) << 22 |
+                  cdefgh << 16;
     }
 
     struct DoubleEntry
     {
         uint32_t dblTop;
         datastore::Imm8VFPImmData data;
 
         DoubleEntry()
--- a/js/src/jit/arm/Bailouts-arm.cpp
+++ b/js/src/jit/arm/Bailouts-arm.cpp
@@ -15,19 +15,19 @@ using namespace js;
 using namespace js::jit;
 
 namespace js {
 namespace jit {
 
 class BailoutStack
 {
     uintptr_t frameClassId_;
-    // This is pushed in the bailout handler.  Both entry points into the handler
+    // This is pushed in the bailout handler. Both entry points into the handler
     // inserts their own value int lr, which is then placed onto the stack along
-    // with frameClassId_ above.  This should be migrated to ip.
+    // with frameClassId_ above. This should be migrated to ip.
   public:
     union {
         uintptr_t frameSize_;
         uintptr_t tableOffset_;
     };
 
   protected: // Silence Clang warning about unused private fields.
     mozilla::Array<double, FloatRegisters::Total> fpregs_;
--- a/js/src/jit/arm/BaselineHelpers-arm.h
+++ b/js/src/jit/arm/BaselineHelpers-arm.h
@@ -41,26 +41,26 @@ EmitCallIC(CodeOffsetLabel *patchOffset,
     // Load stub pointer into BaselineStubReg
     masm.loadPtr(Address(BaselineStubReg, ICEntry::offsetOfFirstStub()), BaselineStubReg);
 
     // Load stubcode pointer from BaselineStubEntry.
     // R2 won't be active when we call ICs, so we can use r0.
     JS_ASSERT(R2 == ValueOperand(r1, r0));
     masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfStubCode()), r0);
 
-    // Call the stubcode via a direct branch-and-link
+    // Call the stubcode via a direct branch-and-link.
     masm.ma_blx(r0);
 }
 
 inline void
 EmitEnterTypeMonitorIC(MacroAssembler &masm,
                        size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
 {
-    // This is expected to be called from within an IC, when BaselineStubReg
-    // is properly initialized to point to the stub.
+    // This is expected to be called from within an IC, when BaselineStubReg is
+    // properly initialized to point to the stub.
     masm.loadPtr(Address(BaselineStubReg, (uint32_t) monitorStubOffset), BaselineStubReg);
 
     // Load stubcode pointer from BaselineStubEntry.
     // R2 won't be active when we call ICs, so we can use r0.
     JS_ASSERT(R2 == ValueOperand(r1, r0));
     masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfStubCode()), r0);
 
     // Jump to the stubcode.
@@ -91,31 +91,31 @@ EmitTailCallVM(JitCode *target, MacroAss
     masm.ma_add(Imm32(BaselineFrame::FramePointerOffset), r0);
     masm.ma_sub(BaselineStackReg, r0);
 
     // Store frame size without VMFunction arguments for GC marking.
     masm.ma_sub(r0, Imm32(argSize), r1);
     masm.store32(r1, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
 
     // Push frame descriptor and perform the tail call.
-    // BaselineTailCallReg (lr) already contains the return address (as we keep it there through
-    // the stub calls), but the VMWrapper code being called expects the return address to also
-    // be pushed on the stack.
+    // BaselineTailCallReg (lr) already contains the return address (as we keep
+    // it there through the stub calls), but the VMWrapper code being called
+    // expects the return address to also be pushed on the stack.
     JS_ASSERT(BaselineTailCallReg == lr);
     masm.makeFrameDescriptor(r0, JitFrame_BaselineJS);
     masm.push(r0);
     masm.push(lr);
     masm.branch(target);
 }
 
 inline void
 EmitCreateStubFrameDescriptor(MacroAssembler &masm, Register reg)
 {
-    // Compute stub frame size. We have to add two pointers: the stub reg and previous
-    // frame pointer pushed by EmitEnterStubFrame.
+    // Compute stub frame size. We have to add two pointers: the stub reg and
+    // previous frame pointer pushed by EmitEnterStubFrame.
     masm.mov(BaselineFrameReg, reg);
     masm.ma_add(Imm32(sizeof(void *) * 2), reg);
     masm.ma_sub(BaselineStackReg, reg);
 
     masm.makeFrameDescriptor(reg, JitFrame_BaselineStub);
 }
 
 inline void
@@ -137,18 +137,18 @@ EmitEnterStubFrame(MacroAssembler &masm,
 
     // Compute frame size.
     masm.mov(BaselineFrameReg, scratch);
     masm.ma_add(Imm32(BaselineFrame::FramePointerOffset), scratch);
     masm.ma_sub(BaselineStackReg, scratch);
 
     masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
 
-    // Note: when making changes here,  don't forget to update STUB_FRAME_SIZE
-    // if needed.
+    // Note: when making changes here, don't forget to update STUB_FRAME_SIZE if
+    // needed.
 
     // Push frame descriptor and return address.
     masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS);
     masm.push(scratch);
     masm.push(BaselineTailCallReg);
 
     // Save old frame pointer, stack pointer and stub reg.
     masm.push(BaselineStubReg);
@@ -157,20 +157,20 @@ EmitEnterStubFrame(MacroAssembler &masm,
 
     // We pushed 4 words, so the stack is still aligned to 8 bytes.
     masm.checkStackAlignment();
 }
 
 inline void
 EmitLeaveStubFrame(MacroAssembler &masm, bool calledIntoIon = false)
 {
-    // Ion frames do not save and restore the frame pointer. If we called
-    // into Ion, we have to restore the stack pointer from the frame descriptor.
-    // If we performed a VM call, the descriptor has been popped already so
-    // in that case we use the frame pointer.
+    // Ion frames do not save and restore the frame pointer. If we called into
+    // Ion, we have to restore the stack pointer from the frame descriptor. If
+    // we performed a VM call, the descriptor has been popped already so in that
+    // case we use the frame pointer.
     if (calledIntoIon) {
         masm.pop(ScratchRegister);
         masm.ma_lsr(Imm32(FRAMESIZE_SHIFT), ScratchRegister, ScratchRegister);
         masm.ma_add(ScratchRegister, BaselineStackReg);
     } else {
         masm.mov(BaselineFrameReg, BaselineStackReg);
     }
 
@@ -185,67 +185,67 @@ EmitLeaveStubFrame(MacroAssembler &masm,
 }
 
 inline void
 EmitStowICValues(MacroAssembler &masm, int values)
 {
     JS_ASSERT(values >= 0 && values <= 2);
     switch(values) {
       case 1:
-        // Stow R0
+        // Stow R0.
         masm.pushValue(R0);
         break;
       case 2:
-        // Stow R0 and R1
+        // Stow R0 and R1.
         masm.pushValue(R0);
         masm.pushValue(R1);
         break;
     }
 }
 
 inline void
 EmitUnstowICValues(MacroAssembler &masm, int values, bool discard = false)
 {
     JS_ASSERT(values >= 0 && values <= 2);
     switch(values) {
       case 1:
-        // Unstow R0
+        // Unstow R0.
         if (discard)
             masm.addPtr(Imm32(sizeof(Value)), BaselineStackReg);
         else
             masm.popValue(R0);
         break;
       case 2:
-        // Unstow R0 and R1
+        // Unstow R0 and R1.
         if (discard) {
             masm.addPtr(Imm32(sizeof(Value) * 2), BaselineStackReg);
         } else {
             masm.popValue(R1);
             masm.popValue(R0);
         }
         break;
     }
 }
 
 inline void
 EmitCallTypeUpdateIC(MacroAssembler &masm, JitCode *code, uint32_t objectOffset)
 {
     JS_ASSERT(R2 == ValueOperand(r1, r0));
 
-    // R0 contains the value that needs to be typechecked.
-    // The object we're updating is a boxed Value on the stack, at offset
-    // objectOffset from esp, excluding the return address.
+    // R0 contains the value that needs to be typechecked. The object we're
+    // updating is a boxed Value on the stack, at offset objectOffset from esp,
+    // excluding the return address.
 
     // Save the current BaselineStubReg to stack, as well as the TailCallReg,
     // since on ARM, the LR is live.
     masm.push(BaselineStubReg);
     masm.push(BaselineTailCallReg);
 
-    // This is expected to be called from within an IC, when BaselineStubReg
-    // is properly initialized to point to the stub.
+    // This is expected to be called from within an IC, when BaselineStubReg is
+    // properly initialized to point to the stub.
     masm.loadPtr(Address(BaselineStubReg, ICUpdatedStub::offsetOfFirstUpdateStub()),
                  BaselineStubReg);
 
     // TODO: Change r0 uses below to use masm's configurable scratch register instead.
 
     // Load stubcode pointer from BaselineStubReg into BaselineTailCallReg.
     masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfStubCode()), r0);
 
@@ -281,33 +281,33 @@ EmitCallTypeUpdateIC(MacroAssembler &mas
     // Success at end.
     masm.bind(&success);
 }
 
 template <typename AddrType>
 inline void
 EmitPreBarrier(MacroAssembler &masm, const AddrType &addr, MIRType type)
 {
-    // on ARM, lr is clobbered by patchableCallPreBarrier.  Save it first.
+    // On ARM, lr is clobbered by patchableCallPreBarrier. Save it first.
     masm.push(lr);
     masm.patchableCallPreBarrier(addr, type);
     masm.pop(lr);
 }
 
 inline void
 EmitStubGuardFailure(MacroAssembler &masm)
 {
     JS_ASSERT(R2 == ValueOperand(r1, r0));
 
     // NOTE: This routine assumes that the stub guard code left the stack in the
     // same state it was in when it was entered.
 
     // BaselineStubEntry points to the current stub.
 
-    // Load next stub into BaselineStubReg
+    // Load next stub into BaselineStubReg.
     masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfNext()), BaselineStubReg);
 
     // Load stubcode pointer from BaselineStubEntry into scratch register.
     masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfStubCode()), r0);
 
     // Return address is already loaded, just jump to the next stubcode.
     JS_ASSERT(BaselineTailCallReg == lr);
     masm.branch(r0);
--- a/js/src/jit/arm/BaselineIC-arm.cpp
+++ b/js/src/jit/arm/BaselineIC-arm.cpp
@@ -31,17 +31,17 @@ ICCompare_Int32::Compiler::generateStubC
     masm.cmp32(R0.payloadReg(), R1.payloadReg());
     masm.ma_mov(Imm32(1), R0.payloadReg(), NoSetCond, cond);
     masm.ma_mov(Imm32(0), R0.payloadReg(), NoSetCond, Assembler::InvertCondition(cond));
 
     // Result is implicitly boxed already.
     masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.payloadReg(), R0);
     EmitReturnFromIC(masm);
 
-    // Failure case - jump to next stub
+    // Failure case - jump to next stub.
     masm.bind(&failure);
     EmitStubGuardFailure(masm);
 
     return true;
 }
 
 bool
 ICCompare_Double::Compiler::generateStubCode(MacroAssembler &masm)
@@ -57,17 +57,17 @@ ICCompare_Double::Compiler::generateStub
 
     masm.compareDouble(FloatReg0, FloatReg1);
     masm.ma_mov(Imm32(0), dest);
     masm.ma_mov(Imm32(1), dest, NoSetCond, cond);
 
     masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0);
     EmitReturnFromIC(masm);
 
-    // Failure case - jump to next stub
+    // Failure case - jump to next stub.
     masm.bind(&failure);
     EmitStubGuardFailure(masm);
     return true;
 }
 
 // ICBinaryArith_Int32
 
 extern "C" {
@@ -77,35 +77,35 @@ extern "C" {
 bool
 ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm)
 {
     // Guard that R0 is an integer and R1 is an integer.
     Label failure;
     masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
     masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
 
-    // Add R0 and R1.  Don't need to explicitly unbox, just use R2's payloadReg.
+    // Add R0 and R1. Don't need to explicitly unbox, just use R2's payloadReg.
     Register scratchReg = R2.payloadReg();
 
     // DIV and MOD need an extra non-volatile ValueOperand to hold R0.
     GeneralRegisterSet savedRegs = availableGeneralRegs(2);
     savedRegs = GeneralRegisterSet::Intersect(GeneralRegisterSet::NonVolatile(), savedRegs);
     ValueOperand savedValue = savedRegs.takeAnyValue();
 
     Label maybeNegZero, revertRegister;
     switch(op_) {
       case JSOP_ADD:
         masm.ma_add(R0.payloadReg(), R1.payloadReg(), scratchReg, SetCond);
 
-        // Just jump to failure on overflow.  R0 and R1 are preserved, so we can just jump to
-        // the next stub.
+        // Just jump to failure on overflow. R0 and R1 are preserved, so we can
+        // just jump to the next stub.
         masm.j(Assembler::Overflow, &failure);
 
-        // Box the result and return.  We know R0.typeReg() already contains the integer
-        // tag, so we just need to move the result value into place.
+        // Box the result and return. We know R0.typeReg() already contains the
+        // integer tag, so we just need to move the result value into place.
         masm.mov(scratchReg, R0.payloadReg());
         break;
       case JSOP_SUB:
         masm.ma_sub(R0.payloadReg(), R1.payloadReg(), scratchReg, SetCond);
         masm.j(Assembler::Overflow, &failure);
         masm.mov(scratchReg, R0.payloadReg());
         break;
       case JSOP_MUL: {
@@ -126,17 +126,18 @@ ICBinaryArith_Int32::Compiler::generateS
         masm.ma_cmp(R1.payloadReg(), Imm32(-1), Assembler::Equal);
         masm.j(Assembler::Equal, &failure);
 
         // Check for both division by zero and 0 / X with X < 0 (results in -0).
         masm.ma_cmp(R1.payloadReg(), Imm32(0));
         masm.ma_cmp(R0.payloadReg(), Imm32(0), Assembler::LessThan);
         masm.j(Assembler::Equal, &failure);
 
-        // The call will preserve registers r4-r11. Save R0 and the link register.
+        // The call will preserve registers r4-r11. Save R0 and the link
+        // register.
         JS_ASSERT(R1 == ValueOperand(r5, r4));
         JS_ASSERT(R0 == ValueOperand(r3, r2));
         masm.moveValue(R0, savedValue);
 
         masm.setupAlignedABICall(2);
         masm.passABIArg(R0.payloadReg());
         masm.passABIArg(R1.payloadReg());
         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_idivmod));
@@ -217,17 +218,17 @@ ICBinaryArith_Int32::Compiler::generateS
       case JSOP_MOD:
         masm.bind(&revertRegister);
         masm.moveValue(savedValue, R0);
         break;
       default:
         break;
     }
 
-    // Failure case - jump to next stub
+    // Failure case - jump to next stub.
     masm.bind(&failure);
     EmitStubGuardFailure(masm);
 
     return true;
 }
 
 bool
 ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm)
--- a/js/src/jit/arm/BaselineRegisters-arm.h
+++ b/js/src/jit/arm/BaselineRegisters-arm.h
@@ -18,40 +18,38 @@ namespace jit {
 // r14 = link-register
 
 // r13 = stack-pointer
 // r11 = frame-pointer
 static MOZ_CONSTEXPR_VAR Register BaselineFrameReg = r11;
 static MOZ_CONSTEXPR_VAR Register BaselineStackReg = sp;
 
 // ValueOperands R0, R1, and R2.
-// R0 == JSReturnReg, and R2 uses registers not
-// preserved across calls.  R1 value should be
-// preserved across calls.
+// R0 == JSReturnReg, and R2 uses registers not preserved across calls. R1 value
+// should be preserved across calls.
 static MOZ_CONSTEXPR_VAR ValueOperand R0(r3, r2);
 static MOZ_CONSTEXPR_VAR ValueOperand R1(r5, r4);
 static MOZ_CONSTEXPR_VAR ValueOperand R2(r1, r0);
 
 // BaselineTailCallReg and BaselineStubReg
-// These use registers that are not preserved across
-// calls.
+// These use registers that are not preserved across calls.
 static MOZ_CONSTEXPR_VAR Register BaselineTailCallReg = r14;
 static MOZ_CONSTEXPR_VAR Register BaselineStubReg     = r9;
 
 static MOZ_CONSTEXPR_VAR Register ExtractTemp0        = InvalidReg;
 static MOZ_CONSTEXPR_VAR Register ExtractTemp1        = InvalidReg;
 
 // Register used internally by MacroAssemblerARM.
 static MOZ_CONSTEXPR_VAR Register BaselineSecondScratchReg = r6;
 
 // R7 - R9 are generally available for use within stubcode.
 
-// Note that BaselineTailCallReg is actually just the link
-// register.  In ARM code emission, we do not clobber BaselineTailCallReg
-// since we keep the return address for calls there.
+// Note that BaselineTailCallReg is actually just the link register. In ARM code
+// emission, we do not clobber BaselineTailCallReg since we keep the return
+// address for calls there.
 
 // FloatReg0 must be equal to ReturnFloatReg.
 static MOZ_CONSTEXPR_VAR FloatRegister FloatReg0      = d0;
 static MOZ_CONSTEXPR_VAR FloatRegister FloatReg1      = d1;
 
 } // namespace jit
 } // namespace js
 
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -87,17 +87,17 @@ CodeGeneratorARM::generateEpilogue()
 #endif
 
     if (gen->compilingAsmJS())
         masm.freeStack(frameDepth_);
     else
         masm.freeStack(frameSize());
     JS_ASSERT(masm.framePushed() == 0);
     masm.pop(pc);
-    masm.dumpPool();
+    masm.flushBuffer();
     return true;
 }
 
 void
 CodeGeneratorARM::emitBranch(Assembler::Condition cond, MBasicBlock *mirTrue, MBasicBlock *mirFalse)
 {
     if (isNextBlock(mirFalse->lir())) {
         jumpToBlock(mirTrue, cond);
@@ -270,28 +270,32 @@ CodeGeneratorARM::visitMinMaxD(LMinMaxD 
     JS_ASSERT(first == output);
 
     Assembler::Condition cond = ins->mir()->isMax()
         ? Assembler::VFP_LessThanOrEqual
         : Assembler::VFP_GreaterThanOrEqual;
     Label nan, equal, returnSecond, done;
 
     masm.compareDouble(first, second);
-    masm.ma_b(&nan, Assembler::VFP_Unordered); // first or second is NaN, result is NaN.
-    masm.ma_b(&equal, Assembler::VFP_Equal); // make sure we handle -0 and 0 right.
+    // First or second is NaN, result is NaN.
+    masm.ma_b(&nan, Assembler::VFP_Unordered);
+    // Make sure we handle -0 and 0 right.
+    masm.ma_b(&equal, Assembler::VFP_Equal);
     masm.ma_b(&returnSecond, cond);
     masm.ma_b(&done);
 
     // Check for zero.
     masm.bind(&equal);
     masm.compareDouble(first, InvalidFloatReg);
-    masm.ma_b(&done, Assembler::VFP_NotEqualOrUnordered); // first wasn't 0 or -0, so just return it.
+    // First wasn't 0 or -0, so just return it.
+    masm.ma_b(&done, Assembler::VFP_NotEqualOrUnordered);
     // So now both operands are either -0 or 0.
     if (ins->mir()->isMax()) {
-        masm.ma_vadd(second, first, first); // -0 + -0 = -0 and -0 + 0 = 0.
+        // -0 + -0 = -0 and -0 + 0 = 0.
+        masm.ma_vadd(second, first, first);
     } else {
         masm.ma_vneg(first, first);
         masm.ma_vsub(first, second, first);
         masm.ma_vneg(first, first);
     }
     masm.ma_b(&done);
 
     masm.bind(&nan);
@@ -398,88 +402,90 @@ CodeGeneratorARM::visitMulI(LMulI *ins)
         }
         // TODO: move these to ma_mul.
         switch (constant) {
           case -1:
             masm.ma_rsb(ToRegister(lhs), Imm32(0), ToRegister(dest), SetCond);
             break;
           case 0:
             masm.ma_mov(Imm32(0), ToRegister(dest));
-            return true; // escape overflow check;
+            return true; // Escape overflow check.
           case 1:
-            // nop
+            // Nop
             masm.ma_mov(ToRegister(lhs), ToRegister(dest));
-            return true; // escape overflow check;
+            return true; // Escape overflow check.
           case 2:
             masm.ma_add(ToRegister(lhs), ToRegister(lhs), ToRegister(dest), SetCond);
             // Overflow is handled later.
             break;
           default: {
             bool handled = false;
             if (constant > 0) {
                 // Try shift and add sequences for a positive constant.
                 if (!mul->canOverflow()) {
-                    // If it cannot overflow, we can do lots of optimizations
+                    // If it cannot overflow, we can do lots of optimizations.
                     Register src = ToRegister(lhs);
                     uint32_t shift = FloorLog2(constant);
                     uint32_t rest = constant - (1 << shift);
-                    // See if the constant has one bit set, meaning it can be encoded as a bitshift
+                    // See if the constant has one bit set, meaning it can be
+                    // encoded as a bitshift.
                     if ((1 << shift) == constant) {
                         masm.ma_lsl(Imm32(shift), src, ToRegister(dest));
                         handled = true;
                     } else {
-                        // If the constant cannot be encoded as (1<<C1), see if it can be encoded as
-                        // (1<<C1) | (1<<C2), which can be computed using an add and a shift
+                        // If the constant cannot be encoded as (1 << C1), see
+                        // if it can be encoded as (1 << C1) | (1 << C2), which
+                        // can be computed using an add and a shift.
                         uint32_t shift_rest = FloorLog2(rest);
                         if ((1u << shift_rest) == rest) {
                             masm.as_add(ToRegister(dest), src, lsl(src, shift-shift_rest));
                             if (shift_rest != 0)
                                 masm.ma_lsl(Imm32(shift_rest), ToRegister(dest), ToRegister(dest));
                             handled = true;
                         }
                     }
                 } else if (ToRegister(lhs) != ToRegister(dest)) {
                     // To stay on the safe side, only optimize things that are a
                     // power of 2.
 
                     uint32_t shift = FloorLog2(constant);
                     if ((1 << shift) == constant) {
                         // dest = lhs * pow(2,shift)
                         masm.ma_lsl(Imm32(shift), ToRegister(lhs), ToRegister(dest));
-                        // At runtime, check (lhs == dest >> shift), if this does not hold,
-                        // some bits were lost due to overflow, and the computation should
-                        // be resumed as a double.
+                        // At runtime, check (lhs == dest >> shift), if this
+                        // does not hold, some bits were lost due to overflow,
+                        // and the computation should be resumed as a double.
                         masm.as_cmp(ToRegister(lhs), asr(ToRegister(dest), shift));
                         c = Assembler::NotEqual;
                         handled = true;
                     }
                 }
             }
 
             if (!handled) {
                 if (mul->canOverflow())
                     c = masm.ma_check_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), c);
                 else
                     masm.ma_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest));
             }
           }
         }
-        // Bailout on overflow
+        // Bailout on overflow.
         if (mul->canOverflow() && !bailoutIf(c, ins->snapshot()))
             return false;
     } else {
         Assembler::Condition c = Assembler::Overflow;
 
-        //masm.imull(ToOperand(rhs), ToRegister(lhs));
+        // masm.imull(ToOperand(rhs), ToRegister(lhs));
         if (mul->canOverflow())
             c = masm.ma_check_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), c);
         else
             masm.ma_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest));
 
-        // Bailout on overflow
+        // Bailout on overflow.
         if (mul->canOverflow() && !bailoutIf(c, ins->snapshot()))
             return false;
 
         if (mul->canBeNegativeZero()) {
             Label done;
             masm.ma_cmp(ToRegister(dest), Imm32(0));
             masm.ma_b(&done, Assembler::NotEqual);
 
@@ -497,18 +503,21 @@ CodeGeneratorARM::visitMulI(LMulI *ins)
 
 bool
 CodeGeneratorARM::divICommon(MDiv *mir, Register lhs, Register rhs, Register output,
                              LSnapshot *snapshot, Label &done)
 {
     if (mir->canBeNegativeOverflow()) {
         // Handle INT32_MIN / -1;
         // The integer division will give INT32_MIN, but we want -(double)INT32_MIN.
-        masm.ma_cmp(lhs, Imm32(INT32_MIN)); // sets EQ if lhs == INT32_MIN
-        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal); // if EQ (LHS == INT32_MIN), sets EQ if rhs == -1
+
+        // Sets EQ if lhs == INT32_MIN.
+        masm.ma_cmp(lhs, Imm32(INT32_MIN));
+        // If EQ (LHS == INT32_MIN), sets EQ if rhs == -1.
+        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal);
         if (mir->canTruncateOverflow()) {
             // (-INT32_MIN)|0 = INT32_MIN
             Label skip;
             masm.ma_b(&skip, Assembler::NotEqual);
             masm.ma_mov(Imm32(INT32_MIN), output);
             masm.ma_b(&done);
             masm.bind(&skip);
         } else {
@@ -548,17 +557,17 @@ CodeGeneratorARM::divICommon(MDiv *mir, 
     }
 
     return true;
 }
 
 bool
 CodeGeneratorARM::visitDivI(LDivI *ins)
 {
-    // Extract the registers from this instruction
+    // Extract the registers from this instruction.
     Register lhs = ToRegister(ins->lhs());
     Register rhs = ToRegister(ins->rhs());
     Register temp = ToRegister(ins->getTemp(0));
     Register output = ToRegister(ins->output());
     MDiv *mir = ins->mir();
 
     Label done;
     if (!divICommon(mir, lhs, rhs, output, ins->snapshot(), done))
@@ -583,17 +592,17 @@ CodeGeneratorARM::visitDivI(LDivI *ins)
 extern "C" {
     extern MOZ_EXPORT int64_t __aeabi_idivmod(int,int);
     extern MOZ_EXPORT int64_t __aeabi_uidivmod(int,int);
 }
 
 bool
 CodeGeneratorARM::visitSoftDivI(LSoftDivI *ins)
 {
-    // Extract the registers from this instruction
+    // Extract the registers from this instruction.
     Register lhs = ToRegister(ins->lhs());
     Register rhs = ToRegister(ins->rhs());
     Register output = ToRegister(ins->output());
     MDiv *mir = ins->mir();
 
     Label done;
     if (!divICommon(mir, lhs, rhs, output, ins->snapshot(), done))
         return false;
@@ -657,29 +666,30 @@ CodeGeneratorARM::visitDivPowTwoI(LDivPo
 
     return true;
 }
 
 bool
 CodeGeneratorARM::modICommon(MMod *mir, Register lhs, Register rhs, Register output,
                              LSnapshot *snapshot, Label &done)
 {
-    // 0/X (with X < 0) is bad because both of these values *should* be doubles, and
-    // the result should be -0.0, which cannot be represented in integers.
+    // 0/X (with X < 0) is bad because both of these values *should* be doubles,
+    // and the result should be -0.0, which cannot be represented in integers.
     // X/0 is bad because it will give garbage (or abort), when it should give
     // either \infty, -\infty or NAN.
 
     // Prevent 0 / X (with X < 0) and X / 0
-    // testing X / Y.  Compare Y with 0.
-    // There are three cases: (Y < 0), (Y == 0) and (Y > 0)
-    // If (Y < 0), then we compare X with 0, and bail if X == 0
-    // If (Y == 0), then we simply want to bail.  Since this does not set
-    // the flags necessary for LT to trigger, we don't test X, and take the
-    // bailout because the EQ flag is set.
-    // if (Y > 0), we don't set EQ, and we don't trigger LT, so we don't take the bailout.
+    // testing X / Y. Compare Y with 0.
+    // There are three cases: (Y < 0), (Y == 0) and (Y > 0).
+    // If (Y < 0), then we compare X with 0, and bail if X == 0.
+    // If (Y == 0), then we simply want to bail. Since this does not set the
+    // flags necessary for LT to trigger, we don't test X, and take the bailout
+    // because the EQ flag is set.
+    // If (Y > 0), we don't set EQ, and we don't trigger LT, so we don't take
+    // the bailout.
     if (mir->canBeDivideByZero() || mir->canBeNegativeDividend()) {
         masm.ma_cmp(rhs, Imm32(0));
         masm.ma_cmp(lhs, Imm32(0), Assembler::LessThan);
         if (mir->isTruncated()) {
             // NaN|0 == 0 and (0 % -X)|0 == 0
             Label skip;
             masm.ma_b(&skip, Assembler::NotEqual);
             masm.ma_mov(Imm32(0), output);
@@ -699,26 +709,26 @@ bool
 CodeGeneratorARM::visitModI(LModI *ins)
 {
     Register lhs = ToRegister(ins->lhs());
     Register rhs = ToRegister(ins->rhs());
     Register output = ToRegister(ins->output());
     Register callTemp = ToRegister(ins->callTemp());
     MMod *mir = ins->mir();
 
-    // save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
+    // Save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
     masm.ma_mov(lhs, callTemp);
 
     Label done;
     if (!modICommon(mir, lhs, rhs, output, ins->snapshot(), done))
         return false;
 
     masm.ma_smod(lhs, rhs, output);
 
-    // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
+    // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0.
     if (mir->canBeNegativeDividend()) {
         if (mir->isTruncated()) {
             // -0.0|0 == 0
         } else {
             JS_ASSERT(mir->fallible());
             // See if X < 0
             masm.ma_cmp(output, Imm32(0));
             masm.ma_b(&done, Assembler::NotEqual);
@@ -730,33 +740,35 @@ CodeGeneratorARM::visitModI(LModI *ins)
 
     masm.bind(&done);
     return true;
 }
 
 bool
 CodeGeneratorARM::visitSoftModI(LSoftModI *ins)
 {
-    // Extract the registers from this instruction
+    // Extract the registers from this instruction.
     Register lhs = ToRegister(ins->lhs());
     Register rhs = ToRegister(ins->rhs());
     Register output = ToRegister(ins->output());
     Register callTemp = ToRegister(ins->callTemp());
     MMod *mir = ins->mir();
     Label done;
 
-    // save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
+    // Save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
     JS_ASSERT(callTemp.code() > r3.code() && callTemp.code() < r12.code());
     masm.ma_mov(lhs, callTemp);
 
     // Prevent INT_MIN % -1;
     // The integer division will give INT_MIN, but we want -(double)INT_MIN.
     if (mir->canBeNegativeDividend()) {
-        masm.ma_cmp(lhs, Imm32(INT_MIN)); // sets EQ if lhs == INT_MIN
-        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal); // if EQ (LHS == INT_MIN), sets EQ if rhs == -1
+        // Sets EQ if lhs == INT_MIN.
+        masm.ma_cmp(lhs, Imm32(INT_MIN));
+        // If EQ (LHS == INT_MIN), sets EQ if rhs == -1.
+        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal);
         if (mir->isTruncated()) {
             // (INT_MIN % -1)|0 == 0
             Label skip;
             masm.ma_b(&skip, Assembler::NotEqual);
             masm.ma_mov(Imm32(0), output);
             masm.ma_b(&done);
             masm.bind(&skip);
         } else {
@@ -797,21 +809,22 @@ CodeGeneratorARM::visitSoftModI(LSoftMod
 
 bool
 CodeGeneratorARM::visitModPowTwoI(LModPowTwoI *ins)
 {
     Register in = ToRegister(ins->getOperand(0));
     Register out = ToRegister(ins->getDef(0));
     MMod *mir = ins->mir();
     Label fin;
-    // bug 739870, jbramley has a different sequence that may help with speed here
+    // Bug 739870: jbramley has a different sequence that may help with speed
+    // here.
     masm.ma_mov(in, out, SetCond);
     masm.ma_b(&fin, Assembler::Zero);
     masm.ma_rsb(Imm32(0), out, NoSetCond, Assembler::Signed);
-    masm.ma_and(Imm32((1<<ins->shift())-1), out);
+    masm.ma_and(Imm32((1 << ins->shift()) - 1), out);
     masm.ma_rsb(Imm32(0), out, SetCond, Assembler::Signed);
     if (mir->canBeNegativeDividend()) {
         if (!mir->isTruncated()) {
             JS_ASSERT(mir->fallible());
             if (!bailoutIf(Assembler::Zero, ins->snapshot()))
                 return false;
         } else {
             // -0|0 == 0
@@ -841,32 +854,31 @@ CodeGeneratorARM::visitModMaskI(LModMask
     }
     return true;
 }
 bool
 CodeGeneratorARM::visitBitNotI(LBitNotI *ins)
 {
     const LAllocation *input = ins->getOperand(0);
     const LDefinition *dest = ins->getDef(0);
-    // this will not actually be true on arm.
-    // We can not an imm8m in order to get a wider range
-    // of numbers
+    // This will not actually be true on ARM. We cannot use an imm8m in order
+    // to get a wider range of numbers.
     JS_ASSERT(!input->isConstant());
 
     masm.ma_mvn(ToRegister(input), ToRegister(dest));
     return true;
 }
 
 bool
 CodeGeneratorARM::visitBitOpI(LBitOpI *ins)
 {
     const LAllocation *lhs = ins->getOperand(0);
     const LAllocation *rhs = ins->getOperand(1);
     const LDefinition *dest = ins->getDef(0);
-    // all of these bitops should be either imm32's, or integer registers.
+    // All of these bitops should be either imm32's, or integer registers.
     switch (ins->bitop()) {
       case JSOP_BITOR:
         if (rhs->isConstant())
             masm.ma_orr(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
         else
             masm.ma_orr(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
         break;
       case JSOP_BITXOR:
@@ -923,18 +935,18 @@ CodeGeneratorARM::visitShiftI(LShiftI *i
                 }
             }
             break;
           default:
             MOZ_ASSUME_UNREACHABLE("Unexpected shift op");
         }
     } else {
         // The shift amounts should be AND'ed into the 0-31 range since arm
-        // shifts by the lower byte of the register (it will attempt to shift
-        // by 250 if you ask it to).
+        // shifts by the lower byte of the register (it will attempt to shift by
+        // 250 if you ask it to).
         masm.ma_and(Imm32(0x1F), ToRegister(rhs), dest);
 
         switch (ins->bitop()) {
           case JSOP_LSH:
             masm.ma_lsl(dest, lhs, dest);
             break;
           case JSOP_RSH:
             masm.ma_asr(dest, lhs, dest);
@@ -989,17 +1001,18 @@ CodeGeneratorARM::visitPowHalfD(LPowHalf
     Label done;
 
     // Masm.pow(-Infinity, 0.5) == Infinity.
     masm.ma_vimm(NegativeInfinity<double>(), ScratchDoubleReg);
     masm.compareDouble(input, ScratchDoubleReg);
     masm.ma_vneg(ScratchDoubleReg, output, Assembler::Equal);
     masm.ma_b(&done, Assembler::Equal);
 
-    // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5). Adding 0 converts any -0 to 0.
+    // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
+    // Adding 0 converts any -0 to 0.
     masm.ma_vimm(0.0, ScratchDoubleReg);
     masm.ma_vadd(ScratchDoubleReg, input, output);
     masm.ma_vsqrt(output, output);
 
     masm.bind(&done);
     return true;
 }
 
@@ -1062,55 +1075,56 @@ CodeGeneratorARM::visitOutOfLineTableSwi
     }
 
     return true;
 }
 
 bool
 CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch *mir, Register index, Register base)
 {
-    // the code generated by this is utter hax.
-    // the end result looks something like:
+    // The code generated by this is utter hax.
+    // The end result looks something like:
     // SUBS index, input, #base
     // RSBSPL index, index, #max
     // LDRPL pc, pc, index lsl 2
     // B default
 
     // If the range of targets in N through M, we first subtract off the lowest
-    // case (N), which both shifts the arguments into the range 0 to (M-N) with
-    // and sets the MInus flag if the argument was out of range on the low end.
+    // case (N), which both shifts the arguments into the range 0 to (M - N)
+    // and sets the Minus flag if the argument was out of range on the low
+    // end.
 
     // Then we a reverse subtract with the size of the jump table, which will
     // reverse the order of range (It is size through 0, rather than 0 through
-    // size).  The main purpose of this is that we set the same flag as the lower
-    // bound check for the upper bound check.  Lastly, we do this conditionally
+    // size). The main purpose of this is that we set the same flag as the lower
+    // bound check for the upper bound check. Lastly, we do this conditionally
     // on the previous check succeeding.
 
     // Then we conditionally load the pc offset by the (reversed) index (times
-    // the address size) into the pc, which branches to the correct case.
-    // NOTE: when we go to read the pc, the value that we get back is the pc of
-    // the current instruction *PLUS 8*.  This means that ldr foo, [pc, +0]
-    // reads $pc+8.  In other words, there is an empty word after the branch into
-    // the switch table before the table actually starts.  Since the only other
-    // unhandled case is the default case (both out of range high and out of range low)
-    // I then insert a branch to default case into the extra slot, which ensures
-    // we don't attempt to execute the address table.
+    // the address size) into the pc, which branches to the correct case. NOTE:
+    // when we go to read the pc, the value that we get back is the pc of the
+    // current instruction *PLUS 8*. This means that ldr foo, [pc, +0] reads
+    // $pc+8. In other words, there is an empty word after the branch into the
+    // switch table before the table actually starts. Since the only other
+    // unhandled case is the default case (both out of range high and out of
+    // range low) I then insert a branch to default case into the extra slot,
+    // which ensures we don't attempt to execute the address table.
     Label *defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
 
     int32_t cases = mir->numCases();
-    // Lower value with low value
+    // Lower value with low value.
     masm.ma_sub(index, Imm32(mir->low()), index, SetCond);
     masm.ma_rsb(index, Imm32(cases - 1), index, SetCond, Assembler::NotSigned);
     AutoForbidPools afp(&masm);
     masm.ma_ldr(DTRAddr(pc, DtrRegImmShift(index, LSL, 2)), pc, Offset, Assembler::NotSigned);
     masm.ma_b(defaultcase);
 
-    // To fill in the CodeLabels for the case entries, we need to first
-    // generate the case entries (we don't yet know their offsets in the
-    // instruction stream).
+    // To fill in the CodeLabels for the case entries, we need to first generate
+    // the case entries (we don't yet know their offsets in the instruction
+    // stream).
     OutOfLineTableSwitch *ool = new(alloc()) OutOfLineTableSwitch(alloc(), mir);
     for (int32_t i = 0; i < cases; i++) {
         CodeLabel cl;
         masm.writeCodePointer(cl.dest());
         if (!ool->addCodeLabel(cl))
             return false;
     }
     if (!addOutOfLineCode(ool))
@@ -1221,33 +1235,33 @@ CodeGeneratorARM::visitCeilF(LCeilF *lir
 
 bool
 CodeGeneratorARM::visitRound(LRound *lir)
 {
     FloatRegister input = ToFloatRegister(lir->input());
     Register output = ToRegister(lir->output());
     FloatRegister tmp = ToFloatRegister(lir->temp());
     Label bail;
-    // Output is either correct, or clamped.  All -0 cases have been translated to a clamped
-    // case.a
+    // Output is either correct, or clamped. All -0 cases have been translated
+    // to a clamped case.
     masm.round(input, output, &bail, tmp);
     if (!bailoutFrom(&bail, lir->snapshot()))
         return false;
     return true;
 }
 
 bool
 CodeGeneratorARM::visitRoundF(LRoundF *lir)
 {
     FloatRegister input = ToFloatRegister(lir->input());
     Register output = ToRegister(lir->output());
     FloatRegister tmp = ToFloatRegister(lir->temp());
     Label bail;
-    // Output is either correct, or clamped.  All -0 cases have been translated to a clamped
-    // case.a
+    // Output is either correct, or clamped. All -0 cases have been translated
+    // to a clamped case.
     masm.roundf(input, output, &bail, tmp);
     if (!bailoutFrom(&bail, lir->snapshot()))
         return false;
     return true;
 }
 
 void
 CodeGeneratorARM::emitRoundDouble(FloatRegister src, Register dest, Label *fail)
@@ -1334,19 +1348,19 @@ CodeGeneratorARM::visitValue(LValue *val
 
 bool
 CodeGeneratorARM::visitBox(LBox *box)
 {
     const LDefinition *type = box->getDef(TYPE_INDEX);
 
     JS_ASSERT(!box->getOperand(0)->isConstant());
 
-    // On x86, the input operand and the output payload have the same
-    // virtual register. All that needs to be written is the type tag for
-    // the type definition.
+    // On x86, the input operand and the output payload have the same virtual
+    // register. All that needs to be written is the type tag for the type
+    // definition.
     masm.ma_mov(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
     return true;
 }
 
 bool
 CodeGeneratorARM::visitBoxFloatingPoint(LBoxFloatingPoint *box)
 {
     const LDefinition *payload = box->getDef(PAYLOAD_INDEX);
@@ -1409,40 +1423,38 @@ bool
 CodeGeneratorARM::visitTestDAndBranch(LTestDAndBranch *test)
 {
     const LAllocation *opd = test->input();
     masm.ma_vcmpz(ToFloatRegister(opd));
     masm.as_vmrs(pc);
 
     MBasicBlock *ifTrue = test->ifTrue();
     MBasicBlock *ifFalse = test->ifFalse();
-    // If the compare set the  0 bit, then the result
-    // is definately false.
+    // If the compare set the 0 bit, then the result is definately false.
     jumpToBlock(ifFalse, Assembler::Zero);
-    // it is also false if one of the operands is NAN, which is
-    // shown as Overflow.
+    // It is also false if one of the operands is NAN, which is shown as
+    // Overflow.
     jumpToBlock(ifFalse, Assembler::Overflow);
     jumpToBlock(ifTrue);
     return true;
 }
 
 bool
 CodeGeneratorARM::visitTestFAndBranch(LTestFAndBranch *test)
 {
     const LAllocation *opd = test->input();
     masm.ma_vcmpz_f32(ToFloatRegister(opd));
     masm.as_vmrs(pc);
 
     MBasicBlock *ifTrue = test->ifTrue();
     MBasicBlock *ifFalse = test->ifFalse();
-    // If the compare set the  0 bit, then the result
-    // is definately false.
+    // If the compare set the 0 bit, then the result is definately false.
     jumpToBlock(ifFalse, Assembler::Zero);
-    // it is also false if one of the operands is NAN, which is
-    // shown as Overflow.
+    // It is also false if one of the operands is NAN, which is shown as
+    // Overflow.
     jumpToBlock(ifFalse, Assembler::Overflow);
     jumpToBlock(ifTrue);
     return true;
 }
 
 bool
 CodeGeneratorARM::visitCompareD(LCompareD *comp)
 {
@@ -1624,61 +1636,61 @@ CodeGeneratorARM::visitNotI(LNotI *ins)
     masm.ma_cmp(ToRegister(ins->input()), Imm32(0));
     masm.emitSet(Assembler::Equal, ToRegister(ins->output()));
     return true;
 }
 
 bool
 CodeGeneratorARM::visitNotD(LNotD *ins)
 {
-    // Since this operation is not, we want to set a bit if
-    // the double is falsey, which means 0.0, -0.0 or NaN.
-    // when comparing with 0, an input of 0 will set the Z bit (30)
-    // and NaN will set the V bit (28) of the APSR.
+    // Since this operation is not, we want to set a bit if the double is
+    // falsey, which means 0.0, -0.0 or NaN. When comparing with 0, an input of
+    // 0 will set the Z bit (30) and NaN will set the V bit (28) of the APSR.
     FloatRegister opd = ToFloatRegister(ins->input());
     Register dest = ToRegister(ins->output());
 
-    // Do the compare
+    // Do the compare.
     masm.ma_vcmpz(opd);
     // TODO There are three variations here to compare performance-wise.
     bool nocond = true;
     if (nocond) {
-        // Load the value into the dest register
+        // Load the value into the dest register.
         masm.as_vmrs(dest);
         masm.ma_lsr(Imm32(28), dest, dest);
-        masm.ma_alu(dest, lsr(dest, 2), dest, op_orr); // 28 + 2 = 30
+        // 28 + 2 = 30
+        masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
         masm.ma_and(Imm32(1), dest);
     } else {
         masm.as_vmrs(pc);
         masm.ma_mov(Imm32(0), dest);
         masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Equal);
         masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Overflow);
     }
     return true;
 }
 
 bool
 CodeGeneratorARM::visitNotF(LNotF *ins)
 {
-    // Since this operation is not, we want to set a bit if
-    // the double is falsey, which means 0.0, -0.0 or NaN.
-    // when comparing with 0, an input of 0 will set the Z bit (30)
-    // and NaN will set the V bit (28) of the APSR.
+    // Since this operation is not, we want to set a bit if the double is
+    // falsey, which means 0.0, -0.0 or NaN. When comparing with 0, an input of
+    // 0 will set the Z bit (30) and NaN will set the V bit (28) of the APSR.
     FloatRegister opd = ToFloatRegister(ins->input());
     Register dest = ToRegister(ins->output());
 
-    // Do the compare
+    // Do the compare.
     masm.ma_vcmpz_f32(opd);
     // TODO There are three variations here to compare performance-wise.
     bool nocond = true;
     if (nocond) {
-        // Load the value into the dest register
+        // Load the value into the dest register.
         masm.as_vmrs(dest);
         masm.ma_lsr(Imm32(28), dest, dest);
-        masm.ma_alu(dest, lsr(dest, 2), dest, op_orr); // 28 + 2 = 30
+        // 28 + 2 = 30
+        masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
         masm.ma_and(Imm32(1), dest);
     } else {
         masm.as_vmrs(pc);
         masm.ma_mov(Imm32(0), dest);
         masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Equal);
         masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Overflow);
     }
     return true;
@@ -1721,35 +1733,34 @@ CodeGeneratorARM::visitGuardClass(LGuard
     if (!bailoutIf(Assembler::NotEqual, guard->snapshot()))
         return false;
     return true;
 }
 
 bool
 CodeGeneratorARM::generateInvalidateEpilogue()
 {
-    // Ensure that there is enough space in the buffer for the OsiPoint
-    // patching to occur. Otherwise, we could overwrite the invalidation
-    // epilogue.
-    for (size_t i = 0; i < sizeof(void *); i+= Assembler::nopSize())
+    // Ensure that there is enough space in the buffer for the OsiPoint patching
+    // to occur. Otherwise, we could overwrite the invalidation epilogue.
+    for (size_t i = 0; i < sizeof(void *); i += Assembler::NopSize())
         masm.nop();
 
     masm.bind(&invalidate_);
 
-    // Push the return address of the point that we bailed out at onto the stack
+    // Push the return address of the point that we bailed out at onto the stack.
     masm.Push(lr);
 
     // Push the Ion script onto the stack (when we determine what that pointer is).
     invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
     JitCode *thunk = gen->jitRuntime()->getInvalidationThunk();
 
     masm.branch(thunk);
 
-    // We should never reach this point in JIT code -- the invalidation thunk should
-    // pop the invalidated JS frame and return directly to its caller.
+    // We should never reach this point in JIT code -- the invalidation thunk
+    // should pop the invalidated JS frame and return directly to its caller.
     masm.assumeUnreachable("Should have returned directly to its caller instead of here.");
     return true;
 }
 
 void
 DispatchIonCache::initializeAddCacheState(LInstruction *ins, AddCacheState *addState)
 {
     // Can always use the scratch register on ARM.
--- a/js/src/jit/arm/CodeGenerator-arm.h
+++ b/js/src/jit/arm/CodeGenerator-arm.h
@@ -21,19 +21,19 @@ class CodeGeneratorARM : public CodeGene
     friend class MoveResolverARM;
 
     CodeGeneratorARM *thisFromCtor() {return this;}
 
   protected:
     // Label for the common return path.
     NonAssertingLabel returnLabel_;
     NonAssertingLabel deoptLabel_;
-    // ugh.  this is not going to be pretty to move over.
-    // stack slotted variables are not useful on arm.
-    // it looks like this will need to return one of two types.
+    // Ugh. This is not going to be pretty to move over. Stack slotted variables
+    // are not useful on arm. It looks like this will need to return one of two
+    // types.
     inline Operand ToOperand(const LAllocation &a) {
         if (a.isGeneralReg())
             return Operand(a.toGeneralReg()->reg());
         if (a.isFloatReg())
             return Operand(a.toFloatReg()->reg());
         return Operand(StackPointer, ToStackOffset(&a));
     }
     inline Operand ToOperand(const LAllocation *a) {
--- a/js/src/jit/arm/LIR-arm.h
+++ b/js/src/jit/arm/LIR-arm.h
@@ -139,20 +139,20 @@ class LDivI : public LBinaryMath<1>
 // LSoftDivI is a software divide for ARM cores that don't support a hardware
 // divide instruction.
 //
 // It is implemented as a proper C function so it trashes r0, r1, r2 and r3.
 // The call also trashes lr, and has the ability to trash ip. The function also
 // takes two arguments (dividend in r0, divisor in r1). The LInstruction gets
 // encoded such that the divisor and dividend are passed in their apropriate
 // registers and end their life at the start of the instruction by the use of
-// useFixedAtStart.  The result is returned in r0 and the other three registers
-// that can be trashed are marked as temps.  For the time being, the link
+// useFixedAtStart. The result is returned in r0 and the other three registers
+// that can be trashed are marked as temps. For the time being, the link
 // register is not marked as trashed because we never allocate to the link
-// register.  The FP registers are not trashed.
+// register. The FP registers are not trashed.
 class LSoftDivI : public LBinaryMath<3>
 {
   public:
     LIR_HEADER(SoftDivI);
 
     LSoftDivI(const LAllocation &lhs, const LAllocation &rhs,
               const LDefinition &temp1, const LDefinition &temp2, const LDefinition &temp3) {
         setOperand(0, lhs);
@@ -299,17 +299,17 @@ class LPowHalfD : public LInstructionHel
     const LAllocation *input() {
         return getOperand(0);
     }
     const LDefinition *output() {
         return getDef(0);
     }
 };
 
-// Takes a tableswitch with an integer to decide
+// Takes a tableswitch with an integer to decide.
 class LTableSwitch : public LInstructionHelper<0, 1, 1>
 {
   public:
     LIR_HEADER(TableSwitch);
 
     LTableSwitch(const LAllocation &in, const LDefinition &inputCopy, MTableSwitch *ins) {
         setOperand(0, in);
         setTemp(0, inputCopy);
@@ -327,17 +327,17 @@ class LTableSwitch : public LInstruction
         return getTemp(0);
     }
     // This is added to share the same CodeGenerator prefixes.
     const LDefinition *tempPointer() {
         return nullptr;
     }
 };
 
-// Takes a tableswitch with an integer to decide
+// Takes a tableswitch with an integer to decide.
 class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 2>
 {
   public:
     LIR_HEADER(TableSwitchV);
 
     LTableSwitchV(const LDefinition &inputCopy, const LDefinition &floatCopy,
                   MTableSwitch *ins)
     {
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -21,18 +21,18 @@ using namespace jit;
 
 using mozilla::Abs;
 using mozilla::BitwiseCast;
 
 bool
 isValueDTRDCandidate(ValueOperand &val)
 {
     // In order to be used for a DTRD memory function, the two target registers
-    // need to be a) Adjacent, with the tag larger than the payload, and
-    // b) Aligned to a multiple of two.
+    // need to be a) Adjacent, with the tag larger than the payload, and b)
+    // Aligned to a multiple of two.
     if ((val.typeReg().code() != (val.payloadReg().code() + 1)))
         return false;
     if ((val.payloadReg().code() & 1) != 0)
         return false;
     return true;
 }
 
 void
@@ -41,61 +41,62 @@ MacroAssemblerARM::convertBoolToInt32(Re
     // Note that C++ bool is only 1 byte, so zero extend it to clear the
     // higher-order bits.
     ma_and(Imm32(0xff), source, dest);
 }
 
 void
 MacroAssemblerARM::convertInt32ToDouble(Register src, FloatRegister dest_)
 {
-    // direct conversions aren't possible.
+    // Direct conversions aren't possible.
     VFPRegister dest = VFPRegister(dest_);
     as_vxfer(src, InvalidReg, dest.sintOverlay(),
              CoreToFloat);
     as_vcvt(dest, dest.sintOverlay());
 }
 
 void
 MacroAssemblerARM::convertInt32ToDouble(const Address &src, FloatRegister dest)
 {
     ma_vldr(Operand(src), ScratchDoubleReg);
     as_vcvt(dest, VFPRegister(ScratchDoubleReg).sintOverlay());
 }
 
 void
 MacroAssemblerARM::convertUInt32ToDouble(Register src, FloatRegister dest_)
 {
-    // direct conversions aren't possible.
+    // Direct conversions aren't possible.
     VFPRegister dest = VFPRegister(dest_);
     as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat);
     as_vcvt(dest, dest.uintOverlay());
 }
 
 void
 MacroAssemblerARM::convertUInt32ToFloat32(Register src, FloatRegister dest_)
 {
-    // direct conversions aren't possible.
+    // Direct conversions aren't possible.
     VFPRegister dest = VFPRegister(dest_);
     as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat);
     as_vcvt(VFPRegister(dest).singleOverlay(), dest.uintOverlay());
 }
 
 void MacroAssemblerARM::convertDoubleToFloat32(FloatRegister src, FloatRegister dest,
                                                Condition c)
 {
     as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src), false, c);
 }
 
-// there are two options for implementing emitTruncateDouble.
-// 1) convert the floating point value to an integer, if it did not fit,
-//        then it was clamped to INT_MIN/INT_MAX, and we can test it.
-//        NOTE: if the value really was supposed to be INT_MAX / INT_MIN
-//        then it will be wrong.
-// 2) convert the floating point value to an integer, if it did not fit,
-//        then it set one or two bits in the fpcsr.  Check those.
+// There are two options for implementing emitTruncateDouble:
+//
+// 1. Convert the floating point value to an integer, if it did not fit, then it
+// was clamped to INT_MIN/INT_MAX, and we can test it. NOTE: if the value
+// really was supposed to be INT_MAX / INT_MIN then it will be wrong.
+//
+// 2. Convert the floating point value to an integer, if it did not fit, then it
+// set one or two bits in the fpcsr. Check those.
 void
 MacroAssemblerARM::branchTruncateDouble(FloatRegister src, Register dest, Label *fail)
 {
     FloatRegister ScratchSIntReg = ScratchDoubleReg.sintOverlay();
     ma_vcvt_F64_I32(src, ScratchSIntReg);
     ma_vxfer(ScratchSIntReg, dest);
     ma_cmp(dest, Imm32(0x7fffffff));
     ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual);
@@ -104,62 +105,62 @@ MacroAssemblerARM::branchTruncateDouble(
 
 // Checks whether a double is representable as a 32-bit integer. If so, the
 // integer is written to the output register. Otherwise, a bailout is taken to
 // the given snapshot. This function overwrites the scratch float register.
 void
 MacroAssemblerARM::convertDoubleToInt32(FloatRegister src, Register dest,
                                         Label *fail, bool negativeZeroCheck)
 {
-    // convert the floating point value to an integer, if it did not fit,
-    //     then when we convert it *back* to  a float, it will have a
-    //     different value, which we can test.
+    // Convert the floating point value to an integer, if it did not fit, then
+    // when we convert it *back* to a float, it will have a different value,
+    // which we can test.
     FloatRegister ScratchSIntReg = ScratchDoubleReg.sintOverlay();
     ma_vcvt_F64_I32(src, ScratchSIntReg);
-    // move the value into the dest register.
+    // Move the value into the dest register.
     ma_vxfer(ScratchSIntReg, dest);
     ma_vcvt_I32_F64(ScratchSIntReg, ScratchDoubleReg);
     ma_vcmp(src, ScratchDoubleReg);
     as_vmrs(pc);
     ma_b(fail, Assembler::VFP_NotEqualOrUnordered);
 
     if (negativeZeroCheck) {
         ma_cmp(dest, Imm32(0));
-        // Test and bail for -0.0, when integer result is 0
-        // Move the top word of the double into the output reg, if it is non-zero,
-        // then the original value was -0.0
+        // Test and bail for -0.0, when integer result is 0. Move the top word
+        // of the double into the output reg, if it is non-zero, then the
+        // original value was -0.0.
         as_vxfer(dest, InvalidReg, src, FloatToCore, Assembler::Equal, 1);
         ma_cmp(dest, Imm32(0x80000000), Assembler::Equal);
         ma_b(fail, Assembler::Equal);
     }
 }
 
 // Checks whether a float32 is representable as a 32-bit integer. If so, the
 // integer is written to the output register. Otherwise, a bailout is taken to
 // the given snapshot. This function overwrites the scratch float register.
 void
 MacroAssemblerARM::convertFloat32ToInt32(FloatRegister src, Register dest,
                                          Label *fail, bool negativeZeroCheck)
 {
-    // convert the floating point value to an integer, if it did not fit,
-    //     then when we convert it *back* to  a float, it will have a
-    //     different value, which we can test.
+    // Convert the floating point value to an integer, if it did not fit, then
+    // when we convert it *back* to a float, it will have a different value,
+    // which we can test.
     ma_vcvt_F32_I32(src, ScratchFloat32Reg.sintOverlay());
-    // move the value into the dest register.
+    // Move the value into the dest register.
     ma_vxfer(ScratchFloat32Reg, dest);
     ma_vcvt_I32_F32(ScratchFloat32Reg.sintOverlay(), ScratchFloat32Reg);
     ma_vcmp_f32(src, ScratchFloat32Reg);
     as_vmrs(pc);
     ma_b(fail, Assembler::VFP_NotEqualOrUnordered);
 
     if (negativeZeroCheck) {
         ma_cmp(dest, Imm32(0));
-        // Test and bail for -0.0, when integer result is 0
-        // Move the float into the output reg, and if it is non-zero then
-        // the original value was -0.0
+        // Test and bail for -0.0, when integer result is 0. Move the float into
+        // the output reg, and if it is non-zero then the original value was
+        // -0.0
         as_vxfer(dest, InvalidReg, VFPRegister(src).singleOverlay(), FloatToCore, Assembler::Equal, 0);
         ma_cmp(dest, Imm32(0x80000000), Assembler::Equal);
         ma_b(fail, Assembler::Equal);
     }
 }
 
 void
 MacroAssemblerARM::convertFloat32ToDouble(FloatRegister src, FloatRegister dest) {
@@ -172,17 +173,17 @@ MacroAssemblerARM::branchTruncateFloat32
     ma_vxfer(ScratchFloat32Reg, dest);
     ma_cmp(dest, Imm32(0x7fffffff));
     ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual);
     ma_b(fail, Assembler::Equal);
 }
 
 void
 MacroAssemblerARM::convertInt32ToFloat32(Register src, FloatRegister dest) {
-    // direct conversions aren't possible.
+    // Direct conversions aren't possible.
     as_vxfer(src, InvalidReg, dest.sintOverlay(),
              CoreToFloat);
     as_vcvt(dest.singleOverlay(), dest.sintOverlay());
 }
 
 void
 MacroAssemblerARM::convertInt32ToFloat32(const Address &src, FloatRegister dest) {
     ma_vldr(Operand(src), ScratchFloat32Reg);
@@ -240,156 +241,146 @@ MacroAssemblerARM::inc64(AbsoluteAddress
 
 bool
 MacroAssemblerARM::alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op,
                            SetCond_ sc, Condition c)
 {
     if ((sc == SetCond && ! condsAreSafe(op)) || !can_dbl(op))
         return false;
     ALUOp interop = getDestVariant(op);
-    Imm8::TwoImm8mData both = Imm8::encodeTwoImms(imm.value);
+    Imm8::TwoImm8mData both = Imm8::EncodeTwoImms(imm.value);
     if (both.fst.invalid)
         return false;
-    // for the most part, there is no good reason to set the condition
-    // codes for the first instruction.
-    // we can do better things if the second instruction doesn't
-    // have a dest, such as check for overflow by doing first operation
-    // don't do second operation if first operation overflowed.
-    // this preserves the overflow condition code.
-    // unfortunately, it is horribly brittle.
+    // For the most part, there is no good reason to set the condition codes for
+    // the first instruction. We can do better things if the second instruction
+    // doesn't have a dest, such as check for overflow by doing first operation
+    // don't do second operation if first operation overflowed. This preserves
+    // the overflow condition code. Unfortunately, it is horribly brittle.
     as_alu(ScratchRegister, src1, both.fst, interop, NoSetCond, c);
     as_alu(dest, ScratchRegister, both.snd, op, sc, c);
     return true;
 }
 
 
 void
 MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest,
                           ALUOp op,
                           SetCond_ sc, Condition c)
 {
-    // As it turns out, if you ask for a compare-like instruction
-    // you *probably* want it to set condition codes.
+    // As it turns out, if you ask for a compare-like instruction you *probably*
+    // want it to set condition codes.
     if (dest == InvalidReg)
         JS_ASSERT(sc == SetCond);
 
-    // The operator gives us the ability to determine how
-    // this can be used.
+    // The operator gives us the ability to determine how this can be used.
     Imm8 imm8 = Imm8(imm.value);
-    // ONE INSTRUCTION:
-    // If we can encode it using an imm8m, then do so.
+    // One instruction: If we can encode it using an imm8m, then do so.
     if (!imm8.invalid) {
         as_alu(dest, src1, imm8, op, sc, c);
         return;
     }
-    // ONE INSTRUCTION, NEGATED:
+    // One instruction, negated:
     Imm32 negImm = imm;
     Register negDest;
     ALUOp negOp = ALUNeg(op, dest, &negImm, &negDest);
     Imm8 negImm8 = Imm8(negImm.value);
-    // add r1, r2, -15 can be replaced with
-    // sub r1, r2, 15
-    // for bonus points, dest can be replaced (nearly always invalid => ScratchRegister)
-    // This is useful if we wish to negate tst.  tst has an invalid (aka not used) dest,
-    // but its negation is bic *requires* a dest.  We can accomodate, but it will need to clobber
-    // *something*, and the scratch register isn't being used, so...
-    if (negOp != op_invalid && !negImm8.invalid) {
+    // 'add r1, r2, -15' can be replaced with 'sub r1, r2, 15'. For bonus
+    // points, dest can be replaced (nearly always invalid => ScratchRegister)
+    // This is useful if we wish to negate tst. tst has an invalid (aka not
+    // used) dest, but its negation is bic *requires* a dest. We can accomodate,
+    // but it will need to clobber *something*, and the scratch register isn't
+    // being used, so...
+    if (negOp != OpInvalid && !negImm8.invalid) {
         as_alu(negDest, src1, negImm8, negOp, sc, c);
         return;
     }
 
     if (HasMOVWT()) {
-        // If the operation is a move-a-like then we can try to use movw to
-        // move the bits into the destination.  Otherwise, we'll need to
-        // fall back on a multi-instruction format :(
-        // movw/movt don't set condition codes, so don't hold your breath.
-        if (sc == NoSetCond && (op == op_mov || op == op_mvn)) {
-            // ARMv7 supports movw/movt. movw zero-extends
-            // its 16 bit argument, so we can set the register
-            // this way.
-            // movt leaves the bottom 16 bits in tact, so
-            // it is unsuitable to move a constant that
-            if (op == op_mov && ((imm.value & ~ 0xffff) == 0)) {
+        // If the operation is a move-a-like then we can try to use movw to move
+        // the bits into the destination. Otherwise, we'll need to fall back on
+        // a multi-instruction format :(
+        // movw/movt does not set condition codes, so don't hold your breath.
+        if (sc == NoSetCond && (op == OpMov || op == OpMvn)) {
+            // ARMv7 supports movw/movt. movw zero-extends its 16 bit argument,
+            // so we can set the register this way. movt leaves the bottom 16
+            // bits in tact, so it is unsuitable to move a constant that
+            if (op == OpMov && ((imm.value & ~ 0xffff) == 0)) {
                 JS_ASSERT(src1 == InvalidReg);
                 as_movw(dest, (uint16_t)imm.value, c);
                 return;
             }
 
             // If they asked for a mvn rfoo, imm, where ~imm fits into 16 bits
             // then do it.
-            if (op == op_mvn && (((~imm.value) & ~ 0xffff) == 0)) {
+            if (op == OpMvn && (((~imm.value) & ~ 0xffff) == 0)) {
                 JS_ASSERT(src1 == InvalidReg);
                 as_movw(dest, (uint16_t)~imm.value, c);
                 return;
             }
 
-            // TODO: constant dedup may enable us to add dest, r0, 23 *if*
-            // we are attempting to load a constant that looks similar to one
-            // that already exists
-            // If it can't be done with a single movw
-            // then we *need* to use two instructions
-            // since this must be some sort of a move operation, we can just use
-            // a movw/movt pair and get the whole thing done in two moves.  This
-            // does not work for ops like add, sinc we'd need to do
-            // movw tmp; movt tmp; add dest, tmp, src1
-            if (op == op_mvn)
+            // TODO: constant dedup may enable us to add dest, r0, 23 *if* we
+            // are attempting to load a constant that looks similar to one that
+            // already exists. If it can't be done with a single movw then we
+            // *need* to use two instructions since this must be some sort of a
+            // move operation, we can just use a movw/movt pair and get the
+            // whole thing done in two moves. This does not work for ops like
+            // add, since we'd need to do: movw tmp; movt tmp; add dest, tmp,
+            // src1.
+            if (op == OpMvn)
                 imm.value = ~imm.value;
             as_movw(dest, imm.value & 0xffff, c);
             as_movt(dest, (imm.value >> 16) & 0xffff, c);
             return;
         }
-        // If we weren't doing a movalike, a 16 bit immediate
-        // will require 2 instructions.  With the same amount of
-        // space and (less)time, we can do two 8 bit operations, reusing
-        // the dest register.  e.g.
-        // movw tmp, 0xffff; add dest, src, tmp ror 4
+        // If we weren't doing a movalike, a 16 bit immediate will require 2
+        // instructions. With the same amount of space and (less)time, we can do
+        // two 8 bit operations, reusing the dest register. e.g.
+        //  movw tmp, 0xffff; add dest, src, tmp ror 4
         // vs.
-        // add dest, src, 0xff0; add dest, dest, 0xf000000f
-        // it turns out that there are some immediates that we miss with the
-        // second approach.  A sample value is: add dest, src, 0x1fffe
-        // this can be done by movw tmp, 0xffff; add dest, src, tmp lsl 1
-        // since imm8m's only get even offsets, we cannot encode this.
-        // I'll try to encode as two imm8's first, since they are faster.
-        // Both operations should take 1 cycle, where as add dest, tmp ror 4
-        // takes two cycles to execute.
+        //  add dest, src, 0xff0; add dest, dest, 0xf000000f
+        //
+        // It turns out that there are some immediates that we miss with the
+        // second approach. A sample value is: add dest, src, 0x1fffe this can
+        // be done by movw tmp, 0xffff; add dest, src, tmp lsl 1 since imm8m's
+        // only get even offsets, we cannot encode this. I'll try to encode as
+        // two imm8's first, since they are faster. Both operations should take
+        // 1 cycle, where as add dest, tmp ror 4 takes two cycles to execute.
     }
 
-    // Either a) this isn't ARMv7 b) this isn't a move
-    // start by attempting to generate a two instruction form.
-    // Some things cannot be made into two-inst forms correctly.
-    // namely, adds dest, src, 0xffff.
-    // Since we want the condition codes (and don't know which ones will
-    // be checked), we need to assume that the overflow flag will be checked
-    // and add{,s} dest, src, 0xff00; add{,s} dest, dest, 0xff is not
-    // guaranteed to set the overflow flag the same as the (theoretical)
-    // one instruction variant.
+    // Either a) this isn't ARMv7 b) this isn't a move start by attempting to
+    // generate a two instruction form. Some things cannot be made into two-inst
+    // forms correctly. Namely, adds dest, src, 0xffff. Since we want the
+    // condition codes (and don't know which ones will be checked), we need to
+    // assume that the overflow flag will be checked and add{,s} dest, src,
+    // 0xff00; add{,s} dest, dest, 0xff is not guaranteed to set the overflow
+    // flag the same as the (theoretical) one instruction variant.
     if (alu_dbl(src1, imm, dest, op, sc, c))
         return;
 
     // And try with its negative.
-    if (negOp != op_invalid &&
+    if (negOp != OpInvalid &&
         alu_dbl(src1, negImm, negDest, negOp, sc, c))
         return;
 
-    // Well, damn. We can use two 16 bit mov's, then do the op
-    // or we can do a single load from a pool then op.
+    // Well, damn. We can use two 16 bit mov's, then do the op or we can do a
+    // single load from a pool then op.
     if (HasMOVWT()) {
-        // Try to load the immediate into a scratch register
-        // then use that
+        // Try to load the immediate into a scratch register then use that
         as_movw(ScratchRegister, imm.value & 0xffff, c);
         if ((imm.value >> 16) != 0)
             as_movt(ScratchRegister, (imm.value >> 16) & 0xffff, c);
     } else {
-        // Going to have to use a load.  If the operation is a move, then just move it into the
-        // destination register
-        if (op == op_mov) {
+        // Going to have to use a load. If the operation is a move, then just
+        // move it into the destination register
+        if (op == OpMov) {
             as_Imm32Pool(dest, imm.value, c);
             return;
         } else {
-            // If this isn't just going into a register, then stick it in a temp, and then proceed.
+            // If this isn't just going into a register, then stick it in a
+            // temp, and then proceed.
             as_Imm32Pool(ScratchRegister, imm.value, c);
         }
     }
     as_alu(dest, src1, O2Reg(ScratchRegister), op, sc, c);
 }
 
 void
 MacroAssemblerARM::ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
@@ -420,26 +411,26 @@ NextInst(Instruction *i)
 }
 
 void
 MacroAssemblerARM::ma_movPatchable(Imm32 imm_, Register dest, Assembler::Condition c,
                                    RelocStyle rs, Instruction *i)
 {
     int32_t imm = imm_.value;
     if (i) {
-        // Make sure the current instruction is not an artificial guard
-        // inserted by the assembler buffer.
+        // Make sure the current instruction is not an artificial guard inserted
+        // by the assembler buffer.
         i = i->skipPool();
     }
     switch(rs) {
       case L_MOVWT:
         as_movw(dest, Imm16(imm & 0xffff), c, i);
-        // i can be nullptr here.  that just means "insert in the next in sequence."
-        // NextInst is special cased to not do anything when it is passed nullptr, so
-        // two consecutive instructions will be inserted.
+        // 'i' can be nullptr here. That just means "insert in the next in
+        // sequence." NextInst is special cased to not do anything when it is
+        // passed nullptr, so two consecutive instructions will be inserted.
         i = NextInst(i);
         as_movt(dest, Imm16(imm >> 16 & 0xffff), c, i);
         break;
       case L_LDR:
         if(i == nullptr)
             as_Imm32Pool(dest, imm, c);
         else
             as_WritePoolEntry(i, c, imm);
@@ -461,24 +452,24 @@ MacroAssemblerARM::ma_mov(Register src, 
     if (sc == SetCond || dest != src)
         as_mov(dest, O2Reg(src), sc, c);
 }
 
 void
 MacroAssemblerARM::ma_mov(Imm32 imm, Register dest,
                           SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(InvalidReg, imm, dest, op_mov, sc, c);
+    ma_alu(InvalidReg, imm, dest, OpMov, sc, c);
 }
 
 void
 MacroAssemblerARM::ma_mov(ImmWord imm, Register dest,
                           SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(InvalidReg, Imm32(imm.value), dest, op_mov, sc, c);
+    ma_alu(InvalidReg, Imm32(imm.value), dest, OpMov, sc, c);
 }
 
 void
 MacroAssemblerARM::ma_mov(ImmGCPtr ptr, Register dest)
 {
     // As opposed to x86/x64 version, the data relocation has to be executed
     // before to recover the pointer, and not after.
     writeDataRelocation(ptr);
@@ -486,17 +477,17 @@ MacroAssemblerARM::ma_mov(ImmGCPtr ptr, 
     if (HasMOVWT())
         rs = L_MOVWT;
     else
         rs = L_LDR;
 
     ma_movPatchable(Imm32(uintptr_t(ptr.value)), dest, Always, rs);
 }
 
-    // Shifts (just a move with a shifting op2)
+// Shifts (just a move with a shifting op2)
 void
 MacroAssemblerARM::ma_lsl(Imm32 shift, Register src, Register dst)
 {
     as_mov(dst, lsl(src, shift.value));
 }
 void
 MacroAssemblerARM::ma_lsr(Imm32 shift, Register src, Register dst)
 {
@@ -512,17 +503,18 @@ MacroAssemblerARM::ma_ror(Imm32 shift, R
 {
     as_mov(dst, ror(src, shift.value));
 }
 void
 MacroAssemblerARM::ma_rol(Imm32 shift, Register src, Register dst)
 {
     as_mov(dst, rol(src, shift.value));
 }
-    // Shifts (just a move with a shifting op2)
+
+// Shifts (just a move with a shifting op2)
 void
 MacroAssemblerARM::ma_lsl(Register shift, Register src, Register dst)
 {
     as_mov(dst, lsl(src, shift));
 }
 void
 MacroAssemblerARM::ma_lsr(Register shift, Register src, Register dst)
 {
@@ -540,28 +532,27 @@ MacroAssemblerARM::ma_ror(Register shift
 }
 void
 MacroAssemblerARM::ma_rol(Register shift, Register src, Register dst)
 {
     ma_rsb(shift, Imm32(32), ScratchRegister);
     as_mov(dst, ror(src, ScratchRegister));
 }
 
-    // Move not (dest <- ~src)
-
+// Move not (dest <- ~src)
 void
 MacroAssemblerARM::ma_mvn(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(InvalidReg, imm, dest, op_mvn, sc, c);
+    ma_alu(InvalidReg, imm, dest, OpMvn, sc, c);
 }
 
 void
 MacroAssemblerARM::ma_mvn(Register src1, Register dest, SetCond_ sc, Assembler::Condition c)
 {
-    as_alu(dest, InvalidReg, O2Reg(src1), op_mvn, sc, c);
+    as_alu(dest, InvalidReg, O2Reg(src1), OpMvn, sc, c);
 }
 
 // Negate (dest <- -src), src is a register, rather than a general op2.
 void
 MacroAssemblerARM::ma_neg(Register src1, Register dest, SetCond_ sc, Assembler::Condition c)
 {
     as_rsb(dest, src1, Imm8(0), sc, c);
 }
@@ -576,31 +567,30 @@ void
 MacroAssemblerARM::ma_and(Register src1, Register src2, Register dest,
                           SetCond_ sc, Assembler::Condition c)
 {
     as_and(dest, src1, O2Reg(src2), sc, c);
 }
 void
 MacroAssemblerARM::ma_and(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(dest, imm, dest, op_and, sc, c);
+    ma_alu(dest, imm, dest, OpAnd, sc, c);
 }
 void
 MacroAssemblerARM::ma_and(Imm32 imm, Register src1, Register dest,
                           SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(src1, imm, dest, op_and, sc, c);
-}
-
+    ma_alu(src1, imm, dest, OpAnd, sc, c);
+}
 
 // Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2).
 void
 MacroAssemblerARM::ma_bic(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(dest, imm, dest, op_bic, sc, c);
+    ma_alu(dest, imm, dest, OpBic, sc, c);
 }
 
 // Exclusive or.
 void
 MacroAssemblerARM::ma_eor(Register src, Register dest, SetCond_ sc, Assembler::Condition c)
 {
     ma_eor(dest, src, dest, sc, c);
 }
@@ -608,23 +598,23 @@ void
 MacroAssemblerARM::ma_eor(Register src1, Register src2, Register dest,
                           SetCond_ sc, Assembler::Condition c)
 {
     as_eor(dest, src1, O2Reg(src2), sc, c);
 }
 void
 MacroAssemblerARM::ma_eor(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(dest, imm, dest, op_eor, sc, c);
+    ma_alu(dest, imm, dest, OpEor, sc, c);
 }
 void
 MacroAssemblerARM::ma_eor(Imm32 imm, Register src1, Register dest,
        SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(src1, imm, dest, op_eor, sc, c);
+    ma_alu(src1, imm, dest, OpEor, sc, c);
 }
 
 // Or.
 void
 MacroAssemblerARM::ma_orr(Register src, Register dest, SetCond_ sc, Assembler::Condition c)
 {
     ma_orr(dest, src, dest, sc, c);
 }
@@ -632,177 +622,177 @@ void
 MacroAssemblerARM::ma_orr(Register src1, Register src2, Register dest,
                           SetCond_ sc, Assembler::Condition c)
 {
     as_orr(dest, src1, O2Reg(src2), sc, c);
 }
 void
 MacroAssemblerARM::ma_orr(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(dest, imm, dest, op_orr, sc, c);
+    ma_alu(dest, imm, dest, OpOrr, sc, c);
 }
 void
 MacroAssemblerARM::ma_orr(Imm32 imm, Register src1, Register dest,
                           SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(src1, imm, dest, op_orr, sc, c);
+    ma_alu(src1, imm, dest, OpOrr, sc, c);
 }
 
 // Arithmetic-based ops.
 // Add with carry.
 void
 MacroAssemblerARM::ma_adc(Imm32 imm, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(dest, imm, dest, op_adc, sc, c);
+    ma_alu(dest, imm, dest, OpAdc, sc, c);
 }
 void
 MacroAssemblerARM::ma_adc(Register src, Register dest, SetCond_ sc, Condition c)
 {
-    as_alu(dest, dest, O2Reg(src), op_adc, sc, c);
+    as_alu(dest, dest, O2Reg(src), OpAdc, sc, c);
 }
 void
 MacroAssemblerARM::ma_adc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
 {
-    as_alu(dest, src1, O2Reg(src2), op_adc, sc, c);
+    as_alu(dest, src1, O2Reg(src2), OpAdc, sc, c);
 }
 
 // Add.
 void
 MacroAssemblerARM::ma_add(Imm32 imm, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(dest, imm, dest, op_add, sc, c);
+    ma_alu(dest, imm, dest, OpAdd, sc, c);
 }
 
 void
 MacroAssemblerARM::ma_add(Register src1, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(dest, O2Reg(src1), dest, op_add, sc, c);
+    ma_alu(dest, O2Reg(src1), dest, OpAdd, sc, c);
 }
 void
 MacroAssemblerARM::ma_add(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
 {
-    as_alu(dest, src1, O2Reg(src2), op_add, sc, c);
+    as_alu(dest, src1, O2Reg(src2), OpAdd, sc, c);
 }
 void
 MacroAssemblerARM::ma_add(Register src1, Operand op, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(src1, op, dest, op_add, sc, c);
+    ma_alu(src1, op, dest, OpAdd, sc, c);
 }
 void
 MacroAssemblerARM::ma_add(Register src1, Imm32 op, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(src1, op, dest, op_add, sc, c);
+    ma_alu(src1, op, dest, OpAdd, sc, c);
 }
 
 // Subtract with carry.
 void
 MacroAssemblerARM::ma_sbc(Imm32 imm, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(dest, imm, dest, op_sbc, sc, c);
+    ma_alu(dest, imm, dest, OpSbc, sc, c);
 }
 void
 MacroAssemblerARM::ma_sbc(Register src1, Register dest, SetCond_ sc, Condition c)
 {
-    as_alu(dest, dest, O2Reg(src1), op_sbc, sc, c);
+    as_alu(dest, dest, O2Reg(src1), OpSbc, sc, c);
 }
 void
 MacroAssemblerARM::ma_sbc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
 {
-    as_alu(dest, src1, O2Reg(src2), op_sbc, sc, c);
+    as_alu(dest, src1, O2Reg(src2), OpSbc, sc, c);
 }
 
 // Subtract.
 void
 MacroAssemblerARM::ma_sub(Imm32 imm, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(dest, imm, dest, op_sub, sc, c);
+    ma_alu(dest, imm, dest, OpSub, sc, c);
 }
 void
 MacroAssemblerARM::ma_sub(Register src1, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(dest, Operand(src1), dest, op_sub, sc, c);
+    ma_alu(dest, Operand(src1), dest, OpSub, sc, c);
 }
 void
 MacroAssemblerARM::ma_sub(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(src1, Operand(src2), dest, op_sub, sc, c);
+    ma_alu(src1, Operand(src2), dest, OpSub, sc, c);
 }
 void
 MacroAssemblerARM::ma_sub(Register src1, Operand op, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(src1, op, dest, op_sub, sc, c);
+    ma_alu(src1, op, dest, OpSub, sc, c);
 }
 void
 MacroAssemblerARM::ma_sub(Register src1, Imm32 op, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(src1, op, dest, op_sub, sc, c);
+    ma_alu(src1, op, dest, OpSub, sc, c);
 }
 
 // Severse subtract.
 void
 MacroAssemblerARM::ma_rsb(Imm32 imm, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(dest, imm, dest, op_rsb, sc, c);
+    ma_alu(dest, imm, dest, OpRsb, sc, c);
 }
 void
 MacroAssemblerARM::ma_rsb(Register src1, Register dest, SetCond_ sc, Condition c)
 {
-    as_alu(dest, dest, O2Reg(src1), op_add, sc, c);
+    as_alu(dest, dest, O2Reg(src1), OpAdd, sc, c);
 }
 void
 MacroAssemblerARM::ma_rsb(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
 {
-    as_alu(dest, src1, O2Reg(src2), op_rsb, sc, c);
+    as_alu(dest, src1, O2Reg(src2), OpRsb, sc, c);
 }
 void
 MacroAssemblerARM::ma_rsb(Register src1, Imm32 op2, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(src1, op2, dest, op_rsb, sc, c);
+    ma_alu(src1, op2, dest, OpRsb, sc, c);
 }
 
 // Reverse subtract with carry.
 void
 MacroAssemblerARM::ma_rsc(Imm32 imm, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(dest, imm, dest, op_rsc, sc, c);
+    ma_alu(dest, imm, dest, OpRsc, sc, c);
 }
 void
 MacroAssemblerARM::ma_rsc(Register src1, Register dest, SetCond_ sc, Condition c)
 {
-    as_alu(dest, dest, O2Reg(src1), op_rsc, sc, c);
+    as_alu(dest, dest, O2Reg(src1), OpRsc, sc, c);
 }
 void
 MacroAssemblerARM::ma_rsc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
 {
-    as_alu(dest, src1, O2Reg(src2), op_rsc, sc, c);
+    as_alu(dest, src1, O2Reg(src2), OpRsc, sc, c);
 }
 
 // Compares/tests.
 // Compare negative (sets condition codes as src1 + src2 would).
 void
 MacroAssemblerARM::ma_cmn(Register src1, Imm32 imm, Condition c)
 {
-    ma_alu(src1, imm, InvalidReg, op_cmn, SetCond, c);
+    ma_alu(src1, imm, InvalidReg, OpCmn, SetCond, c);
 }
 void
 MacroAssemblerARM::ma_cmn(Register src1, Register src2, Condition c)
 {
-    as_alu(InvalidReg, src2, O2Reg(src1), op_cmn, SetCond, c);
+    as_alu(InvalidReg, src2, O2Reg(src1), OpCmn, SetCond, c);
 }
 void
 MacroAssemblerARM::ma_cmn(Register src1, Operand op, Condition c)
 {
     MOZ_ASSUME_UNREACHABLE("Feature NYI");
 }
 
 // Compare (src - src2).
 void
 MacroAssemblerARM::ma_cmp(Register src1, Imm32 imm, Condition c)
 {
-    ma_alu(src1, imm, InvalidReg, op_cmp, SetCond, c);
+    ma_alu(src1, imm, InvalidReg, OpCmp, SetCond, c);
 }
 
 void
 MacroAssemblerARM::ma_cmp(Register src1, ImmWord ptr, Condition c)
 {
     ma_cmp(src1, Imm32(ptr.value), c);
 }
 
@@ -828,21 +818,21 @@ MacroAssemblerARM::ma_cmp(Register src1,
     }
 }
 void
 MacroAssemblerARM::ma_cmp(Register src1, Register src2, Condition c)
 {
     as_cmp(src1, O2Reg(src2), c);
 }
 
-// Test for equality, (src1^src2).
+// Test for equality, (src1 ^ src2).
 void
 MacroAssemblerARM::ma_teq(Register src1, Imm32 imm, Condition c)
 {
-    ma_alu(src1, imm, InvalidReg, op_teq, SetCond, c);
+    ma_alu(src1, imm, InvalidReg, OpTeq, SetCond, c);
 }
 void
 MacroAssemblerARM::ma_teq(Register src1, Register src2, Condition c)
 {
     as_tst(src1, O2Reg(src2), c);
 }
 void
 MacroAssemblerARM::ma_teq(Register src1, Operand op, Condition c)
@@ -850,17 +840,17 @@ MacroAssemblerARM::ma_teq(Register src1,
     as_teq(src1, op.toOp2(), c);
 }
 
 
 // Test (src1 & src2).
 void
 MacroAssemblerARM::ma_tst(Register src1, Imm32 imm, Condition c)
 {
-    ma_alu(src1, imm, InvalidReg, op_tst, SetCond, c);
+    ma_alu(src1, imm, InvalidReg, OpTst, SetCond, c);
 }
 void
 MacroAssemblerARM::ma_tst(Register src1, Register src2, Condition c)
 {
     as_tst(src1, O2Reg(src2), c);
 }
 void
 MacroAssemblerARM::ma_tst(Register src1, Operand op, Condition c)
@@ -879,18 +869,18 @@ MacroAssemblerARM::ma_mul(Register src1,
 
     ma_mov(imm, ScratchRegister);
     as_mul( dest, src1, ScratchRegister);
 }
 
 Assembler::Condition
 MacroAssemblerARM::ma_check_mul(Register src1, Register src2, Register dest, Condition cond)
 {
-    // TODO: this operation is illegal on armv6 and earlier if src2 == ScratchRegister
-    //       or src2 == dest.
+    // TODO: this operation is illegal on armv6 and earlier if src2 ==
+    // ScratchRegister or src2 == dest.
     if (cond == Equal || cond == NotEqual) {
         as_smull(ScratchRegister, dest, src1, src2, SetCond);
         return cond;
     }
 
     if (cond == Overflow) {
         as_smull(ScratchRegister, dest, src1, src2);
         as_cmp(ScratchRegister, asr(dest, 31));
@@ -917,91 +907,95 @@ MacroAssemblerARM::ma_check_mul(Register
 
     MOZ_ASSUME_UNREACHABLE("Condition NYI");
 }
 
 void
 MacroAssemblerARM::ma_mod_mask(Register src, Register dest, Register hold, Register tmp,
                                int32_t shift)
 {
-    // MATH:
     // We wish to compute x % (1<<y) - 1 for a known constant, y.
-    // first, let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit dividend as
+    //
+    // 1. Let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit dividend as
     // a number in base b, namely c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
-    // now, since both addition and multiplication commute with modulus,
-    // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
-    // (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)...
-    // now, since b == C + 1, b % C == 1, and b^n % C == 1
-    // this means that the whole thing simplifies to:
-    // c_0 + c_1 + c_2 ... c_n % C
-    // each c_n can easily be computed by a shift/bitextract, and the modulus can be maintained
-    // by simply subtracting by C whenever the number gets over C.
+    //
+    // 2. Since both addition and multiplication commute with modulus:
+    //   x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
+    //    (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)...
+    //
+    // 3. Since b == C + 1, b % C == 1, and b^n % C == 1 the whole thing
+    // simplifies to: c_0 + c_1 + c_2 ... c_n % C
+    //
+    // Each c_n can easily be computed by a shift/bitextract, and the modulus
+    // can be maintained by simply subtracting by C whenever the number gets
+    // over C.
     int32_t mask = (1 << shift) - 1;
     Label head;
 
-    // hold holds -1 if the value was negative, 1 otherwise.
-    // ScratchRegister holds the remaining bits that have not been processed
-    // lr serves as a temporary location to store extracted bits into as well
-    //    as holding the trial subtraction as a temp value
-    // dest is the accumulator (and holds the final result)
-
-    // move the whole value into tmp, setting the codition codes so we can
-    // muck with them later.
+    // Register 'hold' holds -1 if the value was negative, 1 otherwise. The
+    // ScratchRegister holds the remaining bits that have not been processed lr
+    // serves as a temporary location to store extracted bits into as well as
+    // holding the trial subtraction as a temp value dest is the accumulator
+    // (and holds the final result)
+    //
+    // Move the whole value into tmp, setting the codition codes so we can muck
+    // with them later.
     //
     // Note that we cannot use ScratchRegister in place of tmp here, as ma_and
-    // below on certain architectures move the mask into ScratchRegister
-    // before performing the bitwise and.
+    // below on certain architectures move the mask into ScratchRegister before
+    // performing the bitwise and.
     as_mov(tmp, O2Reg(src), SetCond);
     // Zero out the dest.
     ma_mov(Imm32(0), dest);
     // Set the hold appropriately.
     ma_mov(Imm32(1), hold);
     ma_mov(Imm32(-1), hold, NoSetCond, Signed);
     ma_rsb(Imm32(0), tmp, SetCond, Signed);
     // Begin the main loop.
     bind(&head);
 
     // Extract the bottom bits into lr.
     ma_and(Imm32(mask), tmp, secondScratchReg_);
     // Add those bits to the accumulator.
     ma_add(secondScratchReg_, dest, dest);
-    // Do a trial subtraction, this is the same operation as cmp, but we store the dest
+    // Do a trial subtraction, this is the same operation as cmp, but we store
+    // the dest.
     ma_sub(dest, Imm32(mask), secondScratchReg_, SetCond);
     // If (sum - C) > 0, store sum - C back into sum, thus performing a modulus.
     ma_mov(secondScratchReg_, dest, NoSetCond, NotSigned);
-    // Get rid of the bits that we extracted before, and set the condition codes
+    // Get rid of the bits that we extracted before, and set the condition codes.
     as_mov(tmp, lsr(tmp, shift), SetCond);
     // If the shift produced zero, finish, otherwise, continue in the loop.
     ma_b(&head, NonZero);
-    // Check the hold to see if we need to negate the result.  Hold can only be 1 or -1,
-    // so this will never set the 0 flag.
+    // Check the hold to see if we need to negate the result. Hold can only be
+    // 1 or -1, so this will never set the 0 flag.
     ma_cmp(hold, Imm32(0));
-    // If the hold was non-zero, negate the result to be in line with what JS wants
-    // this will set the condition codes if we try to negate
+    // If the hold was non-zero, negate the result to be in line with what JS
+    // wants this will set the condition codes if we try to negate.
     ma_rsb(Imm32(0), dest, SetCond, Signed);
-    // Since the Zero flag is not set by the compare, we can *only* set the Zero flag
-    // in the rsb, so Zero is set iff we negated zero (e.g. the result of the computation was -0.0).
-
+    // Since the Zero flag is not set by the compare, we can *only* set the Zero
+    // flag in the rsb, so Zero is set iff we negated zero (e.g. the result of
+    // the computation was -0.0).
 }
 
 void
 MacroAssemblerARM::ma_smod(Register num, Register div, Register dest)
 {
     as_sdiv(ScratchRegister, num, div);
     as_mls(dest, num, ScratchRegister, div);
 }
 
 void
 MacroAssemblerARM::ma_umod(Register num, Register div, Register dest)
 {
     as_udiv(ScratchRegister, num, div);
     as_mls(dest, num, ScratchRegister, div);
 }
 
-// division
+// Division
 void
 MacroAssemblerARM::ma_sdiv(Register num, Register div, Register dest, Condition cond)
 {
     as_sdiv(dest, num, div, cond);
 }
 
 void
 MacroAssemblerARM::ma_udiv(Register num, Register div, Register dest, Condition cond)
@@ -1125,144 +1119,169 @@ MacroAssemblerARM::ma_dataTransferN(Load
 }
 
 BufferOffset
 MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
                                     Register rn, Imm32 offset, Register rt,
                                     Index mode, Assembler::Condition cc)
 {
     int off = offset.value;
-    // we can encode this as a standard ldr... MAKE IT SO
+    // We can encode this as a standard ldr.
     if (size == 32 || (size == 8 && !IsSigned) ) {
         if (off < 4096 && off > -4096) {
             // This encodes as a single instruction, Emulating mode's behavior
             // in a multi-instruction sequence is not necessary.
             return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrOffImm(off)), cc);
         }
 
         // We cannot encode this offset in a a single ldr. For mode == index,
         // try to encode it as |add scratch, base, imm; ldr dest, [scratch, +offset]|.
         // This does not wark for mode == PreIndex or mode == PostIndex.
-        // PreIndex is simple, just do the add into the base register first, then do
-        // a PreIndex'ed load. PostIndexed loads can be tricky.  Normally, doing the load with
-        // an index of 0, then doing an add would work, but if the destination is the PC,
-        // you don't get to execute the instruction after the branch, which will lead to
-        // the base register not being updated correctly. Explicitly handle this case, without
+        // PreIndex is simple, just do the add into the base register first,
+        // then do a PreIndex'ed load. PostIndexed loads can be tricky.
+        // Normally, doing the load with an index of 0, then doing an add would
+        // work, but if the destination is the PC, you don't get to execute the
+        // instruction after the branch, which will lead to the base register
+        // not being updated correctly. Explicitly handle this case, without
         // doing anything fancy, then handle all of the other cases.
 
         // mode == Offset
         //  add   scratch, base, offset_hi
         //  ldr   dest, [scratch, +offset_lo]
         //
         // mode == PreIndex
         //  add   base, base, offset_hi
         //  ldr   dest, [base, +offset_lo]!
         //
         // mode == PostIndex, dest == pc
         //  ldr   scratch, [base]
         //  add   base, base, offset_hi
         //  add   base, base, offset_lo
         //  mov   dest, scratch
         // PostIndex with the pc as the destination needs to be handled
-        // specially, since in the code below, the write into 'dest'
-        // is going to alter the control flow, so the following instruction would
-        // never get emitted.
+        // specially, since in the code below, the write into 'dest' is going to
+        // alter the control flow, so the following instruction would never get
+        // emitted.
         //
         // mode == PostIndex, dest != pc
         //  ldr   dest, [base], offset_lo
         //  add   base, base, offset_hi
 
         if (rt == pc && mode == PostIndex && ls == IsLoad) {
             ma_mov(rn, ScratchRegister);
-            ma_alu(rn, offset, rn, op_add);
+            ma_alu(rn, offset, rn, OpAdd);
             return as_dtr(IsLoad, size, Offset, pc, DTRAddr(ScratchRegister, DtrOffImm(0)), cc);
         }
 
         int bottom = off & 0xfff;
         int neg_bottom = 0x1000 - bottom;
-        // For a regular offset, base == ScratchRegister does what we want.  Modify the
-        // scratch register, leaving the actual base unscathed.
+        // For a regular offset, base == ScratchRegister does what we want.
+        // Modify the scratch register, leaving the actual base unscathed.
         Register base = ScratchRegister;
-        // For the preindex case, we want to just re-use rn as the base register, so when
-        // the base register is updated *before* the load, rn is updated.
+        // For the preindex case, we want to just re-use rn as the base
+        // register, so when the base register is updated *before* the load, rn
+        // is updated.
         if (mode == PreIndex)
             base = rn;
         JS_ASSERT(mode != PostIndex);
-        // At this point, both off - bottom and off + neg_bottom will be reasonable-ish quantities.
+        // At this point, both off - bottom and off + neg_bottom will be
+        // reasonable-ish quantities.
         //
-        // Note a neg_bottom of 0x1000 can not be encoded as an immediate negative offset in the
-        // instruction and this occurs when bottom is zero, so this case is guarded against below.
+        // Note a neg_bottom of 0x1000 can not be encoded as an immediate
+        // negative offset in the instruction and this occurs when bottom is
+        // zero, so this case is guarded against below.
         if (off < 0) {
-            Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off
+            Operand2 sub_off = Imm8(-(off - bottom)); // sub_off = bottom - off
             if (!sub_off.invalid) {
-                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = off - bottom
+                // - sub_off = off - bottom
+                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc);
                 return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc);
             }
-            sub_off = Imm8(-(off+neg_bottom));// sub_off = -neg_bottom - off
+            // sub_off = -neg_bottom - off
+            sub_off = Imm8(-(off + neg_bottom));
             if (!sub_off.invalid && bottom != 0) {
-                JS_ASSERT(neg_bottom < 0x1000);  // Guarded against by: bottom != 0
-                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off
+                // Guarded against by: bottom != 0
+                JS_ASSERT(neg_bottom < 0x1000);
+                // - sub_off = neg_bottom + off
+                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc);
                 return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc);
             }
         } else {
-            Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom
+            // sub_off = off - bottom
+            Operand2 sub_off = Imm8(off - bottom);
             if (!sub_off.invalid) {
-                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); //  sub_off = off - bottom
+                //  sub_off = off - bottom
+                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc);
                 return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc);
             }
-            sub_off = Imm8(off+neg_bottom);// sub_off = neg_bottom + off
+            // sub_off = neg_bottom + off
+            sub_off = Imm8(off + neg_bottom);
             if (!sub_off.invalid && bottom != 0) {
-                JS_ASSERT(neg_bottom < 0x1000);  // Guarded against by: bottom != 0
-                as_add(ScratchRegister, rn, sub_off, NoSetCond,  cc); // sub_off = neg_bottom + off
+                // Guarded against by: bottom != 0
+                JS_ASSERT(neg_bottom < 0x1000);
+                // sub_off = neg_bottom + off
+                as_add(ScratchRegister, rn, sub_off, NoSetCond,  cc);
                 return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc);
             }
         }
         ma_mov(offset, ScratchRegister);
         return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(ScratchRegister, LSL, 0)));
     } else {
-        // should attempt to use the extended load/store instructions
+        // Should attempt to use the extended load/store instructions.
         if (off < 256 && off > -256)
             return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffImm(off)), cc);
 
-        // We cannot encode this offset in a single extldr.  Try to encode it as
+        // We cannot encode this offset in a single extldr. Try to encode it as
         // an add scratch, base, imm; extldr dest, [scratch, +offset].
         int bottom = off & 0xff;
         int neg_bottom = 0x100 - bottom;
-        // At this point, both off - bottom and off + neg_bottom will be reasonable-ish quantities.
+        // At this point, both off - bottom and off + neg_bottom will be
+        // reasonable-ish quantities.
         //
-        // Note a neg_bottom of 0x100 can not be encoded as an immediate negative offset in the
-        // instruction and this occurs when bottom is zero, so this case is guarded against below.
+        // Note a neg_bottom of 0x100 can not be encoded as an immediate
+        // negative offset in the instruction and this occurs when bottom is
+        // zero, so this case is guarded against below.
         if (off < 0) {
-            Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off
+            // sub_off = bottom - off
+            Operand2 sub_off = Imm8(-(off - bottom));
             if (!sub_off.invalid) {
-                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = off - bottom
+                // - sub_off = off - bottom
+                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc);
                 return as_extdtr(ls, size, IsSigned, Offset, rt,
                                  EDtrAddr(ScratchRegister, EDtrOffImm(bottom)),
                                  cc);
             }
-            sub_off = Imm8(-(off+neg_bottom));// sub_off = -neg_bottom - off
+            // sub_off = -neg_bottom - off
+            sub_off = Imm8(-(off + neg_bottom));
             if (!sub_off.invalid && bottom != 0) {
-                JS_ASSERT(neg_bottom < 0x100);  // Guarded against by: bottom != 0
-                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off
+                // Guarded against by: bottom != 0
+                JS_ASSERT(neg_bottom < 0x100);
+                // - sub_off = neg_bottom + off
+                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc);
                 return as_extdtr(ls, size, IsSigned, Offset, rt,
                                  EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)),
                                  cc);
             }
         } else {
-            Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom
+            // sub_off = off - bottom