merge mozilla-inbound to mozilla-central a=merge
author    Carsten "Tomcat" Book <cbook@mozilla.com>
date      Mon, 01 Jun 2015 15:00:24 +0200
changeset 246484 39c85ec2d64487a2a4226d53d9bde41716e78351
parent 246423 2fa4bb097f031ab2e90c3829f15706e1bc1fb650 (current diff)
parent 246483 c7b9b0f97e3d7a033c86494b380b60226ba24c28 (diff)
child 246485 2d07ec25d0278086ae1393a76bfbfd969edae450
child 246496 a29d6aa8a5a74c19d60fa679a47e2455241fb9b8
child 246526 87b49eca8ced8b363872baf1fb2d58facc612a49
child 246563 0af01d7abf3d5076b80c615fb1298d5558ba2afc
push id   28830
push user cbook@mozilla.com
push date Mon, 01 Jun 2015 13:02:44 +0000
reviewers merge
milestone 41.0a1
js/src/vm/Interpreter.cpp
testing/mochitest/mach_commands.py
toolkit/components/telemetry/TelemetryEnvironment.jsm
--- a/accessible/atk/AccessibleWrap.cpp
+++ b/accessible/atk/AccessibleWrap.cpp
@@ -795,17 +795,21 @@ getParentCB(AtkObject *aAtkObj)
     return aAtkObj->accessible_parent;
 
   AtkObject* atkParent = nullptr;
   if (AccessibleWrap* wrapper = GetAccessibleWrap(aAtkObj)) {
     Accessible* parent = wrapper->Parent();
     atkParent = parent ? AccessibleWrap::GetAtkObject(parent) : nullptr;
   } else if (ProxyAccessible* proxy = GetProxy(aAtkObj)) {
     ProxyAccessible* parent = proxy->Parent();
-    atkParent = parent ? GetWrapperFor(parent) : nullptr;
+    if (parent)
+      atkParent = GetWrapperFor(parent);
+    else
+      // Otherwise this should be the proxy for the tab's top level document.
+      atkParent = AccessibleWrap::GetAtkObject(proxy->OuterDocOfRemoteBrowser());
   }
 
   if (atkParent)
     atk_object_set_parent(aAtkObj, atkParent);
 
   return aAtkObj->accessible_parent;
 }
 
@@ -854,16 +858,19 @@ gint
 getIndexInParentCB(AtkObject* aAtkObj)
 {
   // We don't use Accessible::IndexInParent() because we don't include text
   // leaf nodes as children in ATK.
   if (ProxyAccessible* proxy = GetProxy(aAtkObj)) {
     if (ProxyAccessible* parent = proxy->Parent())
       return parent->IndexOfEmbeddedChild(proxy);
 
+    if (proxy->OuterDocOfRemoteBrowser())
+      return 0;
+
     return -1;
   }
 
     AccessibleWrap* accWrap = GetAccessibleWrap(aAtkObj);
     if (!accWrap) {
         return -1;
     }
 
--- a/accessible/ipc/ProxyAccessible.cpp
+++ b/accessible/ipc/ProxyAccessible.cpp
@@ -1,16 +1,20 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "ProxyAccessible.h"
 #include "DocAccessibleParent.h"
+#include "DocAccessible.h"
+#include "mozilla/a11y/DocManager.h"
+#include "mozilla/dom/Element.h"
+#include "mozilla/dom/TabParent.h"
 #include "mozilla/unused.h"
 #include "mozilla/a11y/Platform.h"
 #include "RelationType.h"
 #include "mozilla/a11y/Role.h"
 
 namespace mozilla {
 namespace a11y {
 
@@ -958,10 +962,24 @@ ProxyAccessible::MimeType(nsString aMime
 
 void
 ProxyAccessible::URLDocTypeMimeType(nsString& aURL, nsString& aDocType,
                                     nsString& aMimeType)
 {
   unused << mDoc->SendURLDocTypeMimeType(mID, &aURL, &aDocType, &aMimeType);
 }
 
+Accessible*
+ProxyAccessible::OuterDocOfRemoteBrowser() const
+{
+  auto tab = static_cast<dom::TabParent*>(mDoc->Manager());
+  dom::Element* frame = tab->GetOwnerElement();
+  NS_ASSERTION(frame, "why isn't the tab in a frame!");
+  if (!frame)
+    return nullptr;
+
+  DocAccessible* chromeDoc = GetExistingDocAccessible(frame->OwnerDoc());
+  NS_ASSERTION(chromeDoc, "accessible tab in a non-accessible chrome document");
+
+  return chromeDoc ? chromeDoc->GetAccessible(frame) : nullptr;
 }
 }
+}
--- a/accessible/ipc/ProxyAccessible.h
+++ b/accessible/ipc/ProxyAccessible.h
@@ -13,16 +13,17 @@
 #include "nsString.h"
 #include "nsTArray.h"
 #include "nsRect.h"
 #include "Accessible.h"
 
 namespace mozilla {
 namespace a11y {
 
+class Accessible;
 class Attribute;
 class DocAccessibleParent;
 enum class RelationType;
 
 class ProxyAccessible
 {
 public:
 
@@ -60,16 +61,18 @@ public:
   void RemoveChild(ProxyAccessible* aChild)
     { mChildren.RemoveElement(aChild); }
 
   /**
    * Return the proxy for the parent of the wrapped accessible.
    */
   ProxyAccessible* Parent() const { return mParent; }
 
+  Accessible* OuterDocOfRemoteBrowser() const;
+
   /**
    * Get the role of the accessible we're proxying.
    */
   role Role() const { return mRole; }
 
   /*
    * Return the states for the proxied accessible.
    */
--- a/accessible/mac/AccessibleWrap.h
+++ b/accessible/mac/AccessibleWrap.h
@@ -54,20 +54,17 @@ public: // construction, destruction
   virtual nsresult HandleAccEvent(AccEvent* aEvent) override;
 
   /**
    * Ignored means that the accessible might still have children, but is not
   * displayed to the user. It also has no native accessible object represented
    * for it.
    */
   bool IsIgnored();
-  
-  inline bool HasPopup () 
-    { return (NativeState() & mozilla::a11y::states::HASPOPUP); }
-  
+
   /**
   * Returns all of this accessible's children, adhering to "flat" accessibles by
    * not returning their children.
    */
   void GetUnignoredChildren(nsTArray<Accessible*>* aChildrenArray);
   Accessible* GetUnignoredParent() const;
 
 protected:
@@ -103,12 +100,14 @@ private:
   /**
    * We have created our native. This does not mean there is one.
    * This can never go back to false.
   * We need it because checking whether we need a native object costs time.
    */
   bool mNativeInited;  
 };
 
+Class GetTypeFromRole(roles::Role aRole);
+
 } // namespace a11y
 } // namespace mozilla
 
 #endif
--- a/accessible/mac/AccessibleWrap.mm
+++ b/accessible/mac/AccessibleWrap.mm
@@ -10,16 +10,17 @@
 #include "nsAccUtils.h"
 #include "Role.h"
 
 #import "mozAccessible.h"
 #import "mozActionElements.h"
 #import "mozHTMLAccessible.h"
 #import "mozTextAccessible.h"
 
+using namespace mozilla;
 using namespace mozilla::a11y;
 
 AccessibleWrap::
   AccessibleWrap(nsIContent* aContent, DocAccessible* aDoc) :
   Accessible(aContent, aDoc), mNativeObject(nil),  
   mNativeInited(false)
 {
 }
@@ -28,18 +29,20 @@ AccessibleWrap::~AccessibleWrap()
 {
 }
 
 mozAccessible* 
 AccessibleWrap::GetNativeObject()
 {
   NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
   
-  if (!mNativeInited && !mNativeObject && !IsDefunct() && !AncestorIsFlat())
-    mNativeObject = [[GetNativeType() alloc] initWithAccessible:this];
+  if (!mNativeInited && !mNativeObject && !IsDefunct() && !AncestorIsFlat()) {
+    uintptr_t accWrap = reinterpret_cast<uintptr_t>(this);
+    mNativeObject = [[GetNativeType() alloc] initWithAccessible:accWrap];
+  }
   
   mNativeInited = true;
   
   return mNativeObject;
   
   NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
 }
 
@@ -54,61 +57,17 @@ AccessibleWrap::GetNativeInterface(void*
 Class
 AccessibleWrap::GetNativeType () 
 {
   NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
 
   if (IsXULTabpanels())
     return [mozPaneAccessible class];
 
-  roles::Role role = Role();
-  switch (role) {
-    case roles::PUSHBUTTON:
-    case roles::SPLITBUTTON:
-    case roles::TOGGLE_BUTTON:
-    {
-      // if this button may show a popup, let's make it of the popupbutton type.
-      return HasPopup() ? [mozPopupButtonAccessible class] : 
-             [mozButtonAccessible class];
-    }
-    
-    case roles::PAGETAB:
-      return [mozButtonAccessible class];
-
-    case roles::CHECKBUTTON:
-      return [mozCheckboxAccessible class];
-      
-    case roles::HEADING:
-      return [mozHeadingAccessible class];
-
-    case roles::PAGETABLIST:
-      return [mozTabsAccessible class];
-
-    case roles::ENTRY:
-    case roles::STATICTEXT:
-    case roles::CAPTION:
-    case roles::ACCEL_LABEL:
-    case roles::PASSWORD_TEXT:
-      // normal textfield (static or editable)
-      return [mozTextAccessible class];
-
-    case roles::TEXT_LEAF:
-      return [mozTextLeafAccessible class];
-
-    case roles::LINK:
-      return [mozLinkAccessible class];
-
-    case roles::COMBOBOX:
-      return [mozPopupButtonAccessible class];
-      
-    default:
-      return [mozAccessible class];
-  }
-  
-  return nil;
+  return GetTypeFromRole(Role());
 
   NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
 }
 
 // this method is very important. it is fired when an accessible object "dies". after this point
 // the object might still be around (because some 3rd party still has a ref to it), but it is
 // in fact 'dead'.
 void
@@ -269,8 +228,55 @@ AccessibleWrap::AncestorIsFlat()
     if (nsAccUtils::MustPrune(parent))
       return true;
 
     parent = parent->Parent();
   }
   // no parent was flat
   return false;
 }
+
+Class
+a11y::GetTypeFromRole(roles::Role aRole) 
+{
+  NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
+
+  switch (aRole) {
+    case roles::COMBOBOX:
+    case roles::PUSHBUTTON:
+    case roles::SPLITBUTTON:
+    case roles::TOGGLE_BUTTON:
+      return [mozButtonAccessible class];
+
+    case roles::PAGETAB:
+      return [mozButtonAccessible class];
+
+    case roles::CHECKBUTTON:
+      return [mozCheckboxAccessible class];
+      
+    case roles::HEADING:
+      return [mozHeadingAccessible class];
+
+    case roles::PAGETABLIST:
+      return [mozTabsAccessible class];
+
+    case roles::ENTRY:
+    case roles::STATICTEXT:
+    case roles::CAPTION:
+    case roles::ACCEL_LABEL:
+    case roles::PASSWORD_TEXT:
+      // normal textfield (static or editable)
+      return [mozTextAccessible class];
+
+    case roles::TEXT_LEAF:
+      return [mozTextLeafAccessible class];
+
+    case roles::LINK:
+      return [mozLinkAccessible class];
+      
+    default:
+      return [mozAccessible class];
+  }
+  
+  return nil;
+
+  NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
+}
--- a/accessible/mac/Platform.mm
+++ b/accessible/mac/Platform.mm
@@ -2,16 +2,17 @@
 /* vim: set ts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #import <Cocoa/Cocoa.h>
 
 #include "Platform.h"
+#include "ProxyAccessible.h"
 
 #include "nsAppShell.h"
 
 namespace mozilla {
 namespace a11y {
 
 // Mac a11y whitelisting
 static bool sA11yShouldBeEnabled = false;
@@ -29,23 +30,33 @@ PlatformInit()
 }
 
 void
 PlatformShutdown()
 {
 }
 
 void
-ProxyCreated(ProxyAccessible*, uint32_t)
+ProxyCreated(ProxyAccessible* aProxy, uint32_t)
 {
+  // Pass in dummy state for now as retrieving proxy state requires IPC.
+  Class type = GetTypeFromRole(aProxy->Role());
+  uintptr_t accWrap = reinterpret_cast<uintptr_t>(aProxy) | IS_PROXY;
+  mozAccessible* mozWrapper = [[type alloc] initWithAccessible:accWrap];
+  aProxy->SetWrapper(reinterpret_cast<uintptr_t>(mozWrapper));
 }
 
 void
-ProxyDestroyed(ProxyAccessible*)
+ProxyDestroyed(ProxyAccessible* aProxy)
 {
+  mozAccessible* wrapper =
+    reinterpret_cast<mozAccessible*>(aProxy->GetWrapper());
+  [wrapper expire];
+  [wrapper release];
+  aProxy->SetWrapper(0);
 }
 
 void
 ProxyEvent(ProxyAccessible*, uint32_t)
 {
 }
 
 void
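
A self-contained miniature of the create/destroy pairing above (stand-in C++ types in place of the Objective-C wrapper; illustrative only, not the real classes): creation stores the native wrapper on the proxy as a uintptr_t, and teardown reads it back, expires it, and clears the slot.

    #include <cstdint>

    struct Wrapper { bool expired = false; };   // stand-in for mozAccessible

    struct Proxy {                              // stand-in for ProxyAccessible
      uintptr_t mWrapper = 0;
      void SetWrapper(uintptr_t aBits) { mWrapper = aBits; }
      uintptr_t GetWrapper() const { return mWrapper; }
    };

    void Created(Proxy* aProxy) {
      Wrapper* w = new Wrapper();                          // [[type alloc] init...]
      aProxy->SetWrapper(reinterpret_cast<uintptr_t>(w));
    }

    void Destroyed(Proxy* aProxy) {
      Wrapper* w = reinterpret_cast<Wrapper*>(aProxy->GetWrapper());
      w->expired = true;                                   // [wrapper expire]
      delete w;                                            // [wrapper release]
      aProxy->SetWrapper(0);                               // no dangling pointer
    }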
--- a/accessible/mac/moz.build
+++ b/accessible/mac/moz.build
@@ -25,16 +25,17 @@ UNIFIED_SOURCES += [
     'Platform.mm',
     'RootAccessibleWrap.mm',
 ]
 
 LOCAL_INCLUDES += [
     '/accessible/base',
     '/accessible/generic',
     '/accessible/html',
+    '/accessible/ipc',
     '/accessible/xul',
     '/layout/generic',
     '/layout/xul',
     '/widget',
     '/widget/cocoa',
 ]
 
 FINAL_LIBRARY = 'xul'
--- a/accessible/mac/mozAccessible.h
+++ b/accessible/mac/mozAccessible.h
@@ -52,21 +52,24 @@ static const uintptr_t IS_PROXY = 1;
   mozAccessible* mParent;
 
   /**
    * The role of our gecko accessible.
    */
   mozilla::a11y::role        mRole;
 }
 
-// return the Accessible for this mozAccessible.
-- (mozilla::a11y::AccessibleWrap*) getGeckoAccessible;
+// return the Accessible for this mozAccessible if it exists.
+- (mozilla::a11y::AccessibleWrap*)getGeckoAccessible;
+
+// return the ProxyAccessible for this mozAccessible if it exists.
+- (mozilla::a11y::ProxyAccessible*)getProxyAccessible;
 
 // inits with the gecko owner.
-- (id)initWithAccessible:(mozilla::a11y::AccessibleWrap*)geckoParent;
+- (id)initWithAccessible:(uintptr_t)aGeckoObj;
 
 // our accessible parent (AXParent)
 - (id <mozAccessible>)parent;
 
 // a lazy cache of our accessible children (AXChildren). updated
 - (NSArray*)children;
 
 // returns the size of this accessible.
--- a/accessible/mac/mozAccessible.mm
+++ b/accessible/mac/mozAccessible.mm
@@ -55,25 +55,28 @@ GetClosestInterestingAccessible(id anObj
 
   NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
 }
 
 #pragma mark -
 
 @implementation mozAccessible
  
-- (id)initWithAccessible:(AccessibleWrap*)geckoAccessible
+- (id)initWithAccessible:(uintptr_t)aGeckoAccessible
 {
   NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
 
   if ((self = [super init])) {
-    mGeckoAccessible = reinterpret_cast<uintptr_t>(geckoAccessible);
-    mRole = geckoAccessible->Role();
+    mGeckoAccessible = aGeckoAccessible;
+    if (aGeckoAccessible & IS_PROXY)
+      mRole = [self getProxyAccessible]->Role();
+    else
+      mRole = [self getGeckoAccessible]->Role();
   }
-   
+
   return self;
 
   NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
 }
 
 - (void)dealloc
 {
   NS_OBJC_BEGIN_TRY_ABORT_BLOCK;
@@ -87,16 +90,25 @@ GetClosestInterestingAccessible(id anObj
 - (mozilla::a11y::AccessibleWrap*)getGeckoAccessible
 {
   // Check if mGeckoAccessible points at a proxy
   if (mGeckoAccessible & IS_PROXY)
     return nil;
 
   return reinterpret_cast<AccessibleWrap*>(mGeckoAccessible);
 }
+
+- (mozilla::a11y::ProxyAccessible*)getProxyAccessible
+{
+  // Check if mGeckoAccessible points at a proxy
+  if (!(mGeckoAccessible & IS_PROXY))
+    return nil;
+
+  return reinterpret_cast<ProxyAccessible*>(mGeckoAccessible & ~IS_PROXY);
+}
  
 #pragma mark -
 
 - (BOOL)accessibilityIsIgnored
 {
   NS_OBJC_BEGIN_TRY_ABORT_BLOCK_RETURN;
 
   // unknown (either unimplemented, or irrelevant) elements are marked as ignored
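
With this change mGeckoAccessible is a tagged pointer: bit 0 (IS_PROXY) records whether the untagged bits point at an AccessibleWrap or a ProxyAccessible, which is why getProxyAccessible masks the bit off before casting. A self-contained sketch of the scheme with stand-in types (not the real classes):

    #include <cassert>
    #include <cstdint>

    struct AccessibleWrap {};
    struct ProxyAccessible {};

    static const uintptr_t IS_PROXY = 1;

    // Tagging works because both pointee types are at least 2-byte aligned,
    // so bit 0 of a valid pointer is always zero and is free to carry a flag.
    uintptr_t TagProxy(ProxyAccessible* aProxy)
    {
      uintptr_t bits = reinterpret_cast<uintptr_t>(aProxy);
      assert((bits & IS_PROXY) == 0);   // alignment guarantees the bit is free
      return bits | IS_PROXY;
    }

    ProxyAccessible* AsProxy(uintptr_t aBits)
    {
      // Strip the tag before dereferencing, as getProxyAccessible does.
      return (aBits & IS_PROXY)
               ? reinterpret_cast<ProxyAccessible*>(aBits & ~IS_PROXY)
               : nullptr;               // untagged bits hold an AccessibleWrap*
    }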
--- a/accessible/mac/mozActionElements.h
+++ b/accessible/mac/mozActionElements.h
@@ -4,29 +4,28 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #import <Cocoa/Cocoa.h>
 #import "mozAccessible.h"
 
 /* Simple subclasses for things like checkboxes, buttons, etc. */
 
 @interface mozButtonAccessible : mozAccessible
+ {
+ }
+- (BOOL)hasPopup;
 - (void)click;
 - (BOOL)isTab;
 @end
 
 @interface mozCheckboxAccessible : mozButtonAccessible
 // returns one of the constants defined in CheckboxValue
 - (int)isChecked;
 @end
 
-/* Used for buttons that may pop up a menu. */
-@interface mozPopupButtonAccessible : mozButtonAccessible
-@end
-
 /* Class for tabs - not individual tabs */
 @interface mozTabsAccessible : mozAccessible
 {
   NSMutableArray* mTabs;
 }
 -(id)tabs;
 @end
 
--- a/accessible/mac/mozActionElements.mm
+++ b/accessible/mac/mozActionElements.mm
@@ -37,84 +37,102 @@ enum CheckboxValue {
                                                   NSAccessibilitySizeAttribute, // required
                                                   NSAccessibilityWindowAttribute, // required
                                                   NSAccessibilityPositionAttribute, // required
                                                   NSAccessibilityTopLevelUIElementAttribute, // required
                                                   NSAccessibilityHelpAttribute,
                                                   NSAccessibilityEnabledAttribute, // required
                                                   NSAccessibilityFocusedAttribute, // required
                                                   NSAccessibilityTitleAttribute, // required
+                                                  NSAccessibilityChildrenAttribute,
                                                   NSAccessibilityDescriptionAttribute,
 #if DEBUG
                                                   @"AXMozDescription",
 #endif
                                                   nil];
   }
   return attributes;
 
   NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
 }
 
 - (id)accessibilityAttributeValue:(NSString *)attribute
 {
   NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
 
-  if ([attribute isEqualToString:NSAccessibilityChildrenAttribute])
+  if ([attribute isEqualToString:NSAccessibilityChildrenAttribute]) {
+    if ([self hasPopup])
+      return [self children];
     return nil;
+  }
+
   if ([attribute isEqualToString:NSAccessibilityRoleDescriptionAttribute]) {
     if ([self isTab])
       return utils::LocalizedString(NS_LITERAL_STRING("tab"));
-    
+
     return NSAccessibilityRoleDescription([self role], nil);
   }
-  
+
   return [super accessibilityAttributeValue:attribute];
 
   NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
 }
 
 - (BOOL)accessibilityIsIgnored
 {
   return ![self getGeckoAccessible];
 }
 
 - (NSArray*)accessibilityActionNames
 {
   NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
 
-  if ([self isEnabled])
+  if ([self isEnabled]) {
+    if ([self hasPopup])
+      return [NSArray arrayWithObjects:NSAccessibilityPressAction,
+              NSAccessibilityShowMenuAction,
+              nil];
     return [NSArray arrayWithObject:NSAccessibilityPressAction];
-    
+  }
   return nil;
 
   NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
 }
 
-- (NSString*)accessibilityActionDescription:(NSString*)action 
+- (NSString*)accessibilityActionDescription:(NSString*)action
 {
   NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
 
   if ([action isEqualToString:NSAccessibilityPressAction]) {
     if ([self isTab])
       return utils::LocalizedString(NS_LITERAL_STRING("switch"));
-  
+
     return @"press button"; // XXX: localize this later?
   }
-  
+
+  if ([self hasPopup]) {
+    if ([action isEqualToString:NSAccessibilityShowMenuAction])
+      return @"show menu";
+  }
+
   return nil;
 
   NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
 }
 
-- (void)accessibilityPerformAction:(NSString*)action 
+- (void)accessibilityPerformAction:(NSString*)action
 {
   NS_OBJC_BEGIN_TRY_ABORT_BLOCK;
 
-  if ([action isEqualToString:NSAccessibilityPressAction])
+  if ([self isEnabled] && [action isEqualToString:NSAccessibilityPressAction]) {
+    // TODO: this should bring up the menu, but currently doesn't.
+    //       once msaa and atk have merged better, they will implement
+    //       the action needed to show the menu.
     [self click];
+  }
 
   NS_OBJC_END_TRY_ABORT_BLOCK;
 }
 
 - (void)click
 {
   // both buttons and checkboxes have only one action. we should really stop using arbitrary
   // arrays with actions, and define constants for these actions.
@@ -122,16 +140,22 @@ enum CheckboxValue {
 }
 
 - (BOOL)isTab
 {
   AccessibleWrap* accWrap = [self getGeckoAccessible];
   return (accWrap && (accWrap->Role() == roles::PAGETAB));
 }
 
+- (BOOL)hasPopup
+{
+  AccessibleWrap* accWrap = [self getGeckoAccessible];
+  return accWrap && (accWrap->NativeState() & mozilla::a11y::states::HASPOPUP);
+}
+
 @end
 
 @implementation mozCheckboxAccessible
 
 - (NSString*)accessibilityActionDescription:(NSString*)action 
 {
   NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
 
@@ -165,101 +189,16 @@ enum CheckboxValue {
 
   return [NSNumber numberWithInt:[self isChecked]];
 
   NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
 }
 
 @end
 
-@implementation mozPopupButtonAccessible
-
-- (NSArray *)accessibilityAttributeNames
-{
-  NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
-
-  static NSArray *attributes = nil;
-  
-  if (!attributes) {
-    attributes = [[NSArray alloc] initWithObjects:NSAccessibilityParentAttribute, // required
-                                                  NSAccessibilityPositionAttribute, // required
-                                                  NSAccessibilityRoleAttribute, // required
-                                                  NSAccessibilitySizeAttribute, // required
-                                                  NSAccessibilityWindowAttribute, // required
-                                                  NSAccessibilityTopLevelUIElementAttribute, // required
-                                                  NSAccessibilityHelpAttribute,
-                                                  NSAccessibilityEnabledAttribute, // required
-                                                  NSAccessibilityFocusedAttribute, // required
-                                                  NSAccessibilityTitleAttribute, // required for popupmenus, and for menubuttons with a title
-                                                  NSAccessibilityChildrenAttribute, // required
-                                                  NSAccessibilityDescriptionAttribute, // required if it has no title attr
-#if DEBUG
-                                                  @"AXMozDescription",
-#endif
-                                                  nil];
-  }
-  return attributes;
-
-  NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
-}
-
-- (id)accessibilityAttributeValue:(NSString *)attribute
-{
-  NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
-
-  if ([attribute isEqualToString:NSAccessibilityChildrenAttribute]) {
-    return [super children];
-  }
-  return [super accessibilityAttributeValue:attribute];
-
-  NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
-}
-
-- (NSArray *)accessibilityActionNames
-{
-  NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
-
-  if ([self isEnabled]) {
-    return [NSArray arrayWithObjects:NSAccessibilityPressAction,
-                                     NSAccessibilityShowMenuAction,
-                                     nil];
-  }
-  return nil;
-
-  NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
-}
-
-- (NSString *)accessibilityActionDescription:(NSString *)action
-{
-  NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NIL;
-
-  if ([action isEqualToString:NSAccessibilityShowMenuAction])
-    return @"show menu";
-  return [super accessibilityActionDescription:action];
-
-  NS_OBJC_END_TRY_ABORT_BLOCK_NIL;
-}
-
-- (void)accessibilityPerformAction:(NSString *)action
-{
-  NS_OBJC_BEGIN_TRY_ABORT_BLOCK;
-
-  // both the ShowMenu and Click action do the same thing.
-  if ([self isEnabled]) {
-    // TODO: this should bring up the menu, but currently doesn't.
-    //       once msaa and atk have merged better, they will implement
-    //       the action needed to show the menu.
-    [super click];
-  }
-
-  NS_OBJC_END_TRY_ABORT_BLOCK;
-}
-
-@end
-
 @implementation mozTabsAccessible
 
 - (void)dealloc
 {
   [mTabs release];
 
   [super dealloc];
 }
--- a/build/mach_bootstrap.py
+++ b/build/mach_bootstrap.py
@@ -32,16 +32,17 @@ SEARCH_PATHS = [
     'python/blessings',
     'python/compare-locales',
     'python/configobj',
     'python/jsmin',
     'python/psutil',
     'python/which',
     'python/pystache',
     'python/pyyaml/lib',
+    'python/requests',
     'build',
     'build/pymake',
     'config',
     'dom/bindings',
     'dom/bindings/parser',
     'layout/tools/reftest',
     'other-licenses/ply',
     'xpcom/idl-parser',
--- a/build/mobile/sutagent/android/fencp/FileCursor.java
+++ b/build/mobile/sutagent/android/fencp/FileCursor.java
@@ -60,17 +60,16 @@ public class FileCursor extends Abstract
                 if (lLength != 0) {
                     nCount = (int) (lFileSize / BUFSIZE);
                     if ((lFileSize % BUFSIZE) > 0)
                         nCount++;
                 } else {
                     nCount = 1;
                 }
 
-                mRowIdColumnIndex = 0;
             }
         }
     }
 
     public String getColumnName (int columnIndex) {
         return theColumns[columnIndex];
     }
 
--- a/build/mobile/sutagent/android/ffxcp/FileCursor.java
+++ b/build/mobile/sutagent/android/ffxcp/FileCursor.java
@@ -61,17 +61,16 @@ public class FileCursor extends Abstract
                 if (lLength != 0) {
                     nCount = (int) (lFileSize / BUFSIZE);
                     if ((lFileSize % BUFSIZE) > 0)
                         nCount++;
                 } else {
                     nCount = 1;
                 }
 
-                mRowIdColumnIndex = 0;
             }
         }
     }
 
     public String getColumnName (int columnIndex) {
         return theColumns[columnIndex];
     }
 
--- a/dom/base/nsJSEnvironment.cpp
+++ b/dom/base/nsJSEnvironment.cpp
@@ -2223,17 +2223,17 @@ DOMGCSliceCallback(JSRuntime *aRt, JS::G
     }
 
     case JS::GC_CYCLE_END: {
       PRTime delta = GetCollectionTimeDelta();
 
       if (sPostGCEventsToConsole) {
         NS_NAMED_LITERAL_STRING(kFmt, "GC(T+%.1f) ");
         nsString prefix, gcstats;
-        gcstats.Adopt(aDesc.formatMessage(aRt));
+        gcstats.Adopt(aDesc.formatSummaryMessage(aRt));
         prefix.Adopt(nsTextFormatter::smprintf(kFmt.get(),
                                              double(delta) / PR_USEC_PER_SEC));
         nsString msg = prefix + gcstats;
         nsCOMPtr<nsIConsoleService> cs = do_GetService(NS_CONSOLESERVICE_CONTRACTID);
         if (cs) {
           cs->LogStringMessage(msg.get());
         }
       }
@@ -2299,16 +2299,25 @@ DOMGCSliceCallback(JSRuntime *aRt, JS::G
                                                  NS_INTERSLICE_GC_DELAY,
                                                  nsITimer::TYPE_ONE_SHOT);
       }
 
       if (ShouldTriggerCC(nsCycleCollector_suspectedCount())) {
         nsCycleCollector_dispatchDeferredDeletion();
       }
 
+      if (sPostGCEventsToConsole) {
+        nsString gcstats;
+        gcstats.Adopt(aDesc.formatSliceMessage(aRt));
+        nsCOMPtr<nsIConsoleService> cs = do_GetService(NS_CONSOLESERVICE_CONTRACTID);
+        if (cs) {
+          cs->LogStringMessage(gcstats.get());
+        }
+      }
+
       break;
 
     default:
       MOZ_CRASH("Unexpected GCProgress value");
   }
 
   if (sPrevGCSliceCallback) {
     (*sPrevGCSliceCallback)(aRt, aProgress, aDesc);
--- a/dom/base/nsXMLHttpRequest.cpp
+++ b/dom/base/nsXMLHttpRequest.cpp
@@ -5,16 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "nsXMLHttpRequest.h"
 
 #ifndef XP_WIN
 #include <unistd.h>
 #endif
 #include "mozilla/ArrayUtils.h"
+#include "mozilla/CheckedInt.h"
 #include "mozilla/dom/BlobSet.h"
 #include "mozilla/dom/File.h"
 #include "mozilla/dom/XMLHttpRequestUploadBinding.h"
 #include "mozilla/EventDispatcher.h"
 #include "mozilla/EventListenerManager.h"
 #include "mozilla/LoadInfo.h"
 #include "mozilla/MemoryReporting.h"
 #include "nsIDOMDocument.h"
@@ -3988,36 +3989,40 @@ ArrayBufferBuilder::setCapacity(uint32_t
 }
 
 bool
 ArrayBufferBuilder::append(const uint8_t *aNewData, uint32_t aDataLen,
                            uint32_t aMaxGrowth)
 {
   MOZ_ASSERT(!mMapPtr);
 
+  CheckedUint32 neededCapacity = mLength;
+  neededCapacity += aDataLen;
+  if (!neededCapacity.isValid()) {
+    return false;
+  }
   if (mLength + aDataLen > mCapacity) {
-    uint32_t newcap;
+    CheckedUint32 newcap = mCapacity;
     // Double while under aMaxGrowth or if not specified.
     if (!aMaxGrowth || mCapacity < aMaxGrowth) {
-      newcap = mCapacity * 2;
+      newcap *= 2;
     } else {
-      newcap = mCapacity + aMaxGrowth;
+      newcap += aMaxGrowth;
+    }
+
+    if (!newcap.isValid()) {
+      return false;
     }
 
     // But make sure there's always enough to satisfy our request.
-    if (newcap < mLength + aDataLen) {
-      newcap = mLength + aDataLen;
+    if (newcap.value() < neededCapacity.value()) {
+      newcap = neededCapacity;
     }
 
-    // Did we overflow?
-    if (newcap < mCapacity) {
-      return false;
-    }
-
-    if (!setCapacity(newcap)) {
+    if (!setCapacity(newcap.value())) {
       return false;
     }
   }
 
   // Assert that the region isn't overlapping so we can memcpy.
   MOZ_ASSERT(!areOverlappingRegions(aNewData, aDataLen, mDataPtr + mLength,
                                     aDataLen));
 
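CheckedUint32 (from mozilla/CheckedInt.h) propagates an "invalid" flag through each arithmetic step, so a single isValid() test replaces the old hand-rolled wraparound check (newcap < mCapacity), which missed the overflow in mLength + aDataLen itself. The same arithmetic with the checks written out by hand, as a standalone sketch of what the patched code computes:

    #include <cstdint>
    #include <limits>

    // Standalone model of the capacity growth in ArrayBufferBuilder::append().
    // Returns false on overflow, mirroring what CheckedUint32::isValid() guards.
    bool GrowCapacity(uint32_t aLength, uint32_t aDataLen, uint32_t aCapacity,
                      uint32_t aMaxGrowth, uint32_t* aNewCap)
    {
      const uint32_t kMax = std::numeric_limits<uint32_t>::max();

      if (aDataLen > kMax - aLength)        // mLength + aDataLen overflows
        return false;
      uint32_t needed = aLength + aDataLen;
      if (needed <= aCapacity) {
        *aNewCap = aCapacity;               // no growth required
        return true;
      }

      uint32_t newcap;
      if (!aMaxGrowth || aCapacity < aMaxGrowth) {
        if (aCapacity > kMax / 2)           // doubling overflows
          return false;
        newcap = aCapacity * 2;
      } else {
        if (aMaxGrowth > kMax - aCapacity)  // linear growth overflows
          return false;
        newcap = aCapacity + aMaxGrowth;
      }

      if (newcap < needed)                  // always satisfy the request
        newcap = needed;
      *aNewCap = newcap;
      return true;
    }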
--- a/dom/canvas/CanvasRenderingContext2D.cpp
+++ b/dom/canvas/CanvasRenderingContext2D.cpp
@@ -3449,27 +3449,31 @@ struct MOZ_STACK_CLASS CanvasBidiProcess
         }
 
         if (!glyphs[i].GetGlyphCount()) {
           continue;
         }
 
         const gfxTextRun::DetailedGlyph *d = mTextRun->GetDetailedGlyphs(i);
 
-        if (glyphs[i].IsMissing() && d->mAdvance > 0) {
-          newGlyph.mIndex = 0;
-          if (rtl) {
-            inlinePos = baselineOriginInline - advanceSum -
-              d->mAdvance * devUnitsPerAppUnit;
-          } else {
-            inlinePos = baselineOriginInline + advanceSum;
+        if (glyphs[i].IsMissing()) {
+          if (d->mAdvance > 0) {
+            // Perhaps we should render a hexbox here, but for now
+            // we just draw the font's .notdef glyph. (See bug 808288.)
+            newGlyph.mIndex = 0;
+            if (rtl) {
+              inlinePos = baselineOriginInline - advanceSum -
+                d->mAdvance * devUnitsPerAppUnit;
+            } else {
+              inlinePos = baselineOriginInline + advanceSum;
+            }
+            blockPos = baselineOriginBlock;
+            advanceSum += d->mAdvance * devUnitsPerAppUnit;
+            glyphBuf.push_back(newGlyph);
           }
-          blockPos = baselineOriginBlock;
-          advanceSum += d->mAdvance * devUnitsPerAppUnit;
-          glyphBuf.push_back(newGlyph);
           continue;
         }
 
         for (uint32_t c = 0; c < glyphs[i].GetGlyphCount(); c++, d++) {
           newGlyph.mIndex = d->mGlyphID;
           if (rtl) {
             inlinePos = baselineOriginInline - advanceSum -
               d->mAdvance * devUnitsPerAppUnit;
--- a/dom/smil/nsSMILAnimationFunction.cpp
+++ b/dom/smil/nsSMILAnimationFunction.cpp
@@ -1,16 +1,18 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
+#include "nsSMILAnimationFunction.h"
+
 #include "mozilla/dom/SVGAnimationElement.h"
-#include "nsSMILAnimationFunction.h"
+#include "mozilla/Move.h"
 #include "nsISMILAttr.h"
 #include "nsSMILParserUtils.h"
 #include "nsSMILNullType.h"
 #include "nsSMILTimedElement.h"
 #include "nsAttrValueInlines.h"
 #include "nsGkAtoms.h"
 #include "nsCOMPtr.h"
 #include "nsCOMArray.h"
@@ -262,19 +264,17 @@ nsSMILAnimationFunction::ComposeResult(c
       return;
 
     if (NS_FAILED(AccumulateResult(values, result)))
       return;
   }
 
   // If additive animation isn't required or isn't supported, set the value.
   if (!isAdditive || NS_FAILED(aResult.SandwichAdd(result))) {
-    aResult.Swap(result);
-    // Note: The old value of aResult is now in |result|, and it will get
-    // cleaned up when |result| goes out of scope, when this function returns.
+    aResult = Move(result);
   }
 }
 
 int8_t
 nsSMILAnimationFunction::CompareTo(const nsSMILAnimationFunction* aOther) const
 {
   NS_ENSURE_TRUE(aOther, 0);
 
--- a/dom/smil/nsSMILCSSProperty.cpp
+++ b/dom/smil/nsSMILCSSProperty.cpp
@@ -2,21 +2,23 @@
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 /* representation of a SMIL-animatable CSS property on an element */
 
 #include "nsSMILCSSProperty.h"
+
+#include "mozilla/dom/Element.h"
+#include "mozilla/Move.h"
 #include "nsSMILCSSValueType.h"
 #include "nsSMILValue.h"
 #include "nsComputedDOMStyle.h"
 #include "nsCSSProps.h"
-#include "mozilla/dom/Element.h"
 #include "nsIDOMElement.h"
 #include "nsIDocument.h"
 
 using namespace mozilla::dom;
 
 // Helper function
 static bool
 GetCSSComputedValue(Element* aElem,
@@ -76,17 +78,17 @@ nsSMILCSSProperty::GetBaseValue() const
     //
     // Also, although we can look up the base value of the display property,
     // doing so involves clearing and resetting the property which can cause
     // frames to be recreated which we'd like to avoid.
     //
     // In either case, just return a dummy value (initialized with the right
     // type, so as not to indicate failure).
     nsSMILValue tmpVal(&nsSMILCSSValueType::sSingleton);
-    baseValue.Swap(tmpVal);
+    Swap(baseValue, tmpVal);
     return baseValue;
   }
 
   // GENERAL CASE: Non-Shorthands
   // (1) Put empty string in override style for property mPropID
   // (saving old override style value, so we can set it again when we're done)
   nsICSSDeclaration* overrideDecl = mElement->GetSMILOverrideStyle();
   nsAutoString cachedOverrideStyleVal;
--- a/dom/smil/nsSMILValue.cpp
+++ b/dom/smil/nsSMILValue.cpp
@@ -39,38 +39,54 @@ nsSMILValue::operator=(const nsSMILValue
     DestroyAndReinit(aVal.mType);
   }
 
   mType->Assign(*this, aVal);
 
   return *this;
 }
 
+// Move constructor / move-assignment operator:
+nsSMILValue::nsSMILValue(nsSMILValue&& aVal)
+  : mU(aVal.mU), // Copying union is only OK because we clear aVal.mType below.
+    mType(aVal.mType)
+{
+  // Leave aVal with a null type, so that it's safely destructible (and won't
+  // mess with anything referenced by its union, which we've copied).
+  aVal.mType = nsSMILNullType::Singleton();
+}
+
+nsSMILValue&
+nsSMILValue::operator=(nsSMILValue&& aVal)
+{
+  if (!IsNull()) {
+    // Clean up any data we're currently tracking.
+    DestroyAndCheckPostcondition();
+  }
+
+  // Copy the union (which could include a pointer to external memory) & mType:
+  mU = aVal.mU;
+  mType = aVal.mType;
+
+  // Leave aVal with a null type, so that it's safely destructible (and won't
+  // mess with anything referenced by its union, which we've now copied).
+  aVal.mType = nsSMILNullType::Singleton();
+
+  return *this;
+}
+
 bool
 nsSMILValue::operator==(const nsSMILValue& aVal) const
 {
   if (&aVal == this)
     return true;
 
   return mType == aVal.mType && mType->IsEqual(*this, aVal);
 }
 
-void
-nsSMILValue::Swap(nsSMILValue& aOther)
-{
-  nsSMILValue tmp;
-  memcpy(&tmp,    &aOther, sizeof(nsSMILValue));  // tmp    = aOther
-  memcpy(&aOther, this,    sizeof(nsSMILValue));  // aOther = this
-  memcpy(this,    &tmp,    sizeof(nsSMILValue));  // this   = tmp
-
-  // |tmp| is about to die -- we need to clear its mType, so that its
-  // destructor doesn't muck with the data we just transferred out of it.
-  tmp.mType = nsSMILNullType::Singleton();
-}
-
 nsresult
 nsSMILValue::Add(const nsSMILValue& aValueToAdd, uint32_t aCount)
 {
   if (aValueToAdd.mType != mType) {
     NS_ERROR("Trying to add incompatible types");
     return NS_ERROR_FAILURE;
   }
 
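The new move operations make aValue = Move(val) transfer ownership directly: copy the union and type pointer, then reset the source's mType to the null singleton so its destructor will not touch the transferred data. That is also why the memcpy-based Swap() helper removed above is no longer needed. The pattern in miniature (illustrative type, not the real class):

    // Miniature of the nsSMILValue move pattern: steal the payload, then
    // leave the source safely destructible, mirroring how the operators
    // above copy mU/mType and null out aVal.mType.
    struct Value {
      int* mPtr = nullptr;   // stands in for the nsSMILValue union

      Value() = default;
      explicit Value(int aV) : mPtr(new int(aV)) {}

      Value(Value&& aOther) : mPtr(aOther.mPtr) {
        aOther.mPtr = nullptr;            // moved-from object owns nothing
      }

      Value& operator=(Value&& aOther) {
        if (this != &aOther) {
          delete mPtr;                    // drop our current payload
          mPtr = aOther.mPtr;             // steal the source's payload
          aOther.mPtr = nullptr;          // neuter the source
        }
        return *this;
      }

      ~Value() { delete mPtr; }
    };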
--- a/dom/smil/nsSMILValue.h
+++ b/dom/smil/nsSMILValue.h
@@ -28,31 +28,32 @@ public:
 
   ~nsSMILValue()
   {
     mType->Destroy(*this);
   }
 
   const nsSMILValue& operator=(const nsSMILValue& aVal);
 
+  // Move constructor / move-assignment operator:
+  nsSMILValue(nsSMILValue&& aVal);
+  nsSMILValue& operator=(nsSMILValue&& aVal);
+
   // Equality operators. These are allowed to be conservative (return false
   // more than you'd expect) - see comment above nsISMILType::IsEqual.
   bool operator==(const nsSMILValue& aVal) const;
   bool operator!=(const nsSMILValue& aVal) const {
     return !(*this == aVal);
   }
 
   bool IsNull() const
   {
     return (mType == nsSMILNullType::Singleton());
   }
 
-  // Swaps the member data (mU & mPtr) of |this| with |aOther|
-  void     Swap(nsSMILValue& aOther);
-
   nsresult Add(const nsSMILValue& aValueToAdd, uint32_t aCount = 1);
   nsresult SandwichAdd(const nsSMILValue& aValueToAdd);
   nsresult ComputeDistance(const nsSMILValue& aTo, double& aDistance) const;
   nsresult Interpolate(const nsSMILValue& aEndVal,
                        double aUnitDistance,
                        nsSMILValue& aResult) const;
 
   union {
--- a/dom/svg/SVGAnimatedLengthList.cpp
+++ b/dom/svg/SVGAnimatedLengthList.cpp
@@ -1,16 +1,18 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "SVGAnimatedLengthList.h"
+
 #include "DOMSVGAnimatedLengthList.h"
+#include "mozilla/Move.h"
 #include "nsSVGElement.h"
 #include "nsSVGAttrTearoffTable.h"
 #include "nsSMILValue.h"
 #include "SVGLengthListSMILType.h"
 
 namespace mozilla {
 
 nsresult
@@ -133,17 +135,17 @@ SVGAnimatedLengthList::
                                nsSMILValue& aValue,
                                bool& aPreventCachingOfSandwich) const
 {
   nsSMILValue val(&SVGLengthListSMILType::sSingleton);
   SVGLengthListAndInfo *llai = static_cast<SVGLengthListAndInfo*>(val.mU.mPtr);
   nsresult rv = llai->SetValueFromString(aStr);
   if (NS_SUCCEEDED(rv)) {
     llai->SetInfo(mElement, mAxis, mCanZeroPadList);
-    aValue.Swap(val);
+    aValue = Move(val);
 
     // If any of the lengths in the list depend on their context, then we must
     // prevent caching of the entire animation sandwich. This is because the
     // units of a length at a given index can change from sandwich layer to
     // layer, and indeed even be different within a single sandwich layer. If
     // any length in the result of an animation sandwich is the result of the
     // addition of lengths where one or more of those lengths is context
     // dependent, then naturally the resultant length is also context
@@ -176,17 +178,17 @@ SVGAnimatedLengthList::SMILAnimatedLengt
   // from ALL return points. This function must only return THIS variable:
   nsSMILValue val;
 
   nsSMILValue tmp(&SVGLengthListSMILType::sSingleton);
   SVGLengthListAndInfo *llai = static_cast<SVGLengthListAndInfo*>(tmp.mU.mPtr);
   nsresult rv = llai->CopyFrom(mVal->mBaseVal);
   if (NS_SUCCEEDED(rv)) {
     llai->SetInfo(mElement, mAxis, mCanZeroPadList);
-    val.Swap(tmp);
+    val = Move(tmp);
   }
   return val;
 }
 
 nsresult
 SVGAnimatedLengthList::SMILAnimatedLengthList::SetAnimValue(const nsSMILValue& aValue)
 {
   NS_ASSERTION(aValue.mType == &SVGLengthListSMILType::sSingleton,
--- a/dom/svg/SVGAnimatedNumberList.cpp
+++ b/dom/svg/SVGAnimatedNumberList.cpp
@@ -1,16 +1,18 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "SVGAnimatedNumberList.h"
+
 #include "DOMSVGAnimatedNumberList.h"
+#include "mozilla/Move.h"
 #include "nsSVGElement.h"
 #include "nsSVGAttrTearoffTable.h"
 #include "nsSMILValue.h"
 #include "SVGNumberListSMILType.h"
 
 namespace mozilla {
 
 nsresult
@@ -133,17 +135,17 @@ SVGAnimatedNumberList::
                                nsSMILValue& aValue,
                                bool& aPreventCachingOfSandwich) const
 {
   nsSMILValue val(&SVGNumberListSMILType::sSingleton);
   SVGNumberListAndInfo *nlai = static_cast<SVGNumberListAndInfo*>(val.mU.mPtr);
   nsresult rv = nlai->SetValueFromString(aStr);
   if (NS_SUCCEEDED(rv)) {
     nlai->SetInfo(mElement);
-    aValue.Swap(val);
+    aValue = Move(val);
   }
   aPreventCachingOfSandwich = false;
   return rv;
 }
 
 nsSMILValue
 SVGAnimatedNumberList::SMILAnimatedNumberList::GetBaseValue() const
 {
@@ -152,17 +154,17 @@ SVGAnimatedNumberList::SMILAnimatedNumbe
   // from ALL return points. This function must only return THIS variable:
   nsSMILValue val;
 
   nsSMILValue tmp(&SVGNumberListSMILType::sSingleton);
   SVGNumberListAndInfo *nlai = static_cast<SVGNumberListAndInfo*>(tmp.mU.mPtr);
   nsresult rv = nlai->CopyFrom(mVal->mBaseVal);
   if (NS_SUCCEEDED(rv)) {
     nlai->SetInfo(mElement);
-    val.Swap(tmp);
+    Swap(val, tmp);
   }
   return val;
 }
 
 nsresult
 SVGAnimatedNumberList::SMILAnimatedNumberList::SetAnimValue(const nsSMILValue& aValue)
 {
   NS_ASSERTION(aValue.mType == &SVGNumberListSMILType::sSingleton,
--- a/dom/svg/SVGAnimatedPathSegList.cpp
+++ b/dom/svg/SVGAnimatedPathSegList.cpp
@@ -1,16 +1,18 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "SVGAnimatedPathSegList.h"
+
 #include "DOMSVGPathSegList.h"
+#include "mozilla/Move.h"
 #include "nsSVGElement.h"
 #include "nsSVGAttrTearoffTable.h"
 #include "nsSMILValue.h"
 #include "SVGPathSegListSMILType.h"
 
 // See the comments in this file's header!
 
 namespace mozilla {
@@ -155,17 +157,17 @@ SVGAnimatedPathSegList::
                                nsSMILValue& aValue,
                                bool& aPreventCachingOfSandwich) const
 {
   nsSMILValue val(SVGPathSegListSMILType::Singleton());
   SVGPathDataAndInfo *list = static_cast<SVGPathDataAndInfo*>(val.mU.mPtr);
   nsresult rv = list->SetValueFromString(aStr);
   if (NS_SUCCEEDED(rv)) {
     list->SetElement(mElement);
-    aValue.Swap(val);
+    aValue = Move(val);
   }
   aPreventCachingOfSandwich = false;
   return rv;
 }
 
 nsSMILValue
 SVGAnimatedPathSegList::SMILAnimatedPathSegList::GetBaseValue() const
 {
@@ -174,17 +176,17 @@ SVGAnimatedPathSegList::SMILAnimatedPath
   // from ALL return points. This function must only return THIS variable:
   nsSMILValue val;
 
   nsSMILValue tmp(SVGPathSegListSMILType::Singleton());
   SVGPathDataAndInfo *list = static_cast<SVGPathDataAndInfo*>(tmp.mU.mPtr);
   nsresult rv = list->CopyFrom(mVal->mBaseVal);
   if (NS_SUCCEEDED(rv)) {
     list->SetElement(mElement);
-    val.Swap(tmp);
+    val = Move(tmp);
   }
   return val;
 }
 
 nsresult
 SVGAnimatedPathSegList::SMILAnimatedPathSegList::SetAnimValue(const nsSMILValue& aValue)
 {
   NS_ASSERTION(aValue.mType == SVGPathSegListSMILType::Singleton(),
--- a/dom/svg/SVGAnimatedPointList.cpp
+++ b/dom/svg/SVGAnimatedPointList.cpp
@@ -1,16 +1,18 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "SVGAnimatedPointList.h"
+
 #include "DOMSVGPointList.h"
+#include "mozilla/Move.h"
 #include "nsSVGElement.h"
 #include "nsSVGAttrTearoffTable.h"
 #include "nsSMILValue.h"
 #include "SVGPointListSMILType.h"
 
 // See the comments in this file's header!
 
 namespace mozilla {
@@ -158,17 +160,17 @@ SVGAnimatedPointList::
                                nsSMILValue& aValue,
                                bool& aPreventCachingOfSandwich) const
 {
   nsSMILValue val(&SVGPointListSMILType::sSingleton);
   SVGPointListAndInfo *list = static_cast<SVGPointListAndInfo*>(val.mU.mPtr);
   nsresult rv = list->SetValueFromString(aStr);
   if (NS_SUCCEEDED(rv)) {
     list->SetInfo(mElement);
-    aValue.Swap(val);
+    aValue = Move(val);
   }
   aPreventCachingOfSandwich = false;
   return rv;
 }
 
 nsSMILValue
 SVGAnimatedPointList::SMILAnimatedPointList::GetBaseValue() const
 {
@@ -177,17 +179,17 @@ SVGAnimatedPointList::SMILAnimatedPointL
   // from ALL return points. This function must only return THIS variable:
   nsSMILValue val;
 
   nsSMILValue tmp(&SVGPointListSMILType::sSingleton);
   SVGPointListAndInfo *list = static_cast<SVGPointListAndInfo*>(tmp.mU.mPtr);
   nsresult rv = list->CopyFrom(mVal->mBaseVal);
   if (NS_SUCCEEDED(rv)) {
     list->SetInfo(mElement);
-    val.Swap(tmp);
+    Swap(val, tmp);
   }
   return val;
 }
 
 nsresult
 SVGAnimatedPointList::SMILAnimatedPointList::SetAnimValue(const nsSMILValue& aValue)
 {
   NS_ASSERTION(aValue.mType == &SVGPointListSMILType::sSingleton,
--- a/dom/svg/nsSVGAngle.cpp
+++ b/dom/svg/nsSVGAngle.cpp
@@ -1,18 +1,19 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#include "mozilla/ArrayUtils.h"
+#include "nsSVGAngle.h"
 
-#include "nsSVGAngle.h"
+#include "mozilla/ArrayUtils.h"
 #include "mozilla/dom/SVGMarkerElement.h"
+#include "mozilla/Move.h"
 #include "nsContentUtils.h" // NS_ENSURE_FINITE
 #include "nsSMILValue.h"
 #include "nsSVGAttrTearoffTable.h"
 #include "nsTextFormatter.h"
 #include "SVGAngle.h"
 #include "SVGAnimatedAngle.h"
 #include "SVGOrientSMILType.h"
 
@@ -378,17 +379,17 @@ nsSVGAngle::SMILOrient::ValueFromString(
     uint16_t unitType;
     if (!GetValueFromString(aStr, value, &unitType)) {
       return NS_ERROR_DOM_SYNTAX_ERR;
     }
     val.mU.mOrient.mAngle = value;
     val.mU.mOrient.mUnit = unitType;
     val.mU.mOrient.mOrientType = SVG_MARKER_ORIENT_ANGLE;
   }
-  aValue.Swap(val);
+  aValue = Move(val);
   aPreventCachingOfSandwich = false;
 
   return NS_OK;
 }
 
 nsSMILValue
 nsSVGAngle::SMILOrient::GetBaseValue() const
 {
--- a/dom/svg/nsSVGAnimatedTransformList.cpp
+++ b/dom/svg/nsSVGAnimatedTransformList.cpp
@@ -1,17 +1,19 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "nsSVGAnimatedTransformList.h"
+
 #include "mozilla/dom/SVGAnimatedTransformList.h"
 #include "mozilla/dom/SVGAnimationElement.h"
+#include "mozilla/Move.h"
 #include "nsCharSeparatedTokenizer.h"
 #include "nsSVGTransform.h"
 #include "nsSMILValue.h"
 #include "SVGContentUtils.h"
 #include "SVGTransformListSMILType.h"
 #include "nsIDOMMutationEvent.h"
 
 namespace mozilla {
@@ -243,17 +245,17 @@ nsSVGAnimatedTransformList::SMILAnimated
 
   nsSMILValue val(SVGTransformListSMILType::Singleton());
   SVGTransformSMILData transform(transformType, params);
   if (NS_FAILED(SVGTransformListSMILType::AppendTransform(transform, val))) {
     return; // OOM
   }
 
   // Success! Populate our outparam with parsed value.
-  aResult.Swap(val);
+  aResult = Move(val);
 }
 
 int32_t
 nsSVGAnimatedTransformList::SMILAnimatedTransformList::ParseParameterList(
   const nsAString& aSpec,
   float* aVars,
   int32_t aNVars)
 {
--- a/dom/svg/nsSVGClass.cpp
+++ b/dom/svg/nsSVGClass.cpp
@@ -1,19 +1,21 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "nsSVGClass.h"
+
+#include "mozilla/dom/SVGAnimatedString.h"
+#include "mozilla/Move.h"
 #include "nsSVGElement.h"
 #include "nsSMILValue.h"
 #include "SMILStringType.h"
-#include "mozilla/dom/SVGAnimatedString.h"
 
 using namespace mozilla;
 using namespace mozilla::dom;
 
 struct DOMAnimatedString final : public SVGAnimatedString
 {
   NS_DECL_CYCLE_COLLECTING_ISUPPORTS
   NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_CLASS(DOMAnimatedString)
@@ -125,17 +127,17 @@ nsresult
 nsSVGClass::SMILString::ValueFromString(const nsAString& aStr,
                                         const dom::SVGAnimationElement* /*aSrcElement*/,
                                         nsSMILValue& aValue,
                                         bool& aPreventCachingOfSandwich) const
 {
   nsSMILValue val(SMILStringType::Singleton());
 
   *static_cast<nsAString*>(val.mU.mPtr) = aStr;
-  aValue.Swap(val);
+  aValue = Move(val);
   aPreventCachingOfSandwich = false;
   return NS_OK;
 }
 
 nsSMILValue
 nsSVGClass::SMILString::GetBaseValue() const
 {
   nsSMILValue val(SMILStringType::Singleton());
--- a/dom/svg/nsSVGString.cpp
+++ b/dom/svg/nsSVGString.cpp
@@ -1,15 +1,17 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "nsSVGString.h"
+
+#include "mozilla/Move.h"
 #include "nsSVGAttrTearoffTable.h"
 #include "nsSMILValue.h"
 #include "SMILStringType.h"
 
 using namespace mozilla;
 using namespace mozilla::dom;
 
 NS_SVG_VAL_IMPL_CYCLE_COLLECTION_WRAPPERCACHED(nsSVGString::DOMAnimatedString, mSVGElement)
@@ -105,17 +107,17 @@ nsresult
 nsSVGString::SMILString::ValueFromString(const nsAString& aStr,
                                          const dom::SVGAnimationElement* /*aSrcElement*/,
                                          nsSMILValue& aValue,
                                          bool& aPreventCachingOfSandwich) const
 {
   nsSMILValue val(SMILStringType::Singleton());
 
   *static_cast<nsAString*>(val.mU.mPtr) = aStr;
-  aValue.Swap(val);
+  aValue = Move(val);
   aPreventCachingOfSandwich = false;
   return NS_OK;
 }
 
 nsSMILValue
 nsSVGString::SMILString::GetBaseValue() const
 {
   nsSMILValue val(SMILStringType::Singleton());
--- a/dom/svg/nsSVGViewBox.cpp
+++ b/dom/svg/nsSVGViewBox.cpp
@@ -1,15 +1,17 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "nsSVGViewBox.h"
+
+#include "mozilla/Move.h"
 #include "nsCharSeparatedTokenizer.h"
 #include "nsSMILValue.h"
 #include "nsTextFormatter.h"
 #include "SVGContentUtils.h"
 #include "SVGViewBoxSMILType.h"
 
 #define NUM_VIEWBOX_COMPONENTS 4
 using namespace mozilla;
@@ -295,17 +297,17 @@ nsSVGViewBox::SMILViewBox
 {
   nsSVGViewBoxRect viewBox;
   nsresult res = ToSVGViewBoxRect(aStr, &viewBox);
   if (NS_FAILED(res)) {
     return res;
   }
   nsSMILValue val(&SVGViewBoxSMILType::sSingleton);
   *static_cast<nsSVGViewBoxRect*>(val.mU.mPtr) = viewBox;
-  aValue.Swap(val);
+  aValue = Move(val);
   aPreventCachingOfSandwich = false;
   
   return NS_OK;
 }
 
 nsSMILValue
 nsSVGViewBox::SMILViewBox::GetBaseValue() const
 {
--- a/dom/xul/templates/nsTemplateMap.h
+++ b/dom/xul/templates/nsTemplateMap.h
@@ -11,31 +11,22 @@
 
 class nsTemplateMap {
 protected:
     struct Entry : public PLDHashEntryHdr {
         nsIContent*     mContent;
         nsIContent*     mTemplate;
     };
 
-    PLDHashTable mTable;
-
-    void
-    Init()
-    {
-        PL_DHashTableInit(&mTable, PL_DHashGetStubOps(), sizeof(Entry));
-    }
-
-    void
-    Finish() { PL_DHashTableFinish(&mTable); }
+    PLDHashTable2 mTable;
 
 public:
-    nsTemplateMap() { Init(); }
+    nsTemplateMap() : mTable(PL_DHashGetStubOps(), sizeof(Entry)) { }
 
-    ~nsTemplateMap() { Finish(); }
+    ~nsTemplateMap() { }
 
     void
     Put(nsIContent* aContent, nsIContent* aTemplate) {
         NS_ASSERTION(!PL_DHashTableSearch(&mTable, aContent),
                      "aContent already in map");
 
         Entry* entry = static_cast<Entry*>
             (PL_DHashTableAdd(&mTable, aContent, fallible));
@@ -65,13 +56,13 @@ public:
 
         if (entry)
             NS_IF_ADDREF(*aResult = entry->mTemplate);
         else
             *aResult = nullptr;
     }
 
     void
-    Clear() { Finish(); Init(); }
+    Clear() { mTable.Clear(); }
 };
 
 #endif // nsTemplateMap_h__
 
--- a/embedding/components/build/moz.build
+++ b/embedding/components/build/moz.build
@@ -21,16 +21,17 @@ LOCAL_INCLUDES += [
 ]
 
 if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
     DEFINES['PROXY_PRINTING'] = 1
     LOCAL_INCLUDES += [
         '../printingui/win',
     ]
 elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'cocoa':
+    DEFINES['PROXY_PRINTING'] = 1
     LOCAL_INCLUDES += [
         '../printingui/mac',
     ]
 
 if CONFIG['MOZ_PDF_PRINTING']:
     DEFINES['PROXY_PRINTING'] = 1
     LOCAL_INCLUDES += [
         '../printingui/unixshared',
--- a/embedding/components/printingui/ipc/PPrintingTypes.ipdlh
+++ b/embedding/components/printingui/ipc/PPrintingTypes.ipdlh
@@ -67,16 +67,17 @@ struct PrintData {
   nsString toFileName;
   short outputFormat;
   int32_t printPageDelay;
   int32_t resolution;
   int32_t duplex;
   bool isInitializedFromPrinter;
   bool isInitializedFromPrefs;
   bool persistMarginBoxSettings;
+  int32_t optionFlags;
 
   /* Windows-specific things */
   nsString driverName;
   nsString deviceName;
   bool isFramesetDocument;
   bool isFramesetFrameSelected;
   bool isIFrameSelected;
   bool isRangeSelection;
@@ -86,15 +87,26 @@ struct PrintData {
    * information we're already passing, but the generalized settings that
    * we hold in nsIPrintSettings don't map perfectly to GTK's GtkPrintSettings,
    * so there are some nuances. GtkPrintSettings, for example, stores both an
    * internal name for paper size, as well as the display name.
    */
   CStringKeyValue[] GTKPrintSettings;
 
   /**
-   * TODO: OS X specific things - specifically, an array of names for the
-   * document to be supplied by nsIWebBrowserPrint::enumerateDocumentNames
+   * OS X specific things.
    */
+  nsString printJobName;
+  bool printAllPages;
+  bool mustCollate;
+  nsString disposition;
+  /** TODO: Is there an "unsigned short" primitive? **/
+  short pagesAcross;
+  short pagesDown;
+  double printTime;
+  bool detailedErrorReporting;
+  nsString faxNumber;
+  bool addHeaderAndFooter;
+  bool fileNameExtensionHidden;
 };
 
 } // namespace embedding
 } // namespace mozilla
--- a/embedding/components/printingui/ipc/PrintDataUtils.cpp
+++ b/embedding/components/printingui/ipc/PrintDataUtils.cpp
@@ -124,17 +124,33 @@ MockWebBrowserPrint::Cancel()
 {
   return NS_ERROR_NOT_IMPLEMENTED;
 }
 
 NS_IMETHODIMP
 MockWebBrowserPrint::EnumerateDocumentNames(uint32_t* aCount,
                                             char16_t*** aResult)
 {
-  return NS_ERROR_NOT_IMPLEMENTED;
+  *aCount = 0;
+  *aResult = nullptr;
+
+  if (mData.printJobName().IsEmpty()) {
+    return NS_OK;
+  }
+
+  // The only consumer that cares about this is the OS X printing
+  // dialog, and even then, it only cares about the first document
+  // name. That's why we only send a single document name through
+  // PrintData.
+  char16_t** array = (char16_t**) moz_xmalloc(sizeof(char16_t*));
+  array[0] = ToNewUnicode(mData.printJobName());
+
+  *aCount = 1;
+  *aResult = array;
+  return NS_OK;
 }
 
 NS_IMETHODIMP
 MockWebBrowserPrint::ExitPrintPreview()
 {
   return NS_ERROR_NOT_IMPLEMENTED;
 }
 
--- a/gfx/2d/unittest/TestCairo.cpp
+++ b/gfx/2d/unittest/TestCairo.cpp
@@ -44,10 +44,53 @@ TEST(Cairo, Bug825721) {
 
   // This was the crash in 825721.  Note that centerY has to be non-zero,
   // and radius has to be not only large, but in particular range.
   // 825721 has a band-aid fix, where the crash is inevitable, but does
   // not fix the cause.  The same code crashes in cairo standalone.
   TryCircle(0.0, 1.0, 5761126469220696064.0);
 }
 
+TEST(Cairo, Bug1063486) {
+
+  double x1, y1, x2, y2;
+  const double epsilon = .01;
+
+  cairo_surface_t *surf = cairo_image_surface_create(CAIRO_FORMAT_ARGB32, 1, 1);
+  ASSERT_TRUE(surf != nullptr);
+
+  cairo_t *cairo = cairo_create(surf);
+  ASSERT_TRUE(cairo != nullptr);
+
+  printf("Path 1\n");
+  cairo_move_to(cairo, -20, -10);
+  cairo_line_to(cairo, 20, -10);
+  cairo_line_to(cairo, 20, 10);
+  cairo_curve_to(cairo, 10,10, -10,10, -20,10);
+  cairo_curve_to(cairo, -30,10, -30,-10, -20,-10);
+
+  cairo_path_extents(cairo, &x1, &y1, &x2, &y2);
+
+  ASSERT_LT(std::abs(-27.5 - x1), epsilon); // the failing coordinate
+  ASSERT_LT(std::abs(-10 - y1), epsilon);
+  ASSERT_LT(std::abs(20 - x2), epsilon);
+  ASSERT_LT(std::abs(10 - y2), epsilon);
+
+  printf("Path 2\n");
+  cairo_new_path(cairo);
+  cairo_move_to(cairo, 10, 30);
+  cairo_line_to(cairo, 90, 30);
+  cairo_curve_to(cairo, 30,30, 30,30, 10,30);
+  cairo_curve_to(cairo, 0,30, 0,0, 30,5);
+
+  cairo_path_extents(cairo, &x1, &y1, &x2, &y2);
+
+  ASSERT_LT(std::abs(4.019531 - x1), epsilon); // the failing coordinate
+  ASSERT_LT(std::abs(4.437500 - y1), epsilon);
+  ASSERT_LT(std::abs(90. - x2), epsilon);
+  ASSERT_LT(std::abs(30. - y2), epsilon);
+
+  cairo_surface_destroy(surf);
+  cairo_destroy(cairo);
+}
+
 }
 }
--- a/gfx/cairo/cairo/src/cairo-path-bounds.c
+++ b/gfx/cairo/cairo/src/cairo-path-bounds.c
@@ -126,16 +126,17 @@ static cairo_status_t
 	d->y < bounder->extents.p1.y || d->y > bounder->extents.p2.y)
     {
 	return _cairo_spline_bound (_cairo_path_bounder_line_to, bounder,
 				    &bounder->current_point, b, c, d);
     }
     else
     {
 	/* All control points are within the current extents. */
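+	/* The extents cannot grow, but the current point must still advance so
+	 * the next segment is bounded from this curve's end point (bug 1063486). */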
+	bounder->current_point = *d;
 	return CAIRO_STATUS_SUCCESS;
     }
 }
 
 static cairo_status_t
 _cairo_path_bounder_close_path (void *closure)
 {
     return CAIRO_STATUS_SUCCESS;
--- a/gfx/ipc/GfxMessageUtils.h
+++ b/gfx/ipc/GfxMessageUtils.h
@@ -728,16 +728,17 @@ struct ParamTraits<mozilla::layers::Fram
     WriteParam(aMsg, aParam.mExtraResolution);
     WriteParam(aMsg, aParam.mBackgroundColor);
     WriteParam(aMsg, aParam.mDoSmoothScroll);
     WriteParam(aMsg, aParam.mSmoothScrollOffset);
     WriteParam(aMsg, aParam.GetLineScrollAmount());
     WriteParam(aMsg, aParam.GetPageScrollAmount());
     WriteParam(aMsg, aParam.AllowVerticalScrollWithWheel());
     WriteParam(aMsg, aParam.mClipRect);
+    WriteParam(aMsg, aParam.mIsLayersIdRoot);
     WriteParam(aMsg, aParam.GetContentDescription());
   }
 
   static bool ReadContentDescription(const Message* aMsg, void** aIter, paramType* aResult)
   {
     nsCString str;
     if (!ReadParam(aMsg, aIter, &str)) {
       return false;
@@ -771,16 +772,17 @@ struct ParamTraits<mozilla::layers::Fram
             ReadParam(aMsg, aIter, &aResult->mExtraResolution) &&
             ReadParam(aMsg, aIter, &aResult->mBackgroundColor) &&
             ReadParam(aMsg, aIter, &aResult->mDoSmoothScroll) &&
             ReadParam(aMsg, aIter, &aResult->mSmoothScrollOffset) &&
             ReadParam(aMsg, aIter, &aResult->mLineScrollAmount) &&
             ReadParam(aMsg, aIter, &aResult->mPageScrollAmount) &&
             ReadParam(aMsg, aIter, &aResult->mAllowVerticalScrollWithWheel) &&
             ReadParam(aMsg, aIter, &aResult->mClipRect) &&
+            ReadParam(aMsg, aIter, &aResult->mIsLayersIdRoot) &&
             ReadContentDescription(aMsg, aIter, aResult));
   }
 };
 
 template<>
 struct ParamTraits<mozilla::layers::TextureFactoryIdentifier>
 {
   typedef mozilla::layers::TextureFactoryIdentifier paramType;
--- a/gfx/layers/FrameMetrics.h
+++ b/gfx/layers/FrameMetrics.h
@@ -62,16 +62,17 @@ public:
     , mUseDisplayPortMargins(false)
     , mPresShellId(-1)
     , mViewport(0, 0, 0, 0)
     , mExtraResolution()
     , mBackgroundColor(0, 0, 0, 0)
     , mLineScrollAmount(0, 0)
     , mPageScrollAmount(0, 0)
     , mAllowVerticalScrollWithWheel(false)
+    , mIsLayersIdRoot(false)
   {
   }
 
   // Default copy ctor and operator= are fine
 
   bool operator==(const FrameMetrics& aOther) const
   {
     return mCompositionBounds.IsEqualEdges(aOther.mCompositionBounds) &&
@@ -95,17 +96,18 @@ public:
            mUpdateScrollOffset == aOther.mUpdateScrollOffset &&
            mScrollGeneration == aOther.mScrollGeneration &&
            mExtraResolution == aOther.mExtraResolution &&
            mBackgroundColor == aOther.mBackgroundColor &&
            mDoSmoothScroll == aOther.mDoSmoothScroll &&
            mLineScrollAmount == aOther.mLineScrollAmount &&
            mPageScrollAmount == aOther.mPageScrollAmount &&
            mAllowVerticalScrollWithWheel == aOther.mAllowVerticalScrollWithWheel &&
-           mClipRect == aOther.mClipRect;
+           mClipRect == aOther.mClipRect &&
+           mIsLayersIdRoot == aOther.mIsLayersIdRoot;
   }
   bool operator!=(const FrameMetrics& aOther) const
   {
     return !operator==(aOther);
   }
 
   bool IsDefault() const
   {
@@ -520,16 +522,23 @@ public:
   }
   bool HasClipRect() const {
     return mClipRect.isSome();
   }
   const ParentLayerIntRect& ClipRect() const {
     return mClipRect.ref();
   }
 
+  void SetIsLayersIdRoot(bool aValue) {
+    mIsLayersIdRoot = aValue;
+  }
+  bool IsLayersIdRoot() const {
+    return mIsLayersIdRoot;
+  }
+
 private:
 
   // The pres-shell resolution that has been induced on the document containing
   // this scroll frame as a result of zooming this scroll frame (whether via
   // user action, or choosing an initial zoom level on page load). This can
   // only be different from 1.0 for frames that are zoomable, which currently
   // is just the root content document's root scroll frame (mIsRoot = true).
   // This is a plain float rather than a ScaleFactor because in and of itself
@@ -694,16 +703,20 @@ private:
   LayoutDeviceIntSize mPageScrollAmount;
 
   // Whether or not the frame can be vertically scrolled with a mouse wheel.
   bool mAllowVerticalScrollWithWheel;
 
   // The clip rect to use when compositing a layer with this FrameMetrics.
   Maybe<ParentLayerIntRect> mClipRect;
 
+  // Whether these FrameMetrics are for the root scroll frame (or the root
+  // element, if there is no root scroll frame) of its layers id.
+  bool mIsLayersIdRoot;
+
   // WARNING!!!!
   //
   // When adding new fields to FrameMetrics, the following places should be
   // updated to include them (as needed):
   //    FrameMetrics::operator ==
   //    AsyncPanZoomController::NotifyLayersUpdated
   //    The ParamTraits specialization in GfxMessageUtils.h
   //
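
As an aside, the ordering constraint behind the WARNING above exists because
IPC serialization is purely positional. A hedged, hypothetical ParamTraits
sketch showing the Write/Read symmetry that the new mIsLayersIdRoot lines in
GfxMessageUtils.h preserve:

  struct Point { int32_t x; int32_t y; };  // hypothetical payload

  template <>
  struct ParamTraits<Point> {
    typedef Point paramType;
    static void Write(Message* aMsg, const paramType& aParam) {
      WriteParam(aMsg, aParam.x);  // the order of writes here...
      WriteParam(aMsg, aParam.y);
    }
    static bool Read(const Message* aMsg, void** aIter, paramType* aResult) {
      return ReadParam(aMsg, aIter, &aResult->x) &&  // ...must match the reads
             ReadParam(aMsg, aIter, &aResult->y);
    }
  };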
--- a/gfx/layers/LayersLogging.cpp
+++ b/gfx/layers/LayersLogging.cpp
@@ -208,17 +208,17 @@ AppendToString(std::stringstream& aStrea
   aStream << sfx;
 }
 
 void
 AppendToString(std::stringstream& aStream, const ScrollableLayerGuid& s,
                const char* pfx, const char* sfx)
 {
   aStream << pfx
-          << nsPrintfCString("{ l=%llu, p=%u, v=%llu }", s.mLayersId, s.mPresShellId, s.mScrollId).get()
+          << nsPrintfCString("{ l=%" PRIu64 ", p=%u, v=%" PRIu64 " }", s.mLayersId, s.mPresShellId, s.mScrollId).get()
           << sfx;
 }
 
 void
 AppendToString(std::stringstream& aStream, const Matrix& m,
                const char* pfx, const char* sfx)
 {
   aStream << pfx;
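
As an aside, %llu only matches uint64_t on platforms where uint64_t is
unsigned long long; the PRIu64 macro from <inttypes.h> expands to the correct
conversion specifier everywhere. A minimal illustration (not part of the
patch):

  #include <inttypes.h>
  #include <stdio.h>

  uint64_t layersId = 42;
  printf("{ l=%" PRIu64 " }\n", layersId);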
--- a/gfx/layers/apz/src/APZCTreeManager.cpp
+++ b/gfx/layers/apz/src/APZCTreeManager.cpp
@@ -275,30 +275,31 @@ GetEventRegions(const LayerMetricsWrappe
     return EventRegions(nsIntRegion(ParentLayerIntRect::ToUntyped(
       RoundedToInt(aLayer.Metrics().GetCompositionBounds()))));
   }
   return aLayer.GetEventRegions();
 }
 
 already_AddRefed<HitTestingTreeNode>
 APZCTreeManager::RecycleOrCreateNode(TreeBuildingState& aState,
-                                     AsyncPanZoomController* aApzc)
+                                     AsyncPanZoomController* aApzc,
+                                     uint64_t aLayersId)
 {
   // Find a node without an APZC and return it. Note that unless the layer tree
   // actually changes, this loop should generally do an early-return on the
   // first iteration, so it should be cheap in the common case.
   for (size_t i = 0; i < aState.mNodesToDestroy.Length(); i++) {
     nsRefPtr<HitTestingTreeNode> node = aState.mNodesToDestroy[i];
     if (!node->IsPrimaryHolder()) {
       aState.mNodesToDestroy.RemoveElement(node);
-      node->RecycleWith(aApzc);
+      node->RecycleWith(aApzc, aLayersId);
       return node.forget();
     }
   }
-  nsRefPtr<HitTestingTreeNode> node = new HitTestingTreeNode(aApzc, false);
+  nsRefPtr<HitTestingTreeNode> node = new HitTestingTreeNode(aApzc, false, aLayersId);
   return node.forget();
 }
 
 static EventRegionsOverride
 GetEventRegionsOverride(HitTestingTreeNode* aParent,
                        const LayerMetricsWrapper& aLayer)
 {
   // Make it so that if the flag is set on the layer tree, it automatically
@@ -328,17 +329,17 @@ APZCTreeManager::PrepareNodeForLayer(con
 
   const CompositorParent::LayerTreeState* state = CompositorParent::GetIndirectShadowTree(aLayersId);
   if (!(state && state->mController.get())) {
     needsApzc = false;
   }
 
   nsRefPtr<HitTestingTreeNode> node = nullptr;
   if (!needsApzc) {
-    node = RecycleOrCreateNode(aState, nullptr);
+    node = RecycleOrCreateNode(aState, nullptr, aLayersId);
     AttachNodeToTree(node, aParent, aNextSibling);
     node->SetHitTestData(GetEventRegions(aLayer), aLayer.GetTransform(),
         aLayer.GetClipRect() ? Some(ParentLayerIntRegion(*aLayer.GetClipRect())) : Nothing(),
         GetEventRegionsOverride(aParent, aLayer));
     return node;
   }
 
   AsyncPanZoomController* apzc = nullptr;
@@ -408,17 +409,17 @@ APZCTreeManager::PrepareNodeForLayer(con
     bool newApzc = (apzc == nullptr || apzc->IsDestroyed());
     if (newApzc) {
       apzc = MakeAPZCInstance(aLayersId, state->mController);
       apzc->SetCompositorParent(aState.mCompositor);
       if (state->mCrossProcessParent != nullptr) {
         apzc->ShareFrameMetricsAcrossProcesses();
       }
       MOZ_ASSERT(node == nullptr);
-      node = new HitTestingTreeNode(apzc, true);
+      node = new HitTestingTreeNode(apzc, true, aLayersId);
     } else {
      // If we are re-using a node for this layer, clear the tree pointers
       // so that it doesn't continue pointing to nodes that might no longer
       // be in the tree. These pointers will get reset properly as we continue
       // building the tree. Also remove it from the set of nodes that are going
       // to be destroyed, because it's going to remain active.
       aState.mNodesToDestroy.RemoveElement(node);
       node->SetPrevSibling(nullptr);
@@ -447,28 +448,28 @@ APZCTreeManager::PrepareNodeForLayer(con
 
     // For testing, log the parent scroll id of every APZC that has a
     // parent. This allows test code to reconstruct the APZC tree.
     // Note that we currently only do this for APZCs in the layer tree
     // that originated the update, because the only identifying information
     // we are logging about APZCs is the scroll id, and otherwise we could
     // confuse APZCs from different layer trees with the same scroll id.
     if (aLayersId == aState.mOriginatingLayersId) {
-      if (apzc->IsRootForLayersId()) {
+      if (apzc->HasNoParentWithSameLayersId()) {
         aState.mPaintLogger.LogTestData(aMetrics.GetScrollId(),
-            "isRootForLayersId", true);
+            "hasNoParentWithSameLayersId", true);
       } else {
         MOZ_ASSERT(apzc->GetParent());
         aState.mPaintLogger.LogTestData(aMetrics.GetScrollId(),
             "parentScrollId", apzc->GetParent()->GetGuid().mScrollId);
       }
     }
 
     if (newApzc) {
-      if (apzc->IsRootForLayersId()) {
+      if (apzc->HasNoParentWithSameLayersId()) {
         // If we just created a new apzc that is the root for its layers ID, then
         // we need to update its zoom constraints which might have arrived before this
         // was created
         ZoomConstraints constraints;
         if (state->mController->GetRootZoomConstraints(&constraints)) {
           apzc->UpdateZoomConstraints(constraints);
         }
       } else {
@@ -482,17 +483,17 @@ APZCTreeManager::PrepareNodeForLayer(con
 
     // Add a guid -> APZC mapping for the newly created APZC.
     insertResult.first->second = apzc;
   } else {
     // We already built an APZC earlier in this tree walk, but we have another layer
     // now that will also be using that APZC. The hit-test region on the APZC needs
     // to be updated to deal with the new layer's hit region.
 
-    node = RecycleOrCreateNode(aState, apzc);
+    node = RecycleOrCreateNode(aState, apzc, aLayersId);
     AttachNodeToTree(node, aParent, aNextSibling);
 
     // Even though different layers associated with a given APZC may be at
     // different levels in the layer tree (e.g. one being an uncle of another),
     // we require from Layout that the CSS transforms up to their common
     // ancestor be the same.
     MOZ_ASSERT(aAncestorTransform == apzc->GetAncestorTransform());
 
@@ -1034,34 +1035,34 @@ APZCTreeManager::UpdateZoomConstraints(c
                                        const ZoomConstraints& aConstraints)
 {
   MonitorAutoLock lock(mTreeLock);
   nsRefPtr<HitTestingTreeNode> node = GetTargetNode(aGuid, nullptr);
   MOZ_ASSERT(!node || node->GetApzc()); // any node returned must have an APZC
 
   // For a given layers id, non-root APZCs inherit the zoom constraints
   // of their root.
-  if (node && node->GetApzc()->IsRootForLayersId()) {
+  if (node && node->GetApzc()->HasNoParentWithSameLayersId()) {
     UpdateZoomConstraintsRecursively(node.get(), aConstraints);
   }
 }
 
 void
 APZCTreeManager::UpdateZoomConstraintsRecursively(HitTestingTreeNode* aNode,
                                                   const ZoomConstraints& aConstraints)
 {
   mTreeLock.AssertCurrentThreadOwns();
 
   if (aNode->IsPrimaryHolder()) {
     MOZ_ASSERT(aNode->GetApzc());
     aNode->GetApzc()->UpdateZoomConstraints(aConstraints);
   }
   for (HitTestingTreeNode* child = aNode->GetLastChild(); child; child = child->GetPrevSibling()) {
     // We can have subtrees with their own layers id - leave those alone.
-    if (child->GetApzc() && child->GetApzc()->IsRootForLayersId()) {
+    if (child->GetApzc() && child->GetApzc()->HasNoParentWithSameLayersId()) {
       continue;
     }
     UpdateZoomConstraintsRecursively(child, aConstraints);
   }
 }
 
 void
 APZCTreeManager::FlushRepaintsToClearScreenToGeckoTransform()
@@ -1338,17 +1339,17 @@ APZCTreeManager::BuildOverscrollHandoffC
   // but do not follow the expected layer tree structure. If there are no
   // scroll parent links, we just walk up the tree to find the scroll parent.
   OverscrollHandoffChain* result = new OverscrollHandoffChain;
   AsyncPanZoomController* apzc = aInitialTarget;
   while (apzc != nullptr) {
     result->Add(apzc);
 
     if (apzc->GetScrollHandoffParentId() == FrameMetrics::NULL_SCROLL_ID) {
-      if (!apzc->IsRootForLayersId()) {
+      if (!apzc->HasNoParentWithSameLayersId()) {
         // This probably indicates a bug or missed case in layout code
         NS_WARNING("Found a non-root APZ with no handoff parent");
       }
       apzc = apzc->GetParent();
       continue;
     }
 
     // Guard against a possible infinite-loop condition. If we hit this, the
@@ -1356,17 +1357,17 @@ APZCTreeManager::BuildOverscrollHandoffC
     MOZ_ASSERT(apzc->GetScrollHandoffParentId() != apzc->GetGuid().mScrollId);
 
     // Find the AsyncPanZoomController instance with a matching layersId and
     // the scroll id that matches apzc->GetScrollHandoffParentId(). To do this
     // search the subtree with the same layersId for the apzc with the specified
     // scroll id.
     AsyncPanZoomController* scrollParent = nullptr;
     AsyncPanZoomController* parent = apzc;
-    while (!parent->IsRootForLayersId()) {
+    while (!parent->HasNoParentWithSameLayersId()) {
       parent = parent->GetParent();
       // While walking up to find the root of the subtree, if we encounter the
       // handoff parent, we don't actually need to do the search so we can
       // just abort here.
       if (parent->GetGuid().mScrollId == apzc->GetScrollHandoffParentId()) {
         scrollParent = parent;
         break;
       }
@@ -1456,33 +1457,66 @@ APZCTreeManager::GetAPZCAtPoint(HitTesti
     // If we didn't match anything in the subtree, check |node|.
     if (*aOutHitResult == HitNothing) {
       APZCTM_LOG("Testing ParentLayer point %s (Layer %s) against node %p\n",
           Stringify(aHitTestPoint).c_str(),
           hitTestPointForChildLayers ? Stringify(hitTestPointForChildLayers.ref()).c_str() : "nil",
           node);
       HitTestResult hitResult = node->HitTest(aHitTestPoint);
       if (hitResult != HitTestResult::HitNothing) {
-        result = node->GetNearestContainingApzc();
+        result = node->GetNearestContainingApzcWithSameLayersId();
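+        // If no APZC lives on this node or its same-layers-id ancestors, fall
+        // back to the root APZC for the node's layers id so input is still handled.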
+        if (!result) {
+          result = FindRootApzcForLayersId(node->GetLayersId());
+          MOZ_ASSERT(result);
+        }
         APZCTM_LOG("Successfully matched APZC %p via node %p (hit result %d)\n",
              result, node, hitResult);
         MOZ_ASSERT(hitResult == HitLayer || hitResult == HitDispatchToContentRegion);
         // If event regions are disabled, *aOutHitResult will be HitLayer
         *aOutHitResult = hitResult;
       }
     }
 
     if (*aOutHitResult != HitNothing) {
       return result;
     }
   }
 
   return nullptr;
 }
 
+AsyncPanZoomController*
+APZCTreeManager::FindRootApzcForLayersId(uint64_t aLayersId) const
+{
+  mTreeLock.AssertCurrentThreadOwns();
+
+  if (!mRootNode) {
+    return nullptr;
+  }
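+  // Breadth-first search of the hit-testing tree for the root APZC of the
+  // requested layers id.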
+  std::deque<const HitTestingTreeNode*> queue;
+  queue.push_back(mRootNode);
+  while (!queue.empty()) {
+    const HitTestingTreeNode* node = queue.front();
+    queue.pop_front();
+
+    AsyncPanZoomController* apzc = node->GetApzc();
+    if (apzc && apzc->GetLayersId() == aLayersId && apzc->IsRootForLayersId()) {
+      return apzc;
+    }
+
+    for (HitTestingTreeNode* child = node->GetLastChild();
+         child;
+         child = child->GetPrevSibling()) {
+      queue.push_back(child);
+    }
+  }
+
+  return nullptr;
+}
+
 /* The methods GetScreenToApzcTransform() and GetApzcToGeckoTransform() return
    some useful transformations that input events may need applied. This is best
    illustrated with an example. Consider a chain of layers, L, M, N, O, P, Q, R. Layer L
    is the layer that corresponds to the argument |aApzc|, and layer R is the root
    of the layer tree. Layer M is the parent of L, N is the parent of M, and so on.
    When layer L is displayed to the screen by the compositor, the set of transforms that
    are applied to L are (in order from top to bottom):
 
@@ -1699,16 +1733,16 @@ APZCTreeManager::CommonAncestor(AsyncPan
   return ancestor.forget();
 }
 
 already_AddRefed<AsyncPanZoomController>
 APZCTreeManager::RootAPZCForLayersId(AsyncPanZoomController* aApzc) const
 {
   MonitorAutoLock lock(mTreeLock);
   nsRefPtr<AsyncPanZoomController> apzc = aApzc;
-  while (apzc && !apzc->IsRootForLayersId()) {
+  while (apzc && !apzc->HasNoParentWithSameLayersId()) {
     apzc = apzc->GetParent();
   }
   return apzc.forget();
 }
 
 }
 }
--- a/gfx/layers/apz/src/APZCTreeManager.h
+++ b/gfx/layers/apz/src/APZCTreeManager.h
@@ -407,16 +407,17 @@ private:
   already_AddRefed<HitTestingTreeNode> GetTargetNode(const ScrollableLayerGuid& aGuid,
                                                      GuidComparator aComparator);
   HitTestingTreeNode* FindTargetNode(HitTestingTreeNode* aNode,
                                      const ScrollableLayerGuid& aGuid,
                                      GuidComparator aComparator);
   AsyncPanZoomController* GetAPZCAtPoint(HitTestingTreeNode* aNode,
                                          const ParentLayerPoint& aHitTestPoint,
                                          HitTestResult* aOutHitResult);
+  AsyncPanZoomController* FindRootApzcForLayersId(uint64_t aLayersId) const;
   already_AddRefed<AsyncPanZoomController> GetMultitouchTarget(AsyncPanZoomController* aApzc1, AsyncPanZoomController* aApzc2) const;
   already_AddRefed<AsyncPanZoomController> CommonAncestor(AsyncPanZoomController* aApzc1, AsyncPanZoomController* aApzc2) const;
   already_AddRefed<AsyncPanZoomController> RootAPZCForLayersId(AsyncPanZoomController* aApzc) const;
   already_AddRefed<AsyncPanZoomController> GetTouchInputBlockAPZC(const MultiTouchInput& aEvent,
                                                                   HitTestResult* aOutHitResult);
   nsEventStatus ProcessTouchInput(MultiTouchInput& aInput,
                                   ScrollableLayerGuid* aOutTargetGuid,
                                   uint64_t* aOutInputBlockId);
@@ -428,17 +429,18 @@ private:
                              uint64_t* aOutInputBlockId);
   void UpdateWheelTransaction(WidgetInputEvent& aEvent);
   void UpdateZoomConstraintsRecursively(HitTestingTreeNode* aNode,
                                         const ZoomConstraints& aConstraints);
   void FlushRepaintsToClearScreenToGeckoTransform();
   void FlushRepaintsRecursively(HitTestingTreeNode* aNode);
 
   already_AddRefed<HitTestingTreeNode> RecycleOrCreateNode(TreeBuildingState& aState,
-                                                           AsyncPanZoomController* aApzc);
+                                                           AsyncPanZoomController* aApzc,
+                                                           uint64_t aLayersId);
   HitTestingTreeNode* PrepareNodeForLayer(const LayerMetricsWrapper& aLayer,
                                           const FrameMetrics& aMetrics,
                                           uint64_t aLayersId,
                                           const gfx::Matrix4x4& aAncestorTransform,
                                           HitTestingTreeNode* aParent,
                                           HitTestingTreeNode* aNextSibling,
                                           TreeBuildingState& aState);
 
--- a/gfx/layers/apz/src/AsyncPanZoomController.cpp
+++ b/gfx/layers/apz/src/AsyncPanZoomController.cpp
@@ -2895,16 +2895,17 @@ void AsyncPanZoomController::NotifyLayer
     mFrameMetrics.SetCompositionBounds(aLayerMetrics.GetCompositionBounds());
     mFrameMetrics.SetRootCompositionSize(aLayerMetrics.GetRootCompositionSize());
     mFrameMetrics.SetPresShellResolution(aLayerMetrics.GetPresShellResolution());
     mFrameMetrics.SetCumulativeResolution(aLayerMetrics.GetCumulativeResolution());
     mFrameMetrics.SetHasScrollgrab(aLayerMetrics.GetHasScrollgrab());
     mFrameMetrics.SetLineScrollAmount(aLayerMetrics.GetLineScrollAmount());
     mFrameMetrics.SetPageScrollAmount(aLayerMetrics.GetPageScrollAmount());
     mFrameMetrics.SetClipRect(aLayerMetrics.GetClipRect());
+    mFrameMetrics.SetIsLayersIdRoot(aLayerMetrics.IsLayersIdRoot());
 
     if (scrollOffsetUpdated) {
       APZC_LOG("%p updating scroll offset from %s to %s\n", this,
         ToString(mFrameMetrics.GetScrollOffset()).c_str(),
         ToString(aLayerMetrics.GetScrollOffset()).c_str());
 
       mFrameMetrics.CopyScrollInfoFrom(aLayerMetrics);
 
--- a/gfx/layers/apz/src/AsyncPanZoomController.h
+++ b/gfx/layers/apz/src/AsyncPanZoomController.h
@@ -894,20 +894,26 @@ public:
     mParent = aParent;
   }
 
   AsyncPanZoomController* GetParent() const {
     return mParent;
   }
 
   /* Returns true if there is no APZC higher in the tree with the same
-   * layers id.
+   * layers id. Deprecated: new code should not use this, and existing
+   * callers should be migrated away from it.
    */
+  bool HasNoParentWithSameLayersId() const {
+    return !mParent || (mParent->mLayersId != mLayersId);
+  }
+
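+  /* Returns true if layout marked this APZC's FrameMetrics as the root for
+   * its layers id (see FrameMetrics::SetIsLayersIdRoot). */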
   bool IsRootForLayersId() const {
-    return !mParent || (mParent->mLayersId != mLayersId);
+    ReentrantMonitorAutoEnter lock(mMonitor);
+    return mFrameMetrics.IsLayersIdRoot();
   }
 
 private:
   // This is a raw pointer to avoid introducing a reference cycle between
   // AsyncPanZoomController and APZCTreeManager. Since these objects don't
   // live on the main thread, we can't use the cycle collector with them.
   // The APZCTreeManager owns the lifetime of the APZCs, so nulling this
   // pointer out in Destroy() will prevent accessing deleted memory.
@@ -1082,16 +1088,21 @@ public:
     mAsyncTransformAppliedToContent = true;
   }
 
   bool GetAsyncTransformAppliedToContent() const
   {
     return mAsyncTransformAppliedToContent;
   }
 
+  uint64_t GetLayersId() const
+  {
+    return mLayersId;
+  }
+
 private:
   // Extra offset to add in SampleContentTransformForFrame for testing
   CSSPoint mTestAsyncScrollOffset;
   // Extra zoom to include in SampleContentTransformForFrame for testing
   LayerToParentLayerScale mTestAsyncZoom;
   // Flag to track whether or not the APZ transform is not used. This
   // flag is recomputed for every composition frame.
   bool mAsyncTransformAppliedToContent;
--- a/gfx/layers/apz/src/HitTestingTreeNode.cpp
+++ b/gfx/layers/apz/src/HitTestingTreeNode.cpp
@@ -13,32 +13,38 @@
 #include "mozilla/layers/AsyncCompositionManager.h"     // for ViewTransform::operator Matrix4x4()
 #include "nsPrintfCString.h"                            // for nsPrintfCString
 #include "UnitTransforms.h"                             // for ViewAs
 
 namespace mozilla {
 namespace layers {
 
 HitTestingTreeNode::HitTestingTreeNode(AsyncPanZoomController* aApzc,
-                                       bool aIsPrimaryHolder)
+                                       bool aIsPrimaryHolder,
+                                       uint64_t aLayersId)
   : mApzc(aApzc)
   , mIsPrimaryApzcHolder(aIsPrimaryHolder)
+  , mLayersId(aLayersId)
   , mOverride(EventRegionsOverride::NoOverride)
 {
   if (mIsPrimaryApzcHolder) {
     MOZ_ASSERT(mApzc);
   }
+  MOZ_ASSERT(!mApzc || mApzc->GetLayersId() == mLayersId);
 }
 
 void
-HitTestingTreeNode::RecycleWith(AsyncPanZoomController* aApzc)
+HitTestingTreeNode::RecycleWith(AsyncPanZoomController* aApzc,
+                                uint64_t aLayersId)
 {
   MOZ_ASSERT(!mIsPrimaryApzcHolder);
   Destroy(); // clear out tree pointers
   mApzc = aApzc;
+  mLayersId = aLayersId;
+  MOZ_ASSERT(!mApzc || mApzc->GetLayersId() == mLayersId);
   // The caller is expected to call SetHitTestData to repopulate the hit-test
   // fields.
 }
 
 HitTestingTreeNode::~HitTestingTreeNode()
 {
 }
 
@@ -52,16 +58,18 @@ HitTestingTreeNode::Destroy()
   mParent = nullptr;
 
   if (mApzc) {
     if (mIsPrimaryApzcHolder) {
       mApzc->Destroy();
     }
     mApzc = nullptr;
   }
+
+  mLayersId = 0;
 }
 
 void
 HitTestingTreeNode::SetLastChild(HitTestingTreeNode* aChild)
 {
   mLastChild = aChild;
   if (aChild) {
     aChild->mParent = this;
@@ -142,22 +150,41 @@ HitTestingTreeNode::GetNearestContaining
   for (const HitTestingTreeNode* n = this; n; n = n->GetParent()) {
     if (n->GetApzc()) {
       return n->GetApzc();
     }
   }
   return nullptr;
 }
 
+AsyncPanZoomController*
+HitTestingTreeNode::GetNearestContainingApzcWithSameLayersId() const
+{
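+  // Walk up the tree, stopping before we cross into a different layers id.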
+  for (const HitTestingTreeNode* n = this;
+       n && n->mLayersId == mLayersId;
+       n = n->GetParent()) {
+    if (n->GetApzc()) {
+      return n->GetApzc();
+    }
+  }
+  return nullptr;
+}
+
 bool
 HitTestingTreeNode::IsPrimaryHolder() const
 {
   return mIsPrimaryApzcHolder;
 }
 
+uint64_t
+HitTestingTreeNode::GetLayersId() const
+{
+  return mLayersId;
+}
+
 void
 HitTestingTreeNode::SetHitTestData(const EventRegions& aRegions,
                                    const gfx::Matrix4x4& aTransform,
                                    const Maybe<ParentLayerIntRegion>& aClipRegion,
                                    const EventRegionsOverride& aOverride)
 {
   mEventRegions = aRegions;
   mTransform = aTransform;
@@ -224,17 +251,18 @@ HitTestingTreeNode::GetEventRegionsOverr
 
 void
 HitTestingTreeNode::Dump(const char* aPrefix) const
 {
   if (mPrevSibling) {
     mPrevSibling->Dump(aPrefix);
   }
   printf_stderr("%sHitTestingTreeNode (%p) APZC (%p) g=(%s) %s%sr=(%s) t=(%s) c=(%s)\n",
-    aPrefix, this, mApzc.get(), mApzc ? Stringify(mApzc->GetGuid()).c_str() : "",
+    aPrefix, this, mApzc.get(),
+    mApzc ? Stringify(mApzc->GetGuid()).c_str() : nsPrintfCString("l=%" PRIu64, mLayersId).get(),
     (mOverride & EventRegionsOverride::ForceDispatchToContent) ? "fdtc " : "",
     (mOverride & EventRegionsOverride::ForceEmptyHitRegion) ? "fehr " : "",
     Stringify(mEventRegions).c_str(), Stringify(mTransform).c_str(),
     mClipRegion ? Stringify(mClipRegion.ref()).c_str() : "none");
   if (mLastChild) {
     mLastChild->Dump(nsPrintfCString("%s  ", aPrefix).get());
   }
 }
--- a/gfx/layers/apz/src/HitTestingTreeNode.h
+++ b/gfx/layers/apz/src/HitTestingTreeNode.h
@@ -48,18 +48,19 @@ class AsyncPanZoomController;
  * properties into a separate tree.
  */
 class HitTestingTreeNode {
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(HitTestingTreeNode);
 
 private:
   ~HitTestingTreeNode();
 public:
-  HitTestingTreeNode(AsyncPanZoomController* aApzc, bool aIsPrimaryHolder);
-  void RecycleWith(AsyncPanZoomController* aApzc);
+  HitTestingTreeNode(AsyncPanZoomController* aApzc, bool aIsPrimaryHolder,
+                     uint64_t aLayersId);
+  void RecycleWith(AsyncPanZoomController* aApzc, uint64_t aLayersId);
   void Destroy();
 
   /* Tree construction methods */
 
   void SetLastChild(HitTestingTreeNode* aChild);
   void SetPrevSibling(HitTestingTreeNode* aSibling);
   void MakeRoot();
 
@@ -70,17 +71,19 @@ public:
   HitTestingTreeNode* GetLastChild() const;
   HitTestingTreeNode* GetPrevSibling() const;
   HitTestingTreeNode* GetParent() const;
 
   /* APZC related methods */
 
   AsyncPanZoomController* GetApzc() const;
   AsyncPanZoomController* GetNearestContainingApzc() const;
+  AsyncPanZoomController* GetNearestContainingApzcWithSameLayersId() const;
   bool IsPrimaryHolder() const;
+  uint64_t GetLayersId() const;
 
   /* Hit test related methods */
 
   void SetHitTestData(const EventRegions& aRegions,
                       const gfx::Matrix4x4& aTransform,
                       const Maybe<ParentLayerIntRegion>& aClipRegion,
                       const EventRegionsOverride& aOverride);
   bool IsOutsideClip(const ParentLayerPoint& aPoint) const;
@@ -101,16 +104,18 @@ private:
 
   nsRefPtr<HitTestingTreeNode> mLastChild;
   nsRefPtr<HitTestingTreeNode> mPrevSibling;
   nsRefPtr<HitTestingTreeNode> mParent;
 
   nsRefPtr<AsyncPanZoomController> mApzc;
   bool mIsPrimaryApzcHolder;
 
+  uint64_t mLayersId;
+
   /* Let {L,M} be the {layer, scrollable metrics} pair that this node
    * corresponds to in the layer tree. mEventRegions contains the event regions
    * from L, in the case where event-regions are enabled. If event-regions are
    * disabled, it will contain the visible region of L, which we use as an
    * approximation to the hit region for the purposes of obscuring other layers.
    * This value is in L's LayerPixels.
    */
   EventRegions mEventRegions;
--- a/gfx/layers/apz/test/apz_test_utils.js
+++ b/gfx/layers/apz/test/apz_test_utils.js
@@ -86,16 +86,16 @@ function addRoot(root, id) {
 // content process that triggered the paint, is reconstructed (as
  // the APZ test data only contains information about this subtree).
 function buildApzcTree(paint) {
   // The APZC tree can potentially have multiple root nodes,
   // so we invent a node that is the parent of all roots.
   // This 'root' does not correspond to an APZC.
   var root = makeNode(-1);
   for (var scrollId in paint) {
-    if ("isRootForLayersId" in paint[scrollId]) {
+    if ("hasNoParentWithSameLayersId" in paint[scrollId]) {
       addRoot(root, scrollId);
     } else if ("parentScrollId" in paint[scrollId]) {
       addLink(root, scrollId, paint[scrollId]["parentScrollId"]);
     }
   }
   return root;
 }
--- a/gfx/tests/gtest/TestAsyncPanZoomController.cpp
+++ b/gfx/tests/gtest/TestAsyncPanZoomController.cpp
@@ -1889,16 +1889,20 @@ protected:
 
   nsRefPtr<TestAPZCTreeManager> manager;
 
 protected:
   static void SetScrollableFrameMetrics(Layer* aLayer, FrameMetrics::ViewID aScrollId,
                                         CSSRect aScrollableRect = CSSRect(-1, -1, -1, -1)) {
     FrameMetrics metrics;
     metrics.SetScrollId(aScrollId);
+    // By convention in this test file, START_SCROLL_ID is the root, so mark it as such.
+    if (aScrollId == FrameMetrics::START_SCROLL_ID) {
+      metrics.SetIsLayersIdRoot(true);
+    }
     IntRect layerBound = aLayer->GetVisibleRegion().GetBounds();
     metrics.SetCompositionBounds(ParentLayerRect(layerBound.x, layerBound.y,
                                                  layerBound.width, layerBound.height));
     metrics.SetScrollableRect(aScrollableRect);
     metrics.SetScrollOffset(CSSPoint(0, 0));
     metrics.SetPageScrollAmount(LayoutDeviceIntSize(50, 100));
     metrics.SetAllowVerticalScrollWithWheel();
     aLayer->SetFrameMetrics(metrics);
@@ -2859,26 +2863,28 @@ protected:
     registration = MakeUnique<ScopedLayerTreeRegistration>(0, root, mcc);
     manager->UpdateHitTestingTree(nullptr, root, false, 0, 0);
     rootApzc = ApzcOf(root);
   }
 
   void CreateBug1119497LayerTree() {
     const char* layerTreeSyntax = "c(tt)";
     // LayerID                     0 12
-    // 0 is the root and doesn't have an APZC
-    // 1 is behind 2 and does have an APZC
-    // 2 entirely covers 1 and should take all the input events
+    // 0 is the root and has an APZC
+    // 1 is behind 2 and has an APZC
+    // 2 entirely covers 1 and should take all the input events, but has no APZC
+    // so hits to 2 should go to the root APZC
     nsIntRegion layerVisibleRegions[] = {
       nsIntRegion(IntRect(0, 0, 100, 100)),
       nsIntRegion(IntRect(0, 0, 100, 100)),
       nsIntRegion(IntRect(0, 0, 100, 100)),
     };
     root = CreateLayerTree(layerTreeSyntax, layerVisibleRegions, nullptr, lm, layers);
 
+    SetScrollableFrameMetrics(root, FrameMetrics::START_SCROLL_ID);
     SetScrollableFrameMetrics(layers[1], FrameMetrics::START_SCROLL_ID + 1);
 
     registration = MakeUnique<ScopedLayerTreeRegistration>(0, root, mcc);
     manager->UpdateHitTestingTree(nullptr, root, false, 0, 0);
   }
 
   void CreateBug1117712LayerTree() {
     const char* layerTreeSyntax = "c(c(t)t)";
@@ -3001,18 +3007,18 @@ TEST_F(APZEventRegionsTester, Obscuratio
 }
 
 TEST_F(APZEventRegionsTester, Bug1119497) {
   CreateBug1119497LayerTree();
 
   HitTestResult result;
   nsRefPtr<AsyncPanZoomController> hit = manager->GetTargetAPZC(ScreenPoint(50, 50), &result);
   // We should hit layers[2], so |result| will be HitLayer but there's no
-  // actual APZC in that parent chain, so |hit| should be nullptr.
-  EXPECT_EQ(nullptr, hit.get());
+  // actual APZC on layers[2], so it will be the APZC of the root layer.
+  EXPECT_EQ(ApzcOf(layers[0]), hit.get());
   EXPECT_EQ(HitTestResult::HitLayer, result);
 }
 
 TEST_F(APZEventRegionsTester, Bug1117712) {
   CreateBug1117712LayerTree();
 
   TestAsyncPanZoomController* apzc2 = ApzcOf(layers[2]);
 
--- a/gfx/tests/gtest/moz.build
+++ b/gfx/tests/gtest/moz.build
@@ -27,16 +27,17 @@ UNIFIED_SOURCES += [
 ]
 
 # Because of gkmedia on windows we won't find these
 # symbols in xul.dll.
 if CONFIG['MOZ_WIDGET_TOOLKIT'] != 'windows':
     UNIFIED_SOURCES += [ '/gfx/2d/unittest/%s' % p for p in [
         'TestBase.cpp',
         'TestBugs.cpp',
+        'TestCairo.cpp',
         'TestPoint.cpp',
         'TestScaling.cpp',
     ]]
     UNIFIED_SOURCES += [
         'TestMoz2D.cpp',
         'TestRect.cpp',
     ]
 
--- a/gfx/thebes/gfxFont.cpp
+++ b/gfx/thebes/gfxFont.cpp
@@ -2244,17 +2244,17 @@ gfxFont::Measure(gfxTextRun *aTextRun,
             x += direction*advance;
         } else {
             allGlyphsInvisible = false;
             uint32_t glyphCount = glyphData->GetGlyphCount();
             if (glyphCount > 0) {
                 const gfxTextRun::DetailedGlyph *details =
                     aTextRun->GetDetailedGlyphs(i);
                 NS_ASSERTION(details != nullptr,
-                             "detaiedGlyph record should not be missing!");
+                             "detailedGlyph record should not be missing!");
                 uint32_t j;
                 for (j = 0; j < glyphCount; ++j, ++details) {
                     uint32_t glyphIndex = details->mGlyphID;
                     gfxPoint glyphPt(x + details->mXOffset, details->mYOffset);
                     double advance = details->mAdvance;
                     gfxRect glyphRect;
                     if (glyphData->IsMissing() || !extents ||
                         !extents->GetTightGlyphExtentsAppUnits(this,
--- a/gfx/thebes/gfxGDIFont.cpp
+++ b/gfx/thebes/gfxGDIFont.cpp
@@ -332,36 +332,37 @@ gfxGDIFont::Initialize()
                         ROUND((ascent - descent + lineGap) * mFUnitsConvFactor);
                     lineHeight = std::max(lineHeight, mMetrics->maxHeight);
                     mMetrics->externalLeading =
                         lineHeight - mMetrics->maxHeight;
                 }
             }
         }
 
-        // Cache the width of a single space.
+        WORD glyph;
         SIZE size;
-        GetTextExtentPoint32W(dc.GetDC(), L" ", 1, &size);
-        mMetrics->spaceWidth = ROUND(size.cx);
-
-        // Cache the width of digit zero.
-        // XXX MSDN (http://msdn.microsoft.com/en-us/library/ms534223.aspx)
-        // does not say what the failure modes for GetTextExtentPoint32 are -
-        // is it safe to assume it will fail iff the font has no '0'?
-        if (GetTextExtentPoint32W(dc.GetDC(), L"0", 1, &size)) {
-            mMetrics->zeroOrAveCharWidth = ROUND(size.cx);
-        } else {
-            mMetrics->zeroOrAveCharWidth = mMetrics->aveCharWidth;
-        }
-
-        WORD glyph;
         DWORD ret = GetGlyphIndicesW(dc.GetDC(), L" ", 1, &glyph,
                                      GGI_MARK_NONEXISTING_GLYPHS);
         if (ret != GDI_ERROR && glyph != 0xFFFF) {
             mSpaceGlyph = glyph;
+            // Cache the width of a single space.
+            GetTextExtentPoint32W(dc.GetDC(), L" ", 1, &size);
+            mMetrics->spaceWidth = ROUND(size.cx);
+        } else {
+            mMetrics->spaceWidth = mMetrics->aveCharWidth;
+        }
+
+        // Cache the width of digit zero, if available.
+        ret = GetGlyphIndicesW(dc.GetDC(), L"0", 1, &glyph,
+                               GGI_MARK_NONEXISTING_GLYPHS);
+        if (ret != GDI_ERROR && glyph != 0xFFFF) {
+            GetTextExtentPoint32W(dc.GetDC(), L"0", 1, &size);
+            mMetrics->zeroOrAveCharWidth = ROUND(size.cx);
+        } else {
+            mMetrics->zeroOrAveCharWidth = mMetrics->aveCharWidth;
         }
 
         SanitizeMetrics(mMetrics, GetFontEntry()->mIsBadUnderlineFont);
     }
 
     if (IsSyntheticBold()) {
         mMetrics->aveCharWidth += GetSyntheticBoldOffset();
         mMetrics->maxAdvance += GetSyntheticBoldOffset();
--- a/gfx/thebes/gfxHarfBuzzShaper.cpp
+++ b/gfx/thebes/gfxHarfBuzzShaper.cpp
@@ -1786,20 +1786,28 @@ gfxHarfBuzzShaper::SetGlyphsFromRun(gfxC
             gfxTextRun::CompressedGlyph::IsSimpleAdvance(advance) &&
             charGlyphs[baseCharIndex].IsClusterStart() &&
             iOffset == 0 && b_offset == 0 &&
             b_advance == 0 && bPos == 0)
         {
             charGlyphs[baseCharIndex].SetSimpleGlyph(advance,
                                                      ginfo[glyphStart].codepoint);
         } else {
-            // collect all glyphs in a list to be assigned to the first char;
+            // Collect all glyphs in a list to be assigned to the first char;
             // there must be at least one in the clump, and we already measured
             // its advance, hence the placement of the loop-exit test and the
-            // measurement of the next glyph
+            // measurement of the next glyph.
+            // For vertical orientation, we add a "base offset" to compensate
+            // for the positioning within the cluster being based on horizontal
+            // glyph origin/offset.
+            hb_position_t baseIOffset, baseBOffset;
+            if (aVertical) {
+                baseIOffset = 2 * (i_offset - i_advance);
+                baseBOffset = GetGlyphHAdvance(ginfo[glyphStart].codepoint);
+            }
             while (1) {
                 gfxTextRun::DetailedGlyph* details =
                     detailedGlyphs.AppendElement();
                 details->mGlyphID = ginfo[glyphStart].codepoint;
 
                 details->mXOffset = iOffset;
                 details->mAdvance = advance;
 
@@ -1812,19 +1820,19 @@ gfxHarfBuzzShaper::SetGlyphsFromRun(gfxC
                         roundB ? appUnitsPerDevUnit * FixedToIntRound(b_advance)
                         : floor(hb2appUnits * b_advance + 0.5);
                 }
                 if (++glyphStart >= glyphEnd) {
                     break;
                 }
 
                 if (aVertical) {
-                    i_offset = posInfo[glyphStart].y_offset;
+                    i_offset = baseIOffset - posInfo[glyphStart].y_offset;
                     i_advance = posInfo[glyphStart].y_advance;
-                    b_offset = posInfo[glyphStart].x_offset;
+                    b_offset = baseBOffset - posInfo[glyphStart].x_offset;
                     b_advance = posInfo[glyphStart].x_advance;
                 } else {
                     i_offset = posInfo[glyphStart].x_offset;
                     i_advance = posInfo[glyphStart].x_advance;
                     b_offset = posInfo[glyphStart].y_offset;
                     b_advance = posInfo[glyphStart].y_advance;
                 }
 
--- a/js/public/GCAPI.h
+++ b/js/public/GCAPI.h
@@ -331,17 +331,18 @@ enum GCProgress {
 
 struct JS_PUBLIC_API(GCDescription) {
     bool isCompartment_;
     JSGCInvocationKind invocationKind_;
 
     GCDescription(bool isCompartment, JSGCInvocationKind kind)
       : isCompartment_(isCompartment), invocationKind_(kind) {}
 
-    char16_t* formatMessage(JSRuntime* rt) const;
+    char16_t* formatSliceMessage(JSRuntime* rt) const;
+    char16_t* formatSummaryMessage(JSRuntime* rt) const;
     char16_t* formatJSON(JSRuntime* rt, uint64_t timestamp) const;
 
     JS::dbg::GarbageCollectionEvent::Ptr toGCEvent(JSRuntime* rt) const;
 };
 
 typedef void
 (* GCSliceCallback)(JSRuntime* rt, GCProgress progress, const GCDescription& desc);
 
--- a/js/public/RootingAPI.h
+++ b/js/public/RootingAPI.h
@@ -71,17 +71,17 @@
  *   then redundant rooting of multiple copies of the GC thing can be avoided.
  *   Second, if the caller does not pass a rooted value a compile error will be
  *   generated, which is quicker and easier to fix than when relying on a
  *   separate rooting analysis.
  *
  * - MutableHandle<T> is a non-const reference to Rooted<T>. It is used in the
  *   same way as Handle<T> and includes a |set(const T& v)| method to allow
  *   updating the value of the referenced Rooted<T>. A MutableHandle<T> can be
- *   created from a Rooted<T> by using |Rooted<T>::operator&()|.
+ *   created with an implicit cast from a Rooted<T>*.
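+ *   For example, a function taking a MutableHandle<T> can simply be passed
+ *   |&rooted|, where |rooted| is a Rooted<T> in the caller's frame.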
  *
  * In some cases the small performance overhead of exact rooting (measured to
  * be a few nanoseconds on desktop) is too much. In these cases, try the
  * following:
  *
  * - Move all Rooted<T> above inner loops: this allows you to re-use the root
  *   on each iteration of the loop.
  *
@@ -1180,12 +1180,10 @@ CallTraceCallbackOnNonHeap(T* v, const T
     JS::Heap<T>* asHeapT = reinterpret_cast<JS::Heap<T>*>(v);
     aCallbacks.Trace(asHeapT, aName, aClosure);
 }
 
 } /* namespace gc */
 } /* namespace js */
 
 #undef DELETE_ASSIGNMENT_OPS
-#undef DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS
-#undef DECLARE_NONPOINTER_ACCESSOR_METHODS
 
 #endif  /* js_RootingAPI_h */
--- a/js/src/gc/Statistics.cpp
+++ b/js/src/gc/Statistics.cpp
@@ -28,220 +28,16 @@ using namespace js::gc;
 using namespace js::gcstats;
 
 using mozilla::PodArrayZero;
 using mozilla::PodZero;
 
 /* Except for the first and last, slices of less than 10ms are not reported. */
 static const int64_t SLICE_MIN_REPORT_TIME = 10 * PRMJ_USEC_PER_MSEC;
 
-class gcstats::StatisticsSerializer
-{
-    typedef Vector<char, 128, SystemAllocPolicy> CharBuffer;
-    CharBuffer buf_;
-    bool asJSON_;
-    bool needComma_;
-    bool oom_;
-
-    static const int MaxFieldValueLength = 128;
-
-  public:
-    enum Mode {
-        AsJSON = true,
-        AsText = false
-    };
-
-    explicit StatisticsSerializer(Mode asJSON)
-      : buf_(), asJSON_(asJSON), needComma_(false), oom_(false)
-    {}
-
-    bool isJSON() { return asJSON_; }
-
-    bool isOOM() { return oom_; }
-
-    void endLine() {
-        if (!asJSON_) {
-            p("\n");
-            needComma_ = false;
-        }
-    }
-
-    void extra(const char* str) {
-        if (!asJSON_) {
-            needComma_ = false;
-            p(str);
-        }
-    }
-
-    void appendString(const char* name, const char* value) {
-        put(name, value, "", true);
-    }
-
-    void appendNumber(const char* name, const char* vfmt, const char* units, ...) {
-        va_list va;
-        va_start(va, units);
-        append(name, vfmt, va, units);
-        va_end(va);
-    }
-
-    void appendDecimal(const char* name, const char* units, double d) {
-        if (d < 0)
-            d = 0;
-        if (asJSON_)
-            appendNumber(name, "%d.%03d", units, (int)d, (int)(d * 1000.) % 1000);
-        else
-            appendNumber(name, "%.1f", units, d);
-    }
-
-    void appendIfNonzeroMS(const char* name, double v) {
-        if (asJSON_ || v >= 0.1)
-            appendDecimal(name, "ms", v);
-    }
-
-    void beginObject(const char* name) {
-        if (needComma_)
-            pJSON(", ");
-        if (asJSON_ && name) {
-            putKey(name);
-            pJSON(": ");
-        }
-        pJSON("{");
-        needComma_ = false;
-    }
-
-    void endObject() {
-        needComma_ = false;
-        pJSON("}");
-        needComma_ = true;
-    }
-
-    void beginArray(const char* name) {
-        if (needComma_)
-            pJSON(", ");
-        if (asJSON_)
-            putKey(name);
-        pJSON(": [");
-        needComma_ = false;
-    }
-
-    void endArray() {
-        needComma_ = false;
-        pJSON("]");
-        needComma_ = true;
-    }
-
-    char16_t* finishJSString() {
-        char* buf = finishCString();
-        if (!buf)
-            return nullptr;
-
-        size_t nchars = strlen(buf);
-        char16_t* out = js_pod_malloc<char16_t>(nchars + 1);
-        if (!out) {
-            oom_ = true;
-            js_free(buf);
-            return nullptr;
-        }
-
-        CopyAndInflateChars(out, buf, nchars);
-        js_free(buf);
-
-        out[nchars] = 0;
-        return out;
-    }
-
-    char* finishCString() {
-        if (oom_)
-            return nullptr;
-
-        buf_.append('\0');
-
-        char* buf = buf_.extractRawBuffer();
-        if (!buf)
-            oom_ = true;
-
-        return buf;
-    }
-
-  private:
-    void append(const char* name, const char* vfmt,
-                va_list va, const char* units)
-    {
-        char val[MaxFieldValueLength];
-        JS_vsnprintf(val, MaxFieldValueLength, vfmt, va);
-        put(name, val, units, false);
-    }
-
-    void p(const char* cstr) {
-        if (oom_)
-            return;
-
-        if (!buf_.append(cstr, strlen(cstr)))
-            oom_ = true;
-    }
-
-    void p(const char c) {
-        if (oom_)
-            return;
-
-        if (!buf_.append(c))
-            oom_ = true;
-    }
-
-    void pJSON(const char* str) {
-        if (asJSON_)
-            p(str);
-    }
-
-    void put(const char* name, const char* val, const char* units, bool valueIsQuoted) {
-        if (needComma_)
-            p(", ");
-        needComma_ = true;
-
-        putKey(name);
-        p(": ");
-        if (valueIsQuoted)
-            putQuoted(val);
-        else
-            p(val);
-        if (!asJSON_)
-            p(units);
-    }
-
-    void putQuoted(const char* str) {
-        pJSON("\"");
-        p(str);
-        pJSON("\"");
-    }
-
-    void putKey(const char* str) {
-        if (!asJSON_) {
-            p(str);
-            return;
-        }
-
-        p("\"");
-        const char* c = str;
-        while (*c) {
-            if (*c == ' ' || *c == '\t')
-                p('_');
-            else if (isupper(*c))
-                p(tolower(*c));
-            else if (*c == '+')
-                p("added_");
-            else if (*c == '-')
-                p("removed_");
-            else if (*c != '(' && *c != ')')
-                p(*c);
-            c++;
-        }
-        p("\"");
-    }
-};
-
 /*
  * If this fails, then you can either delete this assertion and allow all
  * larger-numbered reasons to pile up in the last telemetry bucket, or switch
  * to GC_REASON_3 and bump the max value.
  */
 JS_STATIC_ASSERT(JS::gcreason::NUM_TELEMETRY_REASONS >= JS::gcreason::NUM_REASONS);
 
 const char*
@@ -438,35 +234,21 @@ struct AllPhaseIterator {
         ++current;
     }
 
     bool done() const {
         return phases[current].parent == PHASE_MULTI_PARENTS;
     }
 };
 
-static void
-FormatPhaseTimes(StatisticsSerializer& ss, const char* name, Statistics::PhaseTimeTable times)
-{
-    ss.beginObject(name);
-
-    for (AllPhaseIterator iter(times); !iter.done(); iter.advance()) {
-        Phase phase;
-        size_t dagSlot;
-        iter.get(&phase, &dagSlot);
-        ss.appendIfNonzeroMS(phases[phase].name, t(times[dagSlot][phase]));
-    }
-    ss.endObject();
-}
-
 void
-Statistics::gcDuration(int64_t* total, int64_t* maxPause)
+Statistics::gcDuration(int64_t* total, int64_t* maxPause) const
 {
     *total = *maxPause = 0;
-    for (SliceData* slice = slices.begin(); slice != slices.end(); slice++) {
+    for (const SliceData* slice = slices.begin(); slice != slices.end(); slice++) {
         *total += slice->duration();
         if (slice->duration() > *maxPause)
             *maxPause = slice->duration();
     }
     if (*maxPause > maxPauseInInterval)
         maxPauseInInterval = *maxPause;
 }
 
@@ -475,100 +257,16 @@ Statistics::sccDurations(int64_t* total,
 {
     *total = *maxPause = 0;
     for (size_t i = 0; i < sccTimes.length(); i++) {
         *total += sccTimes[i];
         *maxPause = Max(*maxPause, sccTimes[i]);
     }
 }
 
-bool
-Statistics::formatData(StatisticsSerializer& ss, uint64_t timestamp)
-{
-    MOZ_ASSERT(!aborted);
-
-    int64_t total, longest;
-    gcDuration(&total, &longest);
-
-    int64_t sccTotal, sccLongest;
-    sccDurations(&sccTotal, &sccLongest);
-
-    double mmu20 = computeMMU(20 * PRMJ_USEC_PER_MSEC);
-    double mmu50 = computeMMU(50 * PRMJ_USEC_PER_MSEC);
-
-    ss.beginObject(nullptr);
-    if (ss.isJSON())
-        ss.appendNumber("Timestamp", "%llu", "", (unsigned long long)timestamp);
-    if (slices.length() > 1 || ss.isJSON())
-        ss.appendDecimal("Max Pause", "ms", t(longest));
-    else
-        ss.appendString("Reason", ExplainReason(slices[0].reason));
-    ss.appendDecimal("Total Time", "ms", t(total));
-    ss.appendNumber("Zones Collected", "%d", "", zoneStats.collectedZoneCount);
-    ss.appendNumber("Total Zones", "%d", "", zoneStats.zoneCount);
-    ss.appendNumber("Total Compartments", "%d", "", zoneStats.compartmentCount);
-    ss.appendNumber("Minor GCs", "%d", "", counts[STAT_MINOR_GC]);
-    ss.appendNumber("Store Buffer Overflows", "%d", "", counts[STAT_STOREBUFFER_OVERFLOW]);
-    ss.appendNumber("MMU (20ms)", "%d", "%", int(mmu20 * 100));
-    ss.appendNumber("MMU (50ms)", "%d", "%", int(mmu50 * 100));
-    ss.appendDecimal("SCC Sweep Total", "ms", t(sccTotal));
-    ss.appendDecimal("SCC Sweep Max Pause", "ms", t(sccLongest));
-    if (nonincrementalReason_ || ss.isJSON()) {
-        ss.appendString("Nonincremental Reason",
-                        nonincrementalReason_ ? nonincrementalReason_ : "none");
-    }
-    ss.appendNumber("Allocated", "%u", "MB", unsigned(preBytes / 1024 / 1024));
-    ss.appendNumber("+Chunks", "%d", "", counts[STAT_NEW_CHUNK]);
-    ss.appendNumber("-Chunks", "%d", "", counts[STAT_DESTROY_CHUNK]);
-    ss.endLine();
-
-    if (slices.length() > 1 || ss.isJSON()) {
-        ss.beginArray("Slices");
-        for (size_t i = 0; i < slices.length(); i++) {
-            int64_t width = slices[i].duration();
-            if (i != 0 && i != slices.length() - 1 && width < SLICE_MIN_REPORT_TIME &&
-                !slices[i].resetReason && !ss.isJSON())
-            {
-                continue;
-            }
-
-            char budgetDescription[200];
-            slices[i].budget.describe(budgetDescription, sizeof(budgetDescription) - 1);
-
-            ss.beginObject(nullptr);
-            ss.extra("    ");
-            ss.appendNumber("Slice", "%d", "", i);
-            ss.appendDecimal("Pause", "", t(width));
-            ss.extra(" (");
-            ss.appendDecimal("When", "ms", t(slices[i].start - slices[0].start));
-            ss.appendString("Reason", ExplainReason(slices[i].reason));
-            ss.appendString("Budget", budgetDescription);
-            if (ss.isJSON()) {
-                ss.appendDecimal("Page Faults", "",
-                                 double(slices[i].endFaults - slices[i].startFaults));
-
-                ss.appendNumber("Start Timestamp", "%llu", "", (unsigned long long)slices[i].start);
-                ss.appendNumber("End Timestamp", "%llu", "", (unsigned long long)slices[i].end);
-            }
-            if (slices[i].resetReason)
-                ss.appendString("Reset", slices[i].resetReason);
-            ss.extra("): ");
-            FormatPhaseTimes(ss, "Times", slices[i].phaseTimes);
-            ss.endLine();
-            ss.endObject();
-        }
-        ss.endArray();
-    }
-    ss.extra("    Totals: ");
-    FormatPhaseTimes(ss, "Totals", phaseTimes);
-    ss.endObject();
-
-    return !ss.isOOM();
-}
-
 typedef Vector<UniqueChars, 8, SystemAllocPolicy> FragmentVector;
 
 static UniqueChars
 Join(const FragmentVector& fragments, const char* separator = "") {
     const size_t separatorLength = strlen(separator);
     size_t length = 0;
     for (size_t i = 0; i < fragments.length(); ++i) {
         length += fragments[i] ? strlen(fragments[i].get()) : 0;
@@ -613,16 +311,129 @@ SumChildTimes(size_t phaseSlot, Phase ph
                 total += phaseTimes[dagSlot][child];
             }
         }
     }
     return total;
 }
 
 UniqueChars
+Statistics::formatCompactSliceMessage() const
+{
+    // Skip if we OOM'ed.
+    if (slices.length() == 0)
+        return UniqueChars(nullptr);
+
+    const size_t index = slices.length() - 1;
+    const SliceData& slice = slices[index];
+
+    char budgetDescription[200];
+    slice.budget.describe(budgetDescription, sizeof(budgetDescription) - 1);
+
+    const char* format =
+        "GC Slice %u - Pause: %.3fms of %s budget (@ %.3fms); Reason: %s; Reset: %s%s; Times: ";
+    char buffer[1024];
+    memset(buffer, 0, sizeof(buffer));
+    JS_snprintf(buffer, sizeof(buffer), format, index,
+                t(slice.duration()), budgetDescription, t(slice.start - slices[0].start),
+                ExplainReason(slice.reason),
+                slice.resetReason ? "yes - " : "no", slice.resetReason ? slice.resetReason : "");
+
+    FragmentVector fragments;
+    if (!fragments.append(make_string_copy(buffer)) ||
+        !fragments.append(formatCompactSlicePhaseTimes(slices[index].phaseTimes)))
+    {
+        return UniqueChars(nullptr);
+    }
+    return Join(fragments);
+}
+
+UniqueChars
+Statistics::formatCompactSummaryMessage() const
+{
+    const double bytesPerMiB = 1024 * 1024;
+
+    FragmentVector fragments;
+    if (!fragments.append(make_string_copy("Summary - ")))
+        return UniqueChars(nullptr);
+
+    int64_t total, longest;
+    gcDuration(&total, &longest);
+
+    const double mmu20 = computeMMU(20 * PRMJ_USEC_PER_MSEC);
+    const double mmu50 = computeMMU(50 * PRMJ_USEC_PER_MSEC);
+
+    char buffer[1024];
+    if (!nonincrementalReason_) {
+        JS_snprintf(buffer, sizeof(buffer),
+                    "Max Pause: %.3fms; MMU 20ms: %.1f%%; MMU 50ms: %.1f%%; Total: %.3fms; ",
+                    t(longest), mmu20 * 100., mmu50 * 100., t(total));
+    } else {
+        JS_snprintf(buffer, sizeof(buffer), "Non-Incremental: %.3fms; ", t(total));
+    }
+    if (!fragments.append(make_string_copy(buffer)))
+        return UniqueChars(nullptr);
+
+    JS_snprintf(buffer, sizeof(buffer),
+                "Zones: %d of %d; Compartments: %d of %d; HeapSize: %.3f MiB; "\
+                "HeapChange (abs): %+d (%d); ",
+                zoneStats.collectedZoneCount, zoneStats.zoneCount,
+                zoneStats.collectedCompartmentCount, zoneStats.compartmentCount,
+                double(preBytes) / bytesPerMiB,
+                counts[STAT_NEW_CHUNK] - counts[STAT_DESTROY_CHUNK],
+                counts[STAT_NEW_CHUNK] + counts[STAT_DESTROY_CHUNK]);
+    if (!fragments.append(make_string_copy(buffer)))
+        return UniqueChars(nullptr);
+
+    MOZ_ASSERT_IF(counts[STAT_ARENA_RELOCATED], gckind == GC_SHRINK);
+    if (gckind == GC_SHRINK) {
+        JS_snprintf(buffer, sizeof(buffer),
+                    "Kind: %s; Relocated: %.3f MiB; ",
+                    ExplainInvocationKind(gckind),
+                    double(ArenaSize * counts[STAT_ARENA_RELOCATED]) / bytesPerMiB);
+        if (!fragments.append(make_string_copy(buffer)))
+            return UniqueChars(nullptr);
+    }
+
+    return Join(fragments);
+}
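
For reference, the compact formatters above each produce a one-line log entry along these lines (the values below are invented; only the layout follows the format strings):

    GC Slice 3 - Pause: 8.250ms of 10ms budget (@ 41.700ms); Reason: ALLOC_TRIGGER; Reset: no; Times: Mark: 6.120ms, Sweep: 1.800ms
    Summary - Max Pause: 8.250ms; MMU 20ms: 58.8%; MMU 50ms: 83.5%; Total: 31.400ms; Zones: 2 of 5; Compartments: 14 of 60; HeapSize: 12.500 MiB; HeapChange (abs): +3 (9);
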
+
+UniqueChars
+Statistics::formatCompactSlicePhaseTimes(PhaseTimeTable phaseTimes) const
+{
+    static const int64_t MaxUnaccountedTimeUS = 100;
+
+    FragmentVector fragments;
+    char buffer[128];
+    for (AllPhaseIterator iter(phaseTimes); !iter.done(); iter.advance()) {
+        Phase phase;
+        size_t dagSlot;
+        size_t level;
+        iter.get(&phase, &dagSlot, &level);
+        MOZ_ASSERT(level < 4);
+
+        int64_t ownTime = phaseTimes[dagSlot][phase];
+        int64_t childTime = SumChildTimes(dagSlot, phase, phaseTimes);
+        if (ownTime > MaxUnaccountedTimeUS) {
+            JS_snprintf(buffer, sizeof(buffer), "%s: %.3fms", phases[phase].name, t(ownTime));
+            if (!fragments.append(make_string_copy(buffer)))
+                return UniqueChars(nullptr);
+
+            if (childTime && (ownTime - childTime) > MaxUnaccountedTimeUS) {
+                MOZ_ASSERT(level < 3);
+                JS_snprintf(buffer, sizeof(buffer), "%s: %.3fms", "Other", t(ownTime - childTime));
+                if (!fragments.append(make_string_copy(buffer)))
+                    return UniqueChars(nullptr);
+            }
+        }
+    }
+    return Join(fragments, ", ");
+}
+
+UniqueChars
 Statistics::formatDetailedMessage()
 {
     FragmentVector fragments;
 
     if (!fragments.append(formatDetailedDescription()))
         return UniqueChars(nullptr);
 
     if (slices.length() > 1) {
@@ -914,24 +725,16 @@ Statistics::formatJsonPhaseTimes(PhaseTi
                     name.get(), ownTime / 1000, ownTime % 1000);
 
         if (!fragments.append(make_string_copy(buffer)))
             return UniqueChars(nullptr);
     }
     return Join(fragments, ",");
 }
 
-char16_t*
-Statistics::formatMessage()
-{
-    StatisticsSerializer ss(StatisticsSerializer::AsText);
-    formatData(ss, 0);
-    return ss.finishJSString();
-}
-
 Statistics::Statistics(JSRuntime* rt)
   : runtime(rt),
     startupTime(PRMJ_Now()),
     fp(nullptr),
     gcDepth(0),
     nonincrementalReason_(nullptr),
     timedGCStart(0),
     preBytes(0),
@@ -1007,28 +810,18 @@ Statistics::Statistics(JSRuntime* rt)
             if (!fp)
                 MOZ_CRASH("Failed to open MOZ_GCTIMER log file.");
         }
     }
 }
 
 Statistics::~Statistics()
 {
-    if (fp) {
-        StatisticsSerializer ss(StatisticsSerializer::AsText);
-        FormatPhaseTimes(ss, "", phaseTotals);
-        char* msg = ss.finishCString();
-        if (msg) {
-            fprintf(fp, "TOTALS\n%s\n\n-------\n", msg);
-            js_free(msg);
-        }
-
-        if (fp != stdout && fp != stderr)
-            fclose(fp);
-    }
+    if (fp && fp != stdout && fp != stderr)
+        fclose(fp);
 }
 
 JS::GCSliceCallback
 Statistics::setSliceCallback(JS::GCSliceCallback newCallback)
 {
     JS::GCSliceCallback oldCallback = sliceCallback;
     sliceCallback = newCallback;
     return oldCallback;
@@ -1309,17 +1102,17 @@ Statistics::endSCC(unsigned scc, int64_t
  * is affecting the responsiveness of the system. MMU measurements are given
  * with respect to a certain window size. If we report MMU(50ms) = 80%, then
  * that means that, for any 50ms window of time, at least 80% of the window is
  * devoted to the mutator. In other words, the GC is running for at most 20% of
  * the window, or 10ms. The GC can run multiple slices during the 50ms window
  * as long as the total time it spends is at most 10ms.
  */
 double
-Statistics::computeMMU(int64_t window)
+Statistics::computeMMU(int64_t window) const
 {
     MOZ_ASSERT(!slices.empty());
 
     int64_t gc = slices[0].end - slices[0].start;
     int64_t gcMax = gc;
 
     if (gc >= window)
         return 0.0;
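
To make the MMU comment above concrete: with two 5ms slices falling inside one 50ms window, that window spends 10ms in GC, so MMU(50ms) = (50 - 10) / 50 = 80%. A minimal standalone model of the computation, assuming slices are sorted, non-overlapping [start, end) intervals in microseconds and only anchoring candidate windows at slice starts (a simplification, not the exact loop in computeMMU):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct SliceInterval { int64_t start, end; };  // one GC pause, in microseconds

    // Find the window of the given size containing the most GC time;
    // utilisation is whatever time is left over for the mutator.
    double ComputeMMUModel(const std::vector<SliceInterval>& slices, int64_t window)
    {
        int64_t gcMax = 0;
        for (size_t i = 0; i < slices.size(); i++) {
            int64_t windowEnd = slices[i].start + window;
            int64_t gc = 0;
            for (size_t j = i; j < slices.size() && slices[j].start < windowEnd; j++)
                gc += std::min(slices[j].end, windowEnd) - slices[j].start;
            gcMax = std::max(gcMax, gc);
        }
        gcMax = std::min(gcMax, window);
        return double(window - gcMax) / double(window);
    }
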
--- a/js/src/gc/Statistics.h
+++ b/js/src/gc/Statistics.h
@@ -96,18 +96,16 @@ enum Stat {
     STAT_STOREBUFFER_OVERFLOW,
 
     // Number of arenas relocated by compacting GC.
     STAT_ARENA_RELOCATED,
 
     STAT_LIMIT
 };
 
-class StatisticsSerializer;
-
 struct ZoneGCStats
 {
     /* Number of zones collected in this GC. */
     int collectedZoneCount;
 
     /* Total number of zones in the Runtime at the start of this GC. */
     int zoneCount;
 
@@ -181,17 +179,18 @@ struct Statistics
     void count(Stat s) {
         MOZ_ASSERT(s < STAT_LIMIT);
         counts[s]++;
     }
 
     int64_t beginSCC();
     void endSCC(unsigned scc, int64_t start);
 
-    char16_t* formatMessage();
+    UniqueChars formatCompactSliceMessage() const;
+    UniqueChars formatCompactSummaryMessage() const;
     UniqueChars formatJsonMessage(uint64_t timestamp);
     UniqueChars formatDetailedMessage();
 
     JS::GCSliceCallback setSliceCallback(JS::GCSliceCallback callback);
 
     int64_t clearMaxGCPauseAccumulator();
     int64_t getMaxGCPauseSinceClear();
 
@@ -228,17 +227,17 @@ struct Statistics
 
     typedef Vector<SliceData, 8, SystemAllocPolicy> SliceDataVector;
     typedef SliceDataVector::ConstRange SliceRange;
 
     SliceRange sliceRange() const { return slices.all(); }
     size_t slicesLength() const { return slices.length(); }
 
     /* Create a convenient typedef for referring tables of phase times. */
-    typedef int64_t (*PhaseTimeTable)[PHASE_LIMIT];
+    typedef int64_t const (*PhaseTimeTable)[PHASE_LIMIT];
 
   private:
     JSRuntime* runtime;
 
     int64_t startupTime;
 
     /* File pointer used for MOZ_GCTIMER output. */
     FILE* fp;
@@ -272,17 +271,17 @@ struct Statistics
 
     /* Number of events of this type for this GC. */
     unsigned int counts[STAT_LIMIT];
 
     /* Allocated space before the GC started. */
     size_t preBytes;
 
     /* Records the maximum GC pause in an API-controlled interval (in us). */
-    int64_t maxPauseInInterval;
+    mutable int64_t maxPauseInInterval;
 
     /* Phases that are currently on stack. */
     Phase phaseNesting[MAX_NESTING];
     size_t phaseNestingDepth;
     size_t activeDagSlot;
 
     /*
      * To avoid recursive nesting, we discontinue a callback phase when any
@@ -304,31 +303,32 @@ struct Statistics
      */
     bool aborted;
 
     void beginGC(JSGCInvocationKind kind);
     void endGC();
 
     void recordPhaseEnd(Phase phase);
 
-    void gcDuration(int64_t* total, int64_t* maxPause);
+    void gcDuration(int64_t* total, int64_t* maxPause) const;
     void sccDurations(int64_t* total, int64_t* maxPause);
     void printStats();
-    bool formatData(StatisticsSerializer& ss, uint64_t timestamp);
+
+    UniqueChars formatCompactSlicePhaseTimes(PhaseTimeTable phaseTimes) const;
 
     UniqueChars formatDetailedDescription();
     UniqueChars formatDetailedSliceDescription(unsigned i, const SliceData& slice);
     UniqueChars formatDetailedPhaseTimes(PhaseTimeTable phaseTimes);
     UniqueChars formatDetailedTotals();
 
     UniqueChars formatJsonDescription(uint64_t timestamp);
     UniqueChars formatJsonSliceDescription(unsigned i, const SliceData& slice);
     UniqueChars formatJsonPhaseTimes(PhaseTimeTable phaseTimes);
 
-    double computeMMU(int64_t resolution);
+    double computeMMU(int64_t resolution) const;
 };
 
 struct AutoGCSlice
 {
     AutoGCSlice(Statistics& stats, const ZoneGCStats& zoneStats, JSGCInvocationKind gckind,
                 SliceBudget budget, JS::gcreason::Reason reason
                 MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
       : stats(stats)
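
On the PhaseTimeTable change above: making the element type const is what lets the new const-qualified formatters pass their member arrays straight through, since a T[N][M] member accessed in a const member function converts to const T (*)[M] but not to T (*)[M]. The same pattern in isolation (names here are illustrative):

    #include <cstdint>

    enum { PHASE_LIMIT = 4 };
    typedef int64_t const (*PhaseTimeTable)[PHASE_LIMIT];  // pointer to const rows

    struct StatsSketch {
        int64_t phaseTimes[2][PHASE_LIMIT];

        int64_t firstPhaseTime(PhaseTimeTable table) const {
            return table[0][0];  // read-only access through the table
        }
        int64_t firstOwnPhaseTime() const {
            // In a const member function, phaseTimes decays to
            // const int64_t (*)[PHASE_LIMIT], which matches PhaseTimeTable.
            return firstPhaseTime(phaseTimes);
        }
    };
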
--- a/js/src/jit/BaselineIC.cpp
+++ b/js/src/jit/BaselineIC.cpp
@@ -3803,17 +3803,17 @@ IsOptimizableElementPropertyName(JSConte
         return false;
 
     return true;
 }
 
 static bool
 TryAttachNativeGetValueElemStub(JSContext* cx, HandleScript script, jsbytecode* pc,
                                 ICGetElem_Fallback* stub, HandleNativeObject obj,
-                                HandleValue key)
+                                HandleValue key, bool* attached)
 {
     RootedId id(cx);
     if (!IsOptimizableElementPropertyName(cx, key, &id))
         return true;
 
     RootedPropertyName propName(cx, JSID_TO_ATOM(id)->asPropertyName());
     bool needsAtomize = !key.toString()->isAtom();
     bool isCallElem = (JSOp(*pc) == JSOP_CALLELEM);
@@ -3853,48 +3853,49 @@ TryAttachNativeGetValueElemStub(JSContex
                                                               : ICGetElemNativeStub::DynamicSlot;
         ICGetElemNativeCompiler compiler(cx, kind, isCallElem, monitorStub, obj, holder, propName,
                                          acctype, needsAtomize, offset);
         ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
         if (!newStub)
             return false;
 
         stub->addNewStub(newStub);
+        *attached = true;
     }
     return true;
 }
 
 static bool
 TryAttachNativeGetAccessorElemStub(JSContext* cx, HandleScript script, jsbytecode* pc,
                                    ICGetElem_Fallback* stub, HandleNativeObject obj,
-                                   HandleValue key, bool* attached)
+                                   HandleValue key, bool* attached, bool* isTemporarilyUnoptimizable)
 {
     MOZ_ASSERT(!*attached);
 
     RootedId id(cx);
     if (!IsOptimizableElementPropertyName(cx, key, &id))
         return true;
 
     RootedPropertyName propName(cx, JSID_TO_ATOM(id)->asPropertyName());
     bool needsAtomize = !key.toString()->isAtom();
     bool isCallElem = (JSOp(*pc) == JSOP_CALLELEM);
 
     RootedShape shape(cx);
     RootedObject baseHolder(cx);
     if (!EffectlesslyLookupProperty(cx, obj, propName, &baseHolder, &shape))
         return false;
-    if(!baseHolder || baseHolder->isNative())
+    if (!baseHolder || !baseHolder->isNative())
         return true;
 
     HandleNativeObject holder = baseHolder.as<NativeObject>();
 
     bool getterIsScripted = false;
-    bool isTemporarilyUnoptimizable = false;
     if (IsCacheableGetPropCall(cx, obj, baseHolder, shape, &getterIsScripted,
-                               &isTemporarilyUnoptimizable, /*isDOMProxy=*/false)) {
+                               isTemporarilyUnoptimizable, /*isDOMProxy=*/false))
+    {
         RootedFunction getter(cx, &shape->getterObject()->as<JSFunction>());
 
 #if JS_HAS_NO_SUCH_METHOD
         // It's unlikely that a getter function will be used in callelem locations.
         // Just don't attach stubs in that case to avoid issues with __noSuchMethod__ handling.
         if (isCallElem)
             return true;
 #endif
@@ -3998,33 +3999,34 @@ IsNativeOrUnboxedDenseElementAccess(Hand
         return false;
     if (key.isInt32() && key.toInt32() >= 0 && !IsAnyTypedArray(obj.get()))
         return true;
     return false;
 }
 
 static bool
 TryAttachGetElemStub(JSContext* cx, JSScript* script, jsbytecode* pc, ICGetElem_Fallback* stub,
-                     HandleValue lhs, HandleValue rhs, HandleValue res)
+                     HandleValue lhs, HandleValue rhs, HandleValue res, bool* attached)
 {
     bool isCallElem = (JSOp(*pc) == JSOP_CALLELEM);
 
     // Check for String[i] => Char accesses.
     if (lhs.isString() && rhs.isInt32() && res.isString() &&
         !stub->hasStub(ICStub::GetElem_String))
     {
         // NoSuchMethod handling doesn't apply to string targets.
 
         JitSpew(JitSpew_BaselineIC, "  Generating GetElem(String[Int32]) stub");
         ICGetElem_String::Compiler compiler(cx);
         ICStub* stringStub = compiler.getStub(compiler.getStubSpace(script));
         if (!stringStub)
             return false;
 
         stub->addNewStub(stringStub);
+        *attached = true;
         return true;
     }
 
     if (lhs.isMagic(JS_OPTIMIZED_ARGUMENTS) && rhs.isInt32() &&
         !ArgumentsGetElemStubExists(stub, ICGetElem_Arguments::Magic))
     {
         // Any script with a CALLPROP on arguments (arguments.foo())
         // should not have optimized arguments.
@@ -4033,16 +4035,17 @@ TryAttachGetElemStub(JSContext* cx, JSSc
         JitSpew(JitSpew_BaselineIC, "  Generating GetElem(MagicArgs[Int32]) stub");
         ICGetElem_Arguments::Compiler compiler(cx, stub->fallbackMonitorStub()->firstMonitorStub(),
                                                ICGetElem_Arguments::Magic, false);
         ICStub* argsStub = compiler.getStub(compiler.getStubSpace(script));
         if (!argsStub)
             return false;
 
         stub->addNewStub(argsStub);
+        *attached = true;
         return true;
     }
 
     // Otherwise, GetElem is only optimized on objects.
     if (!lhs.isObject())
         return true;
     RootedObject obj(cx, &lhs.toObject());
 
@@ -4055,54 +4058,59 @@ TryAttachGetElemStub(JSContext* cx, JSSc
             JitSpew(JitSpew_BaselineIC, "  Generating GetElem(ArgsObj[Int32]) stub");
             ICGetElem_Arguments::Compiler compiler(
                 cx, stub->fallbackMonitorStub()->firstMonitorStub(), which, isCallElem);
             ICStub* argsStub = compiler.getStub(compiler.getStubSpace(script));
             if (!argsStub)
                 return false;
 
             stub->addNewStub(argsStub);
+            *attached = true;
             return true;
         }
     }
 
     // Check for NativeObject[int] dense accesses.
     if (IsNativeDenseElementAccess(obj, rhs)) {
         JitSpew(JitSpew_BaselineIC, "  Generating GetElem(Native[Int32] dense) stub");
         ICGetElem_Dense::Compiler compiler(cx, stub->fallbackMonitorStub()->firstMonitorStub(),
                                            obj->as<NativeObject>().lastProperty(), isCallElem);
         ICStub* denseStub = compiler.getStub(compiler.getStubSpace(script));
         if (!denseStub)
             return false;
 
         stub->addNewStub(denseStub);
+        *attached = true;
         return true;
     }
 
     // Check for NativeObject[id] shape-optimizable accesses.
     if (obj->isNative() && rhs.isString()) {
         RootedScript rootedScript(cx, script);
         if (!TryAttachNativeGetValueElemStub(cx, rootedScript, pc, stub,
-            obj.as<NativeObject>(), rhs))
+                                             obj.as<NativeObject>(), rhs, attached))
         {
             return false;
         }
+        if (*attached)
+            return true;
         script = rootedScript;
     }
 
     // Check for UnboxedArray[int] accesses.
     if (obj->is<UnboxedArrayObject>() && rhs.isInt32() && rhs.toInt32() >= 0) {
         JitSpew(JitSpew_BaselineIC, "  Generating GetElem(UnboxedArray[Int32]) stub");
         ICGetElem_UnboxedArray::Compiler compiler(cx, stub->fallbackMonitorStub()->firstMonitorStub(),
                                                   obj->group());
         ICStub* unboxedStub = compiler.getStub(compiler.getStubSpace(script));
         if (!unboxedStub)
             return false;
 
         stub->addNewStub(unboxedStub);
+        *attached = true;
         return true;
     }
 
     // Check for TypedArray[int] => Number and TypedObject[int] => Number accesses.
     if ((IsAnyTypedArray(obj.get()) || IsPrimitiveArrayTypedObject(obj)) &&
         rhs.isNumber() &&
         res.isNumber() &&
         !TypedArrayGetElemStubExists(stub, obj))
@@ -4126,16 +4134,17 @@ TryAttachGetElemStub(JSContext* cx, JSSc
 
         JitSpew(JitSpew_BaselineIC, "  Generating GetElem(TypedArray[Int32]) stub");
         ICGetElem_TypedArray::Compiler compiler(cx, obj->maybeShape(), TypedThingElementType(obj));
         ICStub* typedArrayStub = compiler.getStub(compiler.getStubSpace(script));
         if (!typedArrayStub)
             return false;
 
         stub->addNewStub(typedArrayStub);
+        *attached = true;
         return true;
     }
 
     // GetElem operations on non-native objects cannot be cached by either
     // Baseline or Ion. Indicate this in the cache so that Ion does not
     // generate a cache for this op.
     if (!obj->isNative())
         stub->noteNonNativeAccess();
@@ -4174,25 +4183,30 @@ DoGetElemFallback(JSContext* cx, Baselin
         if (isOptimizedArgs)
             TypeScript::Monitor(cx, frame->script(), pc, res);
     }
 
     bool attached = false;
     if (stub->numOptimizedStubs() >= ICGetElem_Fallback::MAX_OPTIMIZED_STUBS) {
         // TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
         // But for now we just bail.
+        stub->noteUnoptimizableAccess();
         attached = true;
     }
 
     // Try to attach an optimized getter stub.
+    bool isTemporarilyUnoptimizable = false;
     if (!attached && lhs.isObject() && lhs.toObject().isNative() && rhs.isString()){
         RootedScript rootedScript(cx, frame->script());
         RootedNativeObject obj(cx, &lhs.toObject().as<NativeObject>());
-        if (!TryAttachNativeGetAccessorElemStub(cx, rootedScript, pc, stub, obj, rhs, &attached))
-            return false;
+        if (!TryAttachNativeGetAccessorElemStub(cx, rootedScript, pc, stub, obj, rhs, &attached,
+                                                &isTemporarilyUnoptimizable))
+        {
+            return false;
+        }
         script = rootedScript;
     }
 
     if (!isOptimizedArgs) {
         if (!GetElementOperation(cx, op, &lhsCopy, rhs, res))
             return false;
         TypeScript::Monitor(cx, frame->script(), pc, res);
     }
@@ -4204,21 +4218,21 @@ DoGetElemFallback(JSContext* cx, Baselin
     // Add a type monitor stub for the resulting value.
     if (!stub->addMonitorStubForValue(cx, frame->script(), res))
         return false;
 
     if (attached)
         return true;
 
     // Try to attach an optimized stub.
-    if (!TryAttachGetElemStub(cx, frame->script(), pc, stub, lhs, rhs, res))
-        return false;
-
-    // If we ever add a way to note unoptimizable accesses here, propagate the
-    // isTemporarilyUnoptimizable state from TryAttachNativeGetElemStub to here.
+    if (!TryAttachGetElemStub(cx, frame->script(), pc, stub, lhs, rhs, res, &attached))
+        return false;
+
+    if (!attached && !isTemporarilyUnoptimizable)
+        stub->noteUnoptimizableAccess();
 
     return true;
 }
 
 typedef bool (*DoGetElemFallbackFn)(JSContext*, BaselineFrame*, ICGetElem_Fallback*,
                                     HandleValue, HandleValue, MutableHandleValue);
 static const VMFunction DoGetElemFallbackInfo =
     FunctionInfo<DoGetElemFallbackFn>(DoGetElemFallback, TailCall, PopValues(2));
@@ -9943,17 +9957,17 @@ GetTemplateObjectForNative(JSContext* cx
 
         ObjectGroup* group = ObjectGroup::allocationSiteGroup(cx, script, pc, JSProto_Array);
         if (!group)
             return false;
         res->setGroup(group);
         return true;
     }
 
-    if (native == js::array_concat) {
+    if (native == js::array_concat || native == js::array_slice) {
         if (args.thisv().isObject() && !args.thisv().toObject().isSingleton()) {
             res.set(NewFullyAllocatedArrayTryReuseGroup(cx, &args.thisv().toObject(), 0,
                                                         TenuredObject, /* forceAnalyze = */ true));
             if (!res)
                 return false;
         }
     }
 
@@ -12677,21 +12691,25 @@ ICGetIntrinsic_Constant::ICGetIntrinsic_
   : ICStub(GetIntrinsic_Constant, stubCode),
     value_(value)
 { }
 
 ICGetIntrinsic_Constant::~ICGetIntrinsic_Constant()
 { }
 
 ICGetProp_Primitive::ICGetProp_Primitive(JitCode* stubCode, ICStub* firstMonitorStub,
-                                         Shape* protoShape, uint32_t offset)
+                                         JSValueType primitiveType, Shape* protoShape,
+                                         uint32_t offset)
   : ICMonitoredStub(GetProp_Primitive, stubCode, firstMonitorStub),
     protoShape_(protoShape),
     offset_(offset)
-{ }
+{
+    extra_ = uint16_t(primitiveType);
+    MOZ_ASSERT(JSValueType(extra_) == primitiveType);
+}
 
 ICGetPropNativeStub::ICGetPropNativeStub(ICStub::Kind kind, JitCode* stubCode,
                                          ICStub* firstMonitorStub,
                                          ReceiverGuard guard, uint32_t offset)
   : ICMonitoredStub(kind, stubCode, firstMonitorStub),
     receiverGuard_(guard),
     offset_(offset)
 { }
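
ICGetProp_Primitive above packs the receiver's JSValueType into the stub's spare 16-bit extra_ field, and the constructor's assert guarantees the value survives the narrowing. The same pack/assert/unpack round trip in isolation (the enum here is a stand-in, not the real JSValueType):

    #include <cassert>
    #include <cstdint>

    enum DemoValueType : uint8_t { DemoDouble = 0x00, DemoInt32 = 0x01, DemoString = 0x06 };

    struct DemoStub {
        uint16_t extra_;

        explicit DemoStub(DemoValueType type) {
            extra_ = uint16_t(type);
            assert(DemoValueType(extra_) == type);  // no truncation on the way in
        }
        DemoValueType primitiveType() const {
            return DemoValueType(extra_);           // unpack on the way out
        }
    };
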
--- a/js/src/jit/BaselineIC.h
+++ b/js/src/jit/BaselineIC.h
@@ -2633,16 +2633,17 @@ class ICGetElem_Fallback : public ICMoni
     friend class ICStubSpace;
 
     explicit ICGetElem_Fallback(JitCode* stubCode)
       : ICMonitoredFallbackStub(ICStub::GetElem_Fallback, stubCode)
     { }
 
     static const uint16_t EXTRA_NON_NATIVE = 0x1;
     static const uint16_t EXTRA_NEGATIVE_INDEX = 0x2;
+    static const uint16_t EXTRA_UNOPTIMIZABLE_ACCESS = 0x4;
 
   public:
     static const uint32_t MAX_OPTIMIZED_STUBS = 16;
 
     void noteNonNativeAccess() {
         extra_ |= EXTRA_NON_NATIVE;
     }
     bool hasNonNativeAccess() const {
@@ -2650,16 +2651,22 @@ class ICGetElem_Fallback : public ICMoni
     }
 
     void noteNegativeIndex() {
         extra_ |= EXTRA_NEGATIVE_INDEX;
     }
     bool hasNegativeIndex() const {
         return extra_ & EXTRA_NEGATIVE_INDEX;
     }
+    void noteUnoptimizableAccess() {
+        extra_ |= EXTRA_UNOPTIMIZABLE_ACCESS;
+    }
+    bool hadUnoptimizableAccess() const {
+        return extra_ & EXTRA_UNOPTIMIZABLE_ACCESS;
+    }
 
     // Compiler for this stub kind.
     class Compiler : public ICStubCompiler {
       protected:
         bool generateStubCode(MacroAssembler& masm);
 
       public:
         explicit Compiler(JSContext* cx)
@@ -4104,23 +4111,27 @@ class ICGetProp_Primitive : public ICMon
 
   protected: // Protected to silence Clang warning.
     // Shape of String.prototype/Number.prototype to check for.
     HeapPtrShape protoShape_;
 
     // Fixed or dynamic slot offset.
     uint32_t offset_;
 
-    ICGetProp_Primitive(JitCode* stubCode, ICStub* firstMonitorStub,
+    ICGetProp_Primitive(JitCode* stubCode, ICStub* firstMonitorStub, JSValueType primitiveType,
                         Shape* protoShape, uint32_t offset);
 
   public:
     HeapPtrShape& protoShape() {
         return protoShape_;
     }
+    JSValueType primitiveType() const {
+        return JSValueType(extra_);
+    }
+
     static size_t offsetOfProtoShape() {
         return offsetof(ICGetProp_Primitive, protoShape_);
     }
 
     static size_t offsetOfOffset() {
         return offsetof(ICGetProp_Primitive, offset_);
     }
 
@@ -4150,17 +4161,17 @@ class ICGetProp_Primitive : public ICMon
             prototype_(cx, prototype),
             isFixedSlot_(isFixedSlot),
             offset_(offset)
         {}
 
         ICStub* getStub(ICStubSpace* space) {
             RootedShape protoShape(cx, prototype_->as<NativeObject>().lastProperty());
             return newStub<ICGetProp_Primitive>(space, getStubCode(), firstMonitorStub_,
-                                                protoShape, offset_);
+                                                primitiveType_, protoShape, offset_);
         }
     };
 };
 
 // Stub for accessing a string's length.
 class ICGetProp_StringLength : public ICStub
 {
     friend class ICStubSpace;
--- a/js/src/jit/BaselineInspector.cpp
+++ b/js/src/jit/BaselineInspector.cpp
@@ -697,16 +697,90 @@ BaselineInspector::commonSetPropFunction
     }
 
     if (!*holder)
         return false;
 
     return true;
 }
 
+MIRType
+BaselineInspector::expectedPropertyAccessInputType(jsbytecode* pc)
+{
+    if (!hasBaselineScript())
+        return MIRType_Value;
+
+    const ICEntry& entry = icEntryFromPC(pc);
+    MIRType type = MIRType_None;
+
+    for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
+        MIRType stubType;
+        switch (stub->kind()) {
+          case ICStub::GetProp_Fallback:
+            if (stub->toGetProp_Fallback()->hadUnoptimizableAccess())
+                return MIRType_Value;
+            continue;
+
+          case ICStub::GetElem_Fallback:
+            if (stub->toGetElem_Fallback()->hadUnoptimizableAccess())
+                return MIRType_Value;
+            continue;
+
+          case ICStub::GetProp_Generic:
+            return MIRType_Value;
+
+          case ICStub::GetProp_ArgumentsLength:
+          case ICStub::GetElem_Arguments:
+            // Either an object or magic arguments.
+            return MIRType_Value;
+
+          case ICStub::GetProp_ArrayLength:
+          case ICStub::GetProp_Native:
+          case ICStub::GetProp_NativeDoesNotExist:
+          case ICStub::GetProp_NativePrototype:
+          case ICStub::GetProp_Unboxed:
+          case ICStub::GetProp_TypedObject:
+          case ICStub::GetProp_CallScripted:
+          case ICStub::GetProp_CallNative:
+          case ICStub::GetProp_CallDOMProxyNative:
+          case ICStub::GetProp_CallDOMProxyWithGenerationNative:
+          case ICStub::GetProp_DOMProxyShadowed:
+          case ICStub::GetElem_NativeSlot:
+          case ICStub::GetElem_NativePrototypeSlot:
+          case ICStub::GetElem_NativePrototypeCallNative:
+          case ICStub::GetElem_NativePrototypeCallScripted:
+          case ICStub::GetElem_String:
+          case ICStub::GetElem_Dense:
+          case ICStub::GetElem_TypedArray:
+            stubType = MIRType_Object;
+            break;
+
+          case ICStub::GetProp_Primitive:
+            stubType = MIRTypeFromValueType(stub->toGetProp_Primitive()->primitiveType());
+            break;
+
+          case ICStub::GetProp_StringLength:
+            stubType = MIRType_String;
+            break;
+
+          default:
+            MOZ_CRASH("Unexpected stub");
+        }
+
+        if (type != MIRType_None) {
+            if (type != stubType)
+                return MIRType_Value;
+        } else {
+            type = stubType;
+        }
+    }
+
+    return (type == MIRType_None) ? MIRType_Value : type;
+}
+
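The loop above is a meet over the attached stubs: a concrete input type is reported only if every optimized stub at the site agrees, and a fallback stub that recorded an unoptimizable access forces MIRType_Value immediately. The accumulation step in isolation (with a stand-in enum), so a site that saw both string and object receivers widens back to Value:

    enum DemoMIRType { DemoNone, DemoValue, DemoObject, DemoString };

    // Fold one stub's observed type into the running result: the first stub
    // seeds the type, agreement keeps it, disagreement widens to Value.
    DemoMIRType FoldStubType(DemoMIRType acc, DemoMIRType stubType)
    {
        if (acc == DemoNone)
            return stubType;
        return acc == stubType ? acc : DemoValue;
    }
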
 bool
 BaselineInspector::instanceOfData(jsbytecode* pc, Shape** shape, uint32_t* slot,
                                   JSObject** prototypeObject)
 {
     MOZ_ASSERT(*pc == JSOP_INSTANCEOF);
 
     if (!hasBaselineScript())
         return false;
--- a/js/src/jit/BaselineInspector.h
+++ b/js/src/jit/BaselineInspector.h
@@ -99,16 +99,17 @@ class BaselineInspector
 
     SetElemICInspector setElemICInspector(jsbytecode* pc) {
         return makeICInspector<SetElemICInspector>(pc, ICStub::SetElem_Fallback);
     }
 
     MIRType expectedResultType(jsbytecode* pc);
     MCompare::CompareType expectedCompareType(jsbytecode* pc);
     MIRType expectedBinaryArithSpecialization(jsbytecode* pc);
+    MIRType expectedPropertyAccessInputType(jsbytecode* pc);
 
     bool hasSeenNonNativeGetElement(jsbytecode* pc);
     bool hasSeenNegativeIndexGetElement(jsbytecode* pc);
     bool hasSeenAccessedGetter(jsbytecode* pc);
     bool hasSeenDoubleResult(jsbytecode* pc);
     bool hasSeenNonStringIterMore(jsbytecode* pc);
 
     bool isOptimizableCallStringSplit(jsbytecode* pc, JSString** stringOut, JSString** stringArg,
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -7143,16 +7143,51 @@ CodeGenerator::visitArrayConcat(LArrayCo
     masm.bind(&call);
 
     pushArg(temp1);
     pushArg(ToRegister(lir->rhs()));
     pushArg(ToRegister(lir->lhs()));
     callVM(ArrayConcatDenseInfo, lir);
 }
 
+typedef JSObject* (*ArraySliceDenseFn)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
+static const VMFunction ArraySliceDenseInfo = FunctionInfo<ArraySliceDenseFn>(array_slice_dense);
+
+void
+CodeGenerator::visitArraySlice(LArraySlice* lir)
+{
+    Register object = ToRegister(lir->object());
+    Register begin = ToRegister(lir->begin());
+    Register end = ToRegister(lir->end());
+    Register temp1 = ToRegister(lir->temp1());
+    Register temp2 = ToRegister(lir->temp2());
+
+    Label call, fail;
+
+    // Try to allocate an object.
+    masm.createGCObject(temp1, temp2, lir->mir()->templateObj(), lir->mir()->initialHeap(), &fail);
+
+    // Fix up the group of the result in case it doesn't match the template object.
+    masm.loadPtr(Address(object, JSObject::offsetOfGroup()), temp2);
+    masm.storePtr(temp2, Address(temp1, JSObject::offsetOfGroup()));
+
+    masm.jump(&call);
+    {
+        masm.bind(&fail);
+        masm.movePtr(ImmPtr(nullptr), temp1);
+    }
+    masm.bind(&call);
+
+    pushArg(temp1);
+    pushArg(end);
+    pushArg(begin);
+    pushArg(object);
+    callVM(ArraySliceDenseInfo, lir);
+}
+
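As in visitArrayConcat just above, the generated code attempts the inline allocation first and passes nullptr to the VM call when it fails, leaving the slow path to allocate instead. A sketch of the contract the jitted path assumes (SliceDenseSketch and its helpers are hypothetical, not the real array_slice_dense body):

    static JSObject*
    SliceDenseSketch(JSContext* cx, HandleObject obj, int32_t begin, int32_t end,
                     HandleObject result)
    {
        // |result| is the object the jitted code allocated, with its group
        // already fixed up to match |obj| — or nullptr if allocation failed.
        if (!result)
            return AllocateAndSlice(cx, obj, begin, end);            // hypothetical slow path
        return FillPreallocatedSlice(cx, obj, begin, end, result);   // hypothetical
    }
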
 typedef JSString* (*ArrayJoinFn)(JSContext*, HandleObject, HandleString);
 static const VMFunction ArrayJoinInfo = FunctionInfo<ArrayJoinFn>(jit::ArrayJoin);
 
 void
 CodeGenerator::visitArrayJoin(LArrayJoin* lir)
 {
     pushArg(ToRegister(lir->separator()));
     pushArg(ToRegister(lir->array()));
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -260,16 +260,17 @@ class CodeGenerator : public CodeGenerat
                            Register elementsTemp, Register lengthTemp, TypedOrValueRegister out);
     void visitArrayPopShiftV(LArrayPopShiftV* lir);
     void visitArrayPopShiftT(LArrayPopShiftT* lir);
     void emitArrayPush(LInstruction* lir, const MArrayPush* mir, Register obj,
                        ConstantOrRegister value, Register elementsTemp, Register length);
     void visitArrayPushV(LArrayPushV* lir);
     void visitArrayPushT(LArrayPushT* lir);
     void visitArrayConcat(LArrayConcat* lir);
+    void visitArraySlice(LArraySlice* lir);
     void visitArrayJoin(LArrayJoin* lir);
     void visitLoadUnboxedScalar(LLoadUnboxedScalar* lir);
     void visitLoadTypedArrayElementHole(LLoadTypedArrayElementHole* lir);
     void visitStoreUnboxedScalar(LStoreUnboxedScalar* lir);
     void visitStoreTypedArrayElementHole(LStoreTypedArrayElementHole* lir);
     void visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir);
     void visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir);
     void visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir);
--- a/js/src/jit/IonBuilder.cpp
+++ b/js/src/jit/IonBuilder.cpp
@@ -7189,37 +7189,36 @@ IonBuilder::testSingletonProperty(JSObje
             return nullptr;
 
         obj = obj->getProto();
     }
 
     return nullptr;
 }
 
-bool
-IonBuilder::testSingletonPropertyTypes(MDefinition* obj, JSObject* singleton, PropertyName* name,
-                                       bool* testObject, bool* testString)
+JSObject*
+IonBuilder::testSingletonPropertyTypes(MDefinition* obj, PropertyName* name)
 {
     // As for TestSingletonProperty, but the input is any value in a type set
-    // rather than a specific object. If testObject is set then the constant
-    // result can only be used after ensuring the input is an object.
-
-    *testObject = false;
-    *testString = false;
+    // rather than a specific object.
 
     TemporaryTypeSet* types = obj->resultTypeSet();
     if (types && types->unknownObject())
-        return false;
+        return nullptr;
 
     JSObject* objectSingleton = types ? types->maybeSingleton() : nullptr;
     if (objectSingleton)
-        return testSingletonProperty(objectSingleton, name) == singleton;
+        return testSingletonProperty(objectSingleton, name);
+
+    MIRType objType = obj->type();
+    if (objType == MIRType_Value && types)
+        objType = types->getKnownMIRType();
 
     JSProtoKey key;
-    switch (obj->type()) {
+    switch (objType) {
       case MIRType_String:
         key = JSProto_String;
         break;
 
       case MIRType_Symbol:
         key = JSProto_Symbol;
         break;
 
@@ -7227,71 +7226,67 @@ IonBuilder::testSingletonPropertyTypes(M
       case MIRType_Double:
         key = JSProto_Number;
         break;
 
       case MIRType_Boolean:
         key = JSProto_Boolean;
         break;
 
-      case MIRType_Object:
-      case MIRType_Value: {
+      case MIRType_Object: {
         if (!types)
-            return false;
-
-        if (types->hasType(TypeSet::StringType())) {
-            key = JSProto_String;
-            *testString = true;
-            break;
-        }
-
-        if (!types->maybeObject())
-            return false;
+            return nullptr;
 
         // For property accesses which may be on many objects, we just need to
         // find a prototype common to all the objects; if that prototype
         // has the singleton property, the access will not be on a missing property.
+        JSObject* singleton = nullptr;
         for (unsigned i = 0; i < types->getObjectCount(); i++) {
             TypeSet::ObjectKey* key = types->getObject(i);
             if (!key)
                 continue;
             if (analysisContext)
                 key->ensureTrackedProperty(analysisContext, NameToId(name));
 
             const Class* clasp = key->clasp();
             if (!ClassHasEffectlessLookup(clasp) || ObjectHasExtraOwnProperty(compartment, key, name))
-                return false;
+                return nullptr;
             if (key->unknownProperties())
-                return false;
+                return nullptr;
             HeapTypeSetKey property = key->property(NameToId(name));
             if (property.isOwnProperty(constraints()))
-                return false;
+                return nullptr;
 
             if (JSObject* proto = key->proto().toObjectOrNull()) {
                 // Test this type.
-                if (testSingletonProperty(proto, name) != singleton)
-                    return false;
+                JSObject* thisSingleton = testSingletonProperty(proto, name);
+                if (!thisSingleton)
+                    return nullptr;
+                if (singleton) {
+                    if (thisSingleton != singleton)
+                        return nullptr;
+                } else {
+                    singleton = thisSingleton;
+                }
             } else {
                 // Can't be on the prototype chain with no prototypes...
-                return false;
+                return nullptr;
             }
         }
-        // If this is not a known object, a test will be needed.
-        *testObject = (obj->type() != MIRType_Object);
-        return true;
+        return singleton;
       }
       default:
-        return false;
+        return nullptr;
     }
 
     JSObject* proto = GetBuiltinPrototypePure(&script()->global(), key);
     if (proto)
-        return testSingletonProperty(proto, name) == singleton;
-
-    return false;
+        return testSingletonProperty(proto, name);
+
+    return nullptr;
 }
 
 bool
 IonBuilder::pushTypeBarrier(MDefinition* def, TemporaryTypeSet* observed, BarrierKind kind)
 {
     MOZ_ASSERT(def == current->peek(-1));
 
     MDefinition* replace = addTypeBarrier(current->pop(), observed, kind);
@@ -7726,16 +7721,18 @@ IonBuilder::jsop_getelem()
 
         if (!resumeAfter(ins))
             return false;
 
         TemporaryTypeSet* types = bytecodeTypes(pc);
         return pushTypeBarrier(ins, types, BarrierKind::TypeSet);
     }
 
+    obj = maybeUnboxForPropertyAccess(obj);
+
     bool emitted = false;
 
     trackOptimizationAttempt(TrackedStrategy::GetElem_TypedObject);
     if (!getElemTryTypedObject(&emitted, obj, index) || emitted)
         return emitted;
 
     trackOptimizationAttempt(TrackedStrategy::GetElem_Dense);
     if (!getElemTryDense(&emitted, obj, index) || emitted)
@@ -10004,16 +10001,31 @@ IonBuilder::shouldAbortOnPreliminaryGrou
                 preliminary = true;
             }
         }
     }
 
     return preliminary;
 }
 
+MDefinition*
+IonBuilder::maybeUnboxForPropertyAccess(MDefinition* def)
+{
+    if (def->type() != MIRType_Value)
+        return def;
+
+    MIRType type = inspector->expectedPropertyAccessInputType(pc);
+    if (type == MIRType_Value || !def->mightBeType(type))
+        return def;
+
+    MUnbox* unbox = MUnbox::New(alloc(), def, type, MUnbox::Fallible);
+    current->add(unbox);
+    return unbox;
+}
+
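The effect of maybeUnboxForPropertyAccess: when Baseline's stubs agree that a boxed input only ever held one type, Ion inserts a fallible unbox so the rest of the getprop/getelem pipeline can specialize on the unboxed type; if the guard ever fails at runtime, the fallible MUnbox bails out to Baseline. A hand-written sketch of the MIR shape (not compiler output):

    // before:  v1:Value   v2 = getprop(v1, "length")         // generic path
    // after:   v1:Value   v3 = MUnbox(v1, String, Fallible)  // bails if v1 is not a string
    //                     v2 = getprop(v3:String, "length")  // can specialize
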
 bool
 IonBuilder::jsop_getprop(PropertyName* name)
 {
     bool emitted = false;
     startTrackingOptimizations();
 
     MDefinition* obj = current->pop();
     TemporaryTypeSet* types = bytecodeTypes(pc);
@@ -10029,16 +10041,18 @@ IonBuilder::jsop_getprop(PropertyName* n
             return emitted;
 
         // Try to optimize arguments.callee.
         trackOptimizationAttempt(TrackedStrategy::GetProp_ArgumentsCallee);
         if (!getPropTryArgumentsCallee(&emitted, obj, name) || emitted)
             return emitted;
     }
 
+    obj = maybeUnboxForPropertyAccess(obj);
+
     BarrierKind barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(),
                                                        obj, name, types);
 
     // Try to optimize to a specific constant.
     trackOptimizationAttempt(TrackedStrategy::GetProp_InferredConstant);
     if (barrier == BarrierKind::NoBarrier) {
         if (!getPropTryInferredConstant(&emitted, obj, name, types) || emitted)
             return emitted;
@@ -10296,34 +10310,29 @@ IonBuilder::getPropTryArgumentsCallee(bo
 }
 
 bool
 IonBuilder::getPropTryConstant(bool* emitted, MDefinition* obj, PropertyName* name,
                                TemporaryTypeSet* types)
 {
     MOZ_ASSERT(*emitted == false);
 
-    JSObject* singleton = types ? types->maybeSingleton() : nullptr;
-    if (!singleton) {
-        trackOptimizationOutcome(TrackedOutcome::NotSingleton);
-        return true;
-    }
-
-    bool testObject, testString;
-    if (!testSingletonPropertyTypes(obj, singleton, name, &testObject, &testString))
+    if (!types->mightBeMIRType(MIRType_Object)) {
+        // If we have not observed an object result here, don't look for a
+        // singleton constant.
+        trackOptimizationOutcome(TrackedOutcome::NotObject);
+        return true;
+    }
+
+    JSObject* singleton = testSingletonPropertyTypes(obj, name);
+    if (!singleton)
         return true;
 
     // Property access is a known constant -- safe to emit.
-    MOZ_ASSERT(!testString || !testObject);
-    if (testObject)
-        current->add(MGuardObject::New(alloc(), obj));
-    else if (testString)
-        current->add(MGuardString::New(alloc(), obj));
-    else
-        obj->setImplicitlyUsedUnchecked();
+    obj->setImplicitlyUsedUnchecked();
 
     pushConstant(ObjectValue(*singleton));
 
     trackOptimizationSuccess();
     *emitted = true;
     return true;
 }
 
--- a/js/src/jit/IonBuilder.h
+++ b/js/src/jit/IonBuilder.h
@@ -417,16 +417,17 @@ class IonBuilder
     bool storeSlot(MDefinition* obj, size_t slot, size_t nfixed,
                    MDefinition* value, bool needsBarrier,
                    MIRType slotType = MIRType_None);
     bool storeSlot(MDefinition* obj, Shape* shape, MDefinition* value, bool needsBarrier,
                    MIRType slotType = MIRType_None);
     bool shouldAbortOnPreliminaryGroups(MDefinition *obj);
 
     MDefinition* tryInnerizeWindow(MDefinition* obj);
+    MDefinition* maybeUnboxForPropertyAccess(MDefinition* def);
 
     // jsop_getprop() helpers.
     bool checkIsDefinitelyOptimizedArguments(MDefinition* obj, bool* isOptimizedArgs);
     bool getPropTryInferredConstant(bool* emitted, MDefinition* obj, PropertyName* name,
                                     TemporaryTypeSet* types);
     bool getPropTryArgumentsLength(bool* emitted, MDefinition* obj);
     bool getPropTryArgumentsCallee(bool* emitted, MDefinition* obj, PropertyName* name);
     bool getPropTryConstant(bool* emitted, MDefinition* obj, PropertyName* name,
@@ -740,16 +741,17 @@ class IonBuilder
     // The known MIR type of getInlineReturnTypeSet.
     MIRType getInlineReturnType();
 
     // Array natives.
     InliningStatus inlineArray(CallInfo& callInfo);
     InliningStatus inlineArrayPopShift(CallInfo& callInfo, MArrayPopShift::Mode mode);
     InliningStatus inlineArrayPush(CallInfo& callInfo);
     InliningStatus inlineArrayConcat(CallInfo& callInfo);
+    InliningStatus inlineArraySlice(CallInfo& callInfo);
     InliningStatus inlineArrayJoin(CallInfo& callInfo);
     InliningStatus inlineArraySplice(CallInfo& callInfo);
 
     // Math natives.
     InliningStatus inlineMathAbs(CallInfo& callInfo);
     InliningStatus inlineMathFloor(CallInfo& callInfo);
     InliningStatus inlineMathCeil(CallInfo& callInfo);
     InliningStatus inlineMathClz32(CallInfo& callInfo);
@@ -935,18 +937,18 @@ class IonBuilder
 
     bool annotateGetPropertyCache(MDefinition* obj, MGetPropertyCache* getPropCache,
                                   TemporaryTypeSet* objTypes,
                                   TemporaryTypeSet* pushedTypes);
 
     MGetPropertyCache* getInlineableGetPropertyCache(CallInfo& callInfo);
 
     JSObject* testSingletonProperty(JSObject* obj, PropertyName* name);
-    bool testSingletonPropertyTypes(MDefinition* obj, JSObject* singleton, PropertyName* name,
-                                    bool* testObject, bool* testString);
+    JSObject* testSingletonPropertyTypes(MDefinition* obj, PropertyName* name);
+
     uint32_t getDefiniteSlot(TemporaryTypeSet* types, PropertyName* name, uint32_t* pnfixed,
                              BaselineInspector::ObjectGroupVector& convertUnboxedGroups);
     MDefinition* convertUnboxedObjects(MDefinition* obj,
                                        const BaselineInspector::ObjectGroupVector& list);
     uint32_t getUnboxedOffset(TemporaryTypeSet* types, PropertyName* name,
                               JSValueType* punboxedType);
     MInstruction* loadUnboxedProperty(MDefinition* obj, size_t offset, JSValueType unboxedType,
                                       BarrierKind barrier, TemporaryTypeSet* types);
--- a/js/src/jit/LIR-Common.h
+++ b/js/src/jit/LIR-Common.h
@@ -4940,16 +4940,49 @@ class LArrayConcat : public LCallInstruc
     const LDefinition* temp1() {
         return getTemp(0);
     }
     const LDefinition* temp2() {
         return getTemp(1);
     }
 };
 
+class LArraySlice : public LCallInstructionHelper<1, 3, 2>
+{
+  public:
+    LIR_HEADER(ArraySlice)
+
+    LArraySlice(const LAllocation& obj, const LAllocation& begin, const LAllocation& end,
+                const LDefinition& temp1, const LDefinition& temp2) {
+        setOperand(0, obj);
+        setOperand(1, begin);
+        setOperand(2, end);
+        setTemp(0, temp1);
+        setTemp(1, temp2);
+    }
+    const MArraySlice* mir() const {
+        return mir_->toArraySlice();
+    }
+    const LAllocation* object() {
+        return getOperand(0);
+    }
+    const LAllocation* begin() {
+        return getOperand(1);
+    }
+    const LAllocation* end() {
+        return getOperand(2);
+    }
+    const LDefinition* temp1() {
+        return getTemp(0);
+    }
+    const LDefinition* temp2() {
+        return getTemp(1);
+    }
+};
+
 class LArrayJoin : public LCallInstructionHelper<1, 2, 0>
 {
   public:
     LIR_HEADER(ArrayJoin)
 
     LArrayJoin(const LAllocation& array, const LAllocation& sep) {
         setOperand(0, array);
         setOperand(1, sep);
--- a/js/src/jit/LOpcodes.h
+++ b/js/src/jit/LOpcodes.h
@@ -233,16 +233,17 @@
     _(StoreUnboxedScalar)           \
     _(StoreUnboxedPointer)          \
     _(ConvertUnboxedObjectToNative) \
     _(ArrayPopShiftV)               \
     _(ArrayPopShiftT)               \
     _(ArrayPushV)                   \
     _(ArrayPushT)                   \
     _(ArrayConcat)                  \
+    _(ArraySlice)                   \
     _(ArrayJoin)                    \
     _(StoreElementHoleV)            \
     _(StoreElementHoleT)            \
     _(LoadTypedArrayElementHole)    \
     _(LoadTypedArrayElementStatic)  \
     _(StoreTypedArrayElementHole)   \
     _(StoreTypedArrayElementStatic) \
     _(CompareExchangeTypedArrayElement) \
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -2900,16 +2900,33 @@ LIRGenerator::visitArrayConcat(MArrayCon
                                                   useFixed(ins->rhs(), CallTempReg2),
                                                   tempFixed(CallTempReg3),
                                                   tempFixed(CallTempReg4));
     defineReturn(lir, ins);
     assignSafepoint(lir, ins);
 }
 
 void
+LIRGenerator::visitArraySlice(MArraySlice* ins)
+{
+    MOZ_ASSERT(ins->type() == MIRType_Object);
+    MOZ_ASSERT(ins->object()->type() == MIRType_Object);
+    MOZ_ASSERT(ins->begin()->type() == MIRType_Int32);
+    MOZ_ASSERT(ins->end()->type() == MIRType_Int32);
+
+    LArraySlice* lir = new(alloc()) LArraySlice(useFixed(ins->object(), CallTempReg0),
+                                                useFixed(ins->begin(), CallTempReg1),
+                                                useFixed(ins->end(), CallTempReg2),
+                                                tempFixed(CallTempReg3),
+                                                tempFixed(CallTempReg4));
+    defineReturn(lir, ins);
+    assignSafepoint(lir, ins);
+}
+
+void
 LIRGenerator::visitArrayJoin(MArrayJoin* ins)
 {
     MOZ_ASSERT(ins->type() == MIRType_String);
     MOZ_ASSERT(ins->array()->type() == MIRType_Object);
     MOZ_ASSERT(ins->sep()->type() == MIRType_String);
 
     LArrayJoin* lir = new(alloc()) LArrayJoin(useRegisterAtStart(ins->array()),
                                               useRegisterAtStart(ins->sep()));
--- a/js/src/jit/Lowering.h
+++ b/js/src/jit/Lowering.h
@@ -202,16 +202,17 @@ class LIRGenerator : public LIRGenerator
     void visitStoreElementHole(MStoreElementHole* ins);
     void visitStoreUnboxedObjectOrNull(MStoreUnboxedObjectOrNull* ins);
     void visitStoreUnboxedString(MStoreUnboxedString* ins);
     void visitConvertUnboxedObjectToNative(MConvertUnboxedObjectToNative* ins);
     void visitEffectiveAddress(MEffectiveAddress* ins);
     void visitArrayPopShift(MArrayPopShift* ins);
     void visitArrayPush(MArrayPush* ins);
     void visitArrayConcat(MArrayConcat* ins);
+    void visitArraySlice(MArraySlice* ins);
     void visitArrayJoin(MArrayJoin* ins);
     void visitLoadUnboxedScalar(MLoadUnboxedScalar* ins);
     void visitLoadTypedArrayElementHole(MLoadTypedArrayElementHole* ins);
     void visitLoadTypedArrayElementStatic(MLoadTypedArrayElementStatic* ins);
     void visitStoreUnboxedScalar(MStoreUnboxedScalar* ins);
     void visitStoreTypedArrayElementHole(MStoreTypedArrayElementHole* ins);
     void visitClampToUint8(MClampToUint8* ins);
     void visitLoadFixedSlot(MLoadFixedSlot* ins);
--- a/js/src/jit/MCallOptimize.cpp
+++ b/js/src/jit/MCallOptimize.cpp
@@ -77,16 +77,18 @@ IonBuilder::inlineNativeCall(CallInfo& c
     if (native == js::array_pop)
         return inlineArrayPopShift(callInfo, MArrayPopShift::Pop);
     if (native == js::array_shift)
         return inlineArrayPopShift(callInfo, MArrayPopShift::Shift);
     if (native == js::array_push)
         return inlineArrayPush(callInfo);
     if (native == js::array_concat)
         return inlineArrayConcat(callInfo);
+    if (native == js::array_slice)
+        return inlineArraySlice(callInfo);
     if (native == js::array_splice)
         return inlineArraySplice(callInfo);
 
     // Math natives.
     if (native == js::math_abs)
         return inlineMathAbs(callInfo);
     if (native == js::math_floor)
         return inlineMathFloor(callInfo);
@@ -977,16 +979,117 @@ IonBuilder::inlineArrayConcat(CallInfo& 
     current->push(ins);
 
     if (!resumeAfter(ins))
         return InliningStatus_Error;
     return InliningStatus_Inlined;
 }
 
 IonBuilder::InliningStatus
+IonBuilder::inlineArraySlice(CallInfo& callInfo)
+{
+    if (callInfo.constructing()) {
+        trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
+        return InliningStatus_NotInlined;
+    }
+
+    // Ensure |this| and result are objects.
+    if (getInlineReturnType() != MIRType_Object)
+        return InliningStatus_NotInlined;
+    if (callInfo.thisArg()->type() != MIRType_Object)
+        return InliningStatus_NotInlined;
+
+    // Arguments for the sliced region must be integers.
+    if (callInfo.argc() > 0) {
+        if (callInfo.getArg(0)->type() != MIRType_Int32)
+            return InliningStatus_NotInlined;
+        if (callInfo.argc() > 1) {
+            if (callInfo.getArg(1)->type() != MIRType_Int32)
+                return InliningStatus_NotInlined;
+        }
+    }
+
+    // |this| must be a dense array.
+    TemporaryTypeSet* thisTypes = callInfo.thisArg()->resultTypeSet();
+    if (!thisTypes)
+        return InliningStatus_NotInlined;
+
+    const Class* clasp = thisTypes->getKnownClass(constraints());
+    if (clasp != &ArrayObject::class_ && clasp != &UnboxedArrayObject::class_)
+        return InliningStatus_NotInlined;
+    if (thisTypes->hasObjectFlags(constraints(), OBJECT_FLAG_SPARSE_INDEXES |
+                                  OBJECT_FLAG_LENGTH_OVERFLOW))
+    {
+        trackOptimizationOutcome(TrackedOutcome::ArrayBadFlags);
+        return InliningStatus_NotInlined;
+    }
+
+    JSValueType unboxedType = JSVAL_TYPE_MAGIC;
+    if (clasp == &UnboxedArrayObject::class_) {
+        unboxedType = UnboxedArrayElementType(constraints(), callInfo.thisArg(), nullptr);
+        if (unboxedType == JSVAL_TYPE_MAGIC)
+            return InliningStatus_NotInlined;
+    }
+
+    // Watch out for indexed properties on the prototype.
+    if (ArrayPrototypeHasIndexedProperty(constraints(), script())) {
+        trackOptimizationOutcome(TrackedOutcome::ProtoIndexedProps);
+        return InliningStatus_NotInlined;
+    }
+
+    // The group of the result will be dynamically fixed up to match the input
+    // object, allowing us to handle 'this' objects that might have more than
+    // one group. Make sure that no singletons can be sliced here.
+    for (unsigned i = 0; i < thisTypes->getObjectCount(); i++) {
+        TypeSet::ObjectKey* key = thisTypes->getObject(i);
+        if (key && key->isSingleton())
+            return InliningStatus_NotInlined;
+    }
+
+    // Inline the call.
+    JSObject* templateObj = inspector->getTemplateObjectForNative(pc, js::array_slice);
+    if (!templateObj)
+        return InliningStatus_NotInlined;
+
+    callInfo.setImplicitlyUsedUnchecked();
+
+    MDefinition* begin;
+    if (callInfo.argc() > 0)
+        begin = callInfo.getArg(0);
+    else
+        begin = constant(Int32Value(0));
+
+    MDefinition* end;
+    if (callInfo.argc() > 1) {
+        end = callInfo.getArg(1);
+    } else if (clasp == &ArrayObject::class_) {
+        MElements* elements = MElements::New(alloc(), callInfo.thisArg());
+        current->add(elements);
+
+        end = MArrayLength::New(alloc(), elements);
+        current->add(end->toInstruction());
+    } else {
+        end = MUnboxedArrayLength::New(alloc(), callInfo.thisArg());
+        current->add(end->toInstruction());
+    }
+
+    MArraySlice* ins = MArraySlice::New(alloc(), constraints(),
+                                        callInfo.thisArg(), begin, end,
+                                        templateObj,
+                                        templateObj->group()->initialHeap(constraints()),
+                                        unboxedType);
+    current->add(ins);
+    current->push(ins);
+
+    if (!resumeAfter(ins))
+        return InliningStatus_Error;
+    return InliningStatus_Inlined;
+}
+
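Taken together, the guards above mean only calls of this shape are inlined — a dense, non-singleton array receiver, int32 slice bounds, and a template object recorded by Baseline; everything else stays on the generic call path:

    // JS shapes accepted by the guards above (shown as comments):
    //   xs.slice();        // begin defaults to 0, end to xs's length
    //   xs.slice(1);       // int32 begin
    //   xs.slice(1, 3);    // int32 begin and end
    // Rejected: constructor calls, non-int32 bounds, sparse or
    // length-overflowed arrays, singleton receivers, and arrays whose
    // prototype chain has indexed properties.
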
+IonBuilder::InliningStatus
 IonBuilder::inlineMathAbs(CallInfo& callInfo)
 {
     if (callInfo.argc() != 1 || callInfo.constructing()) {
         trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
         return InliningStatus_NotInlined;
     }
 
     MIRType returnType = getInlineReturnType();
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -9148,16 +9148,80 @@ class MArrayConcat
         return AliasSet::Store(AliasSet::BoxedOrUnboxedElements(unboxedType()) |
                                AliasSet::ObjectFields);
     }
     bool possiblyCalls() const override {
         return true;
     }
 };
 
+// Array.prototype.slice on a dense array.
+class MArraySlice
+  : public MTernaryInstruction,
+    public Mix3Policy<ObjectPolicy<0>, IntPolicy<1>, IntPolicy<2>>::Data
+{
+    AlwaysTenuredObject templateObj_;
+    gc::InitialHeap initialHeap_;
+    JSValueType unboxedType_;
+
+    MArraySlice(CompilerConstraintList* constraints, MDefinition* obj,
+                MDefinition* begin, MDefinition* end,
+                JSObject* templateObj, gc::InitialHeap initialHeap, JSValueType unboxedType)
+      : MTernaryInstruction(obj, begin, end),
+        templateObj_(templateObj),
+        initialHeap_(initialHeap),
+        unboxedType_(unboxedType)
+    {
+        setResultType(MIRType_Object);
+        setResultTypeSet(obj->resultTypeSet());
+    }
+
+  public:
+    INSTRUCTION_HEADER(ArraySlice)
+
+    static MArraySlice* New(TempAllocator& alloc, CompilerConstraintList* constraints,
+                            MDefinition* obj, MDefinition* begin, MDefinition* end,
+                            JSObject* templateObj, gc::InitialHeap initialHeap,
+                            JSValueType unboxedType)
+    {
+        return new(alloc) MArraySlice(constraints, obj, begin, end, templateObj,
+                                      initialHeap, unboxedType);
+    }
+
+    MDefinition* object() const {
+        return getOperand(0);
+    }
+    MDefinition* begin() const {
+        return getOperand(1);
+    }
+    MDefinition* end() const {
+        return getOperand(2);
+    }
+
+    JSObject* templateObj() const {
+        return templateObj_;
+    }
+
+    gc::InitialHeap initialHeap() const {
+        return initialHeap_;
+    }
+
+    JSValueType unboxedType() const {
+        return unboxedType_;
+    }
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::Store(AliasSet::BoxedOrUnboxedElements(unboxedType()) |
+                               AliasSet::ObjectFields);
+    }
+    bool possiblyCalls() const override {
+        return true;
+    }
+};
+
 class MArrayJoin
     : public MBinaryInstruction,
       public MixPolicy<ObjectPolicy<0>, StringPolicy<1> >::Data
 {
     MArrayJoin(MDefinition* array, MDefinition* sep)
         : MBinaryInstruction(array, sep)
     {
         setResultType(MIRType_String);
--- a/js/src/jit/MOpcodes.h
+++ b/js/src/jit/MOpcodes.h
@@ -195,16 +195,17 @@ namespace jit {
     _(StoreElementHole)                                                     \
     _(StoreUnboxedScalar)                                                   \
     _(StoreUnboxedObjectOrNull)                                             \
     _(StoreUnboxedString)                                                   \
     _(ConvertUnboxedObjectToNative)                                         \
     _(ArrayPopShift)                                                        \
     _(ArrayPush)                                                            \
     _(ArrayConcat)                                                          \
+    _(ArraySlice)                                                           \
     _(ArrayJoin)                                                            \
     _(LoadTypedArrayElementHole)                                            \
     _(LoadTypedArrayElementStatic)                                          \
     _(StoreTypedArrayElementHole)                                           \
     _(StoreTypedArrayElementStatic)                                         \
     _(CompareExchangeTypedArrayElement)                                     \
     _(AtomicTypedArrayElementBinop)                                         \
     _(EffectiveAddress)                                                     \
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -21,22 +21,22 @@ using namespace js;
 using namespace js::jit;
 
 using mozilla::CountLeadingZeroes32;
 
 void dbg_break() {}
 
 // Note this is used for inter-AsmJS calls and may pass arguments and results in
 // floating point registers even if the system ABI does not.
-ABIArgGenerator::ABIArgGenerator() :
-    intRegIndex_(0),
+ABIArgGenerator::ABIArgGenerator()
+  : intRegIndex_(0),
     floatRegIndex_(0),
     stackOffset_(0),
     current_()
-{}
+{ }
 
 ABIArg
 ABIArgGenerator::next(MIRType type)
 {
     switch (type) {
       case MIRType_Int32:
       case MIRType_Pointer:
         if (intRegIndex_ == NumIntArgRegs) {
@@ -493,25 +493,27 @@ InstMOV::AsTHIS(const Instruction& i)
 
 bool
 InstMOV::IsTHIS(const Instruction& i)
 {
     return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkOp1(r0) && InstALU::AsTHIS(i)->checkOp(OpMov);
 }
 
 Op2Reg
-Operand2::toOp2Reg() {
+Operand2::toOp2Reg() const {
     return *(Op2Reg*)this;
 }
+
 O2RegImmShift
-Op2Reg::toO2RegImmShift() {
+Op2Reg::toO2RegImmShift() const {
     return *(O2RegImmShift*)this;
 }
+
 O2RegRegShift
-Op2Reg::toO2RegRegShift() {
+Op2Reg::toO2RegRegShift() const {
     return *(O2RegRegShift*)this;
 }
 
 Imm16::Imm16(Instruction& inst)
   : lower(inst.encode() & 0xfff),
     upper(inst.encode() >> 16),
     invalid(0xfff)
 { }
@@ -1252,17 +1254,17 @@ js::jit::VFPImm::VFPImm(uint32_t top)
 }
 
 BOffImm::BOffImm(Instruction& inst)
   : data(inst.encode() & 0x00ffffff)
 {
 }
 
 Instruction*
-BOffImm::getDest(Instruction* src)
+BOffImm::getDest(Instruction* src) const
 {
     // TODO: It is probably worthwhile to verify that src is actually a branch.
     // NOTE: This does not explicitly shift the offset of the destination left by 2,
     // since it is indexing into an array of instruction sized objects.
     return &src[(((int32_t)data << 8) >> 8) + 2];
 }
 
 const js::jit::DoubleEncoder::DoubleEntry js::jit::DoubleEncoder::table[256] = {
@@ -1420,130 +1422,129 @@ Assembler::nopAlign(int alignment)
 
 BufferOffset
 Assembler::as_nop()
 {
     return writeInst(0xe320f000);
 }
 
 static uint32_t
-EncodeAlu(Register dest, Register src1, Operand2 op2, ALUOp op, SetCond_ sc,
-          Assembler::Condition c)
+EncodeAlu(Register dest, Register src1, Operand2 op2, ALUOp op, SBit s, Assembler::Condition c)
 {
-    return (int)op | (int)sc | (int) c | op2.encode() |
+    return (int)op | (int)s | (int)c | op2.encode() |
            ((dest == InvalidReg) ? 0 : RD(dest)) |
            ((src1 == InvalidReg) ? 0 : RN(src1));
 }
 
 BufferOffset
 Assembler::as_alu(Register dest, Register src1, Operand2 op2,
-                  ALUOp op, SetCond_ sc, Condition c)
+                  ALUOp op, SBit s, Condition c)
 {
-    return writeInst(EncodeAlu(dest, src1, op2, op, sc, c));
+    return writeInst(EncodeAlu(dest, src1, op2, op, s, c));
 }
 
 BufferOffset
-Assembler::as_mov(Register dest, Operand2 op2, SetCond_ sc, Condition c)
+Assembler::as_mov(Register dest, Operand2 op2, SBit s, Condition c)
 {
-    return as_alu(dest, InvalidReg, op2, OpMov, sc, c);
+    return as_alu(dest, InvalidReg, op2, OpMov, s, c);
 }
 
 /* static */ void
-Assembler::as_alu_patch(Register dest, Register src1, Operand2 op2, ALUOp op, SetCond_ sc,
+Assembler::as_alu_patch(Register dest, Register src1, Operand2 op2, ALUOp op, SBit s,
                         Condition c, uint32_t* pos)
 {
-    WriteInstStatic(EncodeAlu(dest, src1, op2, op, sc, c), pos);
+    WriteInstStatic(EncodeAlu(dest, src1, op2, op, s, c), pos);
 }
 
 /* static */ void
-Assembler::as_mov_patch(Register dest, Operand2 op2, SetCond_ sc, Condition c, uint32_t* pos)
+Assembler::as_mov_patch(Register dest, Operand2 op2, SBit s, Condition c, uint32_t* pos)
 {
-    as_alu_patch(dest, InvalidReg, op2, OpMov, sc, c, pos);
+    as_alu_patch(dest, InvalidReg, op2, OpMov, s, c, pos);
 }
 
 BufferOffset
-Assembler::as_mvn(Register dest, Operand2 op2, SetCond_ sc, Condition c)
+Assembler::as_mvn(Register dest, Operand2 op2, SBit s, Condition c)
 {
-    return as_alu(dest, InvalidReg, op2, OpMvn, sc, c);
+    return as_alu(dest, InvalidReg, op2, OpMvn, s, c);
 }
 
 // Logical operations.
 BufferOffset
-Assembler::as_and(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
+Assembler::as_and(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
 {
-    return as_alu(dest, src1, op2, OpAnd, sc, c);
+    return as_alu(dest, src1, op2, OpAnd, s, c);
 }
 BufferOffset
-Assembler::as_bic(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
+Assembler::as_bic(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
 {
-    return as_alu(dest, src1, op2, OpBic, sc, c);
+    return as_alu(dest, src1, op2, OpBic, s, c);
 }
 BufferOffset
-Assembler::as_eor(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
+Assembler::as_eor(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
 {
-    return as_alu(dest, src1, op2, OpEor, sc, c);
+    return as_alu(dest, src1, op2, OpEor, s, c);
 }
 BufferOffset
-Assembler::as_orr(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
+Assembler::as_orr(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
 {
-    return as_alu(dest, src1, op2, OpOrr, sc, c);
+    return as_alu(dest, src1, op2, OpOrr, s, c);
 }
 
 // Mathematical operations.
 BufferOffset
-Assembler::as_adc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
+Assembler::as_adc(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
 {
-    return as_alu(dest, src1, op2, OpAdc, sc, c);
+    return as_alu(dest, src1, op2, OpAdc, s, c);
 }
 BufferOffset
-Assembler::as_add(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
+Assembler::as_add(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
 {
-    return as_alu(dest, src1, op2, OpAdd, sc, c);
+    return as_alu(dest, src1, op2, OpAdd, s, c);
 }
 BufferOffset
-Assembler::as_sbc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
+Assembler::as_sbc(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
 {
-    return as_alu(dest, src1, op2, OpSbc, sc, c);
+    return as_alu(dest, src1, op2, OpSbc, s, c);
 }
 BufferOffset
-Assembler::as_sub(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
+Assembler::as_sub(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
 {
-    return as_alu(dest, src1, op2, OpSub, sc, c);
+    return as_alu(dest, src1, op2, OpSub, s, c);
 }
 BufferOffset
-Assembler::as_rsb(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
+Assembler::as_rsb(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
 {
-    return as_alu(dest, src1, op2, OpRsb, sc, c);
+    return as_alu(dest, src1, op2, OpRsb, s, c);
 }
 BufferOffset
-Assembler::as_rsc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
+Assembler::as_rsc(Register dest, Register src1, Operand2 op2, SBit s, Condition c)
 {
-    return as_alu(dest, src1, op2, OpRsc, sc, c);
+    return as_alu(dest, src1, op2, OpRsc, s, c);
 }
 
 // Test operations.
 BufferOffset
 Assembler::as_cmn(Register src1, Operand2 op2, Condition c)
 {
-    return as_alu(InvalidReg, src1, op2, OpCmn, SetCond, c);
+    return as_alu(InvalidReg, src1, op2, OpCmn, SetCC, c);
 }
 BufferOffset
 Assembler::as_cmp(Register src1, Operand2 op2, Condition c)
 {
-    return as_alu(InvalidReg, src1, op2, OpCmp, SetCond, c);
+    return as_alu(InvalidReg, src1, op2, OpCmp, SetCC, c);
 }
 BufferOffset
 Assembler::as_teq(Register src1, Operand2 op2, Condition c)
 {
-    return as_alu(InvalidReg, src1, op2, OpTeq, SetCond, c);
+    return as_alu(InvalidReg, src1, op2, OpTeq, SetCC, c);
 }
 BufferOffset
 Assembler::as_tst(Register src1, Operand2 op2, Condition c)
 {
-    return as_alu(InvalidReg, src1, op2, OpTst, SetCond, c);
+    return as_alu(InvalidReg, src1, op2, OpTst, SetCC, c);
 }
 
 static MOZ_CONSTEXPR_VAR Register NoAddend = { Registers::pc };
 
 static const int SignExtend = 0x06000070;
 
 enum SignExtend {
     SxSxtb = 10 << 20,
@@ -1614,69 +1615,69 @@ Assembler::as_movt_patch(Register dest, 
 {
     WriteInstStatic(EncodeMovT(dest, imm, c), (uint32_t*)pos);
 }
 
 static const int mull_tag = 0x90;
 
 BufferOffset
 Assembler::as_genmul(Register dhi, Register dlo, Register rm, Register rn,
-                     MULOp op, SetCond_ sc, Condition c)
+                     MULOp op, SBit s, Condition c)
 {
 
-    return writeInst(RN(dhi) | maybeRD(dlo) | RM(rm) | rn.code() | op | sc | c | mull_tag);
+    return writeInst(RN(dhi) | maybeRD(dlo) | RM(rm) | rn.code() | op | s | c | mull_tag);
 }
 BufferOffset
-Assembler::as_mul(Register dest, Register src1, Register src2, SetCond_ sc, Condition c)
+Assembler::as_mul(Register dest, Register src1, Register src2, SBit s, Condition c)
 {
-    return as_genmul(dest, InvalidReg, src1, src2, OpmMul, sc, c);
+    return as_genmul(dest, InvalidReg, src1, src2, OpmMul, s, c);
 }
 BufferOffset
 Assembler::as_mla(Register dest, Register acc, Register src1, Register src2,
-                  SetCond_ sc, Condition c)
+                  SBit s, Condition c)
 {
-    return as_genmul(dest, acc, src1, src2, OpmMla, sc, c);
+    return as_genmul(dest, acc, src1, src2, OpmMla, s, c);
 }
 BufferOffset
 Assembler::as_umaal(Register destHI, Register destLO, Register src1, Register src2, Condition c)
 {
-    return as_genmul(destHI, destLO, src1, src2, OpmUmaal, NoSetCond, c);
+    return as_genmul(destHI, destLO, src1, src2, OpmUmaal, LeaveCC, c);
 }
 BufferOffset
 Assembler::as_mls(Register dest, Register acc, Register src1, Register src2, Condition c)
 {
-    return as_genmul(dest, acc, src1, src2, OpmMls, NoSetCond, c);
+    return as_genmul(dest, acc, src1, src2, OpmMls, LeaveCC, c);
 }
 
 BufferOffset
 Assembler::as_umull(Register destHI, Register destLO, Register src1, Register src2,
-                    SetCond_ sc, Condition c)
+                    SBit s, Condition c)
 {
-    return as_genmul(destHI, destLO, src1, src2, OpmUmull, sc, c);
+    return as_genmul(destHI, destLO, src1, src2, OpmUmull, s, c);
 }
 
 BufferOffset
 Assembler::as_umlal(Register destHI, Register destLO, Register src1, Register src2,
-                    SetCond_ sc, Condition c)
+                    SBit s, Condition c)
 {
-    return as_genmul(destHI, destLO, src1, src2, OpmUmlal, sc, c);
+    return as_genmul(destHI, destLO, src1, src2, OpmUmlal, s, c);
 }
 
 BufferOffset
 Assembler::as_smull(Register destHI, Register destLO, Register src1, Register src2,
-                    SetCond_ sc, Condition c)
+                    SBit s, Condition c)
 {
-    return as_genmul(destHI, destLO, src1, src2, OpmSmull, sc, c);
+    return as_genmul(destHI, destLO, src1, src2, OpmSmull, s, c);
 }
 
 BufferOffset
 Assembler::as_smlal(Register destHI, Register destLO, Register src1, Register src2,
-                    SetCond_ sc, Condition c)
+                    SBit s, Condition c)
 {
-    return as_genmul(destHI, destLO, src1, src2, OpmSmlal, sc, c);
+    return as_genmul(destHI, destLO, src1, src2, OpmSmlal, s, c);
 }
 
 BufferOffset
 Assembler::as_sdiv(Register rd, Register rn, Register rm, Condition c)
 {
     return writeInst(0x0710f010 | c | RN(rd) | RM(rm) | rn.code());
 }
 
@@ -1711,17 +1712,18 @@ Assembler::as_dtr(LoadStore ls, int size
 
 /* static */ void
 Assembler::as_dtr_patch(LoadStore ls, int size, Index mode, Register rt, DTRAddr addr, Condition c,
                         uint32_t* dest)
 {
     WriteInstStatic(EncodeDtr(ls, size, mode, rt, addr, c), dest);
 }
 
-class PoolHintData {
+class PoolHintData
+{
   public:
     enum LoadType {
         // Use 0 for bogus, since that is the value most likely to be
         // accidentally left somewhere.
         PoolBOGUS  = 0,
         PoolDTR    = 1,
         PoolBranch = 2,
         PoolVDTR   = 3
@@ -1754,57 +1756,58 @@ class PoolHintData {
         MOZ_ASSERT(index_ == index);
         cond_ = cond >> 28;
         MOZ_ASSERT(cond_ == cond >> 28);
         loadType_ = lt;
         ONES = ExpectedOnes;
         destReg_ = destReg.id();
         destType_ = destReg.isDouble();
     }
-    Assembler::Condition getCond() {
+    Assembler::Condition getCond() const {
         return Assembler::Condition(cond_ << 28);
     }
 
-    Register getReg() {
+    Register getReg() const {
         return Register::FromCode(destReg_);
     }
-    VFPRegister getVFPReg() {
+    VFPRegister getVFPReg() const {
         VFPRegister r = VFPRegister(destReg_, destType_ ? VFPRegister::Double : VFPRegister::Single);
         return r;
     }
 
-    int32_t getIndex() {
+    int32_t getIndex() const {
         return index_;
     }
     void setIndex(uint32_t index) {
         MOZ_ASSERT(ONES == ExpectedOnes && loadType_ != PoolBOGUS);
         index_ = index;
         MOZ_ASSERT(index_ == index);
     }
 
-    LoadType getLoadType() {
+    LoadType getLoadType() const {
         // If this *was* a PoolBranch, and the branch has already been bound,
         // then this isn't going to look like a real PoolHintData, but we still
         // want to lie about it so everyone knows it *used* to be a branch.
         if (ONES != ExpectedOnes)
             return PoolHintData::PoolBranch;
         return loadType_;
     }
 
-    bool isValidPoolHint() {
+    bool isValidPoolHint() const {
         // Most instructions cannot have a condition that is 0xf. Notable
         // exceptions are blx and the entire NEON instruction set. For the
         // purposes of pool loads, and possibly patched branches, the possible
         // instructions are ldr and b, neither of which can have a condition
         // code of 0xf.
         return ONES == ExpectedOnes;
     }
 };
 
-union PoolHintPun {
+union PoolHintPun
+{
     PoolHintData phd;
     uint32_t raw;
 };
 
 // Handles all of the other integral data transferring functions: ldrsb, ldrsh,
 // ldrd, etc. The size is given in bits.
 BufferOffset
 Assembler::as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
@@ -1839,18 +1842,17 @@ Assembler::as_extdtr(LoadStore ls, int s
     return writeInst(extra_bits2 << 5 | extra_bits1 << 20 | 0x90 |
                      addr.encode() | RT(rt) | mode | c);
 }
 
 BufferOffset
 Assembler::as_dtm(LoadStore ls, Register rn, uint32_t mask,
                 DTMMode mode, DTMWriteBack wb, Condition c)
 {
-    return writeInst(0x08000000 | RN(rn) | ls |
-                     mode | mask | c | wb);
+    return writeInst(0x08000000 | RN(rn) | ls | mode | mask | c | wb);
 }
 
 BufferOffset
 Assembler::as_Imm32Pool(Register dest, uint32_t value, Condition c)
 {
     PoolHintPun php;
     php.phd.init(0, c, PoolHintData::PoolDTR, dest);
     return m_buffer.allocEntry(1, 1, (uint8_t*)&php.raw, (uint8_t*)&value);
@@ -2051,22 +2053,24 @@ Assembler::as_isb_trap()
 
 // bx can *only* branch to a register, never to an immediate.
 BufferOffset
 Assembler::as_bx(Register r, Condition c)
 {
     BufferOffset ret = writeInst(((int) c) | OpBx | r.code());
     return ret;
 }
+
 void
 Assembler::WritePoolGuard(BufferOffset branch, Instruction* dest, BufferOffset afterPool)
 {
     BOffImm off = afterPool.diffB<BOffImm>(branch);
     *dest = InstBImm(off, Always);
 }
+
 // Branch can branch to an immediate *or* to a register.
 // Branches to immediates are pc relative, branches to registers are absolute.
 BufferOffset
 Assembler::as_b(BOffImm off, Condition c)
 {
     BufferOffset ret = writeBranchInst(((int)c) | OpB | off.encode());
     return ret;
 }
@@ -2101,16 +2105,17 @@ Assembler::as_b(Label* l, Condition c)
         old = LabelBase::INVALID_OFFSET;
         BOffImm inv;
         ret = as_b(inv, c);
     }
     DebugOnly<int32_t> check = l->use(ret.getOffset());
     MOZ_ASSERT(check == old);
     return ret;
 }
+
 BufferOffset
 Assembler::as_b(BOffImm off, Condition c, BufferOffset inst)
 {
     *editSrc(inst) = InstBImm(off, c);
     return inst;
 }
 
 // blx can go to either an immediate or a register.
@@ -2163,16 +2168,17 @@ Assembler::as_bl(Label* l, Condition c)
         old = LabelBase::INVALID_OFFSET;
         BOffImm inv;
         ret = as_bl(inv, c);
     }
     DebugOnly<int32_t> check = l->use(ret.getOffset());
     MOZ_ASSERT(check == old);
     return ret;
 }
+
 BufferOffset
 Assembler::as_bl(BOffImm off, Condition c, BufferOffset inst)
 {
     *editSrc(inst) = InstBLImm(off, c);
     return inst;
 }
 
 BufferOffset
@@ -2190,16 +2196,17 @@ Assembler::as_msr(Register r, Condition 
     return writeInst(0x012cf000 | int(c) | r.code());
 }
 
 // VFP instructions!
 enum vfp_tags {
     VfpTag   = 0x0C000A00,
     VfpArith = 0x02000000
 };
+
 BufferOffset
 Assembler::writeVFPInst(vfp_size sz, uint32_t blob)
 {
     MOZ_ASSERT((sz & blob) == 0);
     MOZ_ASSERT((VfpTag & blob) == 0);
     return writeInst(VfpTag | sz | blob);
 }
 
@@ -2220,53 +2227,47 @@ Assembler::as_vfp_float(VFPRegister vd, 
     // Make sure we believe that all of our operands are the same kind.
     MOZ_ASSERT_IF(!vn.isMissing(), vd.equiv(vn));
     MOZ_ASSERT_IF(!vm.isMissing(), vd.equiv(vm));
     vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
     return writeVFPInst(sz, VD(vd) | VN(vn) | VM(vm) | op | VfpArith | c);
 }
 
 BufferOffset
-Assembler::as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                 Condition c)
+Assembler::as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
 {
     return as_vfp_float(vd, vn, vm, OpvAdd, c);
 }
 
 BufferOffset
-Assembler::as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                 Condition c)
+Assembler::as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
 {
     return as_vfp_float(vd, vn, vm, OpvDiv, c);
 }
 
 BufferOffset
-Assembler::as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                 Condition c)
+Assembler::as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
 {
     return as_vfp_float(vd, vn, vm, OpvMul, c);
 }
 
 BufferOffset
-Assembler::as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                  Condition c)
+Assembler::as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
 {
     return as_vfp_float(vd, vn, vm, OpvMul, c);
 }
 
 BufferOffset
-Assembler::as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                  Condition c)
+Assembler::as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
 {
     MOZ_CRASH("Feature NYI");
 }
 
 BufferOffset
-Assembler::as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                  Condition c)
+Assembler::as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
 {
     MOZ_CRASH("Feature NYI");
 }
 
 BufferOffset
 Assembler::as_vneg(VFPRegister vd, VFPRegister vm, Condition c)
 {
     return as_vfp_float(vd, NoVFPRegister, vm, OpvNeg, c);
@@ -2280,40 +2281,40 @@ Assembler::as_vsqrt(VFPRegister vd, VFPR
 
 BufferOffset
 Assembler::as_vabs(VFPRegister vd, VFPRegister vm, Condition c)
 {
     return as_vfp_float(vd, NoVFPRegister, vm, OpvAbs, c);
 }
 
 BufferOffset
-Assembler::as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                 Condition c)
+Assembler::as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c)
 {
     return as_vfp_float(vd, vn, vm, OpvSub, c);
 }
 
 BufferOffset
-Assembler::as_vcmp(VFPRegister vd, VFPRegister vm,
-                 Condition c)
+Assembler::as_vcmp(VFPRegister vd, VFPRegister vm, Condition c)
 {
     return as_vfp_float(vd, NoVFPRegister, vm, OpvCmp, c);
 }
+
 BufferOffset
 Assembler::as_vcmpz(VFPRegister vd, Condition c)
 {
     return as_vfp_float(vd, NoVFPRegister, NoVFPRegister, OpvCmpz, c);
 }
 
 // Specifically, a move between two same sized-registers.
 BufferOffset
 Assembler::as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c)
 {
     return as_vfp_float(vd, NoVFPRegister, vsrc, OpvMov, c);
 }
+
 // Transfer between Core and VFP.
 
 // Unlike the next function, moving between the core registers and vfp registers
 // can't be typed *that* properly, since I don't want to munge the type
 // VFPRegister to also include core registers. Thus, the core and vfp registers
 // are passed in based on their type, and src/dest is determined by the
 // float2core flag.
 
@@ -2334,25 +2335,23 @@ Assembler::as_vxfer(Register vt1, Regist
         // If we are transferring a single half of the double then it must be
         // moving a VFP reg to a core reg.
         MOZ_ASSERT_IF(vt2 == InvalidReg, f2c == FloatToCore);
         idx = idx << 21;
     } else {
         MOZ_ASSERT(idx == 0);
     }
 
-    if (vt2 == InvalidReg) {
-        return writeVFPInst(sz, WordTransfer | f2c | c |
-                            RT(vt1) | maybeRN(vt2) | VN(vm) | idx);
-    } else {
-        // We are doing a 64 bit transfer.
-        return writeVFPInst(sz, DoubleTransfer | f2c | c |
-                            RT(vt1) | maybeRN(vt2) | VM(vm) | idx);
-    }
+    if (vt2 == InvalidReg)
+        return writeVFPInst(sz, WordTransfer | f2c | c | RT(vt1) | maybeRN(vt2) | VN(vm) | idx);
+
+    // We are doing a 64 bit transfer.
+    return writeVFPInst(sz, DoubleTransfer | f2c | c | RT(vt1) | maybeRN(vt2) | VM(vm) | idx);
 }
+
 enum vcvt_destFloatness {
     VcvtToInteger = 1 << 18,
     VcvtToFloat  = 0 << 18
 };
 enum vcvt_toZero {
     VcvtToZero = 1 << 7, // Use the default rounding mode, which truncates.
     VcvtToFPSCR = 0 << 7 // Use whatever rounding mode the fpscr specifies.
 };
@@ -2379,19 +2378,19 @@ Assembler::as_vcvt(VFPRegister vd, VFPRe
         return writeVFPInst(sz, c | 0x02B700C0 | VM(vm) | VD(vd));
     }
 
     // At least one of the registers should be a float.
     vcvt_destFloatness destFloat;
     vcvt_Signedness opSign;
     vcvt_toZero doToZero = VcvtToFPSCR;
     MOZ_ASSERT(vd.isFloat() || vm.isFloat());
-    if (vd.isSingle() || vm.isSingle()) {
+    if (vd.isSingle() || vm.isSingle())
         sz = IsSingle;
-    }
+
     if (vd.isFloat()) {
         destFloat = VcvtToFloat;
         opSign = (vm.isSInt()) ? VcvtFromSigned : VcvtFromUnsigned;
     } else {
         destFloat = VcvtToInteger;
         opSign = (vd.isSInt()) ? VcvtToSigned : VcvtToUnsigned;
         doToZero = useFPSCR ? VcvtToFPSCR : VcvtToZero;
     }
@@ -2454,16 +2453,17 @@ Assembler::as_vdtm(LoadStore st, Registe
 BufferOffset
 Assembler::as_vimm(VFPRegister vd, VFPImm imm, Condition c)
 {
     MOZ_ASSERT(imm.isValid());
     vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
     return writeVFPInst(sz, c | imm.encode() | VD(vd) | 0x02B00000);
 }
+
 BufferOffset
 Assembler::as_vmrs(Register r, Condition c)
 {
     return writeInst(c | 0x0ef10a10 | RT(r));
 }
 
 BufferOffset
 Assembler::as_vmsr(Register r, Condition c)
@@ -2572,17 +2572,16 @@ Assembler::retarget(Label* label, Label*
             DebugOnly<uint32_t> prev = target->use(label->offset());
             MOZ_ASSERT((int32_t)prev == Label::INVALID_OFFSET);
         }
     }
     label->reset();
 
 }
 
-
 static int stopBKPT = -1;
 void
 Assembler::as_bkpt()
 {
     // This is a count of how many times a breakpoint instruction has been
     // generated. It is embedded into the instruction for debugging
     // purposes. Gdb will print "bkpt xxx" when you attempt to disassemble a
     // breakpoint with the number xxx embedded into it. If this breakpoint is
@@ -2627,16 +2626,17 @@ ptrdiff_t
 Assembler::GetBranchOffset(const Instruction* i_)
 {
     MOZ_ASSERT(i_->is<InstBranchImm>());
     InstBranchImm* i = i_->as<InstBranchImm>();
     BOffImm dest;
     i->extractImm(&dest);
     return dest.decode();
 }
+
 void
 Assembler::RetargetNearBranch(Instruction* i, int offset, bool final)
 {
     Assembler::Condition c;
     i->extractCond(&c);
     RetargetNearBranch(i, offset, c, final);
 }
 
@@ -2662,17 +2662,18 @@ Assembler::RetargetFarBranch(Instruction
     if (!i->is<InstLDR>()) {
         new (i) InstLDR(Offset, pc, DTRAddr(pc, DtrOffImm(offset - 8)), cond);
         AutoFlushICache::flush(uintptr_t(i), 4);
     }
     *slot = dest;
 
 }
 
-struct PoolHeader : Instruction {
+struct PoolHeader : Instruction
+{
     struct Header
     {
         // The size should take into account the pool header.
         // The size is in units of Instruction (4 bytes), not bytes.
         uint32_t size : 15;
         bool isNatural : 1;
         uint32_t ONES : 16;
 
@@ -2703,64 +2704,63 @@ struct PoolHeader : Instruction {
     uint32_t size() const {
         Header tmp(this);
         return tmp.size;
     }
     uint32_t isNatural() const {
         Header tmp(this);
         return tmp.isNatural;
     }
+
     static bool IsTHIS(const Instruction& i) {
         return (*i.raw() & 0xffff0000) == 0xffff0000;
     }
     static const PoolHeader* AsTHIS(const Instruction& i) {
         if (!IsTHIS(i))
             return nullptr;
         return static_cast<const PoolHeader*>(&i);
     }
 };
 
-
 void
 Assembler::WritePoolHeader(uint8_t* start, Pool* p, bool isNatural)
 {
-    static_assert(sizeof(PoolHeader) == 4,
-                  "PoolHandler must have the correct size.");
+    static_assert(sizeof(PoolHeader) == 4, "PoolHeader must have the correct size.");
     uint8_t* pool = start + 4;
     // Go through the usual rigmarole to get the size of the pool.
     pool += p->getPoolSize();
     uint32_t size = pool - start;
     MOZ_ASSERT((size & 3) == 0);
     size = size >> 2;
     MOZ_ASSERT(size < (1 << 15));
     PoolHeader header(size, isNatural);
     *(PoolHeader*)start = header;
 }
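
For orientation, here is a hedged standalone sketch of the header word WritePoolHeader() emits, per the PoolHeader bitfields above: a 15-bit size counted in 4-byte Instruction units, one isNatural bit, and sixteen high ones that IsTHIS() keys on. The helper below is illustrative, not the real implementation.

    #include <cassert>
    #include <cstdint>

    // Standalone sketch of the PoolHeader layout above: a 15-bit size counted
    // in 4-byte Instruction units, one isNatural bit, and sixteen high ones
    // that IsTHIS() uses to recognize a header.
    static uint32_t PackPoolHeader(uint32_t sizeInInsts, bool isNatural) {
        assert(sizeInInsts < (1u << 15));
        return 0xffff0000u | (uint32_t(isNatural) << 15) | sizeInInsts;
    }

    int main() {
        uint32_t h = PackPoolHeader(3, true);
        assert((h & 0xffff0000u) == 0xffff0000u); // the marker IsTHIS() checks
        assert((h & 0x7fffu) == 3);               // size field
        assert((h >> 15) & 1u);                   // isNatural bit
    }
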
 
-
 // The size of an arbitrary 32-bit call in the instruction stream. On ARM this
 // sequence is |pc = ldr pc - 4; imm32| given that we never reach the imm32.
 uint32_t
 Assembler::PatchWrite_NearCallSize()
 {
     return sizeof(uint32_t);
 }
+
 void
 Assembler::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
 {
     Instruction* inst = (Instruction*) start.raw();
     // Overwrite whatever instruction used to be here with a call. Since the
     // destination is in the same function, it will be within range of the
     // 24 << 2 byte bl instruction.
     uint8_t* dest = toCall.raw();
     new (inst) InstBLImm(BOffImm(dest - (uint8_t*)inst) , Always);
     // Ensure everyone sees the code that was just written into memory.
     AutoFlushICache::flush(uintptr_t(inst), 4);
-
 }
+
 void
 Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                    PatchedImmPtr expectedValue)
 {
     Instruction* ptr = (Instruction*) label.raw();
     InstructionIterator iter(ptr);
     Register dest;
     Assembler::RelocStyle rs;
@@ -2791,17 +2791,16 @@ void
 Assembler::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
     // Raw is going to be the return address.
     uint32_t* raw = (uint32_t*)label.raw();
     // Overwrite the 4 bytes before the return address, which will end up being
     // the call instruction.
     *(raw - 1) = imm.value;
 }
 
-
 uint8_t*
 Assembler::NextInstruction(uint8_t* inst_, uint32_t* count)
 {
     Instruction* inst = reinterpret_cast<Instruction*>(inst_);
     if (count != nullptr)
         *count += sizeof(Instruction);
     return reinterpret_cast<uint8_t*>(inst->next());
 }
@@ -2816,17 +2815,18 @@ InstIsGuard(Instruction* inst, const Poo
     if (!(inst->is<InstBXReg>() || inst->is<InstBImm>()))
         return false;
     // See if the next instruction is a pool header.
     *ph = (inst + 1)->as<const PoolHeader>();
     return *ph != nullptr;
 }
 
 static bool
-InstIsBNop(Instruction* inst) {
+InstIsBNop(Instruction* inst)
+{
     // In some special situations, it is necessary to insert a NOP into the
     // instruction stream that nobody knows about. Since nobody should know
     // about it, make sure it gets skipped when Instruction::next() is called.
     // The nop used for this is very specific: a branch to the next
     // instruction, which is what this predicate checks for.
     Assembler::Condition c;
     inst->extractCond(&c);
     if (c != Assembler::Always)
@@ -3019,23 +3019,24 @@ void Assembler::UpdateBoundsCheck(uint32
 #ifdef DEBUG
     Operand2 op = cmp->extractOp2();
     MOZ_ASSERT(op.isImm8());
 #endif
 
     Imm8 imm8 = Imm8(heapSize);
     MOZ_ASSERT(!imm8.invalid);
 
-    *inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCond, Always);
+    *inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCC, Always);
     // NOTE: We don't update the AutoFlushICache! This function is currently
     // only called from within AsmJSModule::patchHeapAccesses, which does that
     // for us. Don't call this!
 }
 
-InstructionIterator::InstructionIterator(Instruction* i_) : i(i_)
+InstructionIterator::InstructionIterator(Instruction* i_)
+  : i(i_)
 {
     // Work around pools with an artificial pool guard and around nop-fill.
     i = i->skipPool();
 }
 
 uint32_t Assembler::NopFill = 0;
 
 uint32_t
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -69,19 +69,21 @@ class ABIArgGenerator
 {
     unsigned intRegIndex_;
     unsigned floatRegIndex_;
     uint32_t stackOffset_;
     ABIArg current_;
 
   public:
     ABIArgGenerator();
+
     ABIArg next(MIRType argType);
     ABIArg& current() { return current_; }
     uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
+
     static const Register NonArgReturnReg0;
     static const Register NonArgReturnReg1;
     static const Register NonReturn_VolatileReg0;
     static const Register NonReturn_VolatileReg1;
 };
 
 static MOZ_CONSTEXPR_VAR Register PreBarrierReg = r1;
 
@@ -232,17 +234,16 @@ enum IsImmDTR_ {
     IsNotImmDTR = 1 << 25
 };
 // For the extra memory operations, ldrd, ldrsb, ldrh.
 enum IsImmEDTR_ {
     IsImmEDTR    = 1 << 22,
     IsNotImmEDTR = 0 << 22
 };
 
-
 enum ShiftType {
     LSL = 0, // << 5
     LSR = 1, // << 5
     ASR = 2, // << 5
     ROR = 3, // << 5
     RRX = ROR // RRX is encoded as ROR with a 0 offset.
 };
 
@@ -268,24 +269,27 @@ enum DTMMode {
     IB = I | B
 };
 
 enum DTMWriteBack {
     WriteBack   = 1 << 21,
     NoWriteBack = 0 << 21
 };
 
-enum SetCond_ {
-    SetCond   = 1 << 20,
-    NoSetCond = 0 << 20
+// Condition code updating mode.
+enum SBit {
+    SetCC   = 1 << 20,  // Set condition code.
+    LeaveCC = 0 << 20   // Leave condition code unchanged.
 };
+
 enum LoadStore {
     IsLoad  = 1 << 20,
     IsStore = 0 << 20
 };
+
 // You almost never want to use this directly. Instead, you want to pass in a
 // signed constant, and let this bit be implicitly set for you. This is,
 // however, necessary if we want a negative index.
 enum IsUp_ {
     IsUp   = 1 << 23,
     IsDown = 0 << 23
 };
 enum ALUOp {
@@ -336,26 +340,29 @@ enum VFPOp {
     OpvDiv  = 0x8 << 20,
     OpvMov  = 0xB << 20 | 0x1 << 6,
     OpvAbs  = 0xB << 20 | 0x3 << 6,
     OpvNeg  = 0xB << 20 | 0x1 << 6 | 0x1 << 16,
     OpvSqrt = 0xB << 20 | 0x3 << 6 | 0x1 << 16,
     OpvCmp  = 0xB << 20 | 0x1 << 6 | 0x4 << 16,
     OpvCmpz  = 0xB << 20 | 0x1 << 6 | 0x5 << 16
 };
+
 // Negate the operation, AND negate the immediate that we were passed in.
 ALUOp ALUNeg(ALUOp op, Register dest, Imm32* imm, Register* negDest);
 bool can_dbl(ALUOp op);
 bool condsAreSafe(ALUOp op);
+
 // If there is a variant of op that has a dest (think cmp/sub) return that
 // variant of it.
 ALUOp getDestVariant(ALUOp op);
 
 static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data);
 static const ValueOperand softfpReturnOperand = ValueOperand(r1, r0);
+
 // All of these classes exist solely to shuffle data into the various operands.
 // For example Operand2 can be an imm8, a register-shifted-by-a-constant or a
 // register-shifted-by-a-register. We represent this in C++ by having a base
 // class Operand2, which just stores the 32 bits of data as they will be encoded
 // in the instruction. You cannot directly create an Operand2 since it is
 // tricky, and not entirely sane to do so. Instead, you create one of its child
 // classes, e.g. Imm8. Imm8's constructor takes a single integer argument. Imm8
 // will verify that its argument can be encoded as an ARM 12 bit imm8, encode it
@@ -367,57 +374,62 @@ static const ValueOperand softfpReturnOp
 // it for its component Imm8data structures. The reason this is so horribly
 // roundabout is that we wanted to have Imm8 and RegisterShiftedRegister inherit
 // directly from Operand2 but have all of them take up only a single word of
 // storage. We also wanted to avoid passing around raw integers at all since
 // they are error prone.
 class Op2Reg;
 class O2RegImmShift;
 class O2RegRegShift;
+
 namespace datastore {
+
 struct Reg
 {
     // The "second register".
     uint32_t RM : 4;
     // Do we get another register for shifting.
     uint32_t RRS : 1;
     ShiftType Type : 2;
     // We'd like this to be a more sensible encoding, but that would need to be
     // a struct and that would not pack :(
     uint32_t ShiftAmount : 5;
     uint32_t pad : 20;
 
     Reg(uint32_t rm, ShiftType type, uint32_t rsr, uint32_t shiftamount)
       : RM(rm), RRS(rsr), Type(type), ShiftAmount(shiftamount), pad(0)
     { }
 
-    uint32_t encode() {
-        return RM | RRS << 4 | Type << 5 | ShiftAmount << 7;
-    }
     explicit Reg(const Op2Reg& op) {
         memcpy(this, &op, sizeof(*this));
     }
+
+    uint32_t encode() const {
+        return RM | RRS << 4 | Type << 5 | ShiftAmount << 7;
+    }
 };
 
 // Op2 has a mode labelled "<imm8m>", which is ARM's magical immediate encoding.
 // Some instructions actually get 8 bits of data, which is called Imm8Data
 // below. These should have edit distance > 1, but this is how it is for now.
 struct Imm8mData
 {
   private:
     uint32_t data : 8;
     uint32_t rot : 4;
     // Throw in an extra bit that will be 1 if we can't encode this properly.
     // If we can encode it properly, a simple "|" will still suffice to meld it
     // into the instruction.
     uint32_t buff : 19;
+
   public:
     uint32_t invalid : 1;
 
-    uint32_t encode() {
+  public:
+    uint32_t encode() const {
         MOZ_ASSERT(!invalid);
         return data | rot << 8;
     };
 
     // Default constructor makes an invalid immediate.
     Imm8mData()
       : data(0xff), rot(0xf), invalid(1)
     { }
@@ -433,37 +445,45 @@ struct Imm8mData
 struct Imm8Data
 {
   private:
     uint32_t imm4L : 4;
     uint32_t pad : 4;
     uint32_t imm4H : 4;
 
   public:
-    uint32_t encode() {
+    Imm8Data(uint32_t imm)
+      : imm4L(imm & 0xf), imm4H(imm >> 4)
+    {
+        MOZ_ASSERT(imm <= 0xff);
+    }
+
+  public:
+    uint32_t encode() const {
         return imm4L | (imm4H << 8);
     };
-    Imm8Data(uint32_t imm) : imm4L(imm & 0xf), imm4H(imm >> 4) {
-        MOZ_ASSERT(imm <= 0xff);
-    }
 };
 
 // VLDR/VSTR take an 8 bit offset, which is implicitly left shifted by 2.
 struct Imm8VFPOffData
 {
   private:
     uint32_t data;
 
   public:
-    uint32_t encode() {
+    Imm8VFPOffData(uint32_t imm)
+      : data (imm)
+    {
+        MOZ_ASSERT((imm & ~(0xff)) == 0);
+    }
+
+  public:
+    uint32_t encode() const {
         return data;
     };
-    Imm8VFPOffData(uint32_t imm) : data (imm) {
-        MOZ_ASSERT((imm & ~(0xff)) == 0);
-    }
 };
 
 // ARM can magically encode 256 very special immediates to be moved into a
 // register.
 struct Imm8VFPImmData
 {
     // This structure's members are public and it has no constructor to
     // initialize them, for a very special reason. Were this structure to
@@ -473,115 +493,128 @@ struct Imm8VFPImmData
     // the constructor MOZ_CONSTEXPR, but, again, some of our supported
     // compilers don't support MOZ_CONSTEXPR! So we are reduced to public
     // members and eschewing a constructor in hopes that the initialization
     // of DoubleEncoder's table is correct.
     uint32_t imm4L : 4;
     uint32_t imm4H : 4;
     int32_t isInvalid : 24;
 
-    uint32_t encode() {
+    uint32_t encode() const {
         // This assert is an attempt at ensuring that we don't create random
         // instances of this structure and then ask to encode() them.
         MOZ_ASSERT(isInvalid == 0);
         return imm4L | (imm4H << 16);
     };
 };
 
 struct Imm12Data
 {
     uint32_t data : 12;
-    uint32_t encode() {
-        return data;
-    }
 
     Imm12Data(uint32_t imm)
       : data(imm)
     {
         MOZ_ASSERT(data == imm);
     }
 
+    uint32_t encode() const {
+        return data;
+    }
 };
 
 struct RIS
 {
     uint32_t ShiftAmount : 5;
-    uint32_t encode () {
-        return ShiftAmount;
-    }
 
     RIS(uint32_t imm)
       : ShiftAmount(imm)
     {
         MOZ_ASSERT(ShiftAmount == imm);
     }
-    explicit RIS(Reg r) : ShiftAmount(r.ShiftAmount) {}
+
+    explicit RIS(Reg r)
+      : ShiftAmount(r.ShiftAmount)
+    { }
+
+    uint32_t encode() const {
+        return ShiftAmount;
+    }
 };
 
 struct RRS
 {
     uint32_t MustZero : 1;
     // The register that holds the shift amount.
     uint32_t RS : 4;
 
     RRS(uint32_t rs)
       : RS(rs)
     {
         MOZ_ASSERT(rs == RS);
     }
 
-    uint32_t encode () {
+    uint32_t encode() const {
         return RS << 1;
     }
 };
 
 } // namespace datastore
 
 class MacroAssemblerARM;
 class Operand;
 class Operand2
 {
     friend class Operand;
     friend class MacroAssemblerARM;
     friend class InstALU;
+
   public:
     uint32_t oper : 31;
     uint32_t invalid : 1;
-    bool isO2Reg() {
-        return !(oper & IsImmOp2);
-    }
-    Op2Reg toOp2Reg();
-    bool isImm8() {
-        return oper & IsImmOp2;
-    }
 
   protected:
-    Operand2(datastore::Imm8mData base)
+    explicit Operand2(datastore::Imm8mData base)
       : oper(base.invalid ? -1 : (base.encode() | (uint32_t)IsImmOp2)),
         invalid(base.invalid)
     { }
 
-    Operand2(datastore::Reg base)
+    explicit Operand2(datastore::Reg base)
       : oper(base.encode() | (uint32_t)IsNotImmOp2)
     { }
 
   private:
-    Operand2(int blob)
+    explicit Operand2(int blob)
       : oper(blob)
     { }
 
   public:
-    uint32_t encode() {
+    bool isO2Reg() const {
+        return !(oper & IsImmOp2);
+    }
+
+    Op2Reg toOp2Reg() const;
+
+    bool isImm8() const {
+        return oper & IsImmOp2;
+    }
+
+    uint32_t encode() const {
         return oper;
     }
 };
 
 class Imm8 : public Operand2
 {
   public:
+    explicit Imm8(uint32_t imm)
+      : Operand2(EncodeImm(imm))
+    { }
+
+  public:
     static datastore::Imm8mData EncodeImm(uint32_t imm) {
         // mozilla::CountLeadingZeroes32(imm) requires imm != 0.
         if (imm == 0)
             return datastore::Imm8mData(0, 0);
         int left = mozilla::CountLeadingZeroes32(imm) & 30;
         // See if imm is a simple value that can be encoded with a rotate of 0.
         // This is effectively imm <= 0xff, but I assume this can be optimized
         // more.
@@ -602,372 +635,387 @@ class Imm8 : public Operand2
             return datastore::Imm8mData();
         // Rather than masking out bits and checking for 0, just rotate the
         // immediate that we were passed in, and see if it fits into 8 bits.
         unsigned int mask = imm << (8 - right) | imm >> (24 + right);
         if (mask <= 0xff)
             return datastore::Imm8mData(mask, (8 - right) >> 1);
         return datastore::Imm8mData();
     }
+
     // Pair template?
     struct TwoImm8mData
     {
         datastore::Imm8mData fst, snd;
 
         TwoImm8mData()
           : fst(), snd()
         { }
 
         TwoImm8mData(datastore::Imm8mData _fst, datastore::Imm8mData _snd)
           : fst(_fst), snd(_snd)
         { }
     };
 
     static TwoImm8mData EncodeTwoImms(uint32_t);
-    Imm8(uint32_t imm)
-      : Operand2(EncodeImm(imm))
-    { }
 };
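
As a hedged illustration of the <imm8m> encoding that EncodeImm() above searches for: a valid operand is an 8-bit value rotated right by twice the stored 4-bit rotate field. The decoder below is a standalone sketch, the inverse of what the encoder produces:

    #include <cassert>
    #include <cstdint>

    // Inverse of the "imm8m" encoding EncodeImm() produces above: the stored
    // immediate is an 8-bit value rotated right by twice the 4-bit rot field.
    static uint32_t DecodeImm8m(uint32_t imm8, uint32_t rot) {
        uint32_t shift = 2 * rot;
        return shift ? ((imm8 >> shift) | (imm8 << (32 - shift))) : imm8;
    }

    int main() {
        assert(DecodeImm8m(0xff, 0) == 0x000000ff);  // rotate of 0: imm <= 0xff
        assert(DecodeImm8m(0xff, 4) == 0xff000000);  // 0xff rotated right by 8
        assert(DecodeImm8m(0x3f, 15) == 0x000000fc); // right by 30 == left by 2
    }
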
 
 class Op2Reg : public Operand2
 {
   public:
-    Op2Reg(Register rm, ShiftType type, datastore::RIS shiftImm)
+    explicit Op2Reg(Register rm, ShiftType type, datastore::RIS shiftImm)
       : Operand2(datastore::Reg(rm.code(), type, 0, shiftImm.encode()))
     { }
 
-    Op2Reg(Register rm, ShiftType type, datastore::RRS shiftReg)
+    explicit Op2Reg(Register rm, ShiftType type, datastore::RRS shiftReg)
       : Operand2(datastore::Reg(rm.code(), type, 1, shiftReg.encode()))
     { }
-    bool isO2RegImmShift() {
+
+  public:
+    bool isO2RegImmShift() const {
         datastore::Reg r(*this);
         return !r.RRS;
     }
-    O2RegImmShift toO2RegImmShift();
-    bool isO2RegRegShift() {
+    O2RegImmShift toO2RegImmShift() const;
+
+    bool isO2RegRegShift() const {
         datastore::Reg r(*this);
         return r.RRS;
     }
-    O2RegRegShift toO2RegRegShift();
+    O2RegRegShift toO2RegRegShift() const;
 
-    bool checkType(ShiftType type) {
+    bool checkType(ShiftType type) const {
         datastore::Reg r(*this);
         return r.Type == type;
     }
-    bool checkRM(Register rm) {
+    bool checkRM(Register rm) const {
         datastore::Reg r(*this);
         return r.RM == rm.code();
     }
-    bool getRM(Register* rm) {
+    bool getRM(Register* rm) const {
         datastore::Reg r(*this);
         *rm = Register::FromCode(r.RM);
         return true;
     }
 };
 
 class O2RegImmShift : public Op2Reg
 {
   public:
-    O2RegImmShift(Register rn, ShiftType type, uint32_t shift)
+    explicit O2RegImmShift(Register rn, ShiftType type, uint32_t shift)
       : Op2Reg(rn, type, datastore::RIS(shift))
     { }
-    int getShift() {
+
+  public:
+    int getShift() const {
         datastore::Reg r(*this);
         datastore::RIS ris(r);
         return ris.ShiftAmount;
     }
 };
 
 class O2RegRegShift : public Op2Reg
 {
   public:
-    O2RegRegShift(Register rn, ShiftType type, Register rs)
+    explicit O2RegRegShift(Register rn, ShiftType type, Register rs)
       : Op2Reg(rn, type, datastore::RRS(rs.code()))
     { }
 };
 
 O2RegImmShift O2Reg(Register r);
-O2RegImmShift lsl (Register r, int amt);
-O2RegImmShift lsr (Register r, int amt);
-O2RegImmShift asr (Register r, int amt);
-O2RegImmShift rol (Register r, int amt);
-O2RegImmShift ror (Register r, int amt);
+O2RegImmShift lsl(Register r, int amt);
+O2RegImmShift lsr(Register r, int amt);
+O2RegImmShift asr(Register r, int amt);
+O2RegImmShift rol(Register r, int amt);
+O2RegImmShift ror(Register r, int amt);
 
-O2RegRegShift lsl (Register r, Register amt);
-O2RegRegShift lsr (Register r, Register amt);
-O2RegRegShift asr (Register r, Register amt);
-O2RegRegShift ror (Register r, Register amt);
+O2RegRegShift lsl(Register r, Register amt);
+O2RegRegShift lsr(Register r, Register amt);
+O2RegRegShift asr(Register r, Register amt);
+O2RegRegShift ror(Register r, Register amt);
 
 // An offset from a register to be used for ldr/str. This should include the
 // sign bit, since ARM has "signed-magnitude" offsets. That is, it encodes an
 // unsigned offset, and the instruction specifies whether the offset is positive
 // or negative. The +/- bit is necessary if the instruction set wants to be able
 // to have a negative register offset, e.g. ldr pc, [r1,-r2];
 class DtrOff
 {
     uint32_t data;
 
   protected:
-    DtrOff(datastore::Imm12Data immdata, IsUp_ iu)
+    explicit DtrOff(datastore::Imm12Data immdata, IsUp_ iu)
       : data(immdata.encode() | (uint32_t)IsImmDTR | ((uint32_t)iu))
     { }
 
-    DtrOff(datastore::Reg reg, IsUp_ iu = IsUp)
+    explicit DtrOff(datastore::Reg reg, IsUp_ iu = IsUp)
       : data(reg.encode() | (uint32_t) IsNotImmDTR | iu)
     { }
 
   public:
-    uint32_t encode() { return data; }
+    uint32_t encode() const { return data; }
 };
 
 class DtrOffImm : public DtrOff
 {
   public:
-    DtrOffImm(int32_t imm)
+    explicit DtrOffImm(int32_t imm)
       : DtrOff(datastore::Imm12Data(mozilla::Abs(imm)), imm >= 0 ? IsUp : IsDown)
     {
         MOZ_ASSERT(mozilla::Abs(imm) < 4096);
     }
 };
 
 class DtrOffReg : public DtrOff
 {
     // These are designed to be called by a constructor of a subclass.
     // Constructing the necessary RIS/RRS structures is annoying.
   protected:
-    DtrOffReg(Register rn, ShiftType type, datastore::RIS shiftImm, IsUp_ iu = IsUp)
+    explicit DtrOffReg(Register rn, ShiftType type, datastore::RIS shiftImm, IsUp_ iu = IsUp)
       : DtrOff(datastore::Reg(rn.code(), type, 0, shiftImm.encode()), iu)
     { }
 
-    DtrOffReg(Register rn, ShiftType type, datastore::RRS shiftReg, IsUp_ iu = IsUp)
+    explicit DtrOffReg(Register rn, ShiftType type, datastore::RRS shiftReg, IsUp_ iu = IsUp)
       : DtrOff(datastore::Reg(rn.code(), type, 1, shiftReg.encode()), iu)
     { }
 };
 
 class DtrRegImmShift : public DtrOffReg
 {
   public:
-    DtrRegImmShift(Register rn, ShiftType type, uint32_t shift, IsUp_ iu = IsUp)
+    explicit DtrRegImmShift(Register rn, ShiftType type, uint32_t shift, IsUp_ iu = IsUp)
       : DtrOffReg(rn, type, datastore::RIS(shift), iu)
     { }
 };
 
 class DtrRegRegShift : public DtrOffReg
 {
   public:
-    DtrRegRegShift(Register rn, ShiftType type, Register rs, IsUp_ iu = IsUp)
+    explicit DtrRegRegShift(Register rn, ShiftType type, Register rs, IsUp_ iu = IsUp)
       : DtrOffReg(rn, type, datastore::RRS(rs.code()), iu)
     { }
 };
 
 // We will frequently want to bundle a register with its offset so that we have
 // an "operand" to a load instruction.
 class DTRAddr
 {
+    friend class Operand;
+
     uint32_t data;
 
   public:
-    DTRAddr(Register reg, DtrOff dtr)
+    explicit DTRAddr(Register reg, DtrOff dtr)
       : data(dtr.encode() | (reg.code() << 16))
     { }
 
-    uint32_t encode() {
+  private:
+    explicit DTRAddr(uint32_t blob)
+      : data(blob)
+    { }
+
+  public:
+    uint32_t encode() const {
         return data;
     }
-    Register getBase() {
+
+    Register getBase() const {
         return Register::FromCode((data >> 16) & 0xf);
     }
-  private:
-    friend class Operand;
-    DTRAddr(uint32_t blob)
-      : data(blob)
-    { }
 };
 
 // Offsets for the extended data transfer instructions:
 // ldrsh, ldrd, ldrsb, etc.
 class EDtrOff
 {
     uint32_t data;
 
   protected:
-    EDtrOff(datastore::Imm8Data imm8, IsUp_ iu = IsUp)
+    explicit EDtrOff(datastore::Imm8Data imm8, IsUp_ iu = IsUp)
       : data(imm8.encode() | IsImmEDTR | (uint32_t)iu)
     { }
 
-    EDtrOff(Register rm, IsUp_ iu = IsUp)
+    explicit EDtrOff(Register rm, IsUp_ iu = IsUp)
       : data(rm.code() | IsNotImmEDTR | iu)
     { }
 
   public:
-    uint32_t encode() {
+    uint32_t encode() const {
         return data;
     }
 };
 
 class EDtrOffImm : public EDtrOff
 {
   public:
-    EDtrOffImm(int32_t imm)
+    explicit EDtrOffImm(int32_t imm)
       : EDtrOff(datastore::Imm8Data(mozilla::Abs(imm)), (imm >= 0) ? IsUp : IsDown)
     {
         MOZ_ASSERT(mozilla::Abs(imm) < 256);
     }
 };
 
 // This is the most-derived class, since the extended data transfer instructions
 // don't support any sort of modification of the "index" operand.
 class EDtrOffReg : public EDtrOff
 {
   public:
-    EDtrOffReg(Register rm)
+    explicit EDtrOffReg(Register rm)
       : EDtrOff(rm)
     { }
 };
 
 class EDtrAddr
 {
     uint32_t data;
 
   public:
-    EDtrAddr(Register r, EDtrOff off)
+    explicit EDtrAddr(Register r, EDtrOff off)
       : data(RN(r) | off.encode())
     { }
 
-    uint32_t encode() {
+    uint32_t encode() const {
         return data;
     }
 };
 
 class VFPOff
 {
     uint32_t data;
 
   protected:
-    VFPOff(datastore::Imm8VFPOffData imm, IsUp_ isup)
+    explicit VFPOff(datastore::Imm8VFPOffData imm, IsUp_ isup)
       : data(imm.encode() | (uint32_t)isup)
     { }
 
   public:
-    uint32_t encode() {
+    uint32_t encode() const {
         return data;
     }
 };
 
 class VFPOffImm : public VFPOff
 {
   public:
-    VFPOffImm(int32_t imm)
+    explicit VFPOffImm(int32_t imm)
       : VFPOff(datastore::Imm8VFPOffData(mozilla::Abs(imm) / 4), imm < 0 ? IsDown : IsUp)
     {
         MOZ_ASSERT(mozilla::Abs(imm) <= 255 * 4);
     }
 };
+
 class VFPAddr
 {
     friend class Operand;
 
     uint32_t data;
 
+  public:
+    explicit VFPAddr(Register base, VFPOff off)
+      : data(RN(base) | off.encode())
+    { }
+
   protected:
     VFPAddr(uint32_t blob)
       : data(blob)
     { }
 
   public:
-    VFPAddr(Register base, VFPOff off)
-      : data(RN(base) | off.encode())
-    { }
-
-    uint32_t encode() {
+    uint32_t encode() const {
         return data;
     }
 };
 
-class VFPImm {
+class VFPImm
+{
     uint32_t data;
 
   public:
+    explicit VFPImm(uint32_t topWordOfDouble);
+
+  public:
     static const VFPImm One;
 
-    VFPImm(uint32_t topWordOfDouble);
-
-    uint32_t encode() {
+    uint32_t encode() const {
         return data;
     }
-    bool isValid() {
+    bool isValid() const {
         return data != -1U;
     }
 };
 
 // A BOffImm is an immediate that is used for branches. Namely, it is the offset
 // that will be encoded in the branch instruction. This is the only sane way of
 // constructing a branch.
 class BOffImm
 {
+    friend class InstBranchImm;
+
     uint32_t data;
 
   public:
-    uint32_t encode() {
-        return data;
-    }
-    int32_t decode() {
-        return ((((int32_t)data) << 8) >> 6) + 8;
-    }
-
     explicit BOffImm(int offset)
       : data ((offset - 8) >> 2 & 0x00ffffff)
     {
         MOZ_ASSERT((offset & 0x3) == 0);
         if (!IsInRange(offset))
             CrashAtUnhandlableOOM("BOffImm");
     }
-    static bool IsInRange(int offset)
-    {
+
+    explicit BOffImm()
+      : data(INVALID)
+    { }
+
+  private:
+    BOffImm(Instruction& inst);
+
+  public:
+    static const int INVALID = 0x00800000;
+
+    uint32_t encode() const {
+        return data;
+    }
+    int32_t decode() const {
+        return ((((int32_t)data) << 8) >> 6) + 8;
+    }
+
+    static bool IsInRange(int offset) {
         if ((offset - 8) < -33554432)
             return false;
         if ((offset - 8) > 33554428)
             return false;
         return true;
     }
-    static const int INVALID = 0x00800000;
-    BOffImm()
-      : data(INVALID)
-    { }
 
-    bool isInvalid() {
+    bool isInvalid() const {
         return data == uint32_t(INVALID);
     }
-    Instruction* getDest(Instruction* src);
-
-  private:
-    friend class InstBranchImm;
-    BOffImm(Instruction& inst);
+    Instruction* getDest(Instruction* src) const;
 };
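
A hedged sketch of the BOffImm arithmetic above: encode() folds away the ARM pipeline's +8 and stores a 24-bit word offset, and decode() reverses it, so the two should round-trip for any in-range, word-aligned offset.

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    // Round trip of the BOffImm arithmetic above: encode() subtracts the ARM
    // pipeline's +8 and stores a 24-bit word offset; decode() sign-extends,
    // shifts back to bytes, and re-adds the 8.
    static uint32_t EncodeBOff(int32_t offset) {
        return ((offset - 8) >> 2) & 0x00ffffff;
    }
    static int32_t DecodeBOff(uint32_t data) {
        return (int32_t(data << 8) >> 6) + 8;
    }

    int main() {
        for (int32_t off : {8, 1024, -4096})
            assert(DecodeBOff(EncodeBOff(off)) == off);
    }
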
 
 class Imm16
 {
     uint32_t lower : 12;
     uint32_t pad : 4;
     uint32_t upper : 4;
     uint32_t invalid : 12;
 
   public:
-    Imm16();
-    Imm16(uint32_t imm);
-    Imm16(Instruction& inst);
+    explicit Imm16();
+    explicit Imm16(uint32_t imm);
+    explicit Imm16(Instruction& inst);
 
-    uint32_t encode() {
+    uint32_t encode() const {
         return lower | upper << 16;
     }
-    uint32_t decode() {
+    uint32_t decode() const {
         return lower | upper << 12;
     }
 
-    bool isInvalid () {
+    bool isInvalid() const {
         return invalid;
     }
 };
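
To make the lower/upper split concrete: movw/movt scatter a 16-bit immediate into a 12-bit low field and a 4-bit high field, which is what encode() and decode() above pack and unpack. A minimal standalone sketch:

    #include <cassert>
    #include <cstdint>

    // Sketch of the Imm16 split above: ARM movw/movt scatter a 16-bit
    // immediate into a 12-bit low field and a 4-bit high field.
    static uint32_t EncodeImm16(uint32_t imm) {
        assert(imm <= 0xffff);
        uint32_t lower = imm & 0xfff;
        uint32_t upper = imm >> 12;
        return lower | (upper << 16); // matches Imm16::encode()
    }
    static uint32_t DecodeImm16(uint32_t enc) {
        return (enc & 0xfff) | ((enc >> 16) << 12); // matches Imm16::decode()
    }

    int main() {
        assert(DecodeImm16(EncodeImm16(0xbeef)) == 0xbeef);
    }
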
 
 // I would prefer that these did not exist, since there are essentially no
 // instructions that would ever take more than one of these; however, the MIR
 // wants to have only one type of argument to functions, so bugger.
 class Operand
@@ -983,36 +1031,37 @@ class Operand
 
   private:
     Tag_ Tag : 3;
     uint32_t reg : 5;
     int32_t offset;
     uint32_t data;
 
   public:
-    Operand (Register reg_)
+    explicit Operand(Register reg_)
       : Tag(OP2), reg(reg_.code())
     { }
 
-    Operand (FloatRegister freg)
+    explicit Operand(FloatRegister freg)
       : Tag(FOP), reg(freg.code())
     { }
 
-    Operand (Register base, Imm32 off)
+    explicit Operand(Register base, Imm32 off)
       : Tag(MEM), reg(base.code()), offset(off.value)
     { }
 
-    Operand (Register base, int32_t off)
+    explicit Operand(Register base, int32_t off)
       : Tag(MEM), reg(base.code()), offset(off)
     { }
 
-    Operand (const Address& addr)
+    explicit Operand(const Address& addr)
       : Tag(MEM), reg(addr.base.code()), offset(addr.offset)
     { }
 
+  public:
     Tag_ getTag() const {
         return Tag;
     }
 
     Operand2 toOp2() const {
         MOZ_ASSERT(Tag == OP2);
         return O2Reg(Register::FromCode(reg));
     }
@@ -1023,16 +1072,17 @@ class Operand
     }
 
     void toAddr(Register* r, Imm32* dest) const {
         MOZ_ASSERT(Tag == MEM);
         *r = Register::FromCode(reg);
         *dest = Imm32(offset);
     }
     Address toAddress() const {
+        MOZ_ASSERT(Tag == MEM);
         return Address(Register::FromCode(reg), offset);
     }
     int32_t disp() const {
         MOZ_ASSERT(Tag == MEM);
         return offset;
     }
 
     int32_t base() const {
@@ -1047,16 +1097,17 @@ class Operand
     }
     VFPAddr toVFPAddr() const {
         return VFPAddr(baseReg(), VFPOffImm(offset));
     }
 };
 
 void
 PatchJump(CodeLocationJump& jump_, CodeLocationLabel label);
+
 static inline void
 PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
 {
     PatchJump(jump_, label);
 }
 
 class InstructionIterator;
 class Assembler;
@@ -1174,18 +1225,18 @@ class Assembler : public AssemblerShared
     uint32_t actualOffset(uint32_t) const;
     uint32_t actualIndex(uint32_t) const;
     static uint8_t* PatchableJumpAddress(JitCode* code, uint32_t index);
     BufferOffset actualOffset(BufferOffset) const;
     static uint32_t NopFill;
     static uint32_t GetNopFill();
     static uint32_t AsmPoolMaxOffset;
     static uint32_t GetPoolMaxOffset();
+
   protected:
-
     // Structure for fixing up pc-relative loads/jumps when the machine code
     // gets moved (executable copy, gc, etc.).
     struct RelativePatch
     {
         void* target;
         Relocation::Kind kind;
         RelativePatch(void* target, Relocation::Kind kind)
             : target(target), kind(kind)
@@ -1209,18 +1260,17 @@ class Assembler : public AssemblerShared
   public:
     // For the alignment fill use NOP: 0xe320f000 or (Always | InstNOP::NopInst).
     // For the nopFill use a branch to the next instruction: 0xeaffffff.
     Assembler()
       : m_buffer(1, 1, 8, GetPoolMaxOffset(), 8, 0xe320f000, 0xeaffffff, GetNopFill()),
         isFinished(false),
         dtmActive(false),
         dtmCond(Always)
-    {
-    }
+    { }
 
     // We need to wait until an AutoJitContextAlloc is created by the
     // MacroAssembler, before allocating any space.
     void initWithAllocator() {
         m_buffer.initWithAllocator();
     }
 
     static Condition InvertCondition(Condition cond);
@@ -1313,91 +1363,87 @@ class Assembler : public AssemblerShared
 
   public:
     void writeCodePointer(AbsoluteLabel* label);
 
     void haltingAlign(int alignment);
     void nopAlign(int alignment);
     BufferOffset as_nop();
     BufferOffset as_alu(Register dest, Register src1, Operand2 op2,
-                        ALUOp op, SetCond_ sc = NoSetCond, Condition c = Always);
+                        ALUOp op, SBit s = LeaveCC, Condition c = Always);
     BufferOffset as_mov(Register dest,
-                        Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
+                        Operand2 op2, SBit s = LeaveCC, Condition c = Always);
     BufferOffset as_mvn(Register dest, Operand2 op2,
-                        SetCond_ sc = NoSetCond, Condition c = Always);
+                        SBit s = LeaveCC, Condition c = Always);
 
     static void as_alu_patch(Register dest, Register src1, Operand2 op2,
-                             ALUOp op, SetCond_ sc, Condition c, uint32_t* pos);
+                             ALUOp op, SBit s, Condition c, uint32_t* pos);
     static void as_mov_patch(Register dest,
-                             Operand2 op2, SetCond_ sc, Condition c, uint32_t* pos);
+                             Operand2 op2, SBit s, Condition c, uint32_t* pos);
 
     // Logical operations:
     BufferOffset as_and(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
+                Operand2 op2, SBit s = LeaveCC, Condition c = Always);
     BufferOffset as_bic(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
+                Operand2 op2, SBit s = LeaveCC, Condition c = Always);
     BufferOffset as_eor(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
+                Operand2 op2, SBit s = LeaveCC, Condition c = Always);
     BufferOffset as_orr(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
+                Operand2 op2, SBit s = LeaveCC, Condition c = Always);
     // Mathematical operations:
     BufferOffset as_adc(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
+                Operand2 op2, SBit s = LeaveCC, Condition c = Always);
     BufferOffset as_add(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
+                Operand2 op2, SBit s = LeaveCC, Condition c = Always);
     BufferOffset as_sbc(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
+                Operand2 op2, SBit s = LeaveCC, Condition c = Always);
     BufferOffset as_sub(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
+                Operand2 op2, SBit s = LeaveCC, Condition c = Always);
     BufferOffset as_rsb(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
+                Operand2 op2, SBit s = LeaveCC, Condition c = Always);
     BufferOffset as_rsc(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
+                Operand2 op2, SBit s = LeaveCC, Condition c = Always);
     // Test operations:
-    BufferOffset as_cmn(Register src1, Operand2 op2,
-                Condition c = Always);
-    BufferOffset as_cmp(Register src1, Operand2 op2,
-                Condition c = Always);
-    BufferOffset as_teq(Register src1, Operand2 op2,
-                Condition c = Always);
-    BufferOffset as_tst(Register src1, Operand2 op2,
-                Condition c = Always);
+    BufferOffset as_cmn(Register src1, Operand2 op2, Condition c = Always);
+    BufferOffset as_cmp(Register src1, Operand2 op2, Condition c = Always);
+    BufferOffset as_teq(Register src1, Operand2 op2, Condition c = Always);
+    BufferOffset as_tst(Register src1, Operand2 op2, Condition c = Always);
+
     // Sign extension operations:
     BufferOffset as_sxtb(Register dest, Register src, int rotate, Condition c = Always);
     BufferOffset as_sxth(Register dest, Register src, int rotate, Condition c = Always);
     BufferOffset as_uxtb(Register dest, Register src, int rotate, Condition c = Always);
     BufferOffset as_uxth(Register dest, Register src, int rotate, Condition c = Always);
 
     // Not quite ALU worthy, but useful nonetheless. These also have the issue
-    // of these being formatted completly differently from the standard ALU
-    // operations.
+    // of being formatted completely differently from the standard ALU operations.
     BufferOffset as_movw(Register dest, Imm16 imm, Condition c = Always);
     BufferOffset as_movt(Register dest, Imm16 imm, Condition c = Always);
 
     static void as_movw_patch(Register dest, Imm16 imm, Condition c, Instruction* pos);
     static void as_movt_patch(Register dest, Imm16 imm, Condition c, Instruction* pos);
 
     BufferOffset as_genmul(Register d1, Register d2, Register rm, Register rn,
-                   MULOp op, SetCond_ sc, Condition c = Always);
+                   MULOp op, SBit s, Condition c = Always);
     BufferOffset as_mul(Register dest, Register src1, Register src2,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
     BufferOffset as_mla(Register dest, Register acc, Register src1, Register src2,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
     BufferOffset as_umaal(Register dest1, Register dest2, Register src1, Register src2,
                   Condition c = Always);
     BufferOffset as_mls(Register dest, Register acc, Register src1, Register src2,
                 Condition c = Always);
     BufferOffset as_umull(Register dest1, Register dest2, Register src1, Register src2,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
     BufferOffset as_umlal(Register dest1, Register dest2, Register src1, Register src2,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
     BufferOffset as_smull(Register dest1, Register dest2, Register src1, Register src2,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
     BufferOffset as_smlal(Register dest1, Register dest2, Register src1, Register src2,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
     BufferOffset as_sdiv(Register dest, Register num, Register div, Condition c = Always);
     BufferOffset as_udiv(Register dest, Register num, Register div, Condition c = Always);
     BufferOffset as_clz(Register dest, Register src, Condition c = Always);
 
     // Data transfer instructions: ldr, str, ldrb, strb.
     // Using an int to differentiate between 8 bits and 32 bits is overkill.
     BufferOffset as_dtr(LoadStore ls, int size, Index mode,
@@ -1415,17 +1461,18 @@ class Assembler : public AssemblerShared
                 DTMMode mode, DTMWriteBack wb, Condition c = Always);
 
     // Overwrite a pool entry with new data.
     static void WritePoolEntry(Instruction* addr, Condition c, uint32_t data);
 
     // Load a 32 bit immediate from a pool into a register.
     BufferOffset as_Imm32Pool(Register dest, uint32_t value, Condition c = Always);
     // Make a patchable jump that can target the entire 32 bit address space.
-    BufferOffset as_BranchPool(uint32_t value, RepatchLabel* label, ARMBuffer::PoolEntry* pe = nullptr, Condition c = Always);
+    BufferOffset as_BranchPool(uint32_t value, RepatchLabel* label,
+                               ARMBuffer::PoolEntry* pe = nullptr, Condition c = Always);
 
     // Load a 64 bit floating point immediate from a pool into a register.
     BufferOffset as_FImm64Pool(VFPRegister dest, double value, Condition c = Always);
     // Load a 32 bit floating point immediate from a pool into a register.
     BufferOffset as_FImm32Pool(VFPRegister dest, float value, Condition c = Always);
 
     // Atomic instructions: ldrex, ldrexh, ldrexb, strex, strexh, strexb.
     //
@@ -1439,20 +1486,18 @@ class Assembler : public AssemblerShared
     BufferOffset as_ldrexh(Register rt, Register rn, Condition c = Always);
     BufferOffset as_ldrexb(Register rt, Register rn, Condition c = Always);
 
     // STREX rd, rt, [rn]
     BufferOffset as_strex(Register rd, Register rt, Register rn, Condition c = Always);
     BufferOffset as_strexh(Register rd, Register rt, Register rn, Condition c = Always);
     BufferOffset as_strexb(Register rd, Register rt, Register rn, Condition c = Always);
 
-    // Memory synchronization: dmb, dsb, isb.
-    //
+    // Memory synchronization.
     // These are available from ARMv7 forward.
-
     BufferOffset as_dmb(BarrierOption option = BarrierSY);
     BufferOffset as_dsb(BarrierOption option = BarrierSY);
     BufferOffset as_isb();
 
     // Memory synchronization for architectures before ARMv7.
     BufferOffset as_dsb_trap();
     BufferOffset as_dmb_trap();
     BufferOffset as_isb_trap();
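
The _trap variants above are the pre-ARMv7 forms of the same barriers. For
context only, a hedged sketch of where such barriers show up at the C++ level
(assuming a typical ARMv7 compiler lowering; not taken from this patch):

    #include <atomic>

    // Publish a value to another thread. On ARMv7 the release fence is
    // typically lowered to "dmb ish", i.e. one of the BarrierOption scopes
    // narrower than the default full-system BarrierSY.
    void publish(int* slot, std::atomic<bool>& ready, int v) {
        *slot = v;
        std::atomic_thread_fence(std::memory_order_release);
        ready.store(true, std::memory_order_relaxed);
    }
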
@@ -1482,67 +1527,50 @@ class Assembler : public AssemblerShared
     BufferOffset as_bl();
     // bl #imm can have a condition code, blx #imm cannot.
     // blx reg can be conditional.
     BufferOffset as_bl(Label* l, Condition c);
     BufferOffset as_bl(BOffImm off, Condition c, BufferOffset inst);
 
     BufferOffset as_mrs(Register r, Condition c = Always);
     BufferOffset as_msr(Register r, Condition c = Always);
+
     // VFP instructions!
   private:
-
     enum vfp_size {
         IsDouble = 1 << 8,
         IsSingle = 0 << 8
     };
 
     BufferOffset writeVFPInst(vfp_size sz, uint32_t blob);
 
     static void WriteVFPInstStatic(vfp_size sz, uint32_t blob, uint32_t* dest);
 
     // Unityped variants: all registers hold the same type (ieee754 single or
     // double). Notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
     BufferOffset as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                               VFPOp op, Condition c = Always);
 
   public:
-    BufferOffset as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                 Condition c = Always);
-
-    BufferOffset as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                 Condition c = Always);
-
-    BufferOffset as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                 Condition c = Always);
-
-    BufferOffset as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                  Condition c = Always);
-
-    BufferOffset as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                  Condition c = Always);
-
-    BufferOffset as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                  Condition c = Always);
-
+    BufferOffset as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c = Always);
+    BufferOffset as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c = Always);
+    BufferOffset as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c = Always);
+    BufferOffset as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c = Always);
+    BufferOffset as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c = Always);
+    BufferOffset as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c = Always);
     BufferOffset as_vneg(VFPRegister vd, VFPRegister vm, Condition c = Always);
-
     BufferOffset as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c = Always);
-
     BufferOffset as_vabs(VFPRegister vd, VFPRegister vm, Condition c = Always);
-
-    BufferOffset as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                 Condition c = Always);
-
-    BufferOffset as_vcmp(VFPRegister vd, VFPRegister vm,
-                 Condition c = Always);
+    BufferOffset as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm, Condition c = Always);
+    BufferOffset as_vcmp(VFPRegister vd, VFPRegister vm, Condition c = Always);
     BufferOffset as_vcmpz(VFPRegister vd,  Condition c = Always);
 
     // Specifically, a move between two same sized-registers.
     BufferOffset as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c = Always);
+
     // Transfer between Core and VFP.
     enum FloatToCore_ {
         FloatToCore = 1 << 20,
         CoreToFloat = 0 << 20
     };
 
   private:
     enum VFPXferSize {
@@ -1559,36 +1587,39 @@ class Assembler : public AssemblerShared
 
     BufferOffset as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c,
                   Condition c = Always, int idx = 0);
 
     // Our encoding actually allows just the src and the dest (and their types)
     // to uniquely specify the encoding that we are going to use.
     BufferOffset as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR = false,
                          Condition c = Always);
+
     // Hard coded to a 32 bit fixed width result for now.
-    BufferOffset as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint, bool toFixed, Condition c = Always);
+    BufferOffset as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint,
+                              bool toFixed, Condition c = Always);
 
     // Transfer between VFP and memory.
     BufferOffset as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
                          Condition c = Always /* vfp doesn't have a wb option*/);
 
     static void as_vdtr_patch(LoadStore ls, VFPRegister vd, VFPAddr addr,
-                              Condition c  /* vfp doesn't have a wb option*/, uint32_t* dest);
+                              Condition c /* vfp doesn't have a wb option */, uint32_t* dest);
 
     // VFP's ldm/stm work differently from the standard ARM ones. You can only
     // transfer a range.
 
     BufferOffset as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length,
-                 /*also has update conditions*/Condition c = Always);
+                 /* also has update conditions */ Condition c = Always);
 
     BufferOffset as_vimm(VFPRegister vd, VFPImm imm, Condition c = Always);
 
     BufferOffset as_vmrs(Register r, Condition c = Always);
     BufferOffset as_vmsr(Register r, Condition c = Always);
+
     // Label operations.
     bool nextLink(BufferOffset b, BufferOffset* next);
     void bind(Label* label, BufferOffset boff = BufferOffset());
     void bind(RepatchLabel* label);
     uint32_t currentOffset() {
         return nextOffset().getOffset();
     }
     void retarget(Label* label, Label* target);
@@ -1906,16 +1937,17 @@ class InstDTR : public Instruction
 JS_STATIC_ASSERT(sizeof(InstDTR) == sizeof(Instruction));
 
 class InstLDR : public InstDTR
 {
   public:
     InstLDR(Index mode, Register rt, DTRAddr addr, Assembler::Condition c)
         : InstDTR(IsLoad, IsWord, mode, rt, addr, c)
     { }
+
     static bool IsTHIS(const Instruction& i);
     static InstLDR* AsTHIS(const Instruction& i);
 
 };
 JS_STATIC_ASSERT(sizeof(InstDTR) == sizeof(InstLDR));
 
 class InstNOP : public Instruction
 {
@@ -1934,78 +1966,87 @@ class InstNOP : public Instruction
 class InstBranchReg : public Instruction
 {
   protected:
     // Don't use BranchTag yourself, use a derived instruction.
     enum BranchTag {
         IsBX  = 0x012fff10,
         IsBLX = 0x012fff30
     };
+
     static const uint32_t IsBRegMask = 0x0ffffff0;
+
     InstBranchReg(BranchTag tag, Register rm, Assembler::Condition c)
       : Instruction(tag | rm.code(), c)
     { }
+
   public:
     static bool IsTHIS (const Instruction& i);
     static InstBranchReg* AsTHIS (const Instruction& i);
+
     // Get the register that is being branched to.
     void extractDest(Register* dest);
     // Make sure we are branching to a pre-known register.
     bool checkDest(Register dest);
 };
 JS_STATIC_ASSERT(sizeof(InstBranchReg) == sizeof(Instruction));
 
 // Branching to an immediate offset, or calling an immediate offset
 class InstBranchImm : public Instruction
 {
   protected:
     enum BranchTag {
         IsB   = 0x0a000000,
         IsBL  = 0x0b000000
     };
+
     static const uint32_t IsBImmMask = 0x0f000000;
 
     InstBranchImm(BranchTag tag, BOffImm off, Assembler::Condition c)
       : Instruction(tag | off.encode(), c)
     { }
 
   public:
     static bool IsTHIS (const Instruction& i);
     static InstBranchImm* AsTHIS (const Instruction& i);
+
     void extractImm(BOffImm* dest);
 };
 JS_STATIC_ASSERT(sizeof(InstBranchImm) == sizeof(Instruction));
 
 // Very specific branching instructions.
 class InstBXReg : public InstBranchReg
 {
   public:
     static bool IsTHIS (const Instruction& i);
     static InstBXReg* AsTHIS (const Instruction& i);
 };
+
 class InstBLXReg : public InstBranchReg
 {
   public:
     InstBLXReg(Register reg, Assembler::Condition c)
       : InstBranchReg(IsBLX, reg, c)
     { }
 
     static bool IsTHIS (const Instruction& i);
     static InstBLXReg* AsTHIS (const Instruction& i);
 };
+
 class InstBImm : public InstBranchImm
 {
   public:
     InstBImm(BOffImm off, Assembler::Condition c)
       : InstBranchImm(IsB, off, c)
     { }
 
     static bool IsTHIS (const Instruction& i);
     static InstBImm* AsTHIS (const Instruction& i);
 };
+
 class InstBLImm : public InstBranchImm
 {
   public:
     InstBLImm(BOffImm off, Assembler::Condition c)
       : InstBranchImm(IsBL, off, c)
     { }
 
     static bool IsTHIS (const Instruction& i);
@@ -2051,29 +2092,33 @@ class InstMovW : public InstMovWT
 };
 
 class InstMovT : public InstMovWT
 {
   public:
     InstMovT (Register rd, Imm16 imm, Assembler::Condition c)
       : InstMovWT(rd, imm, IsT, c)
     { }
+
     static bool IsTHIS (const Instruction& i);
     static InstMovT* AsTHIS (const Instruction& i);
 };
 
 class InstALU : public Instruction
 {
     static const int32_t ALUMask = 0xc << 24;
+
   public:
-    InstALU (Register rd, Register rn, Operand2 op2, ALUOp op, SetCond_ sc, Assembler::Condition c)
-        : Instruction(maybeRD(rd) | maybeRN(rn) | op2.encode() | op | sc, c)
+    InstALU (Register rd, Register rn, Operand2 op2, ALUOp op, SBit s, Assembler::Condition c)
+      : Instruction(maybeRD(rd) | maybeRN(rn) | op2.encode() | op | s, c)
     { }
+
     static bool IsTHIS (const Instruction& i);
     static InstALU* AsTHIS (const Instruction& i);
+
     void extractOp(ALUOp* ret);
     bool checkOp(ALUOp op);
     void extractDest(Register* ret);
     bool checkDest(Register rd);
     void extractOp1(Register* ret);
     bool checkOp1(Register rn);
     Operand2 extractOp2();
 };
@@ -2088,60 +2133,67 @@ class InstCMP : public InstALU
 class InstMOV : public InstALU
 {
   public:
     static bool IsTHIS (const Instruction& i);
     static InstMOV* AsTHIS (const Instruction& i);
 };
 
 
-class InstructionIterator {
+class InstructionIterator
+{
   private:
     Instruction* i;
+
   public:
-    InstructionIterator(Instruction* i_);
+    explicit InstructionIterator(Instruction* i_);
+
     Instruction* next() {
         i = i->next();
         return cur();
     }
     Instruction* cur() const {
         return i;
     }
 };
 
 static const uint32_t NumIntArgRegs = 4;
+
 // There are 16 *float* registers available for arguments.
 // If doubles are used, only half that number of registers are available.
 static const uint32_t NumFloatArgRegs = 16;
 
 static inline bool
 GetIntArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
 {
     if (usedIntArgs >= NumIntArgRegs)
         return false;
+
     *out = Register::FromCode(usedIntArgs);
     return true;
 }
 
 // Get a register in which we plan to put a quantity that will be used as an
 // integer argument. This differs from GetIntArgReg in that, once we have no
 // more actual argument registers to use, we fall back on whichever
 // CallTempReg* registers don't overlap the argument registers, and we only
 // fail once those run out too.
 static inline bool
 GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
 {
     if (GetIntArgReg(usedIntArgs, usedFloatArgs, out))
         return true;
+
     // Unfortunately, we have to assume things about the point at which
     // GetIntArgReg returns false, because we need to know how many registers it
     // can allocate.
     usedIntArgs -= NumIntArgRegs;
     if (usedIntArgs >= NumCallTempNonArgRegs)
         return false;
+
     *out = CallTempNonArgRegs[usedIntArgs];
     return true;
 }
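
The fallback order can be modeled in isolation. A standalone sketch (the
register names and the NumCallTempNonArgRegs count here are assumptions for
illustration, not taken from this patch):

    #include <cstdint>
    #include <cstdio>

    static const uint32_t NumIntArgRegs = 4;
    static const uint32_t NumCallTempNonArgRegs = 2;           // assumed count
    static const char* CallTempNonArgRegs[] = { "r5", "r6" };  // assumed regs

    // First four ints go in r0-r3; later ones draw from the call-temp set.
    bool GetTempRegForIntArg(uint32_t usedIntArgs, const char** out) {
        static const char* argRegs[] = { "r0", "r1", "r2", "r3" };
        if (usedIntArgs < NumIntArgRegs) {
            *out = argRegs[usedIntArgs];
            return true;
        }
        usedIntArgs -= NumIntArgRegs;
        if (usedIntArgs >= NumCallTempNonArgRegs)
            return false;
        *out = CallTempNonArgRegs[usedIntArgs];
        return true;
    }

    int main() {
        const char* r;
        for (uint32_t i = 0; GetTempRegForIntArg(i, &r); i++)
            printf("arg %u -> %s\n", (unsigned)i, r);  // r0..r3, then r5, r6
    }
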
 
 
 #if !defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
 
 static inline uint32_t
@@ -2215,50 +2267,56 @@ GetDoubleArgStackDisp(uint32_t usedIntAr
     doubleSlots *= 2;
     return (intSlots + doubleSlots + *padding) * sizeof(intptr_t);
 }
 
 #endif
 
 
 
-class DoubleEncoder {
+class DoubleEncoder
+{
     struct DoubleEntry
     {
         uint32_t dblTop;
         datastore::Imm8VFPImmData data;
     };
 
     static const DoubleEntry table[256];
 
   public:
-    bool lookup(uint32_t top, datastore::Imm8VFPImmData* ret) {
+    bool lookup(uint32_t top, datastore::Imm8VFPImmData* ret) const {
         for (int i = 0; i < 256; i++) {
             if (table[i].dblTop == top) {
                 *ret = table[i].data;
                 return true;
             }
         }
         return false;
     }
 };
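
The 256-entry table answers "is this double a VFP vmov immediate": the
encodable values are +/- (16..31)/16 * 2^r with -3 <= r <= 4, so only the top
32 bits (dblTop) can be nonzero. A standalone check derived from the
architectural VFPExpandImm rules (illustrative, not the tree's code):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    bool encodeVFPImm8(double d, uint8_t* imm8out) {
        uint64_t bits;
        memcpy(&bits, &d, sizeof(bits));
        if (bits & 0xffffffffULL)
            return false;                     // low word must be zero
        uint32_t top  = uint32_t(bits >> 32); // the table's dblTop
        uint32_t sign = top >> 31;
        uint32_t exp  = (top >> 20) & 0x7ff;
        uint32_t frac = top & 0xfffff;
        if (frac & 0xffff)
            return false;                     // only 4 fraction bits allowed
        // The 11-bit exponent must look like NOT(b) : b,b,b,b,b,b,b,b : cd.
        uint32_t b = (exp >> 9) & 1;
        if (((exp >> 10) & 1) == b || ((exp >> 2) & 0xff) != (b ? 0xffu : 0u))
            return false;
        *imm8out = uint8_t((sign << 7) | (b << 6) | ((exp & 3) << 4) | (frac >> 16));
        return true;
    }

    int main() {
        uint8_t imm;
        printf("1.0 -> %d\n", encodeVFPImm8(1.0, &imm)); // 1 (imm8 = 0x70)
        printf("0.5 -> %d\n", encodeVFPImm8(0.5, &imm)); // 1
        printf("0.1 -> %d\n", encodeVFPImm8(0.1, &imm)); // 0: needs a pool load
    }
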
 
-class AutoForbidPools {
+class AutoForbidPools
+{
     Assembler* masm_;
+
   public:
     // The maxInst argument is the maximum number of word sized instructions
     // that will be allocated within this context. It is used to determine if
     // the pool needs to be dumped before entering this context. The debug code
     // checks that no more than maxInst instructions are actually allocated.
     //
     // Allocation of pool entries is not supported within this context so the
     // code cannot use large integers or float constants etc.
-    AutoForbidPools(Assembler* masm, size_t maxInst) : masm_(masm) {
+    AutoForbidPools(Assembler* masm, size_t maxInst)
+      : masm_(masm)
+    {
         masm_->enterNoPool(maxInst);
     }
+
     ~AutoForbidPools() {
         masm_->leaveNoPool();
     }
 };
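
Usage is RAII around a pool-sensitive sequence. A sketch condensed from the
table-switch dispatch later in this patch (masm, index, cases and defaultcase
as defined there):

    // One ma_ldr, one ma_b, and `cases` table words must stay contiguous, so
    // forbid pool dumps for the whole region.
    {
        AutoForbidPools afp(&masm, 1 + 1 + cases);
        masm.ma_ldr(DTRAddr(pc, DtrRegImmShift(index, LSL, 2)), pc, Offset,
                    Assembler::NotSigned);
        masm.ma_b(defaultcase);
    }   // ~AutoForbidPools calls leaveNoPool() here.
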
 
 } // namespace jit
 } // namespace js
 
--- a/js/src/jit/arm/BaselineIC-arm.cpp
+++ b/js/src/jit/arm/BaselineIC-arm.cpp
@@ -24,18 +24,18 @@ ICCompare_Int32::Compiler::generateStubC
     // Guard that R0 is an integer and R1 is an integer.
     Label failure;
     masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
     masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
 
     // Compare payload regs of R0 and R1.
     Assembler::Condition cond = JSOpToCondition(op, /* signed = */true);
     masm.cmp32(R0.payloadReg(), R1.payloadReg());
-    masm.ma_mov(Imm32(1), R0.payloadReg(), NoSetCond, cond);
-    masm.ma_mov(Imm32(0), R0.payloadReg(), NoSetCond, Assembler::InvertCondition(cond));
+    masm.ma_mov(Imm32(1), R0.payloadReg(), LeaveCC, cond);
+    masm.ma_mov(Imm32(0), R0.payloadReg(), LeaveCC, Assembler::InvertCondition(cond));
 
     // Result is implicitly boxed already.
     masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.payloadReg(), R0);
     EmitReturnFromIC(masm);
 
     // Failure case - jump to next stub.
     masm.bind(&failure);
     EmitStubGuardFailure(masm);
@@ -52,17 +52,17 @@ ICCompare_Double::Compiler::generateStub
 
     Register dest = R0.scratchReg();
 
     Assembler::DoubleCondition doubleCond = JSOpToDoubleCondition(op);
     Assembler::Condition cond = Assembler::ConditionFromDoubleCondition(doubleCond);
 
     masm.compareDouble(FloatReg0, FloatReg1);
     masm.ma_mov(Imm32(0), dest);
-    masm.ma_mov(Imm32(1), dest, NoSetCond, cond);
+    masm.ma_mov(Imm32(1), dest, LeaveCC, cond);
 
     masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0);
     EmitReturnFromIC(masm);
 
     // Failure case - jump to next stub.
     masm.bind(&failure);
     EmitStubGuardFailure(masm);
     return true;
@@ -88,28 +88,28 @@ ICBinaryArith_Int32::Compiler::generateS
     // DIV and MOD need an extra non-volatile ValueOperand to hold R0.
     AllocatableGeneralRegisterSet savedRegs(availableGeneralRegs(2));
     savedRegs.set() = GeneralRegisterSet::Intersect(GeneralRegisterSet::NonVolatile(), savedRegs.set());
     ValueOperand savedValue = savedRegs.takeAnyValue();
 
     Label maybeNegZero, revertRegister;
     switch(op_) {
       case JSOP_ADD:
-        masm.ma_add(R0.payloadReg(), R1.payloadReg(), scratchReg, SetCond);
+        masm.ma_add(R0.payloadReg(), R1.payloadReg(), scratchReg, SetCC);
 
         // Just jump to failure on overflow. R0 and R1 are preserved, so we can
         // just jump to the next stub.
         masm.j(Assembler::Overflow, &failure);
 
         // Box the result and return. We know R0.typeReg() already contains the
         // integer tag, so we just need to move the result value into place.
         masm.mov(scratchReg, R0.payloadReg());
         break;
       case JSOP_SUB:
-        masm.ma_sub(R0.payloadReg(), R1.payloadReg(), scratchReg, SetCond);
+        masm.ma_sub(R0.payloadReg(), R1.payloadReg(), scratchReg, SetCC);
         masm.j(Assembler::Overflow, &failure);
         masm.mov(scratchReg, R0.payloadReg());
         break;
       case JSOP_MUL: {
         Assembler::Condition cond = masm.ma_check_mul(R0.payloadReg(), R1.payloadReg(), scratchReg,
                                                       Assembler::Overflow);
         masm.j(cond, &failure);
 
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -130,17 +130,17 @@ CodeGeneratorARM::visitCompare(LCompare*
     const LAllocation* right = comp->getOperand(1);
     const LDefinition* def = comp->getDef(0);
 
     if (right->isConstant())
         masm.ma_cmp(ToRegister(left), Imm32(ToInt32(right)));
     else
         masm.ma_cmp(ToRegister(left), ToOperand(right));
     masm.ma_mov(Imm32(0), ToRegister(def));
-    masm.ma_mov(Imm32(1), ToRegister(def), NoSetCond, cond);
+    masm.ma_mov(Imm32(1), ToRegister(def), LeaveCC, cond);
 }
 
 void
 CodeGeneratorARM::visitCompareAndBranch(LCompareAndBranch* comp)
 {
     Assembler::Condition cond = JSOpToCondition(comp->cmpMir()->compareType(), comp->jsop());
     if (comp->right()->isConstant())
         masm.ma_cmp(ToRegister(comp->left()), Imm32(ToInt32(comp->right())));
@@ -375,35 +375,35 @@ CodeGeneratorARM::visitSqrtF(LSqrtF* ins
 void
 CodeGeneratorARM::visitAddI(LAddI* ins)
 {
     const LAllocation* lhs = ins->getOperand(0);
     const LAllocation* rhs = ins->getOperand(1);
     const LDefinition* dest = ins->getDef(0);
 
     if (rhs->isConstant())
-        masm.ma_add(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCond);
+        masm.ma_add(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCC);
     else
-        masm.ma_add(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCond);
+        masm.ma_add(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCC);
 
     if (ins->snapshot())
         bailoutIf(Assembler::Overflow, ins->snapshot());
 }
 
 void
 CodeGeneratorARM::visitSubI(LSubI* ins)
 {
     const LAllocation* lhs = ins->getOperand(0);
     const LAllocation* rhs = ins->getOperand(1);
     const LDefinition* dest = ins->getDef(0);
 
     if (rhs->isConstant())
-        masm.ma_sub(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCond);
+        masm.ma_sub(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCC);
     else
-        masm.ma_sub(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCond);
+        masm.ma_sub(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCC);
 
     if (ins->snapshot())
         bailoutIf(Assembler::Overflow, ins->snapshot());
 }
 
 void
 CodeGeneratorARM::visitMulI(LMulI* ins)
 {
@@ -421,27 +421,27 @@ CodeGeneratorARM::visitMulI(LMulI* ins)
         if (mul->canBeNegativeZero() && constant <= 0) {
             Assembler::Condition bailoutCond = (constant == 0) ? Assembler::LessThan : Assembler::Equal;
             masm.ma_cmp(ToRegister(lhs), Imm32(0));
             bailoutIf(bailoutCond, ins->snapshot());
         }
         // TODO: move these to ma_mul.
         switch (constant) {
           case -1:
-            masm.ma_rsb(ToRegister(lhs), Imm32(0), ToRegister(dest), SetCond);
+            masm.ma_rsb(ToRegister(lhs), Imm32(0), ToRegister(dest), SetCC);
             break;
           case 0:
             masm.ma_mov(Imm32(0), ToRegister(dest));
             return; // Escape overflow check.
           case 1:
             // Nop
             masm.ma_mov(ToRegister(lhs), ToRegister(dest));
             return; // Escape overflow check.
           case 2:
-            masm.ma_add(ToRegister(lhs), ToRegister(lhs), ToRegister(dest), SetCond);
+            masm.ma_add(ToRegister(lhs), ToRegister(lhs), ToRegister(dest), SetCC);
             // Overflow is handled later.
             break;
           default: {
             bool handled = false;
             if (constant > 0) {
                 // Try shift and add sequences for a positive constant.
                 if (!mul->canOverflow()) {
                     // If it cannot overflow, we can do lots of optimizations.
@@ -641,17 +641,17 @@ CodeGeneratorARM::visitDivPowTwoI(LDivPo
     Register lhs = ToRegister(ins->numerator());
     Register output = ToRegister(ins->output());
     int32_t shift = ins->shift();
 
     if (shift != 0) {
         MDiv* mir = ins->mir();
         if (!mir->isTruncated()) {
             // If the remainder is != 0, bailout since this must be a double.
-            masm.as_mov(ScratchRegister, lsl(lhs, 32 - shift), SetCond);
+            masm.as_mov(ScratchRegister, lsl(lhs, 32 - shift), SetCC);
             bailoutIf(Assembler::NonZero, ins->snapshot());
         }
 
         if (!mir->canBeNegativeDividend()) {
             // Numerator is unsigned, so needs no adjusting. Do the shift.
             masm.as_mov(output, asr(lhs, shift));
             return;
         }
@@ -806,21 +806,21 @@ void
 CodeGeneratorARM::visitModPowTwoI(LModPowTwoI* ins)
 {
     Register in = ToRegister(ins->getOperand(0));
     Register out = ToRegister(ins->getDef(0));
     MMod* mir = ins->mir();
     Label fin;
     // bug 739870, jbramley has a different sequence that may help with speed
     // here.
-    masm.ma_mov(in, out, SetCond);
+    masm.ma_mov(in, out, SetCC);
     masm.ma_b(&fin, Assembler::Zero);
-    masm.ma_rsb(Imm32(0), out, NoSetCond, Assembler::Signed);
+    masm.ma_rsb(Imm32(0), out, LeaveCC, Assembler::Signed);
     masm.ma_and(Imm32((1 << ins->shift()) - 1), out);
-    masm.ma_rsb(Imm32(0), out, SetCond, Assembler::Signed);
+    masm.ma_rsb(Imm32(0), out, SetCC, Assembler::Signed);
     if (mir->canBeNegativeDividend()) {
         if (!mir->isTruncated()) {
             MOZ_ASSERT(mir->fallible());
             bailoutIf(Assembler::Zero, ins->snapshot());
         } else {
             // -0|0 == 0
         }
     }
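
The sequence above computes x % (1 << shift) with the sign of the dividend, as
JS requires, and the final SetCC exists so the caller can bail out when a
negative dividend produced 0 (a result of -0 needs a double). A standalone
rendering of the same dataflow (illustrative):

    #include <cstdint>
    #include <cstdio>

    int32_t modPowTwo(int32_t in, int shift) {
        int32_t out = in;             // ma_mov(in, out, SetCC)
        if (out == 0)
            return out;               // ma_b(&fin, Assembler::Zero)
        bool negative = out < 0;      // the Signed condition set by the mov
        if (negative)
            out = -out;               // ma_rsb(Imm32(0), out, LeaveCC, Signed)
        out &= (1 << shift) - 1;      // ma_and(Imm32((1 << shift) - 1), out)
        if (negative)
            out = -out;               // ma_rsb(Imm32(0), out, SetCC, Signed)
        return out;                   // Zero now means "-0", the bailout case
    }

    int main() {
        printf("-5 %% 4 = %d\n", modPowTwo(-5, 2)); // -1
        printf(" 7 %% 4 = %d\n", modPowTwo(7, 2));  //  3
        printf("-4 %% 4 = %d\n", modPowTwo(-4, 2)); //  0, via the -0 path
    }
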
@@ -1095,18 +1095,18 @@ CodeGeneratorARM::emitTableSwitchDispatc
     // switch table before the table actually starts. Since the only other
     // unhandled case is the default case (both out of range high and out of
     // range low), I then insert a branch to the default case into the extra
     // slot, which ensures we don't attempt to execute the address table.
     Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
 
     int32_t cases = mir->numCases();
     // Subtract the lowest case value from the index.
-    masm.ma_sub(index, Imm32(mir->low()), index, SetCond);
-    masm.ma_rsb(index, Imm32(cases - 1), index, SetCond, Assembler::NotSigned);
+    masm.ma_sub(index, Imm32(mir->low()), index, SetCC);
+    masm.ma_rsb(index, Imm32(cases - 1), index, SetCC, Assembler::NotSigned);
     // Inhibit pools within the following sequence because we are indexing into
     // a pc relative table. The region will have one instruction for ma_ldr, one
     // for ma_b, and each table case takes one word.
     AutoForbidPools afp(&masm, 1 + 1 + cases);
     masm.ma_ldr(DTRAddr(pc, DtrRegImmShift(index, LSL, 2)), pc, Offset, Assembler::NotSigned);
     masm.ma_b(defaultcase);
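
The sub/rsb pair above is a two-instruction range check: after the subs, the
Signed condition rejects index < low, and the conditional rsbs then rewrites
index to cases - 1 - (index - low), going negative exactly when the index is
past the last case. A standalone model of the flags logic (illustrative):

    #include <cstdint>
    #include <cstdio>

    bool inRange(int32_t index, int32_t low, int32_t cases) {
        int32_t i = index - low;      // ma_sub(index, Imm32(low), index, SetCC)
        if (i < 0)
            return false;             // Signed after the subs
        int32_t r = (cases - 1) - i;  // the rsbs, executed only if NotSigned
        return r >= 0;                // NotSigned guards the ma_ldr above
    }

    int main() {
        printf("%d %d %d\n", inRange(5, 3, 4),   // 1: in range
                             inRange(2, 3, 4),   // 0: below low
                             inRange(7, 3, 4));  // 0: past the last case
    }
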
 
     // To fill in the CodeLabels for the case entries, we need to first generate
@@ -1607,18 +1607,18 @@ CodeGeneratorARM::visitNotD(LNotD* ins)
         masm.as_vmrs(dest);
         masm.ma_lsr(Imm32(28), dest, dest);
         // 28 + 2 = 30
         masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
         masm.ma_and(Imm32(1), dest);
     } else {
         masm.as_vmrs(pc);
         masm.ma_mov(Imm32(0), dest);
-        masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Equal);
-        masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Overflow);
+        masm.ma_mov(Imm32(1), dest, LeaveCC, Assembler::Equal);
+        masm.ma_mov(Imm32(1), dest, LeaveCC, Assembler::Overflow);
     }
 }
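
The "28 + 2 = 30" path above copies FPSCR into a core register, shifts the
NZCV flags down to bits 3..0, and reduces them to Z|V, i.e. "compared equal to
zero, or unordered (NaN)". A standalone rendering of that bit math
(illustrative):

    #include <cstdint>
    #include <cstdio>

    uint32_t notD(uint32_t fpscr) {
        uint32_t x = fpscr >> 28;   // ma_lsr(Imm32(28), dest, dest): N Z C V
        x = x | (x >> 2);           // OpOrr: fold Z (bit 2) onto V (bit 0)
        return x & 1;               // ma_and(Imm32(1), dest)
    }

    int main() {
        printf("%u\n", notD(0x40000000)); // Z set: value was 0.0  -> 1
        printf("%u\n", notD(0x10000000)); // V set: unordered/NaN  -> 1
        printf("%u\n", notD(0x20000000)); // C only                -> 0
    }
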
 
 void
 CodeGeneratorARM::visitNotF(LNotF* ins)
 {
     // Since this operation is a logical not, we want to set a bit if the double
     // is falsey, which means 0.0, -0.0 or NaN. When comparing with 0, an input of
@@ -1635,18 +1635,18 @@ CodeGeneratorARM::visitNotF(LNotF* ins)
         masm.as_vmrs(dest);
         masm.ma_lsr(Imm32(28), dest, dest);
         // 28 + 2 = 30
         masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
         masm.ma_and(Imm32(1), dest);
     } else {
         masm.as_vmrs(pc);
         masm.ma_mov(Imm32(0), dest);
-        masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Equal);
-        masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Overflow);
+        masm.ma_mov(Imm32(1), dest, LeaveCC, Assembler::Equal);
+        masm.ma_mov(Imm32(1), dest, LeaveCC, Assembler::Overflow);
     }
 }
 
 void
 CodeGeneratorARM::visitGuardShape(LGuardShape* guard)
 {
     Register obj = ToRegister(guard->input());
     Register tmp = ToRegister(guard->tempInt());
@@ -1788,19 +1788,19 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAs
 
     if (ptr->isConstant()) {
         MOZ_ASSERT(!mir->needsBoundsCheck());
         int32_t ptrImm = ptr->toConstant()->toInt32();
         MOZ_ASSERT(ptrImm >= 0);
         if (isFloat) {
             VFPRegister vd(ToFloatRegister(ins->output()));
             if (size == 32)
-                masm.ma_vldr(Operand(HeapReg, ptrImm), vd.singleOverlay(), Assembler::Always);
+                masm.ma_vldr(Address(HeapReg, ptrImm), vd.singleOverlay(), Assembler::Always);
             else
-                masm.ma_vldr(Operand(HeapReg, ptrImm), vd, Assembler::Always);
+                masm.ma_vldr(Address(HeapReg, ptrImm), vd, Assembler::Always);
         }  else {
             masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, Imm32(ptrImm),
                                   ToRegister(ins->output()), Offset, Assembler::Always);
         }
         memoryBarrier(mir->barrierAfter());
         return;
     }
 
@@ -1821,27 +1821,27 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAs
         return;
     }
 
     BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
     if (isFloat) {
         FloatRegister dst = ToFloatRegister(ins->output());
         VFPRegister vd(dst);
         if (size == 32) {
-            masm.ma_vldr(Operand(GlobalReg, AsmJSNaN32GlobalDataOffset - AsmJSGlobalRegBias),
+            masm.ma_vldr(Address(GlobalReg, AsmJSNaN32GlobalDataOffset - AsmJSGlobalRegBias),
                          vd.singleOverlay(), Assembler::AboveOrEqual);
             masm.ma_vldr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Below);
         } else {
-            masm.ma_vldr(Operand(GlobalReg, AsmJSNaN64GlobalDataOffset - AsmJSGlobalRegBias),
+            masm.ma_vldr(Address(GlobalReg, AsmJSNaN64GlobalDataOffset - AsmJSGlobalRegBias),
                          vd, Assembler::AboveOrEqual);
             masm.ma_vldr(vd, HeapReg, ptrReg, 0, Assembler::Below);
         }
     } else {
         Register d = ToRegister(ins->output());
-        masm.ma_mov(Imm32(0), d, NoSetCond, Assembler::AboveOrEqual);
+        masm.ma_mov(Imm32(0), d, LeaveCC, Assembler::AboveOrEqual);
         masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, d, Offset, Assembler::Below);
     }
     memoryBarrier(mir->barrierAfter());
     masm.append(AsmJSHeapAccess(bo.getOffset()));
 }
 
 void
 CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
@@ -1865,19 +1865,19 @@ CodeGeneratorARM::visitAsmJSStoreHeap(LA
     memoryBarrier(mir->barrierBefore());
     if (ptr->isConstant()) {
         MOZ_ASSERT(!mir->needsBoundsCheck());
         int32_t ptrImm = ptr->toConstant()->toInt32();
         MOZ_ASSERT(ptrImm >= 0);
         if (isFloat) {
             VFPRegister vd(ToFloatRegister(ins->value()));
             if (size == 32)
-                masm.ma_vstr(vd.singleOverlay(), Operand(HeapReg, ptrImm), Assembler::Always);
+                masm.ma_vstr(vd.singleOverlay(), Address(HeapReg, ptrImm), Assembler::Always);
             else
-                masm.ma_vstr(vd, Operand(HeapReg, ptrImm), Assembler::Always);
+                masm.ma_vstr(vd, Address(HeapReg, ptrImm), Assembler::Always);
         } else {
             masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, Imm32(ptrImm),
                                   ToRegister(ins->value()), Offset, Assembler::Always);
         }
         memoryBarrier(mir->barrierAfter());
         return;
     }
 
@@ -2084,17 +2084,17 @@ CodeGeneratorARM::visitAsmJSAtomicBinopC
         MOZ_CRASH("Unknown op");
     }
 }
 
 void
 CodeGeneratorARM::visitAsmJSPassStackArg(LAsmJSPassStackArg* ins)
 {
     const MAsmJSPassStackArg* mir = ins->mir();
-    Operand dst(StackPointer, mir->spOffset());
+    Address dst(StackPointer, mir->spOffset());
     if (ins->arg()->isConstant()) {
         //masm.as_bkpt();
         masm.ma_storeImm(Imm32(ToInt32(ins->arg())), dst);
     } else {
         if (ins->arg()->isGeneralReg())
             masm.ma_str(ToRegister(ins->arg()), dst);
         else
             masm.ma_vstr(ToFloatRegister(ins->arg()), dst);
@@ -2226,38 +2226,38 @@ void
 CodeGeneratorARM::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar* ins)
 {
     const MAsmJSLoadGlobalVar* mir = ins->mir();
     unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
     if (mir->type() == MIRType_Int32) {
         masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr), ToRegister(ins->output()));
     } else if (mir->type() == MIRType_Float32) {
         VFPRegister vd(ToFloatRegister(ins->output()));
-        masm.ma_vldr(Operand(GlobalReg, addr), vd.singleOverlay());
+        masm.ma_vldr(Address(GlobalReg, addr), vd.singleOverlay());
     } else {
-        masm.ma_vldr(Operand(GlobalReg, addr), ToFloatRegister(ins->output()));
+        masm.ma_vldr(Address(GlobalReg, addr), ToFloatRegister(ins->output()));
     }
 }
 
 void
 CodeGeneratorARM::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar* ins)
 {
     const MAsmJSStoreGlobalVar* mir = ins->mir();
 
     MIRType type = mir->value()->type();
     MOZ_ASSERT(IsNumberType(type));
 
     unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
     if (type == MIRType_Int32) {
         masm.ma_dtr(IsStore, GlobalReg, Imm32(addr), ToRegister(ins->value()));
     } else if (type == MIRType_Float32) {
         VFPRegister vd(ToFloatRegister(ins->value()));
-        masm.ma_vstr(vd.singleOverlay(), Operand(GlobalReg, addr));
+        masm.ma_vstr(vd.singleOverlay(), Address(GlobalReg, addr));
     } else {
-        masm.ma_vstr(ToFloatRegister(ins->value()), Operand(GlobalReg, addr));
+        masm.ma_vstr(ToFloatRegister(ins->value()), Address(GlobalReg, addr));
     }
 }
 
 void
 CodeGeneratorARM::visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr* ins)
 {
     const MAsmJSLoadFuncPtr* mir = ins->mir();
 
@@ -2270,17 +2270,17 @@ CodeGeneratorARM::visitAsmJSLoadFuncPtr(
     masm.ma_ldr(DTRAddr(GlobalReg, DtrRegImmShift(tmp, LSL, 0)), out);
 }
 
 void
 CodeGeneratorARM::visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc* ins)
 {
     const MAsmJSLoadFFIFunc* mir = ins->mir();
 
-    masm.ma_ldr(Operand(GlobalReg, mir->globalDataOffset() - AsmJSGlobalRegBias),
+    masm.ma_ldr(Address(GlobalReg, mir->globalDataOffset() - AsmJSGlobalRegBias),
                 ToRegister(ins->output()));
 }
 
 void
 CodeGeneratorARM::visitNegI(LNegI* ins)
 {
     Register input = ToRegister(ins->input());
     masm.ma_neg(input, ToRegister(ins->output()));
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -52,17 +52,17 @@ MacroAssemblerARM::convertInt32ToDouble(
     as_vxfer(src, InvalidReg, dest.sintOverlay(),
              CoreToFloat);
     as_vcvt(dest, dest.sintOverlay());
 }
 
 void
 MacroAssemblerARM::convertInt32ToDouble(const Address& src, FloatRegister dest)
 {
-    ma_vldr(Operand(src), ScratchDoubleReg);
+    ma_vldr(src, ScratchDoubleReg);
     as_vcvt(dest, VFPRegister(ScratchDoubleReg).sintOverlay());
 }
 
 void
 MacroAssemblerARM::convertInt32ToDouble(const BaseIndex& src, FloatRegister dest)
 {
     Register base = src.base;
     uint32_t scale = Imm32::ShiftOf(src.scale).value;
@@ -209,17 +209,17 @@ MacroAssemblerARM::convertInt32ToFloat32
     // Direct conversions aren't possible.
     as_vxfer(src, InvalidReg, dest.sintOverlay(),
              CoreToFloat);
     as_vcvt(dest.singleOverlay(), dest.sintOverlay());
 }
 
 void
 MacroAssemblerARM::convertInt32ToFloat32(const Address& src, FloatRegister dest) {
-    ma_vldr(Operand(src), ScratchFloat32Reg);
+    ma_vldr(src, ScratchFloat32Reg);
     as_vcvt(dest, VFPRegister(ScratchFloat32Reg).sintOverlay());
 }
 
 void
 MacroAssemblerARM::addDouble(FloatRegister src, FloatRegister dest)
 {
     ma_vadd(dest, src, dest);
 }
@@ -253,114 +253,114 @@ MacroAssemblerARM::inc64(AbsoluteAddress
 {
 
     ma_strd(r0, r1, EDtrAddr(sp, EDtrOffImm(-8)), PreIndex);
 
     ma_mov(Imm32((int32_t)dest.addr), ScratchRegister);
 
     ma_ldrd(EDtrAddr(ScratchRegister, EDtrOffImm(0)), r0, r1);
 
-    ma_add(Imm32(1), r0, SetCond);
-    ma_adc(Imm32(0), r1, NoSetCond);
+    ma_add(Imm32(1), r0, SetCC);
+    ma_adc(Imm32(0), r1, LeaveCC);
 
     ma_strd(r0, r1, EDtrAddr(ScratchRegister, EDtrOffImm(0)));
 
     ma_ldrd(EDtrAddr(sp, EDtrOffImm(8)), r0, r1, PostIndex);
 
 }
 
 bool
 MacroAssemblerARM::alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op,
-                           SetCond_ sc, Condition c)
-{
-    if ((sc == SetCond && ! condsAreSafe(op)) || !can_dbl(op))
+                           SBit s, Condition c)
+{
+    if ((s == SetCC && ! condsAreSafe(op)) || !can_dbl(op))
         return false;
     ALUOp interop = getDestVariant(op);
     Imm8::TwoImm8mData both = Imm8::EncodeTwoImms(imm.value);
     if (both.fst.invalid)
         return false;
     // For the most part, there is no good reason to set the condition codes for
     // the first instruction. We could do better when the second instruction has
     // no dest, such as checking for overflow by doing the first operation and
     // skipping the second if the first one overflowed. This preserves the
     // overflow condition code. Unfortunately, it is horribly brittle.
-    as_alu(ScratchRegister, src1, both.fst, interop, NoSetCond, c);
-    as_alu(dest, ScratchRegister, both.snd, op, sc, c);
+    as_alu(ScratchRegister, src1, Operand2(both.fst), interop, LeaveCC, c);
+    as_alu(dest, ScratchRegister, Operand2(both.snd), op, s, c);
     return true;
 }
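
alu_dbl exists for immediates that are not a valid ARM "imm8m" operand (an
8-bit value rotated right by an even amount) but that Imm8::EncodeTwoImms can
split into two that are, e.g. 0x00ff00ff into 0x00ff0000 and 0x000000ff. A
standalone predicate for the single-instruction case (illustrative, not the
tree's Imm8 logic):

    #include <cstdint>
    #include <cstdio>

    // True if v fits a data-processing immediate: some even right-rotation
    // of an 8-bit value. Equivalently, rotating v left by some even amount
    // leaves at most 8 low bits set.
    bool isImm8m(uint32_t v) {
        for (int rot = 0; rot < 32; rot += 2) {
            uint32_t rotated = rot ? ((v << rot) | (v >> (32 - rot))) : v;
            if (rotated <= 0xff)
                return true;
        }
        return false;
    }

    int main() {
        printf("0x0000ff00 -> %d\n", isImm8m(0x0000ff00)); // 1: one instruction
        printf("0x00ff00ff -> %d\n", isImm8m(0x00ff00ff)); // 0: alu_dbl territory
    }
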
 
 
 void
 MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest,
                           ALUOp op,
-                          SetCond_ sc, Condition c)
+                          SBit s, Condition c)
 {
     // As it turns out, if you ask for a compare-like instruction you *probably*
     // want it to set condition codes.
     if (dest == InvalidReg)
-        MOZ_ASSERT(sc == SetCond);
+        MOZ_ASSERT(s == SetCC);
 
     // The operator gives us the ability to determine how this can be used.
     Imm8 imm8 = Imm8(imm.value);
     // One instruction: If we can encode it using an imm8m, then do so.
     if (!imm8.invalid) {
-        as_alu(dest, src1, imm8, op, sc, c);
+        as_alu(dest, src1, imm8, op, s, c);
         return;
     }
     // One instruction, negated:
     Imm32 negImm = imm;
     Register negDest;
     ALUOp negOp = ALUNeg(op, dest, &negImm, &negDest);
     Imm8 negImm8 = Imm8(negImm.value);
     // 'add r1, r2, -15' can be replaced with 'sub r1, r2, 15'. For bonus
     // points, dest can be replaced (nearly always invalid => ScratchRegister).
     // This is useful if we wish to negate tst. tst has an invalid (aka not
     // used) dest, but its negation, bic, *requires* a dest. We can accommodate,
     // but it will need to clobber *something*, and the scratch register isn't
     // being used, so...
     if (negOp != OpInvalid && !negImm8.invalid) {
-        as_alu(negDest, src1, negImm8, negOp, sc, c);
+        as_alu(negDest, src1, negImm8, negOp, s, c);
         return;
     }
 
     if (HasMOVWT()) {
         // If the operation is a move-a-like then we can try to use movw to move
         // the bits into the destination. Otherwise, we'll need to fall back on
         // a multi-instruction format :(
         // movw/movt does not set condition codes, so don't hold your breath.
-        if (sc == NoSetCond && (op == OpMov || op == OpMvn)) {
+        if (s == LeaveCC && (op == OpMov || op == OpMvn)) {
             // ARMv7 supports movw/movt. movw zero-extends its 16 bit argument,
             // so we can set the register this way. movt leaves the bottom 16
             // bits intact, so it is unsuitable to move a constant that
             if (op == OpMov && ((imm.value & ~ 0xffff) == 0)) {
                 MOZ_ASSERT(src1 == InvalidReg);
-                as_movw(dest, (uint16_t)imm.value, c);
+                as_movw(dest, Imm16((uint16_t)imm.value), c);
                 return;
             }
 
             // If they asked for a mvn rfoo, imm, where ~imm fits into 16 bits
             // then do it.
             if (op == OpMvn && (((~imm.value) & ~ 0xffff) == 0)) {
                 MOZ_ASSERT(src1 == InvalidReg);
-                as_movw(dest, (uint16_t)~imm.value, c);
+                as_movw(dest, Imm16((uint16_t)~imm.value), c);
                 return;
             }
 
             // TODO: constant dedup may enable us to add dest, r0, 23 *if* we
             // are attempting to load a constant that looks similar to one that
             // already exists. If it can't be done with a single movw then we
             // *need* to use two instructions; since this must be some sort of a
             // move operation, we can just use a movw/movt pair and get the
             // whole thing done in two moves. This does not work for ops like
             // add, since we'd need to do: movw tmp; movt tmp; add dest, tmp,
             // src1.
             if (op == OpMvn)
                 imm.value = ~imm.value;
-            as_movw(dest, imm.value & 0xffff, c);
-            as_movt(dest, (imm.value >> 16) & 0xffff, c);
+            as_movw(dest, Imm16(imm.value & 0xffff), c);
+            as_movt(dest, Imm16((imm.value >> 16) & 0xffff), c);
             return;
         }
         // If we weren't doing a move-a-like, a 16 bit immediate would require 2
         // instructions. With the same amount of space and (less) time, we can do
         // two 8 bit operations, reusing the dest register. e.g.
         //  movw tmp, 0xffff; add dest, src, tmp ror 4
         // vs.
         //  add dest, src, 0xff0; add dest, dest, 0xf000000f
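
For reference, the movw/movt pair this comment weighs against: on ARMv7 two
instructions materialize any 32-bit constant, movw zeroing the top half and
movt filling it in. A standalone encoder sketch (architectural A1 encodings,
unconditional; illustrative, not the tree's as_movw/as_movt):

    #include <cstdint>
    #include <cstdio>

    uint32_t encMovw(unsigned rd, uint16_t imm) {  // movw rd, #imm16
        return 0xE3000000 | uint32_t(imm >> 12) << 16 | (rd << 12) | (imm & 0xfff);
    }
    uint32_t encMovt(unsigned rd, uint16_t imm) {  // movt rd, #imm16
        return 0xE3400000 | uint32_t(imm >> 12) << 16 | (rd << 12) | (imm & 0xfff);
    }

    int main() {
        uint32_t v = 0xdeadbeef;
        printf("movw r0, #0x%04x -> %08x\n", v & 0xffffu, (unsigned)encMovw(0, uint16_t(v)));
        printf("movt r0, #0x%04x -> %08x\n", v >> 16, (unsigned)encMovt(0, uint16_t(v >> 16)));
    }
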
@@ -375,58 +375,58 @@ MacroAssemblerARM::ma_alu(Register src1,
 
     // Either a) this isn't ARMv7 b) this isn't a move start by attempting to
     // generate a two instruction form. Some things cannot be made into two-inst
     // forms correctly. Namely, adds dest, src, 0xffff. Since we want the
     // condition codes (and don't know which ones will be checked), we need to
     // assume that the overflow flag will be checked and add{,s} dest, src,
     // 0xff00; add{,s} dest, dest, 0xff is not guaranteed to set the overflow
     // flag the same as the (theoretical) one instruction variant.
-    if (alu_dbl(src1, imm, dest, op, sc, c))
+    if (alu_dbl(src1, imm, dest, op, s, c))
         return;
 
     // And try with its negative.
     if (negOp != OpInvalid &&
-        alu_dbl(src1, negImm, negDest, negOp, sc, c))
+        alu_dbl(src1, negImm, negDest, negOp, s, c))
         return;
 
     // Well, damn. We can use two 16 bit mov's, then do the op or we can do a
     // single load from a pool then op.
     if (HasMOVWT()) {
         // Try to load the immediate into a scratch register then use that
-        as_movw(ScratchRegister, imm.value & 0xffff, c);
+        as_movw(ScratchRegister, Imm16(imm.value & 0xffff), c);
         if ((imm.value >> 16) != 0)
-            as_movt(ScratchRegister, (imm.value >> 16) & 0xffff, c);
+            as_movt(ScratchRegister, Imm16((imm.value >> 16) & 0xffff), c);
     } else {
         // Going to have to use a load. If the operation is a move, then just
         // move it into the destination register.
         if (op == OpMov) {
             as_Imm32Pool(dest, imm.value, c);
             return;
         } else {
             // If this isn't just going into a register, then stick it in a
             // temp, and then proceed.
             as_Imm32Pool(ScratchRegister, imm.value, c);
         }
     }
-    as_alu(dest, src1, O2Reg(ScratchRegister), op, sc, c);
+    as_alu(dest, src1, O2Reg(ScratchRegister), op, s, c);
 }
 
 void
 MacroAssemblerARM::ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
-            SetCond_ sc, Assembler::Condition c)
+            SBit s, Assembler::Condition c)
 {
     MOZ_ASSERT(op2.getTag() == Operand::OP2);
-    as_alu(dest, src1, op2.toOp2(), op, sc, c);
-}
-
-void
-MacroAssemblerARM::ma_alu(Register src1, Operand2 op2, Register dest, ALUOp op, SetCond_ sc, Condition c)
-{
-    as_alu(dest, src1, op2, op, sc, c);
+    as_alu(dest, src1, op2.toOp2(), op, s, c);
+}
+
+void
+MacroAssemblerARM::ma_alu(Register src1, Operand2 op2, Register dest, ALUOp op, SBit s, Condition c)
+{
+    as_alu(dest, src1, op2, op, s, c);
 }
 
 void
 MacroAssemblerARM::ma_nop()
 {
     as_nop();
 }
 
@@ -479,34 +479,34 @@ MacroAssemblerARM::ma_mov_patch(Imm32 im
 /* static */ void
 MacroAssemblerARM::ma_mov_patch(ImmPtr imm, Register dest, Assembler::Condition c,
                                 RelocStyle rs, Instruction* i)
 {
     ma_mov_patch(Imm32(int32_t(imm.value)), dest, c, rs, i);
 }
 
 void
-MacroAssemblerARM::ma_mov(Register src, Register dest, SetCond_ sc, Assembler::Condition c)
-{
-    if (sc == SetCond || dest != src)
-        as_mov(dest, O2Reg(src), sc, c);
+MacroAssemblerARM::ma_mov(Register src, Register dest, SBit s, Assembler::Condition c)
+{
+    if (s == SetCC || dest != src)
+        as_mov(dest, O2Reg(src), s, c);
 }
 
 void
 MacroAssemblerARM::ma_mov(Imm32 imm, Register dest,
-                          SetCond_ sc, Assembler::Condition c)
-{
-    ma_alu(InvalidReg, imm, dest, OpMov, sc, c);
+                          SBit s, Assembler::Condition c)
+{
+    ma_alu(InvalidReg, imm, dest, OpMov, s, c);
 }
 
 void
 MacroAssemblerARM::ma_mov(ImmWord imm, Register dest,
-                          SetCond_ sc, Assembler::Condition c)
-{
-    ma_alu(InvalidReg, Imm32(imm.value), dest, OpMov, sc, c);
+                          SBit s, Assembler::Condition c)
+{
+    ma_alu(InvalidReg, Imm32(imm.value), dest, OpMov, s, c);
 }
 
 void
 MacroAssemblerARM::ma_mov(ImmGCPtr ptr, Register dest)
 {
     // As opposed to x86/x64 version, the data relocation has to be executed
     // before to recover the pointer, and not after.
     writeDataRelocation(ptr);
@@ -571,265 +571,265 @@ void
 MacroAssemblerARM::ma_rol(Register shift, Register src, Register dst)
 {
     ma_rsb(shift, Imm32(32), ScratchRegister);
     as_mov(dst, ror(src, ScratchRegister));
 }
 
 // Move not (dest <- ~src)
 void
-MacroAssemblerARM::ma_mvn(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
-{
-    ma_alu(InvalidReg, imm, dest, OpMvn, sc, c);
-}
-
-void
-MacroAssemblerARM::ma_mvn(Register src1, Register dest, SetCond_ sc, Assembler::Condition c)
-{
-    as_alu(dest, InvalidReg, O2Reg(src1), OpMvn, sc, c);
+MacroAssemblerARM::ma_mvn(Imm32 imm, Register dest, SBit s, Assembler::Condition c)
+{
+    ma_alu(InvalidReg, imm, dest, OpMvn, s, c);
+}
+
+void
+MacroAssemblerARM::ma_mvn(Register src1, Register dest, SBit s, Assembler::Condition c)
+{
+    as_alu(dest, InvalidReg, O2Reg(src1), OpMvn, s, c);
 }
 
 // Negate (dest <- -src), src is a register, rather than a general op2.
 void
-MacroAssemblerARM::ma_neg(Register src1, Register dest, SetCond_ sc, Assembler::Condition c)
-{
-    as_rsb(dest, src1, Imm8(0), sc, c);
+MacroAssemblerARM::ma_neg(Register src1, Register dest, SBit s, Assembler::Condition c)
+{
+    as_rsb(dest, src1, Imm8(0), s, c);
 }
 
 // And.
 void
-MacroAssemblerARM::ma_and(Register src, Register dest, SetCond_ sc, Assembler::Condition c)
+MacroAssemblerARM::ma_and(Register src, Register dest, SBit s, Assembler::Condition c)
 {
     ma_and(dest, src, dest);
 }
 void
 MacroAssemblerARM::ma_and(Register src1, Register src2, Register dest,
-                          SetCond_ sc, Assembler::Condition c)
-{
-    as_and(dest, src1, O2Reg(src2), sc, c);
-}
-void
-MacroAssemblerARM::ma_and(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
-{
-    ma_alu(dest, imm, dest, OpAnd, sc, c);
+                          SBit s, Assembler::Condition c)
+{
+    as_and(dest, src1, O2Reg(src2), s, c);
+}
+void
+MacroAssemblerARM::ma_and(Imm32 imm, Register dest, SBit s, Assembler::Condition c)
+{
+    ma_alu(dest, imm, dest, OpAnd, s, c);
 }
 void
 MacroAssemblerARM::ma_and(Imm32 imm, Register src1, Register dest,
-                          SetCond_ sc, Assembler::Condition c)
-{
-    ma_alu(src1, imm, dest, OpAnd, sc, c);
+                          SBit s, Assembler::Condition c)
+{
+    ma_alu(src1, imm, dest, OpAnd, s, c);
 }
 
 // Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2).
 void
-MacroAssemblerARM::ma_bic(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
-{
-    ma_alu(dest, imm, dest, OpBic, sc, c);
+MacroAssemblerARM::ma_bic(Imm32 imm, Register dest, SBit s, Assembler::Condition c)
+{
+    ma_alu(dest, imm, dest, OpBic, s, c);
 }
 
 // Exclusive or.
 void
-MacroAssemblerARM::ma_eor(Register src, Register dest, SetCond_ sc, Assembler::Condition c)
-{
-    ma_eor(dest, src, dest, sc, c);
+MacroAssemblerARM::ma_eor(Register src, Register dest, SBit s, Assembler::Condition c)
+{
+    ma_eor(dest, src, dest, s, c);
 }
 void
 MacroAssemblerARM::ma_eor(Register src1, Register src2, Register dest,
-                          SetCond_ sc, Assembler::Condition c)
-{
-    as_eor(dest, src1, O2Reg(src2), sc, c);
-}
-void
-MacroAssemblerARM::ma_eor(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
-{
-    ma_alu(dest, imm, dest, OpEor, sc, c);
+                          SBit s, Assembler::Condition c)
+{
+    as_eor(dest, src1, O2Reg(src2), s, c);
+}
+void
+MacroAssemblerARM::ma_eor(Imm32 imm, Register dest, SBit s, Assembler::Condition c)
+{
+    ma_alu(dest, imm, dest, OpEor, s, c);
 }
 void
 MacroAssemblerARM::ma_eor(Imm32 imm, Register src1, Register dest,
-       SetCond_ sc, Assembler::Condition c)
-{
-    ma_alu(src1, imm, dest, OpEor, sc, c);
+       SBit s, Assembler::Condition c)
+{
+    ma_alu(src1, imm, dest, OpEor, s, c);
 }
 
 // Or.
 void
-MacroAssemblerARM::ma_orr(Register src, Register dest, SetCond_ sc, Assembler::Condition c)
-{
-    ma_orr(dest, src, dest, sc, c);
+MacroAssemblerARM::ma_orr(Register src, Register dest, SBit s, Assembler::Condition c)
+{
+    ma_orr(dest, src, dest, s, c);
 }
 void
 MacroAssemblerARM::ma_orr(Register src1, Register src2, Register dest,
-                          SetCond_ sc, Assembler::Condition c)
-{
-    as_orr(dest, src1, O2Reg(src2), sc, c);
-}
-void
-MacroAssemblerARM::ma_orr(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
-{
-    ma_alu(dest, imm, dest, OpOrr, sc, c);
+                          SBit s, Assembler::Condition c)
+{
+    as_orr(dest, src1, O2Reg(src2), s, c);
+}
+void
+MacroAssemblerARM::ma_orr(Imm32 imm, Register dest, SBit s, Assembler::Condition c)
+{
+    ma_alu(dest, imm, dest, OpOrr, s, c);
 }
 void
 MacroAssemblerARM::ma_orr(Imm32 imm, Register src1, Register dest,
-                          SetCond_ sc, Assembler::Condition c)
-{
-    ma_alu(src1, imm, dest, OpOrr, sc, c);
+                          SBit s, Assembler::Condition c)
+{
+    ma_alu(src1, imm, dest, OpOrr, s, c);
 }
 
 // Arithmetic-based ops.
 // Add with carry.
 void
-MacroAssemblerARM::ma_adc(Imm32 imm, Register dest, SetCond_ sc, Condition c)
-{
-    ma_alu(dest, imm, dest, OpAdc, sc, c);
-}
-void
-MacroAssemblerARM::ma_adc(Register src, Register dest, SetCond_ sc, Condition c)
-{
-    as_alu(dest, dest, O2Reg(src), OpAdc, sc, c);
-}
-void
-MacroAssemblerARM::ma_adc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
-{
-    as_alu(dest, src1, O2Reg(src2), OpAdc, sc, c);
+MacroAssemblerARM::ma_adc(Imm32 imm, Register dest, SBit s, Condition c)
+{
+    ma_alu(dest, imm, dest, OpAdc, s, c);
+}
+void
+MacroAssemblerARM::ma_adc(Register src, Register dest, SBit s, Condition c)
+{
+    as_alu(dest, dest, O2Reg(src), OpAdc, s, c);
+}
+void
+MacroAssemblerARM::ma_adc(Register src1, Register src2, Register dest, SBit s, Condition c)
+{
+    as_alu(dest, src1, O2Reg(src2), OpAdc, s, c);
 }
 
 // Add.
 void
-MacroAssemblerARM::ma_add(Imm32 imm, Register dest, SetCond_ sc, Condition c)
-{
-    ma_alu(dest, imm, dest, OpAdd, sc, c);
-}
-
-void
-MacroAssemblerARM::ma_add(Register src1, Register dest, SetCond_ sc, Condition c)
-{
-    ma_alu(dest, O2Reg(src1), dest, OpAdd, sc, c);
-}
-void
-MacroAssemblerARM::ma_add(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
-{
-    as_alu(dest, src1, O2Reg(src2), OpAdd, sc, c);
-}
-void
-MacroAssemblerARM::ma_add(Register src1, Operand op, Register dest, SetCond_ sc, Condition c)
-{
-    ma_alu(src1, op, dest, OpAdd, sc, c);
-}
-void
-MacroAssemblerARM::ma_add(Register src1, Imm32 op, Register dest, SetCond_ sc, Condition c)
-{
-    ma_alu(src1, op, dest, OpAdd, sc, c);
+MacroAssemblerARM::ma_add(Imm32 imm, Register dest, SBit s, Condition c)
+{
+    ma_alu(dest, imm, dest, OpAdd, s, c);
+}
+
+void
+MacroAssemblerARM::ma_add(Register src1, Register dest, SBit s, Condition c)
+{
+    ma_alu(dest, O2Reg(src1), dest, OpAdd, s, c);
+}
+void
+MacroAssemblerARM::ma_add(Register src1, Register src2, Register dest, SBit s, Condition c)
+{
+    as_alu(dest, src1, O2Reg(src2), OpAdd, s, c);
+}
+void
+MacroAssemblerARM::ma_add(Register src1, Operand op, Register dest, SBit s, Condition c)
+{
+    ma_alu(src1, op, dest, OpAdd, s, c);
+}
+void
+MacroAssemblerARM::ma_add(Register src1, Imm32 op, Register dest, SBit s, Condition c)
+{
+    ma_alu(src1, op, dest, OpAdd, s, c);
 }
 
 // Subtract with carry.
 void
-MacroAssemblerARM::ma_sbc(Imm32 imm, Register dest, SetCond_ sc, Condition c)
-{
-    ma_alu(dest, imm, dest, OpSbc, sc, c);
-}
-void
-MacroAssemblerARM::ma_sbc(Register src1, Register dest, SetCond_ sc, Condition c)
-{
-    as_alu(dest, dest, O2Reg(src1), OpSbc, sc, c);
-}
-void
-MacroAssemblerARM::ma_sbc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
-{
-    as_alu(dest, src1, O2Reg(src2), OpSbc, sc, c);
+MacroAssemblerARM::ma_sbc(Imm32 imm, Register dest, SBit s, Condition c)
+{
+    ma_alu(dest, imm, dest, OpSbc, s, c);
+}
+void
+MacroAssemblerARM::ma_sbc(Register src1, Register dest, SBit s, Condition c)
+{
+    as_alu(dest, dest, O2Reg(src1), OpSbc, s, c);
+}
+void
+MacroAssemblerARM::ma_sbc(Register src1, Register src2, Register dest, SBit s, Condition c)
+{
+    as_alu(dest, src1, O2Reg(src2), OpSbc, s, c);
 }
 
 // Subtract.
 void
-MacroAssemblerARM::ma_sub(Imm32 imm, Register dest, SetCond_ sc, Condition c)
-{
-    ma_alu(dest, imm, dest, OpSub, sc, c);
-}
-void
-MacroAssemblerARM::ma_sub(Register src1, Register dest, SetCond_ sc, Condition c)
-{
-    ma_alu(dest, Operand(src1), dest, OpSub, sc, c);
-}
-void
-MacroAssemblerARM::ma_sub(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
-{
-    ma_alu(src1, Operand(src2), dest, OpSub, sc, c);
-}
-void
-MacroAssemblerARM::ma_sub(Register src1, Operand op, Register dest, SetCond_ sc, Condition c)
-{
-    ma_alu(src1, op, dest, OpSub, sc, c);
-}
-void
-MacroAssemblerARM::ma_sub(Register src1, Imm32 op, Register dest, SetCond_ sc, Condition c)
-{
-    ma_alu(src1, op, dest, OpSub, sc, c);
+MacroAssemblerARM::ma_sub(Imm32 imm, Register dest, SBit s, Condition c)
+{
+    ma_alu(dest, imm, dest, OpSub, s, c);
+}
+void
+MacroAssemblerARM::ma_sub(Register src1, Register dest, SBit s, Condition c)
+{
+    ma_alu(dest, Operand(src1), dest, OpSub, s, c);
+}
+void
+MacroAssemblerARM::ma_sub(Register src1, Register src2, Register dest, SBit s, Condition c)
+{
+    ma_alu(src1, Operand(src2), dest, OpSub, s, c);
+}
+void
+MacroAssemblerARM::ma_sub(Register src1, Operand op, Register dest, SBit s, Condition c)
+{
+    ma_alu(src1, op, dest, OpSub, s, c);
+}
+void
+MacroAssemblerARM::ma_sub(Register src1, Imm32 op, Register dest, SBit s, Condition c)
+{
+    ma_alu(src1, op, dest, OpSub, s, c);
 }
 
 // Reverse subtract.
 void
-MacroAssemblerARM::ma_rsb(Imm32 imm, Register dest, SetCond_ sc, Condition c)
-{
-    ma_alu(dest, imm, dest, OpRsb, sc, c);
-}
-void
-MacroAssemblerARM::ma_rsb(Register src1, Register dest, SetCond_ sc, Condition c)
-{
-    as_alu(dest, dest, O2Reg(src1), OpAdd, sc, c);
-}
-void
-MacroAssemblerARM::ma_rsb(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
-{
-    as_alu(dest, src1, O2Reg(src2), OpRsb, sc, c);
-}
-void
-MacroAssemblerARM::ma_rsb(Register src1, Imm32 op2, Register dest, SetCond_ sc, Condition c)
-{
-    ma_alu(src1, op2, dest, OpRsb, sc, c);
+MacroAssemblerARM::ma_rsb(Imm32 imm, Register dest, SBit s, Condition c)
+{
+    ma_alu(dest, imm, dest, OpRsb, s, c);
+}
+void
+MacroAssemblerARM::ma_rsb(Register src1, Register dest, SBit s, Condition c)
+{
+    as_alu(dest, dest, O2Reg(src1), OpRsb, s, c);
+}
+void
+MacroAssemblerARM::ma_rsb(Register src1, Register src2, Register dest, SBit s, Condition c)
+{
+    as_alu(dest, src1, O2Reg(src2), OpRsb, s, c);
+}
+void
+MacroAssemblerARM::ma_rsb(Register src1, Imm32 op2, Register dest, SBit s, Condition c)
+{
+    ma_alu(src1, op2, dest, OpRsb, s, c);
 }
 
 // Reverse subtract with carry.
 void
-MacroAssemblerARM::ma_rsc(Imm32 imm, Register dest, SetCond_ sc, Condition c)
-{
-    ma_alu(dest, imm, dest, OpRsc, sc, c);
-}
-void
-MacroAssemblerARM::ma_rsc(Register src1, Register dest, SetCond_ sc, Condition c)
-{
-    as_alu(dest, dest, O2Reg(src1), OpRsc, sc, c);
-}
-void
-MacroAssemblerARM::ma_rsc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
-{
-    as_alu(dest, src1, O2Reg(src2), OpRsc, sc, c);
+MacroAssemblerARM::ma_rsc(Imm32 imm, Register dest, SBit s, Condition c)
+{
+    ma_alu(dest, imm, dest, OpRsc, s, c);
+}
+void
+MacroAssemblerARM::ma_rsc(Register src1, Register dest, SBit s, Condition c)
+{
+    as_alu(dest, dest, O2Reg(src1), OpRsc, s, c);
+}
+void
+MacroAssemblerARM::ma_rsc(Register src1, Register src2, Register dest, SBit s, Condition c)
+{
+    as_alu(dest, src1, O2Reg(src2), OpRsc, s, c);
 }
 
 // Compares/tests.
 // Compare negative (sets condition codes as src1 + src2 would).
 void
 MacroAssemblerARM::ma_cmn(Register src1, Imm32 imm, Condition c)
 {
-    ma_alu(src1, imm, InvalidReg, OpCmn, SetCond, c);
+    ma_alu(src1, imm, InvalidReg, OpCmn, SetCC, c);
 }
 void
 MacroAssemblerARM::ma_cmn(Register src1, Register src2, Condition c)
 {
-    as_alu(InvalidReg, src2, O2Reg(src1), OpCmn, SetCond, c);
+    as_alu(InvalidReg, src2, O2Reg(src1), OpCmn, SetCC, c);
 }
 void
 MacroAssemblerARM::ma_cmn(Register src1, Operand op, Condition c)
 {
     MOZ_CRASH("Feature NYI");
 }
 
 // Compare (src1 - src2).
 void
 MacroAssemblerARM::ma_cmp(Register src1, Imm32 imm, Condition c)
 {
-    ma_alu(src1, imm, InvalidReg, OpCmp, SetCond, c);
+    ma_alu(src1, imm, InvalidReg, OpCmp, SetCC, c);
 }
 
 void
 MacroAssemblerARM::ma_cmp(Register src1, ImmWord ptr, Condition c)
 {
     ma_cmp(src1, Imm32(ptr.value), c);
 }
 
@@ -842,34 +842,34 @@ MacroAssemblerARM::ma_cmp(Register src1,
 void
 MacroAssemblerARM::ma_cmp(Register src1, Operand op, Condition c)
 {
     switch (op.getTag()) {
       case Operand::OP2:
         as_cmp(src1, op.toOp2(), c);
         break;
       case Operand::MEM:
-        ma_ldr(op, ScratchRegister);
+        ma_ldr(op.toAddress(), ScratchRegister);
         as_cmp(src1, O2Reg(ScratchRegister), c);
         break;
       default:
         MOZ_CRASH("trying to compare FP and integer registers");
     }
 }
 void
 MacroAssemblerARM::ma_cmp(Register src1, Register src2, Condition c)
 {
     as_cmp(src1, O2Reg(src2), c);
 }
 
 // Test for equality (src1 ^ src2).
 void
 MacroAssemblerARM::ma_teq(Register src1, Imm32 imm, Condition c)
 {
-    ma_alu(src1, imm, InvalidReg, OpTeq, SetCond, c);
+    ma_alu(src1, imm, InvalidReg, OpTeq, SetCC, c);
 }
 void
 MacroAssemblerARM::ma_teq(Register src1, Register src2, Condition c)
 {
     as_tst(src1, O2Reg(src2), c);
 }
 void
 MacroAssemblerARM::ma_teq(Register src1, Operand op, Condition c)
@@ -877,17 +877,17 @@ MacroAssemblerARM::ma_teq(Register src1,
     as_teq(src1, op.toOp2(), c);
 }
 
 
 // Test (src1 & src2).
 void
 MacroAssemblerARM::ma_tst(Register src1, Imm32 imm, Condition c)
 {
-    ma_alu(src1, imm, InvalidReg, OpTst, SetCond, c);
+    ma_alu(src1, imm, InvalidReg, OpTst, SetCC, c);
 }
 void
 MacroAssemblerARM::ma_tst(Register src1, Register src2, Condition c)
 {
     as_tst(src1, O2Reg(src2), c);
 }
 void
 MacroAssemblerARM::ma_tst(Register src1, Operand op, Condition c)
@@ -909,17 +909,17 @@ MacroAssemblerARM::ma_mul(Register src1,
 }
 
 Assembler::Condition
 MacroAssemblerARM::ma_check_mul(Register src1, Register src2, Register dest, Condition cond)
 {
     // TODO: this operation is illegal on armv6 and earlier if src2 ==
     // ScratchRegister or src2 == dest.
     if (cond == Equal || cond == NotEqual) {
-        as_smull(ScratchRegister, dest, src1, src2, SetCond);
+        as_smull(ScratchRegister, dest, src1, src2, SetCC);
         return cond;
     }
 
     if (cond == Overflow) {
         as_smull(ScratchRegister, dest, src1, src2);
         as_cmp(ScratchRegister, asr(dest, 31));
         return NotEqual;
     }
@@ -927,17 +927,17 @@ MacroAssemblerARM::ma_check_mul(Register
     MOZ_CRASH("Condition NYI");
 }
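
The Overflow paths in these two overloads rely on a standard identity: a signed 32x32->64-bit product fits in an int32 exactly when its high word equals the sign extension of its low word, which is what comparing ScratchRegister against asr(dest, 31) tests. A scalar sketch of that check (hypothetical helper, assuming arithmetic right shift as asr provides):

    #include <stdint.h>

    static bool MulOverflowsInt32(int32_t a, int32_t b)
    {
        int64_t full = (int64_t)a * (int64_t)b; // What smull computes.
        int32_t lo = (int32_t)full;             // Lands in dest.
        int32_t hi = (int32_t)(full >> 32);     // Lands in ScratchRegister.
        // The product fits iff hi is exactly lo's sign extension.
        return hi != (lo >> 31);
    }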
 
 Assembler::Condition
 MacroAssemblerARM::ma_check_mul(Register src1, Imm32 imm, Register dest, Condition cond)
 {
     ma_mov(imm, ScratchRegister);
     if (cond == Equal || cond == NotEqual) {
-        as_smull(ScratchRegister, dest, ScratchRegister, src1, SetCond);
+        as_smull(ScratchRegister, dest, ScratchRegister, src1, SetCC);
         return cond;
     }
 
     if (cond == Overflow) {
         as_smull(ScratchRegister, dest, ScratchRegister, src1);
         as_cmp(ScratchRegister, asr(dest, 31));
         return NotEqual;
     }
@@ -974,45 +974,45 @@ MacroAssemblerARM::ma_mod_mask(Register 
     // (and holds the final result)
     //
     // Move the whole value into tmp, setting the condition codes so we can muck
     // with them later.
     //
     // Note that we cannot use ScratchRegister in place of tmp here, as ma_and
     // below on certain architectures moves the mask into ScratchRegister before
     // performing the bitwise and.
-    as_mov(tmp, O2Reg(src), SetCond);
+    as_mov(tmp, O2Reg(src), SetCC);
     // Zero out the dest.
     ma_mov(Imm32(0), dest);
     // Set the hold appropriately.
     ma_mov(Imm32(1), hold);
-    ma_mov(Imm32(-1), hold, NoSetCond, Signed);
-    ma_rsb(Imm32(0), tmp, SetCond, Signed);
+    ma_mov(Imm32(-1), hold, LeaveCC, Signed);
+    ma_rsb(Imm32(0), tmp, SetCC, Signed);
     // Begin the main loop.
     bind(&head);
 
     // Extract the bottom bits into lr.
     ma_and(Imm32(mask), tmp, secondScratchReg_);
     // Add those bits to the accumulator.
     ma_add(secondScratchReg_, dest, dest);
     // Do a trial subtraction; this is the same operation as cmp, but we store
     // the result.
-    ma_sub(dest, Imm32(mask), secondScratchReg_, SetCond);
+    ma_sub(dest, Imm32(mask), secondScratchReg_, SetCC);
     // If (sum - C) > 0, store sum - C back into sum, thus performing a modulus.
-    ma_mov(secondScratchReg_, dest, NoSetCond, NotSigned);
+    ma_mov(secondScratchReg_, dest, LeaveCC, NotSigned);
     // Get rid of the bits that we extracted before, and set the condition codes.
-    as_mov(tmp, lsr(tmp, shift), SetCond);
+    as_mov(tmp, lsr(tmp, shift), SetCC);
     // If the shift produced zero, finish, otherwise, continue in the loop.
     ma_b(&head, NonZero);
     // Check the hold to see if we need to negate the result. Hold can only be
     // 1 or -1, so this will never set the 0 flag.
     ma_cmp(hold, Imm32(0));
     // If the hold was non-zero, negate the result to be in line with what JS
     // wants; this will set the condition codes if we try to negate.
-    ma_rsb(Imm32(0), dest, SetCond, Signed);
+    ma_rsb(Imm32(0), dest, SetCC, Signed);
     // Since the Zero flag is not set by the compare, we can *only* set the Zero
     // flag in the rsb, so Zero is set iff we negated zero (i.e. the result of
     // the computation was -0.0).
 }
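
In scalar terms, the loop computes src % mask under the caller's guarantee that mask == (1 << shift) - 1: since 2^shift is congruent to 1 mod mask, summing the shift-bit digits of |src| preserves the value mod mask, and the trial subtraction keeps the accumulator in [0, mask). A sketch under those assumptions (hypothetical helper, not the emitted code):

    #include <stdint.h>

    static int32_t ModMask(int32_t src, uint32_t shift, int32_t mask)
    {
        int32_t hold = src < 0 ? -1 : 1; // JS-style sign of the result.
        uint32_t tmp = src < 0 ? 0u - (uint32_t)src : (uint32_t)src;
        int32_t dest = 0;
        while (tmp != 0) {
            dest += tmp & mask;  // Add the bottom shift bits.
            if (dest >= mask)    // Trial subtraction, as the sub/mov pair
                dest -= mask;    // above does: keep dest in [0, mask).
            tmp >>= shift;
        }
        return hold < 0 ? -dest : dest;
    }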
 
 void
 MacroAssemblerARM::ma_smod(Register num, Register div, Register dest)
 {
@@ -1063,25 +1063,23 @@ MacroAssemblerARM::ma_dtr(LoadStore ls, 
 
 void
 MacroAssemblerARM::ma_str(Register rt, DTRAddr addr, Index mode, Condition cc)
 {
     as_dtr(IsStore, 32, mode, rt, addr, cc);
 }
 
 void
-MacroAssemblerARM::ma_dtr(LoadStore ls, Register rt, const Operand& addr, Index mode, Condition cc)
-{
-    ma_dataTransferN(ls, 32, true,
-                     Register::FromCode(addr.base()), Imm32(addr.disp()),
-                     rt, mode, cc);
-}
-
-void
-MacroAssemblerARM::ma_str(Register rt, const Operand& addr, Index mode, Condition cc)
+MacroAssemblerARM::ma_dtr(LoadStore ls, Register rt, const Address& addr, Index mode, Condition cc)
+{
+    ma_dataTransferN(ls, 32, true, addr.base, Imm32(addr.offset), rt, mode, cc);
+}
+
+void
+MacroAssemblerARM::ma_str(Register rt, const Address& addr, Index mode, Condition cc)
 {
     ma_dtr(IsStore, rt, addr, mode, cc);
 }
 void
 MacroAssemblerARM::ma_strd(Register rt, DebugOnly<Register> rt2, EDtrAddr addr, Index mode, Condition cc)
 {
     MOZ_ASSERT((rt.code() & 1) == 0);
     MOZ_ASSERT(rt2.value.code() == rt.code() + 1);
@@ -1089,17 +1087,17 @@ MacroAssemblerARM::ma_strd(Register rt, 
 }
 
 void
 MacroAssemblerARM::ma_ldr(DTRAddr addr, Register rt, Index mode, Condition cc)
 {
     as_dtr(IsLoad, 32, mode, rt, addr, cc);
 }
 void
-MacroAssemblerARM::ma_ldr(const Operand& addr, Register rt, Index mode, Condition cc)
+MacroAssemblerARM::ma_ldr(const Address& addr, Register rt, Index mode, Condition cc)
 {
     ma_dtr(IsLoad, rt, addr, mode, cc);
 }
 
 void
 MacroAssemblerARM::ma_ldrb(DTRAddr addr, Register rt, Index mode, Condition cc)
 {
     as_dtr(IsLoad, 8, mode, rt, addr, cc);
@@ -1229,43 +1227,43 @@ MacroAssemblerARM::ma_dataTransferN(Load
         //
         // Note a neg_bottom of 0x1000 cannot be encoded as an immediate
         // negative offset in the instruction; this occurs when bottom is
         // zero, so this case is guarded against below.
         if (off < 0) {
             Operand2 sub_off = Imm8(-(off - bottom)); // sub_off = bottom - off
             if (!sub_off.invalid) {
                 // - sub_off = off - bottom
-                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc);
+                as_sub(ScratchRegister, rn, sub_off, LeaveCC, cc);
                 return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc);
             }
             // sub_off = -neg_bottom - off
             sub_off = Imm8(-(off + neg_bottom));
             if (!sub_off.invalid && bottom != 0) {
                 // Guarded against by: bottom != 0
                 MOZ_ASSERT(neg_bottom < 0x1000);
                 // - sub_off = neg_bottom + off
-                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc);
+                as_sub(ScratchRegister, rn, sub_off, LeaveCC, cc);
                 return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc);
             }
         } else {
             // sub_off = off - bottom
             Operand2 sub_off = Imm8(off - bottom);
             if (!sub_off.invalid) {
                 //  sub_off = off - bottom
-                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc);
+                as_add(ScratchRegister, rn, sub_off, LeaveCC, cc);
                 return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc);
             }
             // sub_off = neg_bottom + off
             sub_off = Imm8(off + neg_bottom);
             if (!sub_off.invalid && bottom != 0) {
                 // Guarded against by: bottom != 0
                 MOZ_ASSERT(neg_bottom < 0x1000);
                 // sub_off = neg_bottom + off
-                as_add(ScratchRegister, rn, sub_off, NoSetCond,  cc);
+                as_add(ScratchRegister, rn, sub_off, LeaveCC, cc);
                 return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc);
             }
         }
         ma_mov(offset, ScratchRegister);
         return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(ScratchRegister, LSL, 0)));
     } else {
         // Should attempt to use the extended load/store instructions.
         if (off < 256 && off > -256)
@@ -1281,49 +1279,49 @@ MacroAssemblerARM::ma_dataTransferN(Load
         // Note a neg_bottom of 0x100 cannot be encoded as an immediate
         // negative offset in the instruction; this occurs when bottom is
         // zero, so this case is guarded against below.
         if (off < 0) {
             // sub_off = bottom - off
             Operand2 sub_off = Imm8(-(off - bottom));
             if (!sub_off.invalid) {
                 // - sub_off = off - bottom
-                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc);
+                as_sub(ScratchRegister, rn, sub_off, LeaveCC, cc);
                 return as_extdtr(ls, size, IsSigned, Offset, rt,
                                  EDtrAddr(ScratchRegister, EDtrOffImm(bottom)),
                                  cc);
             }
             // sub_off = -neg_bottom - off
             sub_off = Imm8(-(off + neg_bottom));
             if (!sub_off.invalid && bottom != 0) {
                 // Guarded against by: bottom != 0
                 MOZ_ASSERT(neg_bottom < 0x100);
                 // - sub_off = neg_bottom + off
-                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc);
+                as_sub(ScratchRegister, rn, sub_off, LeaveCC, cc);
                 return as_extdtr(ls, size, IsSigned, Offset, rt,
                                  EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)),
                                  cc);
             }
         } else {
             // sub_off = off - bottom
             Operand2 sub_off = Imm8(off - bottom);
             if (!sub_off.invalid) {
                 // sub_off = off - bottom
-                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc);
+                as_add(ScratchRegister, rn, sub_off, LeaveCC, cc);
                 return as_extdtr(ls, size, IsSigned, Offset, rt,
                                  EDtrAddr(ScratchRegister, EDtrOffImm(bottom)),
                                  cc);
             }
             // sub_off = neg_bottom + off
             sub_off = Imm8(off + neg_bottom);
             if (!sub_off.invalid && bottom != 0) {
                 // Guarded against by: bottom != 0
                 MOZ_ASSERT(neg_bottom < 0x100);
                 // sub_off = neg_bottom + off
-                as_add(ScratchRegister, rn, sub_off, NoSetCond,  cc);
+                as_add(ScratchRegister, rn, sub_off, LeaveCC, cc);
                 return as_extdtr(ls, size, IsSigned, Offset, rt,
                                  EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)),
                                  cc);
             }
         }
         ma_mov(offset, ScratchRegister);
         return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(ScratchRegister)), cc);
     }
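
Both branches of this function use the same splitting idea, as does ma_vdtr below for VFP transfers: when the displacement does not fit the load's immediate field, peel off the low bits that do fit (bottom) and fold the remainder into the base with a single add/sub into ScratchRegister, provided the remainder is itself a legal ALU immediate; the neg_bottom variant retries the split against a negative load displacement, and only if every encoding fails is the whole offset materialized in the scratch register. A standalone sketch for the plain 12-bit ldr/str field (hypothetical helper; the extended transfers use an 8-bit field and the VFP ones a word-scaled 8-bit field):

    #include <stdint.h>

    struct SplitOffset {
        int32_t aluImm; // Folded into the base: add scratch, rn, #aluImm.
        int32_t dtrImm; // Left in the load:     ldr rt, [scratch, #dtrImm].
    };

    // off == aluImm + dtrImm by construction. aluImm must additionally be
    // encodable as an ARM ALU immediate (8 bits, rotated), which the real
    // code checks through Imm8(...).invalid before committing.
    static SplitOffset SplitDTROffset(int32_t off)
    {
        int32_t bottom = off & 0xfff; // Low 12 bits fit the ldr field.
        SplitOffset s = { off - bottom, bottom };
        return s;
    }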
@@ -1704,107 +1702,107 @@ MacroAssemblerARM::ma_vxfer(FloatRegiste
 
 void
 MacroAssemblerARM::ma_vxfer(Register src1, Register src2, FloatRegister dest, Condition cc)
 {
     as_vxfer(src1, src2, VFPRegister(dest), CoreToFloat, cc);
 }
 
 BufferOffset
-MacroAssemblerARM::ma_vdtr(LoadStore ls, const Operand& addr, VFPRegister rt, Condition cc)
-{
-    int off = addr.disp();
+MacroAssemblerARM::ma_vdtr(LoadStore ls, const Address& addr, VFPRegister rt, Condition cc)
+{
+    int off = addr.offset;
     MOZ_ASSERT((off & 3) == 0);
-    Register base = Register::FromCode(addr.base());
+    Register base = addr.base;
     if (off > -1024 && off < 1024)
-        return as_vdtr(ls, rt, addr.toVFPAddr(), cc);
+        return as_vdtr(ls, rt, Operand(addr).toVFPAddr(), cc);
 
     // We cannot encode this offset in a single ldr. Try to encode it as an
     // add scratch, base, imm; ldr dest, [scratch, +offset].
     int bottom = off & (0xff << 2);
     int neg_bottom = (0x100 << 2) - bottom;
     // At this point, both off - bottom and off + neg_bottom will be
     // reasonable-ish quantities.
     //
     // Note a neg_bottom of 0x400 cannot be encoded as an immediate negative
     // offset in the instruction; this occurs when bottom is zero, so this
     // case is guarded against below.
     if (off < 0) {
         // sub_off = bottom - off
         Operand2 sub_off = Imm8(-(off - bottom));
         if (!sub_off.invalid) {
             // - sub_off = off - bottom
-            as_sub(ScratchRegister, base, sub_off, NoSetCond, cc);
+            as_sub(ScratchRegister, base, sub_off, LeaveCC, cc);
             return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(bottom)), cc);
         }
         // sub_off = -neg_bottom - off
         sub_off = Imm8(-(off + neg_bottom));
         if (!sub_off.invalid && bottom != 0) {
             // Guarded against by: bottom != 0
             MOZ_ASSERT(neg_bottom < 0x400);
             // - sub_off = neg_bottom + off
-            as_sub(ScratchRegister, base, sub_off, NoSetCond, cc);
+            as_sub(ScratchRegister, base, sub_off, LeaveCC, cc);
             return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(-neg_bottom)), cc);
         }
     } else {
         // sub_off = off - bottom
         Operand2 sub_off = Imm8(off - bottom);
         if (!sub_off.invalid) {
             // sub_off = off - bottom
-            as_add(ScratchRegister, base, sub_off, NoSetCond, cc);
+            as_add(ScratchRegister, base, sub_off, LeaveCC, cc);
             return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(bottom)), cc);
         }
         // sub_off = neg_bottom + off
         sub_off = Imm8(off + neg_bottom);
         if (!sub_off.invalid && bottom != 0) {
             // Guarded against by: bottom != 0
             MOZ_ASSERT(neg_bottom < 0x400);
             // sub_off = neg_bottom + off
-            as_add(ScratchRegister, base, sub_off, NoSetCond, cc);
+            as_add(ScratchRegister, base, sub_off, LeaveCC, cc);
             return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(-neg_bottom)), cc);
         }
     }
-    ma_add(base, Imm32(off), ScratchRegister, NoSetCond, cc);
+    ma_add(base, Imm32(off), ScratchRegister, LeaveCC, cc);
     return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(0)), cc);
 }
 
 BufferOffset
 MacroAssemblerARM::ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc)
 {
     return as_vdtr(IsLoad, dest, addr, cc);
 }
 BufferOffset
-MacroAssemblerARM::ma_vldr(const Operand& addr, VFPRegister dest, Condition cc)
+MacroAssemblerARM::ma_vldr(const Address& addr, VFPRegister dest, Condition cc)
 {
     return ma_vdtr(IsLoad, addr, dest, cc);
 }
 BufferOffset
 MacroAssemblerARM::ma_vldr(VFPRegister src, Register base, Register index, int32_t shift, Condition cc)
 {
-    as_add(ScratchRegister, base, lsl(index, shift), NoSetCond, cc);
-    return ma_vldr(Operand(ScratchRegister, 0), src, cc);
+    as_add(ScratchRegister, base, lsl(index, shift), LeaveCC, cc);
+    return ma_vldr(Address(ScratchRegister, 0), src, cc);
 }
 
 BufferOffset
 MacroAssemblerARM::ma_vstr(VFPRegister src, VFPAddr addr, Condition cc)
 {
     return as_vdtr(IsStore, src, addr, cc);
 }
 
 BufferOffset
-MacroAssemblerARM::ma_vstr(VFPRegister src, const Operand& addr, Condition cc)
+MacroAssemblerARM::ma_vstr(VFPRegister src, const Address& addr, Condition cc)
 {
     return ma_vdtr(IsStore, addr, src, cc);
 }
 BufferOffset
 MacroAssemblerARM::ma_vstr(VFPRegister src, Register base, Register index, int32_t shift,
                            int32_t offset, Condition cc)
 {
-    as_add(ScratchRegister, base, lsl(index, shift), NoSetCond, cc);
-    return ma_vstr(src, Operand(ScratchRegister, offset), cc);
+    as_add(ScratchRegister, base, lsl(index, shift), LeaveCC, cc);
+    return ma_vstr(src, Address(ScratchRegister, offset), cc);
 }
 
 void
 MacroAssemblerARMCompat::buildFakeExitFrame(Register scratch, uint32_t* offset)
 {
     DebugOnly<uint32_t> initialDepth = framePushed();
     uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);
 
@@ -1926,81 +1924,81 @@ void
 MacroAssemblerARMCompat::freeStack(Register amount)
 {
     ma_add(amount, sp);
 }
 
 void
 MacroAssemblerARMCompat::add32(Register src, Register dest)
 {
-    ma_add(src, dest, SetCond);
+    ma_add(src, dest, SetCC);
 }
 
 void
 MacroAssemblerARMCompat::add32(Imm32 imm, Register dest)
 {
-    ma_add(imm, dest, SetCond);
+    ma_add(imm, dest, SetCC);
 }
 
 void
 MacroAssemblerARMCompat::xor32(Imm32 imm, Register dest)
 {
-    ma_eor(imm, dest, SetCond);
+    ma_eor(imm, dest, SetCC);
 }
 
 void
 MacroAssemblerARMCompat::add32(Imm32 imm, const Address& dest)
 {
     load32(dest, ScratchRegister);
-    ma_add(imm, ScratchRegister, SetCond);
+    ma_add(imm, ScratchRegister, SetCC);
     store32(ScratchRegister, dest);
 }
 
 void
 MacroAssemblerARMCompat::sub32(Imm32 imm, Register dest)
 {
-    ma_sub(imm, dest, SetCond);
+    ma_sub(imm, dest, SetCC);
 }
 
 void
 MacroAssemblerARMCompat::sub32(Register src, Register dest)
 {
-    ma_sub(src, dest, SetCond);
+    ma_sub(src, dest, SetCC);
 }
 
 void
 MacroAssemblerARMCompat::and32(Register src, Register dest)
 {
-    ma_and(src, dest, SetCond);
+    ma_and(src, dest, SetCC);
 }
 
 void
 MacroAssemblerARMCompat::and32(Imm32 imm, Register dest)
 {
-    ma_and(imm, dest, SetCond);
+    ma_and(imm, dest, SetCC);
 }
 
 void
 MacroAssemblerARMCompat::and32(const Address& src, Register dest)
 {
     load32(src, ScratchRegister);
-    ma_and(ScratchRegister, dest, SetCond);
+    ma_and(ScratchRegister, dest, SetCC);
 }
 
 void
 MacroAssemblerARMCompat::addPtr(Register src, Register dest)
 {
     ma_add(src, dest);
 }
 
 void
 MacroAssemblerARMCompat::addPtr(const Address& src, Register dest)
 {
     load32(src, ScratchRegister);
-    ma_add(ScratchRegister, dest, SetCond);
+    ma_add(ScratchRegister, dest, SetCC);
 }
 
 void
 MacroAssemblerARMCompat::not32(Register reg)
 {
     ma_mvn(reg, reg);
 }
 
@@ -2227,17 +2225,17 @@ MacroAssemblerARMCompat::load32(const Ba
 void
 MacroAssemblerARMCompat::load32(AbsoluteAddress address, Register dest)
 {
     loadPtr(address, dest);
 }
 void
 MacroAssemblerARMCompat::loadPtr(const Address& address, Register dest)
 {
-    ma_ldr(Operand(address), dest);
+    ma_ldr(address, dest);
 }
 
 void
 MacroAssemblerARMCompat::loadPtr(const BaseIndex& src, Register dest)
 {
     Register base = src.base;
     uint32_t scale = Imm32::ShiftOf(src.scale).value;
 
@@ -2256,91 +2254,84 @@ MacroAssemblerARMCompat::loadPtr(Absolut
 }
 void
 MacroAssemblerARMCompat::loadPtr(AsmJSAbsoluteAddress address, Register dest)
 {
     movePtr(AsmJSImmPtr(address.kind()), ScratchRegister);
     loadPtr(Address(ScratchRegister, 0x0), dest);
 }
 
-Operand payloadOf(const Address& address) {
-    return Operand(address.base, address.offset);
-}
-Operand tagOf(const Address& address) {
-    return Operand(address.base, address.offset + 4);
-}
-
 void
 MacroAssemblerARMCompat::loadPrivate(const Address& address, Register dest)
 {
-    ma_ldr(payloadOf(address), dest);
+    ma_ldr(ToPayload(address), dest);
 }
 
 void
 MacroAssemblerARMCompat::loadDouble(const Address& address, FloatRegister dest)
 {
-    ma_vldr(Operand(address), dest);
+    ma_vldr(address, dest);
 }
 
 void
 MacroAssemblerARMCompat::loadDouble(const BaseIndex& src, FloatRegister dest)
 {
     // VFP instructions don't even support register Base + register Index modes,
     // so just add the index, then handle the offset like normal.
     Register base = src.base;
     Register index = src.index;
     uint32_t scale = Imm32::ShiftOf(src.scale).value;
     int32_t offset = src.offset;
     as_add(ScratchRegister, base, lsl(index, scale));
 
-    ma_vldr(Operand(ScratchRegister, offset), dest);
+    ma_vldr(Address(ScratchRegister, offset), dest);
 }
 
 void
 MacroAssemblerARMCompat::loadFloatAsDouble(const Address& address, FloatRegister dest)
 {
     VFPRegister rt = dest;
-    ma_vldr(Operand(address), rt.singleOverlay());
+    ma_vldr(address, rt.singleOverlay());
     as_vcvt(rt, rt.singleOverlay());
 }
 
 void
 MacroAssemblerARMCompat::loadFloatAsDouble(const BaseIndex& src, FloatRegister dest)
 {
     // VFP instructions don't even support register Base + register Index modes,
     // so just add the index, then handle the offset like normal.
     Register base = src.base;
     Register index = src.index;
     uint32_t scale = Imm32::ShiftOf(src.scale).value;
     int32_t offset = src.offset;
     VFPRegister rt = dest;
     as_add(ScratchRegister, base, lsl(index, scale));
 
-    ma_vldr(Operand(ScratchRegister, offset), rt.singleOverlay());
+    ma_vldr(Address(ScratchRegister, offset), rt.singleOverlay());
     as_vcvt(rt, rt.singleOverlay());
 }
 
 void
 MacroAssemblerARMCompat::loadFloat32(const Address& address, FloatRegister dest)
 {
-    ma_vldr(Operand(address), VFPRegister(dest).singleOverlay());
+    ma_vldr(address, VFPRegister(dest).singleOverlay());
 }
 
 void
 MacroAssemblerARMCompat::loadFloat32(const BaseIndex& src, FloatRegister dest)
 {
     // VFP instructions don't even support register Base + register Index modes,
     // so just add the index, then handle the offset like normal.
     Register base = src.base;
     Register index = src.index;
     uint32_t scale = Imm32::ShiftOf(src.scale).value;
     int32_t offset = src.offset;
     as_add(ScratchRegister, base, lsl(index, scale));
 
-    ma_vldr(Operand(ScratchRegister, offset), VFPRegister(dest).singleOverlay());
+    ma_vldr(Address(ScratchRegister, offset), VFPRegister(dest).singleOverlay());
 }
 
 void
 MacroAssemblerARMCompat::store8(Imm32 imm, const Address& address)
 {
     ma_mov(imm, secondScratchReg_);
     store8(secondScratchReg_, address);
 }
@@ -2483,17 +2474,17 @@ MacroAssemblerARMCompat::storePtr(ImmGCP
 }
 
 template void MacroAssemblerARMCompat::storePtr<Address>(ImmGCPtr imm, Address address);
 template void MacroAssemblerARMCompat::storePtr<BaseIndex>(ImmGCPtr imm, BaseIndex address);
 
 void
 MacroAssemblerARMCompat::storePtr(Register src, const Address& address)
 {
-    ma_str(src, Operand(address));
+    ma_str(src, address);
 }
 
 void
 MacroAssemblerARMCompat::storePtr(Register src, const BaseIndex& address)
 {
     store32(src, address);
 }
 
@@ -2526,39 +2517,39 @@ MacroAssembler::clampDoubleToUint8(Float
         ma_lsr(Imm32(24), output, output);
         // If any of the bottom 24 bits were non-zero, then we're good, since
         // this number can't be exactly XX.0
         ma_b(&notSplit, NonZero);
         as_vxfer(ScratchRegister, InvalidReg, input, FloatToCore);
         ma_cmp(ScratchRegister, Imm32(0));
         // If the lower 32 bits of the double were 0, then this was an exact number,
         // and it should be even.
-        ma_bic(Imm32(1), output, NoSetCond, Zero);
+        ma_bic(Imm32(1), output, LeaveCC, Zero);
         bind(&notSplit);
     } else {
         Label outOfRange;
         ma_vcmpz(input);
         // Do the add, in place so we can reference it later.
         ma_vadd(input, ScratchDoubleReg, input);
         // Do the conversion to an integer.
         as_vcvt(VFPRegister(ScratchDoubleReg).uintOverlay(), VFPRegister(input));
         // Copy the converted value out.
         as_vxfer(output, InvalidReg, ScratchDoubleReg, FloatToCore);
         as_vmrs(pc);
-        ma_mov(Imm32(0), output, NoSetCond, Overflow);  // NaN => 0
+        ma_mov(Imm32(0), output, LeaveCC, Overflow);  // NaN => 0
         ma_b(&outOfRange, Overflow);  // NaN
         ma_cmp(output, Imm32(0xff));
-        ma_mov(Imm32(0xff), output, NoSetCond, Above);
+        ma_mov(Imm32(0xff), output, LeaveCC, Above);
         ma_b(&outOfRange, Above);
         // Convert it back to see if we got the same value back.
         as_vcvt(ScratchDoubleReg, VFPRegister(ScratchDoubleReg).uintOverlay());
         // Do the check.
         as_vcmp(ScratchDoubleReg, input);
         as_vmrs(pc);
-        ma_bic(Imm32(1), output, NoSetCond, Zero);
+        ma_bic(Imm32(1), output, LeaveCC, Zero);
         bind(&outOfRange);
     }
 }
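
Both branches implement the same contract, namely ES ToUint8Clamp: NaN becomes 0, the result clamps to [0, 255], and exact .5 cases round to the nearest even integer, which is what the conditional bic of bit 0 achieves after the round-trip comparison. A portable sketch of that contract rather than of either codepath (ignoring sub-ulp double-rounding corners):

    #include <cmath>
    #include <cstdint>

    static uint8_t ClampDoubleToUint8Ref(double d)
    {
        if (std::isnan(d) || d <= 0.0)
            return 0;
        if (d >= 255.0)
            return 255;
        double r = std::floor(d + 0.5);
        // d was exactly halfway iff r - 0.5 == d; force the result even in
        // that case, as the conditional "bic output, output, #1" does.
        if (r - 0.5 == d && std::fmod(r, 2.0) != 0.0)
            r -= 1.0;
        return (uint8_t)r;
    }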
 
 void
 MacroAssemblerARMCompat::cmp32(Register lhs, Imm32 rhs)
 {
     MOZ_ASSERT(lhs != ScratchRegister);
@@ -3142,29 +3133,29 @@ MacroAssemblerARMCompat::branchTestValue
 void
 MacroAssemblerARMCompat::branchTestValue(Condition cond, const Address& valaddr,
                                          const ValueOperand& value, Label* label)
 {
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
 
     // Check payload before tag, since payload is more likely to differ.
     if (cond == NotEqual) {
-        ma_ldr(payloadOf(valaddr), ScratchRegister);
+        ma_ldr(ToPayload(valaddr), ScratchRegister);
         branchPtr(NotEqual, ScratchRegister, value.payloadReg(), label);
 
-        ma_ldr(tagOf(valaddr), ScratchRegister);
+        ma_ldr(ToType(valaddr), ScratchRegister);
         branchPtr(NotEqual, ScratchRegister, value.typeReg(), label);
 
     } else {
         Label fallthrough;
 
-        ma_ldr(payloadOf(valaddr), ScratchRegister);
+        ma_ldr(ToPayload(valaddr), ScratchRegister);
         branchPtr(NotEqual, ScratchRegister, value.payloadReg(), &fallthrough);
 
-        ma_ldr(tagOf(valaddr), ScratchRegister);
+        ma_ldr(ToType(valaddr), ScratchRegister);
         branchPtr(Equal, ScratchRegister, value.typeReg(), label);
 
         bind(&fallthrough);
     }
 }
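
The shape above is the usual short-circuit comparison of a two-word value, spelled out for both branch polarities; in scalar form it is nothing more than the sketch below, with the payload compared first because distinct Values usually differ there even when their tags agree (hypothetical helper):

    #include <stdint.h>

    static bool BoxedValueEquals(const uint32_t* val, uint32_t payload, uint32_t tag)
    {
        // A payload miss skips the tag load entirely, mirroring the
        // &fallthrough label in the Equal case above.
        return val[0] == payload && val[1] == tag;
    }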
 
 // Unboxing code.
 void
@@ -3172,17 +3163,17 @@ MacroAssemblerARMCompat::unboxNonDouble(
 {
     if (operand.payloadReg() != dest)
         ma_mov(operand.payloadReg(), dest);
 }
 
 void
 MacroAssemblerARMCompat::unboxNonDouble(const Address& src, Register dest)
 {
-    ma_ldr(payloadOf(src), dest);
+    ma_ldr(ToPayload(src), dest);
 }
 
 void
 MacroAssemblerARMCompat::unboxNonDouble(const BaseIndex& src, Register dest)
 {
     ma_alu(src.base, lsl(src.index, src.scale), ScratchRegister, OpAdd);
     ma_ldr(Address(ScratchRegister, src.offset), dest);
 }
@@ -3194,17 +3185,17 @@ MacroAssemblerARMCompat::unboxDouble(con
     as_vxfer(operand.payloadReg(), operand.typeReg(),
              VFPRegister(dest), CoreToFloat);
 }
 
 void
 MacroAssemblerARMCompat::unboxDouble(const Address& src, FloatRegister dest)
 {
     MOZ_ASSERT(dest.isDouble());
-    ma_vldr(Operand(src), dest);
+    ma_vldr(src, dest);
 }
 
 void
 MacroAssemblerARMCompat::unboxValue(const ValueOperand& src, AnyRegister dest)
 {
     if (dest.isFloat()) {
         Label notInt32, end;
         branchTestInt32(Assembler::NotEqual, src, &notInt32);
@@ -3281,17 +3272,17 @@ MacroAssemblerARMCompat::int32ValueToFlo
 
 void
 MacroAssemblerARMCompat::loadConstantFloat32(float f, FloatRegister dest)
 {
     ma_vimm_f32(f, dest);
 }
 
 void
-MacroAssemblerARMCompat::loadInt32OrDouble(const Operand& src, FloatRegister dest)
+MacroAssemblerARMCompat::loadInt32OrDouble(const Address& src, FloatRegister dest)
 {
     Label notInt32, end;
     // If it's an int, convert it to double.
     ma_ldr(ToType(src), ScratchRegister);
     branchTestInt32(Assembler::NotEqual, ScratchRegister, &notInt32);
     ma_ldr(ToPayload(src), ScratchRegister);
     convertInt32ToDouble(ScratchRegister, dest);
     ma_b(&end);
@@ -3360,31 +3351,31 @@ MacroAssemblerARMCompat::testDoubleTruth
     as_vmrs(pc);
     as_cmp(r0, O2Reg(r0), Overflow);
     return truthy ? NonZero : Zero;
 }
 
 Register
 MacroAssemblerARMCompat::extractObject(const Address& address, Register scratch)
 {
-    ma_ldr(payloadOf(address), scratch);
+    ma_ldr(ToPayload(address), scratch);
     return scratch;
 }
 
 Register
 MacroAssemblerARMCompat::extractTag(const Address& address, Register scratch)
 {
-    ma_ldr(tagOf(address), scratch);
+    ma_ldr(ToType(address), scratch);
     return scratch;
 }
 
 Register
 MacroAssemblerARMCompat::extractTag(const BaseIndex& address, Register scratch)
 {
-    ma_alu(address.base, lsl(address.index, address.scale), scratch, OpAdd, NoSetCond);
+    ma_alu(address.base, lsl(address.index, address.scale), scratch, OpAdd, LeaveCC);
     return extractTag(Address(scratch, address.offset), scratch);
 }
 
 template <typename T>
 void
 MacroAssemblerARMCompat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T& dest,
                                            MIRType slotType)
 {
@@ -3427,17 +3418,17 @@ MacroAssemblerARMCompat::moveValue(const
 {
     moveValue(val, dest.typeReg(), dest.payloadReg());
 }
 
 /////////////////////////////////////////////////////////////////
 // X86/X64-common (ARM too now) interface.
 /////////////////////////////////////////////////////////////////
 void
-MacroAssemblerARMCompat::storeValue(ValueOperand val, Operand dst)
+MacroAssemblerARMCompat::storeValue(ValueOperand val, const Address& dst)
 {
     ma_str(val.payloadReg(), ToPayload(dst));
     ma_str(val.typeReg(), ToType(dst));
 }
 
 void
 MacroAssemblerARMCompat::storeValue(ValueOperand val, const BaseIndex& dest)
 {
@@ -3484,64 +3475,64 @@ MacroAssemblerARMCompat::loadValue(const
         ma_alu(addr.base, lsl(addr.index, addr.scale), ScratchRegister, OpAdd);
         loadValue(Address(ScratchRegister, addr.offset), val);
     }
 }
 
 void
 MacroAssemblerARMCompat::loadValue(Address src, ValueOperand val)
 {
-    Operand srcOp = Operand(src);
-    Operand payload = ToPayload(srcOp);
-    Operand type = ToType(srcOp);
+    Address payload = ToPayload(src);
+    Address type = ToType(src);
+
     // TODO: copy this code into a generic function that acts on all sequences
     // of memory accesses
     if (isValueDTRDCandidate(val)) {
         // If the value we want is in two consecutive registers starting with an
         // even register, they can be combined as a single ldrd.
-        int offset = srcOp.disp();
+        int offset = src.offset;
         if (offset < 256 && offset > -256) {
-            ma_ldrd(EDtrAddr(Register::FromCode(srcOp.base()), EDtrOffImm(srcOp.disp())), val.payloadReg(), val.typeReg());
+            ma_ldrd(EDtrAddr(src.base, EDtrOffImm(src.offset)), val.payloadReg(), val.typeReg());
             return;
         }
     }
     // If the payload register number is lower than the type register number,
     // then we may be able to use an ldm instruction.
 
     if (val.payloadReg().code() < val.typeReg().code()) {
-        if (srcOp.disp() <= 4 && srcOp.disp() >= -8 && (srcOp.disp() & 3) == 0) {
+        if (src.offset <= 4 && src.offset >= -8 && (src.offset & 3) == 0) {
             // It turns out each of the 4 values -8, -4, 0, 4 corresponds exactly
             // to one of LDM{DB, DA, IA, IB}.
             DTMMode mode;
-            switch(srcOp.disp()) {
+            switch (src.offset) {
               case -8:
                 mode = DB;
                 break;
               case -4:
                 mode = DA;
                 break;
               case 0:
                 mode = IA;
                 break;
               case 4:
                 mode = IB;
                 break;
               default:
                 MOZ_CRASH("Bogus Offset for LoadValue as DTM");
             }
-            startDataTransferM(IsLoad, Register::FromCode(srcOp.base()), mode);
+            startDataTransferM(IsLoad, src.base, mode);
             transferReg(val.payloadReg());
             transferReg(val.typeReg());
             finishDataTransfer();
             return;
         }
     }
     // Ensure that loading the payload does not erase the pointer to the Value
     // in memory.
-    if (Register::FromCode(type.base()) != val.payloadReg()) {
+    if (type.base != val.payloadReg()) {
         ma_ldr(payload, val.payloadReg());
         ma_ldr(type, val.typeReg());
     } else {
         ma_ldr(type, val.typeReg());
         ma_ldr(payload, val.payloadReg());
     }
 }
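
loadValue thus has three tiers: a single ldrd when the payload/type pair occupies an even/odd register pair and the displacement fits the extended transfer's +/-255 range, a two-register ldm when the displacement is one of -8, -4, 0, 4, and otherwise two plain ldr ordered so the base register is not clobbered before the second load. Why exactly those four displacements are the ldm-able ones, as a sketch (local enum mirroring DTMMode, for exposition only):

    enum LdmMode { IA, IB, DA, DB };

    // Lowest address a two-register LDM reads, relative to the base. The
    // pair {lowest, lowest + 4} must be {payload, type}, so the usable
    // displacements are exactly -8 (DB), -4 (DA), 0 (IA) and 4 (IB).
    static int LowestWordOffset(LdmMode m)
    {
        switch (m) {
          case IA: return 0;   // [base],     [base + 4]
          case IB: return 4;   // [base + 4], [base + 8]
          case DA: return -4;  // [base - 4], [base]
          case DB: return -8;  // [base - 8], [base - 4]
        }
        return 0; // Unreachable.
    }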
 
@@ -3557,50 +3548,42 @@ MacroAssemblerARMCompat::tagValue(JSValu
 void
 MacroAssemblerARMCompat::pushValue(ValueOperand val) {
     ma_push(val.typeReg());
     ma_push(val.payloadReg());
 }
 void
 MacroAssemblerARMCompat::pushValue(const Address& addr)
 {
-    Operand srcOp = Operand(addr);
-    Operand type = ToType(srcOp);
-    Operand payload = ToPayloadAfterStackPush(srcOp);
-
-    ma_ldr(type, ScratchRegister);
+    ma_ldr(ToType(addr), ScratchRegister);
     ma_push(ScratchRegister);
-    ma_ldr(payload, ScratchRegister);
+    ma_ldr(ToPayloadAfterStackPush(addr), ScratchRegister);
     ma_push(ScratchRegister);
 }
 
 void
 MacroAssemblerARMCompat::popValue(ValueOperand val) {
     ma_pop(val.payloadReg());
     ma_pop(val.typeReg());
 }
 void
-MacroAssemblerARMCompat::storePayload(const Value& val, Operand dest)
+MacroAssemblerARMCompat::storePayload(const Value& val, const Address& dest)
 {
     jsval_layout jv = JSVAL_TO_IMPL(val);
     if (val.isMarkable())
         ma_mov(ImmGCPtr((gc::Cell*)jv.s.payload.ptr), secondScratchReg_);
     else
         ma_mov(Imm32(jv.s.payload.i32), secondScratchReg_);
     ma_str(secondScratchReg_, ToPayload(dest));
 }
-void
-MacroAssemblerARMCompat::storePayload(Register src, Operand dest)
-{
-    if (dest.getTag() == Operand::MEM) {
-        ma_str(src, ToPayload(dest));
-        return;
-    }
-    MOZ_CRASH("why do we do all of these things?");
-
+
+void
+MacroAssemblerARMCompat::storePayload(Register src, const Address& dest)
+{
+    ma_str(src, ToPayload(dest));
 }
 
 void
 MacroAssemblerARMCompat::storePayload(const Value& val, const BaseIndex& dest)
 {
     unsigned shift = ScaleToShift(dest.scale);
 
     jsval_layout jv = JSVAL_TO_IMPL(val);
@@ -3646,25 +3629,20 @@ MacroAssemblerARMCompat::storePayload(Re
     // never come up, and this is one less code path to get wrong.
     as_dtr(IsStore, 32, Offset, src, DTRAddr(dest.base, DtrRegImmShift(dest.index, LSL, shift)));
 
     if (dest.offset != 0)
         ma_sub(dest.base, Imm32(dest.offset), dest.base);
 }
 
 void
-MacroAssemblerARMCompat::storeTypeTag(ImmTag tag, Operand dest) {
-    if (dest.getTag() == Operand::MEM) {
-        ma_mov(tag, secondScratchReg_);
-        ma_str(secondScratchReg_, ToType(dest));
-        return;
-    }
-
-    MOZ_CRASH("why do we do all of these things?");
-
+MacroAssemblerARMCompat::storeTypeTag(ImmTag tag, const Address& dest)
+{
+    ma_mov(tag, secondScratchReg_);
+    ma_str(secondScratchReg_, ToType(dest));
 }
 
 void
 MacroAssemblerARMCompat::storeTypeTag(ImmTag tag, const BaseIndex& dest)
 {
     Register base = dest.base;
     Register index = dest.index;
     unsigned shift = ScaleToShift(dest.scale);
@@ -4205,62 +4183,62 @@ MacroAssemblerARMCompat::handleFailureWi
     callWithABI(handler);
 
     Label entryFrame;
     Label catch_;
     Label finally;
     Label return_;
     Label bailout;
 
-    ma_ldr(Operand(sp, offsetof(ResumeFromException, kind)), r0);
+    ma_ldr(Address(sp, offsetof(ResumeFromException, kind)), r0);
     branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME), &entryFrame);
     branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
     branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
     branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
     branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
 
     breakpoint(); // Invalid kind.
 
     // No exception handler. Load the error value, load the new stack pointer
     // and return from the entry frame.
     bind(&entryFrame);
     moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
-    ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp);
+    ma_ldr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);
 
     // We're going to be returning by the Ion calling convention, which returns
     // by loading the return address into pc: ldr pc, [sp], #4 (see below).
     as_dtr(IsLoad, 32, PostIndex, pc, DTRAddr(sp, DtrOffImm(4)));
 
     // If we found a catch handler, this must be a baseline frame. Restore state
     // and jump to the catch block.
     bind(&catch_);
-    ma_ldr(Operand(sp, offsetof(ResumeFromException, target)), r0);
-    ma_ldr(Operand(sp, offsetof(ResumeFromException, framePointer)), r11);
-    ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp);
+    ma_ldr(Address(sp, offsetof(ResumeFromException, target)), r0);
+    ma_ldr(Address(sp, offsetof(ResumeFromException, framePointer)), r11);
+    ma_ldr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);
     jump(r0);
 
     // If we found a finally block, this must be a baseline frame. Push two
     // values expected by JSOP_RETSUB: BooleanValue(true) and the exception.
     bind(&finally);
     ValueOperand exception = ValueOperand(r1, r2);
     loadValue(Operand(sp, offsetof(ResumeFromException, exception)), exception);
 
-    ma_ldr(Operand(sp, offsetof(ResumeFromException, target)), r0);
-    ma_ldr(Operand(sp, offsetof(ResumeFromException, framePointer)), r11);
-    ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp);
+    ma_ldr(Address(sp, offsetof(ResumeFromException, target)), r0);
+    ma_ldr(Address(sp, offsetof(ResumeFromException, framePointer)), r11);
+    ma_ldr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);
 
     pushValue(BooleanValue(true));
     pushValue(exception);
     jump(r0);
 
     // Only used in debug mode. Return BaselineFrame->returnValue() to the
     // caller.
     bind(&return_);
-    ma_ldr(Operand(sp, offsetof(ResumeFromException, framePointer)), r11);
-    ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp);
+    ma_ldr(Address(sp, offsetof(ResumeFromException, framePointer)), r11);
+    ma_ldr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);
     loadValue(Address(r11, BaselineFrame::reverseOffsetOfReturnValue()), JSReturnOperand);
     ma_mov(r11, sp);
     pop(r11);
 
     // If profiling is enabled, then update the lastProfilingFrame to refer to caller
     // frame before returning.
     {
         Label skipProfilingInstrumentation;
@@ -4271,19 +4249,19 @@ MacroAssemblerARMCompat::handleFailureWi
         bind(&skipProfilingInstrumentation);
     }
 
     ret();
 
     // If we are bailing out to baseline to handle an exception, jump to the
     // bailout tail stub.
     bind(&bailout);
-    ma_ldr(Operand(sp, offsetof(ResumeFromException, bailoutInfo)), r2);
+    ma_ldr(Address(sp, offsetof(ResumeFromException, bailoutInfo)), r2);
     ma_mov(Imm32(BAILOUT_RETURN_OK), r0);
-    ma_ldr(Operand(sp, offsetof(ResumeFromException, target)), r1);
+    ma_ldr(Address(sp, offsetof(ResumeFromException, target)), r1);
     jump(r1);
 }
 
 Assembler::Condition
 MacroAssemblerARMCompat::testStringTruthy(bool truthy, const ValueOperand& value)
 {
     Register string = value.payloadReg();
     ma_dtr(IsLoad, string, Imm32(JSString::offsetOfLength()), ScratchRegister);
@@ -4304,17 +4282,17 @@ MacroAssemblerARMCompat::floor(FloatRegi
     ma_b(bail, Assembler::Overflow);
 
     // The argument is a positive number; truncation is the path to glory. Since
     // it is known to be > 0.0, explicitly convert to a larger range, so that a
     // value that rounds to INT_MAX is explicitly different from an argument
     // that clamps to INT_MAX.
     ma_vcvt_F64_U32(input, ScratchDoubleReg.uintOverlay());
     ma_vxfer(ScratchDoubleReg.uintOverlay(), output);
-    ma_mov(output, output, SetCond);
+    ma_mov(output, output, SetCC);
     ma_b(bail, Signed);
     ma_b(&fin);
 
     bind(&handleZero);
     // Move the top word of the double into the output reg; if it is non-zero,
     // then the original value was -0.0.
     as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1);
     ma_cmp(output, Imm32(0));
@@ -4323,20 +4301,20 @@ MacroAssemblerARMCompat::floor(FloatRegi
 
     bind(&handleNeg);
     // Negative case, negate, then start dancing.
     ma_vneg(input, input);
     ma_vcvt_F64_U32(input, ScratchDoubleReg.uintOverlay());
     ma_vxfer(ScratchDoubleReg.uintOverlay(), output);
     ma_vcvt_U32_F64(ScratchDoubleReg.uintOverlay(), ScratchDoubleReg);
     compareDouble(ScratchDoubleReg, input);
-    ma_add(output, Imm32(1), output, NoSetCond, NotEqual);
+    ma_add(output, Imm32(1), output, LeaveCC, NotEqual);
     // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
     // result will still be a negative number.
-    ma_rsb(output, Imm32(0), output, SetCond);
+    ma_rsb(output, Imm32(0), output, SetCC);
     // Flip the negated input back to its original value.
     ma_vneg(input, input);
     // If the result looks non-negative, then this value didn't actually fit
     // into the int range, and special handling is required. Zero is also caught
     // by this case, but floor of a negative number should never be zero.
     ma_b(bail, NotSigned);
 
     bind(&fin);
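
The negative branch computes floor(x) as -ceil(-x): truncate -x toward zero with the unsigned conversion, add one if the truncation lost a fractional part (detected by converting back and comparing), then negate. A scalar sketch of just that branch (hypothetical helper; assumes x < 0 and a result that fits):

    #include <stdint.h>

    static int32_t FloorOfNegative(double x)
    {
        double nx = -x;            // nx > 0, so unsigned truncation is safe.
        uint32_t t = (uint32_t)nx; // ma_vcvt_F64_U32 truncates toward zero.
        if ((double)t != nx)       // The compareDouble round-trip check:
            t += 1;                // add 1 when a fraction was lost.
        return -(int32_t)t;        // The final rsb output, #0.
    }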
@@ -4355,17 +4333,17 @@ MacroAssemblerARMCompat::floorf(FloatReg
     ma_b(bail, Assembler::Overflow);
 
     // The argument is a positive number; truncation is the path to glory. Since
     // it is known to be > 0.0, explicitly convert to a larger range, so that a
     // value that rounds to INT_MAX is explicitly different from an argument
     // that clamps to INT_MAX.
     ma_vcvt_F32_U32(input, ScratchFloat32Reg.uintOverlay());
     ma_vxfer(VFPRegister(ScratchFloat32Reg).uintOverlay(), output);
-    ma_mov(output, output, SetCond);
+    ma_mov(output, output, SetCC);
     ma_b(bail, Signed);
     ma_b(&fin);
 
     bind(&handleZero);
     // Move the top word of the double into the output reg; if it is non-zero,
     // then the original value was -0.0.
     as_vxfer(output, InvalidReg, VFPRegister(input).singleOverlay(), FloatToCore, Always, 0);
     ma_cmp(output, Imm32(0));
@@ -4374,20 +4352,20 @@ MacroAssemblerARMCompat::floorf(FloatReg
 
     bind(&handleNeg);
     // Negative case, negate, then start dancing.
     ma_vneg_f32(input, input);
     ma_vcvt_F32_U32(input, ScratchFloat32Reg.uintOverlay());
     ma_vxfer(VFPRegister(ScratchFloat32Reg).uintOverlay(), output);
     ma_vcvt_U32_F32(ScratchFloat32Reg.uintOverlay(), ScratchFloat32Reg);
     compareFloat(ScratchFloat32Reg, input);
-    ma_add(output, Imm32(1), output, NoSetCond, NotEqual);
+    ma_add(output, Imm32(1), output, LeaveCC, NotEqual);
     // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
     // result will still be a negative number.
-    ma_rsb(output, Imm32(0), output, SetCond);
+    ma_rsb(output, Imm32(0), output, SetCC);
     // Flip the negated input back to its original value.
     ma_vneg_f32(input, input);
     // If the result looks non-negative, then this value didn't actually fit
     // into the int range, and special handling is required. Zero is also caught
     // by this case, but floor of a negative number should never be zero.
     ma_b(bail, NotSigned);
 
     bind(&fin);
@@ -4413,17 +4391,17 @@ MacroAssemblerARMCompat::ceil(FloatRegis
     ma_b(bail, Assembler::GreaterThan);
 
     // We are in the ]-Inf; -1] range: ceil(x) == -floor(-x) and floor can be
     // computed with direct truncation here (x > 0).
     ma_vneg(input, ScratchDoubleReg);
     FloatRegister ScratchUIntReg = ScratchDoubleReg.uintOverlay();
     ma_vcvt_F64_U32(ScratchDoubleReg, ScratchUIntReg);
     ma_vxfer(ScratchUIntReg, output);
-    ma_neg(output, output, SetCond);
+    ma_neg(output, output, SetCC);
     ma_b(bail, NotSigned);
     ma_b(&fin);
 
     // Test for 0.0 / -0.0: if the top word of the input double is not zero,
     // then it was -0 and we need to bail out.
     bind(&handleZero);
     as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1);
     ma_cmp(output, Imm32(0));
@@ -4432,19 +4410,19 @@ MacroAssemblerARMCompat::ceil(FloatRegis
 
     // We are in the ]0; +inf] range: truncate integer values, maybe add 1 for
     // non integer values, maybe bail if overflow.
     bind(&handlePos);
     ma_vcvt_F64_U32(input, ScratchUIntReg);
     ma_vxfer(ScratchUIntReg, output);
     ma_vcvt_U32_F64(ScratchUIntReg, ScratchDoubleReg);
     compareDouble(ScratchDoubleReg, input);
-    ma_add(output, Imm32(1), output, NoSetCond, NotEqual);
+    ma_add(output, Imm32(1), output, LeaveCC, NotEqual);
     // Bail out if the add overflowed or the result is non positive.
-    ma_mov(output, output, SetCond);
+    ma_mov(output, output, SetCC);
     ma_b(bail, Signed);
     ma_b(bail, Zero);
 
     bind(&fin);
 }
 
 void
 MacroAssemblerARMCompat::ceilf(FloatRegister input, Register output, Label* bail)
@@ -4466,17 +4444,17 @@ MacroAssemblerARMCompat::ceilf(FloatRegi
     ma_b(bail, Assembler::GreaterThan);
 
     // We are in the ]-Inf; -1] range: ceil(x) == -floor(-x) and floor can be
     // computed with direct truncation here (x > 0).
     ma_vneg_f32(input, ScratchFloat32Reg);
     FloatRegister ScratchUIntReg = ScratchDoubleReg.uintOverlay();
     ma_vcvt_F32_U32(ScratchFloat32Reg, ScratchUIntReg);
     ma_vxfer(ScratchUIntReg, output);
-    ma_neg(output, output, SetCond);
+    ma_neg(output, output, SetCC);
     ma_b(bail, NotSigned);
     ma_b(&fin);
 
     // Test for 0.0 / -0.0: if the top word of the input double is not zero,
     // then it was -0 and we need to bail out.
     bind(&handleZero);
     as_vxfer(output, InvalidReg, VFPRegister(input).singleOverlay(), FloatToCore, Always, 0);
     ma_cmp(output, Imm32(0));
@@ -4485,19 +4463,19 @@ MacroAssemblerARMCompat::ceilf(FloatRegi
 
     // We are in the ]0; +inf] range: truncate integer values, maybe add 1 for
     // non integer values, maybe bail if overflow.
     bind(&handlePos);
     ma_vcvt_F32_U32(input, ScratchUIntReg);
     ma_vxfer(ScratchUIntReg, output);
     ma_vcvt_U32_F32(ScratchUIntReg, ScratchFloat32Reg);
     compareFloat(ScratchFloat32Reg, input);
-    ma_add(output, Imm32(1), output, NoSetCond, NotEqual);
+    ma_add(output, Imm32(1), output, LeaveCC, NotEqual);
     // Bail out if the add overflowed or the result is non positive.
-    ma_mov(output, output, SetCond);
+    ma_mov(output, output, SetCC);
     ma_b(bail, Signed);
     ma_b(bail, Zero);
 
     bind(&fin);
 }
 
 CodeOffsetLabel
 MacroAssemblerARMCompat::toggledJump(Label* label)
@@ -4547,17 +4525,17 @@ MacroAssemblerARMCompat::round(FloatRegi
     // Add the biggest number less than 0.5 (not 0.5, because adding that to
     // the biggest number less than 0.5 would undesirably round up to 1), and
     // store the result into tmp.
     ma_vimm(GetBiggestNumberLessThan(0.5), ScratchDoubleReg);
     ma_vadd(ScratchDoubleReg, tmp, tmp);
 
     ma_vcvt_F64_U32(tmp, ScratchDoubleReg.uintOverlay());
     ma_vxfer(VFPRegister(ScratchDoubleReg).uintOverlay(), output);
-    ma_mov(output, output, SetCond);
+    ma_mov(output, output, SetCC);
     ma_b(bail, Signed);
     ma_b(&fin);
 
     bind(&handleZero);
     // Move the top word of the double into the output reg; if it is non-zero,
     // then the original value was -0.0.
     as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1);
     ma_cmp(output, Imm32(0));
@@ -4575,20 +4553,20 @@ MacroAssemblerARMCompat::round(FloatRegi
     ma_vcvt_F64_U32(tmp, ScratchDoubleReg.uintOverlay());
     ma_vxfer(VFPRegister(ScratchDoubleReg).uintOverlay(), output);
 
     // -output is now a correctly rounded value, unless the original value was
     // exactly halfway between two integers, at which point, it has been rounded
     // away from zero, when it should be rounded towards \infty.
     ma_vcvt_U32_F64(ScratchDoubleReg.uintOverlay(), ScratchDoubleReg);
     compareDouble(ScratchDoubleReg, tmp);
-    ma_sub(output, Imm32(1), output, NoSetCond, Equal);
+    ma_sub(output, Imm32(1), output, LeaveCC, Equal);
     // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
     // result will still be a negative number.
-    ma_rsb(output, Imm32(0), output, SetCond);
+    ma_rsb(output, Imm32(0), output, SetCC);
 
     // If the result looks non-negative, then this value didn't actually fit
     // into the int range, and special handling is required, or it was zero,
     // which means the result is actually -0.0 which also requires special
     // handling.
     ma_b(bail, NotSigned);
 
     bind(&fin);
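
The addend really must be the largest double strictly below 0.5 rather than 0.5 itself; the input this protects is exactly that constant's own value, which must round to 0 but would reach 1.0 if 0.5 were added, because the exact sum falls halfway between two doubles and round-to-nearest-even resolves it upward. A checkable instance:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        double N = std::nextafter(0.5, 0.0); // 0.49999999999999994 = 0.5 - 2^-54
        // N + 0.5 is exactly halfway between 1 - 2^-53 and 1.0; ties go to
        // the even mantissa, i.e. to 1.0, so truncation yields 1 (wrong).
        std::printf("%d\n", (int)(N + 0.5)); // prints 1
        // N + N == 1 - 2^-53 is exact and truncates to 0 (right).
        std::printf("%d\n", (int)(N + N));   // prints 0
        return 0;
    }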
@@ -4621,17 +4599,17 @@ MacroAssemblerARMCompat::roundf(FloatReg
     ma_vimm_f32(GetBiggestNumberLessThan(0.5f), ScratchFloat32Reg);
     ma_vadd_f32(ScratchFloat32Reg, input, tmp);
 
     // Note: it doesn't matter whether x + .5 === x or not here, as it doesn't
     // affect the semantics of the float to unsigned conversion (in particular,
     // we are not applying any fixup after the operation).
     ma_vcvt_F32_U32(tmp, ScratchFloat32Reg.uintOverlay());
     ma_vxfer(VFPRegister(ScratchFloat32Reg).uintOverlay(), output);
-    ma_mov(output, output, SetCond);
+    ma_mov(output, output, SetCC);
     ma_b(bail, Signed);
     ma_b(&fin);
 
     bind(&handleZero);
 
     // Move the whole float32 into the output reg; if it is non-zero, then the
     // original value was -0.0.
     as_vxfer(output, InvalidReg, input, FloatToCore, Always, 0);
@@ -4661,22 +4639,22 @@ MacroAssemblerARMCompat::roundf(FloatReg
     Label flipSign;
     ma_b(&flipSign, Equal);
 
     // -output is now a correctly rounded value, unless the original value was
     // exactly halfway between two integers, at which point, it has been rounded
     // away from zero, when it should be rounded towards \infty.
     ma_vcvt_U32_F32(tmp.uintOverlay(), tmp);
     compareFloat(tmp, ScratchFloat32Reg);
-    ma_sub(output, Imm32(1), output, NoSetCond, Equal);
+    ma_sub(output, Imm32(1), output, LeaveCC, Equal);
 
     // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
     // result will still be a negative number.
     bind(&flipSign);
-    ma_rsb(output, Imm32(0), output, SetCond);
+    ma_rsb(output, Imm32(0), output, SetCC);
 
     // If the result looks non-negative, then this value didn't actually fit
     // into the int range, and special handling is required, or it was zero,
     // which means the result is actually -0.0 which also requires special
     // handling.
     ma_b(bail, NotSigned);
 
     bind(&fin);
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -35,38 +35,37 @@ class MacroAssemblerARM : public Assembl
     // register defaults to lr, since it's non-allocatable (as it can be
     // clobbered by some instructions). Allow the baseline compiler to override
     // this though, since baseline IC stubs rely on lr holding the return
     // address.
     Register secondScratchReg_;
 
   public:
     // Higher level tag testing code.
-    Operand ToPayload(Operand base) {
+    // TODO: Can probably remove the Operand versions.
+    Operand ToPayload(Operand base) const {
         return Operand(Register::FromCode(base.base()), base.disp());
     }
-    Address ToPayload(Address base) {
-        return ToPayload(Operand(base)).toAddress();
+    Address ToPayload(const Address& base) const {
+        return base;
     }
 
   protected:
-    Operand ToType(Operand base) {
+    Operand ToType(Operand base) const {
         return Operand(Register::FromCode(base.base()), base.disp() + sizeof(void*));
     }
-    Address ToType(Address base) {
+    Address ToType(const Address& base) const {
         return ToType(Operand(base)).toAddress();
     }
 
-    Operand ToPayloadAfterStackPush(Operand base) {
-        Register baseReg = Register::FromCode(base.base());
+    Address ToPayloadAfterStackPush(const Address& base) const {
         // If we are based on StackPointer, pass over the type tag just pushed.
-        if (baseReg == StackPointer)
-            return Operand(Register::FromCode(base.base()), base.disp() + sizeof(void*));
-        else
-            return ToPayload(base);
+        if (base.base == StackPointer)
+            return Address(base.base, base.offset + sizeof(void*));
+        return ToPayload(base);
     }
 
   public:
     MacroAssemblerARM()
       : secondScratchReg_(lr)
     { }
 
     void setSecondScratchReg(Register reg) {
@@ -101,27 +100,27 @@ class MacroAssemblerARM : public Assembl
     void negateDouble(FloatRegister reg);
     void inc64(AbsoluteAddress dest);
 
     // Somewhat direct wrappers for the low-level assembler bitops
     // functions. Attempt to encode a virtual ALU instruction using two real
     // instructions.
   private:
     bool alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op,
-                 SetCond_ sc, Condition c);
+                 SBit s, Condition c);
 
   public:
     void ma_alu(Register src1, Operand2 op2, Register dest, ALUOp op,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
     void ma_alu(Register src1, Imm32 imm, Register dest,
                 ALUOp op,
-                SetCond_ sc =  NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
     void ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
     void ma_nop();
 
     void ma_movPatchable(Imm32 imm, Register dest, Assembler::Condition c,
                          RelocStyle rs);
     void ma_movPatchable(ImmPtr imm, Register dest, Assembler::Condition c,
                          RelocStyle rs);
 
     static void ma_mov_patch(Imm32 imm, Register dest, Assembler::Condition c,
@@ -131,22 +130,22 @@ class MacroAssemblerARM : public Assembl
 
     // These should likely be wrapped up as a set of macros or something like
     // that. I cannot think of a good reason to explicitly have all of this
     // code.
 
     // ALU based ops
     // mov
     void ma_mov(Register src, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
     void ma_mov(Imm32 imm, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
     void ma_mov(ImmWord imm, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
     void ma_mov(ImmGCPtr ptr, Register dest);
 
     // Shifts (just a move with a shifting op2)
     void ma_lsl(Imm32 shift, Register src, Register dst);
     void ma_lsr(Imm32 shift, Register src, Register dst);
     void ma_asr(Imm32 shift, Register src, Register dst);
     void ma_ror(Imm32 shift, Register src, Register dst);
@@ -155,108 +154,108 @@ class MacroAssemblerARM : public Assembl
     void ma_lsl(Register shift, Register src, Register dst);
     void ma_lsr(Register shift, Register src, Register dst);
     void ma_asr(Register shift, Register src, Register dst);
     void ma_ror(Register shift, Register src, Register dst);
     void ma_rol(Register shift, Register src, Register dst);
 
     // Move not (dest <- ~src)
     void ma_mvn(Imm32 imm, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
 
     void ma_mvn(Register src1, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
     // Negate (dest <- -src) implemented as rsb dest, src, 0
     void ma_neg(Register src, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
     // And
     void ma_and(Register src, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
     void ma_and(Register src1, Register src2, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
     void ma_and(Imm32 imm, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
     void ma_and(Imm32 imm, Register src1, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
 
 
     // Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2)
     void ma_bic(Imm32 imm, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
     // Exclusive or
     void ma_eor(Register src, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
     void ma_eor(Register src1, Register src2, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
     void ma_eor(Imm32 imm, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
     void ma_eor(Imm32 imm, Register src1, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
 
     // Or
     void ma_orr(Register src, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
     void ma_orr(Register src1, Register src2, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
     void ma_orr(Imm32 imm, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
     void ma_orr(Imm32 imm, Register src1, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always);
+                SBit s = LeaveCC, Condition c = Always);
 
 
     // Arithmetic based ops.
     // Add with carry:
-    void ma_adc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
-    void ma_adc(Register src, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
-    void ma_adc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
+    void ma_adc(Imm32 imm, Register dest, SBit s = LeaveCC, Condition c = Always);
+    void ma_adc(Register src, Register dest, SBit s = LeaveCC, Condition c = Always);
+    void ma_adc(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
 
     // Add:
-    void ma_add(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
-    void ma_add(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
-    void ma_add(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
-    void ma_add(Register src1, Operand op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
-    void ma_add(Register src1, Imm32 op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
+    void ma_add(Imm32 imm, Register dest, SBit s = LeaveCC, Condition c = Always);
+    void ma_add(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
+    void ma_add(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
+    void ma_add(Register src1, Operand op, Register dest, SBit s = LeaveCC, Condition c = Always);
+    void ma_add(Register src1, Imm32 op, Register dest, SBit s = LeaveCC, Condition c = Always);
 
     // Subtract with carry:
-    void ma_sbc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
-    void ma_sbc(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
-    void ma_sbc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
+    void ma_sbc(Imm32 imm, Register dest, SBit s = LeaveCC, Condition c = Always);
+    void ma_sbc(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
+    void ma_sbc(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
 
     // Subtract:
-    void ma_sub(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
-    void ma_sub(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
-    void ma_sub(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
-    void ma_sub(Register src1, Operand op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
-    void ma_sub(Register src1, Imm32 op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
+    void ma_sub(Imm32 imm, Register dest, SBit s = LeaveCC, Condition c = Always);
+    void ma_sub(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
+    void ma_sub(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
+    void ma_sub(Register src1, Operand op, Register dest, SBit s = LeaveCC, Condition c = Always);
+    void ma_sub(Register src1, Imm32 op, Register dest, SBit s = LeaveCC, Condition c = Always);
 
     // Reverse subtract:
-    void ma_rsb(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
-    void ma_rsb(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
-    void ma_rsb(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
-    void ma_rsb(Register src1, Imm32 op2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
+    void ma_rsb(Imm32 imm, Register dest, SBit s = LeaveCC, Condition c = Always);
+    void ma_rsb(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
+    void ma_rsb(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
+    void ma_rsb(Register src1, Imm32 op2, Register dest, SBit s = LeaveCC, Condition c = Always);
 
     // Reverse subtract with carry:
-    void ma_rsc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
-    void ma_rsc(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
-    void ma_rsc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
+    void ma_rsc(Imm32 imm, Register dest, SBit s = LeaveCC, Condition c = Always);
+    void ma_rsc(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
+    void ma_rsc(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
 
     // Compares/tests.
     // Compare negative (sets condition codes as src1 + src2 would):
     void ma_cmn(Register src1, Imm32 imm, Condition c = Always);
     void ma_cmn(Register src1, Register src2, Condition c = Always);
     void ma_cmn(Register src1, Operand op, Condition c = Always);
 
     // Compare (src1 - src2):
@@ -304,21 +303,21 @@ class MacroAssemblerARM : public Assembl
     void ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt,
                 Index mode = Offset, Condition cc = Always);
 
     void ma_dtr(LoadStore ls, Register rn, Register rm, Register rt,
                 Index mode = Offset, Condition cc = Always);
 
 
     void ma_str(Register rt, DTRAddr addr, Index mode = Offset, Condition cc = Always);
-    void ma_str(Register rt, const Operand& addr, Index mode = Offset, Condition cc = Always);
-    void ma_dtr(LoadStore ls, Register rt, const Operand& addr, Index mode, Condition cc);
+    void ma_str(Register rt, const Address& addr, Index mode = Offset, Condition cc = Always);
+    void ma_dtr(LoadStore ls, Register rt, const Address& addr, Index mode, Condition cc);
 
     void ma_ldr(DTRAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
-    void ma_ldr(const Operand& addr, Register rt, Index mode = Offset, Condition cc = Always);
+    void ma_ldr(const Address& addr, Register rt, Index mode = Offset, Condition cc = Always);
 
     void ma_ldrb(DTRAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
     void ma_ldrh(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
     void ma_ldrsh(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
     void ma_ldrsb(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
     void ma_ldrd(EDtrAddr addr, Register rt, DebugOnly<Register> rt2, Index mode = Offset, Condition cc = Always);
     void ma_strb(Register rt, DTRAddr addr, Index mode = Offset, Condition cc = Always);
     void ma_strh(Register rt, EDtrAddr addr, Index mode = Offset, Condition cc = Always);
@@ -402,25 +401,25 @@ class MacroAssemblerARM : public Assembl
     void ma_vcvt_U32_F32(FloatRegister src, FloatRegister dest, Condition cc = Always);
 
 
     void ma_vxfer(VFPRegister src, Register dest, Condition cc = Always);
     void ma_vxfer(VFPRegister src, Register dest1, Register dest2, Condition cc = Always);
 
     void ma_vxfer(Register src1, Register src2, FloatRegister dest, Condition cc = Always);
 
-    BufferOffset ma_vdtr(LoadStore ls, const Operand& addr, VFPRegister dest, Condition cc = Always);
+    BufferOffset ma_vdtr(LoadStore ls, const Address& addr, VFPRegister dest, Condition cc = Always);
 
 
     BufferOffset ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc = Always);
-    BufferOffset ma_vldr(const Operand& addr, VFPRegister dest, Condition cc = Always);
+    BufferOffset ma_vldr(const Address& addr, VFPRegister dest, Condition cc = Always);
     BufferOffset ma_vldr(VFPRegister src, Register base, Register index, int32_t shift = defaultShift, Condition cc = Always);
 
     BufferOffset ma_vstr(VFPRegister src, VFPAddr addr, Condition cc = Always);
-    BufferOffset ma_vstr(VFPRegister src, const Operand& addr, Condition cc = Always);
+    BufferOffset ma_vstr(VFPRegister src, const Address& addr, Condition cc = Always);
 
     BufferOffset ma_vstr(VFPRegister src, Register base, Register index, int32_t shift,
                          int32_t offset, Condition cc = Always);
     // Calls an Ion function, assuming that the stack is untouched (8-byte
     // aligned).
     void ma_callJit(const Register reg);
     // Calls an Ion function, assuming that sp has already been decremented.
     void ma_callJitNoPush(const Register reg);
@@ -660,18 +659,18 @@ class MacroAssemblerARMCompat : public M
     }
     void push(ImmGCPtr imm) {
         ma_mov(imm, ScratchRegister);
         ma_push(ScratchRegister);
     }
     void push(ImmMaybeNurseryPtr imm) {
         push(noteMaybeNurseryPtr(imm));
     }
-    void push(const Address& address) {
-        ma_ldr(Operand(address.base, address.offset), ScratchRegister);
+    void push(const Address& addr) {
+        ma_ldr(addr, ScratchRegister);
         ma_push(ScratchRegister);
     }
     void push(Register reg) {
         ma_push(reg);
     }
     void push(FloatRegister reg) {
         ma_vpush(VFPRegister(reg));
     }
@@ -724,35 +723,35 @@ class MacroAssemblerARMCompat : public M
         as_b(label);
     }
     void jump(JitCode* code) {
         branch(code);
     }
     void jump(Register reg) {
         ma_bx(reg);
     }
-    void jump(const Address& address) {
-        ma_ldr(Operand(address.base, address.offset), ScratchRegister);
+    void jump(const Address& addr) {
+        ma_ldr(addr, ScratchRegister);
         ma_bx(ScratchRegister);
     }
 
     void neg32(Register reg) {
-        ma_neg(reg, reg, SetCond);
+        ma_neg(reg, reg, SetCC);
     }
     void negl(Register reg) {
-        ma_neg(reg, reg, SetCond);
+        ma_neg(reg, reg, SetCC);
     }
     void test32(Register lhs, Register rhs) {
         ma_tst(lhs, rhs);
     }
     void test32(Register lhs, Imm32 imm) {
         ma_tst(lhs, imm);
     }
-    void test32(const Address& address, Imm32 imm) {
-        ma_ldr(Operand(address.base, address.offset), ScratchRegister);
+    void test32(const Address& addr, Imm32 imm) {
+        ma_ldr(addr, ScratchRegister);
         ma_tst(ScratchRegister, imm);
     }
     void testPtr(Register lhs, Register rhs) {
         test32(lhs, rhs);
     }
 
     // Returns the register containing the type tag.
     Register splitTagForTest(const ValueOperand& value) {
@@ -868,17 +867,17 @@ class MacroAssemblerARMCompat : public M
     Register extractTag(const Address& address, Register scratch);
     Register extractTag(const BaseIndex& address, Register scratch);
     Register extractTag(const ValueOperand& value, Register scratch) {
         return value.typeReg();
     }
 
     void boolValueToDouble(const ValueOperand& operand, FloatRegister dest);
     void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest);
-    void loadInt32OrDouble(const Operand& src, FloatRegister dest);
+    void loadInt32OrDouble(const Address& src, FloatRegister dest);
     void loadInt32OrDouble(Register base, Register index,
                            FloatRegister dest, int32_t shift = defaultShift);
     void loadConstantDouble(double dp, FloatRegister dest);
     // Treat the value as a boolean, and set condition codes accordingly.
     Condition testInt32Truthy(bool truthy, const ValueOperand& operand);
     Condition testBooleanTruthy(bool truthy, const ValueOperand& operand);
     Condition testDoubleTruthy(bool truthy, FloatRegister reg);
     Condition testStringTruthy(bool truthy, const ValueOperand& value);
@@ -904,26 +903,26 @@ class MacroAssemblerARMCompat : public M
     void branch32(Condition cond, Register lhs, Imm32 imm, Label* label) {
         ma_cmp(lhs, imm);
         ma_b(label, cond);
     }
     void branch32(Condition cond, const Operand& lhs, Register rhs, Label* label) {
         if (lhs.getTag() == Operand::OP2) {
             branch32(cond, lhs.toReg(), rhs, label);
         } else {
-            ma_ldr(lhs, ScratchRegister);
+            ma_ldr(lhs.toAddress(), ScratchRegister);
             branch32(cond, ScratchRegister, rhs, label);
         }
     }
     void branch32(Condition cond, const Operand& lhs, Imm32 rhs, Label* label) {
         if (lhs.getTag() == Operand::OP2) {
             branch32(cond, lhs.toReg(), rhs, label);
         } else {
             // branch32 will use ScratchRegister.
-            ma_ldr(lhs, secondScratchReg_);
+            ma_ldr(lhs.toAddress(), secondScratchReg_);
             branch32(cond, secondScratchReg_, rhs, label);
         }
     }
     void branch32(Condition cond, const Address& lhs, Register rhs, Label* label) {
         load32(lhs, ScratchRegister);
         branch32(cond, ScratchRegister, rhs, label);
     }
     void branch32(Condition cond, const Address& lhs, Imm32 rhs, Label* label) {
@@ -1132,17 +1131,17 @@ class MacroAssemblerARMCompat : public M
     void branch32(Condition cond, AsmJSAbsoluteAddress addr, Imm32 imm, Label* label) {
         loadPtr(addr, ScratchRegister);
         ma_cmp(ScratchRegister, imm);
         ma_b(label, cond);
     }
 
     void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
         if (dest.isFloat())
-            loadInt32OrDouble(Operand(address), dest.fpu());
+            loadInt32OrDouble(address, dest.fpu());
         else
             ma_ldr(address, dest.gpr());
     }
 
     void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
         if (dest.isFloat())
             loadInt32OrDouble(address.base, address.index, dest.fpu(), address.scale);
         else
@@ -1190,39 +1189,36 @@ class MacroAssemblerARMCompat : public M
         }
 
         if (s0 != d0)
             ma_mov(s0, d0);
         if (s1 != d1)
             ma_mov(s1, d1);
     }
 
-    void storeValue(ValueOperand val, Operand dst);
+    void storeValue(ValueOperand val, const Address& dst);
     void storeValue(ValueOperand val, const BaseIndex& dest);
     void storeValue(JSValueType type, Register reg, BaseIndex dest) {
         ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, OpAdd);
         storeValue(type, reg, Address(ScratchRegister, dest.offset));
     }
-    void storeValue(ValueOperand val, const Address& dest) {
-        storeValue(val, Operand(dest));
-    }
     void storeValue(JSValueType type, Register reg, Address dest) {
         ma_str(reg, dest);
         ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), secondScratchReg_);
         ma_str(secondScratchReg_, Address(dest.base, dest.offset + 4));
     }
-    void storeValue(const Value& val, Address dest) {
+    void storeValue(const Value& val, const Address& dest) {
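+        // Write the tag word and then the payload word through the second
+        // scratch register, using ToType/ToPayload to compute the offsets.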
         jsval_layout jv = JSVAL_TO_IMPL(val);
         ma_mov(Imm32(jv.s.tag), secondScratchReg_);
-        ma_str(secondScratchReg_, Address(dest.base, dest.offset + 4));
+        ma_str(secondScratchReg_, ToType(dest));
         if (val.isMarkable())
             ma_mov(ImmGCPtr(reinterpret_cast<gc::Cell*>(val.toGCThing())), secondScratchReg_);
         else
             ma_mov(Imm32(jv.s.payload.i32), secondScratchReg_);
-        ma_str(secondScratchReg_, dest);
+        ma_str(secondScratchReg_, ToPayload(dest));
     }
     void storeValue(const Value& val, BaseIndex dest) {
         ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, OpAdd);
         storeValue(val, Address(ScratchRegister, dest.offset));
     }
 
     void loadValue(Address src, ValueOperand val);
     void loadValue(Operand dest, ValueOperand val) {
@@ -1242,21 +1238,21 @@ class MacroAssemblerARMCompat : public M
             push(Imm32(jv.s.payload.i32));
     }
     void pushValue(JSValueType type, Register reg) {
         push(ImmTag(JSVAL_TYPE_TO_TAG(type)));
         ma_push(reg);
     }
     void pushValue(const Address& addr);
 
-    void storePayload(const Value& val, Operand dest);
-    void storePayload(Register src, Operand dest);
+    void storePayload(const Value& val, const Address& dest);
+    void storePayload(Register src, const Address& dest);
     void storePayload(const Value& val, const BaseIndex& dest);
     void storePayload(Register src, const BaseIndex& dest);
-    void storeTypeTag(ImmTag tag, Operand dest);
+    void storeTypeTag(ImmTag tag, const Address& dest);
     void storeTypeTag(ImmTag tag, const BaseIndex& dest);
 
     void makeFrameDescriptor(Register frameSizeReg, FrameType type) {
         ma_lsl(Imm32(FRAMESIZE_SHIFT), frameSizeReg, frameSizeReg);
         ma_orr(Imm32(type), frameSizeReg);
     }
 
     void handleFailureWithHandlerTail(void* handler);
@@ -1438,30 +1434,30 @@ class MacroAssemblerARMCompat : public M
 
     template <typename T> void storePtr(ImmWord imm, T address);
     template <typename T> void storePtr(ImmPtr imm, T address);
     template <typename T> void storePtr(ImmGCPtr imm, T address);
     void storePtr(Register src, const Address& address);
     void storePtr(Register src, const BaseIndex& address);
     void storePtr(Register src, AbsoluteAddress dest);
     void storeDouble(FloatRegister src, Address addr) {
-        ma_vstr(src, Operand(addr));
+        ma_vstr(src, addr);
     }
     void storeDouble(FloatRegister src, BaseIndex addr) {
         uint32_t scale = Imm32::ShiftOf(addr.scale).value;
         ma_vstr(src, addr.base, addr.index, scale, addr.offset);
     }
     void moveDouble(FloatRegister src, FloatRegister dest) {
         ma_vmov(src, dest);
     }
 
-    void storeFloat32(FloatRegister src, Address addr) {
-        ma_vstr(VFPRegister(src).singleOverlay(), Operand(addr));
+    void storeFloat32(FloatRegister src, const Address& addr) {
+        ma_vstr(VFPRegister(src).singleOverlay(), addr);
     }
-    void storeFloat32(FloatRegister src, BaseIndex addr) {
+    void storeFloat32(FloatRegister src, const BaseIndex& addr) {
         uint32_t scale = Imm32::ShiftOf(addr.scale).value;
         ma_vstr(VFPRegister(src).singleOverlay(), addr.base, addr.index, scale, addr.offset);
     }
 
   private:
     template<typename T>
     Register computePointer(const T& src, Register r);
 
@@ -1697,19 +1693,19 @@ class MacroAssemblerARMCompat : public M
     template <typename T, typename S>
     void atomicXor32(const S& value, const T& mem) {
         atomicEffectOp(4, AtomicFetchXorOp, value, mem);
     }
 
     void clampIntToUint8(Register reg) {
         // Look at (reg >> 8): if it is 0, then reg shouldn't be clamped; if
         // it is < 0, then we want to clamp to 0; otherwise, clamp to 255.
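         // Worked example: reg = 300 yields (300 >> 8) = 1, NotEqual, so reg
         // is clamped to 0xff; reg = -5 yields -1, both NotEqual and Signed,
         // so reg is first set to 0xff and then overwritten with 0.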
-        as_mov(ScratchRegister, asr(reg, 8), SetCond);
-        ma_mov(Imm32(0xff), reg, NoSetCond, NotEqual);
-        ma_mov(Imm32(0), reg, NoSetCond, Signed);
+        as_mov(ScratchRegister, asr(reg, 8), SetCC);
+        ma_mov(Imm32(0xff), reg, LeaveCC, NotEqual);
+        ma_mov(Imm32(0), reg, LeaveCC, Signed);
     }
 
     void incrementInt32Value(const Address& addr) {
         add32(Imm32(1), ToPayload(addr));
     }
 
     void cmp32(Register lhs, Imm32 rhs);
     void cmp32(Register lhs, Register rhs);
@@ -1777,17 +1773,17 @@ class MacroAssemblerARMCompat : public M
     // If source is a double, load it into dest. If source is int32, convert it
     // to double. Else, branch to failure.
     void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure);
 
     void
     emitSet(Assembler::Condition cond, Register dest)
     {
         ma_mov(Imm32(0), dest);
-        ma_mov(Imm32(1), dest, NoSetCond, cond);
+        ma_mov(Imm32(1), dest, LeaveCC, cond);
     }
 
     template <typename T1, typename T2>
     void cmpPtrSet(Assembler::Condition cond, T1 lhs, T2 rhs, Register dest)
     {
         cmpPtr(lhs, rhs);
         emitSet(cond, dest);
     }
@@ -1854,22 +1850,22 @@ class MacroAssemblerARMCompat : public M
     void callWithABI(const Address& fun, MoveOp::Type result = MoveOp::GENERAL);
     void callWithABI(Register fun, MoveOp::Type result = MoveOp::GENERAL);
 
     CodeOffsetLabel labelForPatch() {
         return CodeOffsetLabel(nextOffset().getOffset());
     }
 
     void computeEffectiveAddress(const Address& address, Register dest) {
-        ma_add(address.base, Imm32(address.offset), dest, NoSetCond);
+        ma_add(address.base, Imm32(address.offset), dest, LeaveCC);
     }
     void computeEffectiveAddress(const BaseIndex& address, Register dest) {
-        ma_alu(address.base, lsl(address.index, address.scale), dest, OpAdd, NoSetCond);
+        ma_alu(address.base, lsl(address.index, address.scale), dest, OpAdd, LeaveCC);
         if (address.offset)
-            ma_add(dest, Imm32(address.offset), dest, NoSetCond);
+            ma_add(dest, Imm32(address.offset), dest, LeaveCC);
     }
     void floor(FloatRegister input, Register output, Label* handleNotAnInt);
     void floorf(FloatRegister input, Register output, Label* handleNotAnInt);
     void ceil(FloatRegister input, Register output, Label* handleNotAnInt);
     void ceilf(FloatRegister input, Register output, Label* handleNotAnInt);
     void round(FloatRegister input, Register output, Label* handleNotAnInt, FloatRegister tmp);
     void roundf(FloatRegister input, Register output, Label* handleNotAnInt, FloatRegister tmp);
 
@@ -1896,29 +1892,21 @@ class MacroAssemblerARMCompat : public M
         loadPtr(Address(Source.base, Source.offset+4), lr);
         storePtr(lr, Address(Dest.base, Dest.offset+4));
     }
 
     void lea(Operand addr, Register dest) {
         ma_add(addr.baseReg(), Imm32(addr.disp()), dest);
     }
 
-    void stackCheck(ImmWord limitAddr, Label* label) {
-        int* foo = 0;
-        *foo = 5;
-        movePtr(limitAddr, ScratchRegister);
-        ma_ldr(Address(ScratchRegister, 0), ScratchRegister);
-        ma_cmp(ScratchRegister, StackPointer);
-        ma_b(label, Assembler::AboveOrEqual);
-    }
     void abiret() {
         as_bx(lr);
     }
 
-    void ma_storeImm(Imm32 c, const Operand& dest) {
+    void ma_storeImm(Imm32 c, const Address& dest) {
         ma_mov(c, lr);
         ma_str(lr, dest);
     }
     BufferOffset ma_BoundsCheck(Register bounded) {
         return as_cmp(bounded, Imm8(0));
     }
 
     void moveFloat32(FloatRegister src, FloatRegister dest) {
--- a/js/src/jit/arm/MoveEmitter-arm.cpp
+++ b/js/src/jit/arm/MoveEmitter-arm.cpp
@@ -33,53 +33,46 @@ MoveEmitterARM::emit(const MoveResolver&
         emit(moves.getMove(i));
 }
 
 MoveEmitterARM::~MoveEmitterARM()
 {
     assertDone();
 }
 
-Operand
+Address
 MoveEmitterARM::cycleSlot(uint32_t slot, uint32_t subslot) const
 {
     int32_t offset = masm.framePushed() - pushedAtCycle_;
     MOZ_ASSERT(offset < 4096 && offset > -4096);
-    return Operand(StackPointer, offset + slot * sizeof(double) + subslot);
+    return Address(StackPointer, offset + slot * sizeof(double) + subslot);
 }
 
-// THIS IS ALWAYS AN LDRAddr. It should not be wrapped in an operand, methinks.
-Operand
+Address
 MoveEmitterARM::spillSlot() const
 {
     int32_t offset = masm.framePushed() - pushedAtSpill_;
     MOZ_ASSERT(offset < 4096 && offset > -4096);
-    return Operand(StackPointer, offset);
+    return Address(StackPointer, offset);
 }
 
-Operand
-MoveEmitterARM::toOperand(const MoveOperand& operand, bool isFloat) const
+Address
+MoveEmitterARM::toAddress(const MoveOperand& operand) const
 {
-    if (operand.isMemoryOrEffectiveAddress()) {
-        if (operand.base() != StackPointer) {
-            MOZ_ASSERT(operand.disp() < 1024 && operand.disp() > -1024);
-            return Operand(operand.base(), operand.disp());
-        }
+    MOZ_ASSERT(operand.isMemoryOrEffectiveAddress());
 
-        MOZ_ASSERT(operand.disp() >= 0);
-
-        // Otherwise, the stack offset may need to be adjusted.
-        return Operand(StackPointer, operand.disp() + (masm.framePushed() - pushedAtStart_));
+    if (operand.base() != StackPointer) {
+        MOZ_ASSERT(operand.disp() < 1024 && operand.disp() > -1024);
+        return Operand(operand.base(), operand.disp()).toAddress();
     }
 
-    if (operand.isGeneralReg())
-        return Operand(operand.reg());
+    MOZ_ASSERT(operand.disp() >= 0);
 
-    MOZ_ASSERT(operand.isFloatReg());
-    return Operand(operand.floatReg());
+    // Otherwise, the stack offset may need to be adjusted.
+    return Address(StackPointer, operand.disp() + (masm.framePushed() - pushedAtStart_));
 }
 
 Register
 MoveEmitterARM::tempReg()
 {
     if (spilledReg_ != InvalidReg)
         return spilledReg_;
 
@@ -108,43 +101,43 @@ MoveEmitterARM::breakCycle(const MoveOpe
     //   (B -> A)
     //
     // This case handles (A -> B), which we reach first. We save B, then allow
     // the original move to continue.
     switch (type) {
       case MoveOp::FLOAT32:
         if (to.isMemory()) {
             VFPRegister temp = ScratchFloat32Reg;
-            masm.ma_vldr(toOperand(to, true), temp);
+            masm.ma_vldr(toAddress(to), temp);
             // Since it is uncertain whether the load will be aligned, just
             // fill both slots with the same value.
             masm.ma_vstr(temp, cycleSlot(slotId, 0));
             masm.ma_vstr(temp, cycleSlot(slotId, 4));
         } else {
             FloatRegister src = to.floatReg();
             // Just always store the largest possible size. Currently, this is
             // a double. When SIMD is added, two doubles will need to be stored.
             masm.ma_vstr(src.doubleOverlay(), cycleSlot(slotId, 0));
         }
         break;
       case MoveOp::DOUBLE:
         if (to.isMemory()) {
             FloatRegister temp = ScratchDoubleReg;
-            masm.ma_vldr(toOperand(to, true), temp);
+            masm.ma_vldr(toAddress(to), temp);
             masm.ma_vstr(temp, cycleSlot(slotId, 0));
         } else {
             masm.ma_vstr(to.floatReg().doubleOverlay(), cycleSlot(slotId, 0));
         }
         break;
       case MoveOp::INT32:
       case MoveOp::GENERAL:
         // A non-VFP value.
         if (to.isMemory()) {
             Register temp = tempReg();
-            masm.ma_ldr(toOperand(to, false), temp);
+            masm.ma_ldr(toAddress(to), temp);
             masm.ma_str(temp, cycleSlot(0, 0));
         } else {
             if (to.reg() == spilledReg_) {
                 // If the destination was spilled, restore it first.
                 masm.ma_ldr(spillSlot(), spilledReg_);
                 spilledReg_ = InvalidReg;
             }
             masm.ma_str(to.reg(), cycleSlot(0, 0));
@@ -165,31 +158,31 @@ MoveEmitterARM::completeCycle(const Move
     // This case handles (B -> A), which we reach last. We emit a move from the
     // saved value of B, to A.
     switch (type) {
       case MoveOp::FLOAT32:
       case MoveOp::DOUBLE:
         if (to.isMemory()) {
             FloatRegister temp = ScratchDoubleReg;
             masm.ma_vldr(cycleSlot(slotId, 0), temp);
-            masm.ma_vstr(temp, toOperand(to, true));
+            masm.ma_vstr(temp, toAddress(to));
         } else {
             uint32_t offset = 0;
             if ((!from.isMemory()) && from.floatReg().numAlignedAliased() == 1)
                 offset = sizeof(float);
             masm.ma_vldr(cycleSlot(slotId, offset), to.floatReg());
         }
         break;
       case MoveOp::INT32:
       case MoveOp::GENERAL:
         MOZ_ASSERT(slotId == 0);
         if (to.isMemory()) {
             Register temp = tempReg();
             masm.ma_ldr(cycleSlot(slotId, 0), temp);
-            masm.ma_str(temp, toOperand(to, false));
+            masm.ma_str(temp, toAddress(to));
         } else {
             if (to.reg() == spilledReg_) {
                 // Make sure we don't re-clobber the spilled register later.
                 spilledReg_ = InvalidReg;
             }
             masm.ma_ldr(cycleSlot(slotId, 0), to.reg());
         }
         break;
@@ -209,86 +202,75 @@ MoveEmitterARM::emitMove(const MoveOpera
 
     if (from.isGeneralReg()) {
         if (from.reg() == spilledReg_) {
             // If the source is a register that has been spilled, make sure
             // to load the source back into that register.
             masm.ma_ldr(spillSlot(), spilledReg_);
             spilledReg_ = InvalidReg;
         }
-        switch (toOperand(to, false).getTag()) {
-          case Operand::OP2:
-            // secretly must be a register
+        if (to.isMemoryOrEffectiveAddress())
+            masm.ma_str(from.reg(), toAddress(to));
+        else
             masm.ma_mov(from.reg(), to.reg());
-            break;
-          case Operand::MEM:
-            masm.ma_str(from.reg(), toOperand(to, false));
-            break;
-          default:
-            MOZ_CRASH("strange move!");
-        }
     } else if (to.isGeneralReg()) {
         MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
         if (from.isMemory())
-            masm.ma_ldr(toOperand(from, false), to.reg());
+            masm.ma_ldr(toAddress(from), to.reg());
         else
             masm.ma_add(from.base(), Imm32(from.disp()), to.reg());
     } else {
         // Memory to memory gpr move.
         Register reg = tempReg();
 
         MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
         if (from.isMemory())
-            masm.ma_ldr(toOperand(from, false), reg);
+            masm.ma_ldr(toAddress(from), reg);
         else
             masm.ma_add(from.base(), Imm32(from.disp()), reg);
         MOZ_ASSERT(to.base() != reg);
-        masm.ma_str(reg, toOperand(to, false));
+        masm.ma_str(reg, toAddress(to));
     }
 }
 
 void
 MoveEmitterARM::emitFloat32Move(const MoveOperand& from, const MoveOperand& to)
 {
     if (from.isFloatReg()) {
         if (to.isFloatReg())
             masm.ma_vmov_f32(from.floatReg(), to.floatReg());
         else
-            masm.ma_vstr(VFPRegister(from.floatReg()).singleOverlay(),
-                         toOperand(to, true));
+            masm.ma_vstr(VFPRegister(from.floatReg()).singleOverlay(), toAddress(to));
     } else if (to.isFloatReg()) {
-        masm.ma_vldr(toOperand(from, true),
-                     VFPRegister(to.floatReg()).singleOverlay());
+        masm.ma_vldr(toAddress(from), VFPRegister(to.floatReg()).singleOverlay());
     } else {
         // Memory to memory move.
         MOZ_ASSERT(from.isMemory());
         FloatRegister reg = ScratchFloat32Reg;
-        masm.ma_vldr(toOperand(from, true),
-                     VFPRegister(reg).singleOverlay());
-        masm.ma_vstr(VFPRegister(reg).singleOverlay(),
-                     toOperand(to, true));
+        masm.ma_vldr(toAddress(from), VFPRegister(reg).singleOverlay());
+        masm.ma_vstr(VFPRegister(reg).singleOverlay(), toAddress(to));
     }
 }
 
 void
 MoveEmitterARM::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
 {
     if (from.isFloatReg()) {
         if (to.isFloatReg())
             masm.ma_vmov(from.floatReg(), to.floatReg());
         else
-            masm.ma_vstr(from.floatReg(), toOperand(to, true));
+            masm.ma_vstr(from.floatReg(), toAddress(to));
     } else if (to.isFloatReg()) {
-        masm.ma_vldr(toOperand(from, true), to.floatReg());
+        masm.ma_vldr(toAddress(from), to.floatReg());
     } else {
         // Memory to memory move.
         MOZ_ASSERT(from.isMemory());
         FloatRegister reg = ScratchDoubleReg;
-        masm.ma_vldr(toOperand(from, true), reg);
-        masm.ma_vstr(reg, toOperand(to, true));
+        masm.ma_vldr(toAddress(from), reg);
+        masm.ma_vstr(reg, toAddress(to));
     }
 }
 
 void
 MoveEmitterARM::emit(const MoveOp& move)
 {
     const MoveOperand& from = move.from();
     const MoveOperand& to = move.to();
--- a/js/src/jit/arm/MoveEmitter-arm.h
+++ b/js/src/jit/arm/MoveEmitter-arm.h
@@ -31,19 +31,19 @@ class MoveEmitterARM
     // assigned InvalidReg. If no corresponding spill space has been assigned,
     // then these registers do not need to be spilled.
     Register spilledReg_;
     FloatRegister spilledFloatReg_;
 
     void assertDone();
     Register tempReg();
     FloatRegister tempFloatReg();
-    Operand cycleSlot(uint32_t slot, uint32_t subslot) const;
-    Operand spillSlot() const;
-    Operand toOperand(const MoveOperand& operand, bool isFloat) const;
+    Address cycleSlot(uint32_t slot, uint32_t subslot) const;
+    Address spillSlot() const;
+    Address toAddress(const MoveOperand& operand) const;
 
     void emitMove(const MoveOperand& from, const MoveOperand& to);
     void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);
     void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
     void breakCycle(const MoveOperand& from, const MoveOperand& to,
                     MoveOp::Type type, uint32_t slot);
     void completeCycle(const MoveOperand& from, const MoveOperand& to,
                        MoveOp::Type type, uint32_t slot);
--- a/js/src/jit/arm/Trampoline-arm.cpp
+++ b/js/src/jit/arm/Trampoline-arm.cpp
@@ -166,27 +166,27 @@ JitRuntime::generateEnterJIT(JSContext* 
     // r4 now points to the aligned bottom of the list of arguments.
     static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
       "No need to consider the JitFrameLayout for aligning the stack");
     // sp' = ~(JitStackAlignment - 1) & (sp - argc * sizeof(Value)) - sizeof(JitFrameLayout)
     aasm->as_sub(sp, r4, Imm8(sizeof(JitFrameLayout)));
 
     // Get a copy of the number of args to use as a decrement counter, and set
     // the zero condition code.
-    aasm->as_mov(r5, O2Reg(r1), SetCond);
+    aasm->as_mov(r5, O2Reg(r1), SetCC);
 
     // Loop over arguments, copying them from an unknown buffer onto the Ion
     // stack so they can be accessed from JIT'ed code.
     {
         Label header, footer;
         // If there aren't any arguments, don't do anything.
         aasm->as_b(&footer, Assembler::Zero);
         // Get the top of the loop.
         masm.bind(&header);
-        aasm->as_sub(r5, r5, Imm8(1), SetCond);
+        aasm->as_sub(r5, r5, Imm8(1), SetCC);
         // We could be cleverer and unroll this, using a load-multiple
         // (particularly since the offset is effectively 0), but that seems
         // more error-prone and complex.
         // BIG FAT WARNING: this loads both r6 and r7.
         aasm->as_extdtr(IsLoad,  64, true, PostIndex, r6, EDtrAddr(r2, EDtrOffImm(8)));
         aasm->as_extdtr(IsStore, 64, true, PostIndex, r6, EDtrAddr(r4, EDtrOffImm(8)));
         aasm->as_b(&header, Assembler::NonZero);
         masm.bind(&footer);
@@ -468,34 +468,34 @@ JitRuntime::generateArgumentsRectifier(J
     masm.ma_mov(sp, r3); // Save %sp.
     masm.ma_mov(sp, r7); // Save %sp again.
 
     // Push undefined.
     {
         Label undefLoopTop;
         masm.bind(&undefLoopTop);
         masm.ma_dataTransferN(IsStore, 64, true, sp, Imm32(-8), r4, PreIndex);
-        masm.ma_sub(r2, Imm32(1), r2, SetCond);
+        masm.ma_sub(r2, Imm32(1), r2, SetCC);
 
         masm.ma_b(&undefLoopTop, Assembler::NonZero);
     }
 
     // Get the topmost argument.
 
     masm.ma_alu(r3, lsl(r8, 3), r3, OpAdd); // r3 <- r3 + nargs * 8
     masm.ma_add(r3, Imm32(sizeof(RectifierFrameLayout)), r3);
 
     // Push arguments, |nargs| + 1 times (to include |this|).
     {
         Label copyLoopTop;
         masm.bind(&copyLoopTop);
         masm.ma_dataTransferN(IsLoad, 64, true, r3, Imm32(-8), r4, PostIndex);
         masm.ma_dataTransferN(IsStore, 64, true, sp, Imm32(-8), r4, PreIndex);
 
-        masm.ma_sub(r8, Imm32(1), r8, SetCond);
+        masm.ma_sub(r8, Imm32(1), r8, SetCC);
         masm.ma_b(&copyLoopTop, Assembler::NotSigned);
     }
 
     // Translate the frame size from values into bytes.
     masm.ma_add(r6, Imm32(1), r6);
     masm.ma_lsl(Imm32(3), r6, r6);
 
     // Construct sizeDescriptor.
--- a/js/src/jsarray.cpp
+++ b/js/src/jsarray.cpp
@@ -2637,25 +2637,18 @@ ArrayConcatDenseKernel(JSContext* cx, JS
     uint32_t initlen2 = GetBoxedOrUnboxedInitializedLength<Type>(obj2);
     MOZ_ASSERT(initlen2 == GetAnyBoxedOrUnboxedArrayLength(obj2));
 
     /* No overflow here due to nelements limit. */
     uint32_t len = initlen1 + initlen2;
 
     MOZ_ASSERT(GetBoxedOrUnboxedInitializedLength<Type>(result) == 0);
 
-    if (Type == JSVAL_TYPE_MAGIC) {
-        if (!result->as<ArrayObject>().ensureElements(cx, len))
-            return DenseElementResult::Failure;
-    } else {
-        if (result->as<UnboxedArrayObject>().capacity() < len) {
-            if (!result->as<UnboxedArrayObject>().growElements(cx, len))
-                return DenseElementResult::Failure;
-        }
-    }
+    if (!EnsureBoxedOrUnboxedDenseElements<Type>(cx, result, len))
+        return DenseElementResult::Failure;
 
     CopyBoxedOrUnboxedDenseElements<Type>(cx, result, obj1, 0, 0, initlen1);
     CopyBoxedOrUnboxedDenseElements<Type>(cx, result, obj2, initlen1, 0, initlen2);
 
     SetAnyBoxedOrUnboxedArrayLength(cx, result, len);
     return DenseElementResult::Success;
 }
 
@@ -2894,16 +2887,30 @@ SliceSparse(JSContext* cx, HandleObject 
 
         if (!hole && !DefineElement(cx, result, index - begin, value))
             return false;
     }
 
     return true;
 }
 
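+// Clamp a slice boundary the way Array.prototype.slice specifies: negative
+// terms count back from |length|, and the result is clamped to [0, length].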
+template <typename T>
+static inline uint32_t
+NormalizeSliceTerm(T value, uint32_t length)
+{
+    if (value < 0) {
+        value += length;
+        if (value < 0)
+            return 0;
+    } else if (double(value) > double(length)) {
+        return length;
+    }
+    return uint32_t(value);
+}
+
 bool
 js::array_slice(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
 
     RootedObject obj(cx, ToObject(cx, args.thisv()));
     if (!obj)
         return false;
@@ -2913,36 +2920,22 @@ js::array_slice(JSContext* cx, unsigned 
         return false;
 
     uint32_t begin = 0;
     uint32_t end = length;
     if (args.length() > 0) {
         double d;
         if (!ToInteger(cx, args[0], &d))
             return false;
-        if (d < 0) {
-            d += length;
-            if (d < 0)
-                d = 0;
-        } else if (d > length) {
-            d = length;
-        }
-        begin = (uint32_t)d;
+        begin = NormalizeSliceTerm(d, length);
 
         if (args.hasDefined(1)) {
             if (!ToInteger(cx, args[1], &d))
                 return false;
-            if (d < 0) {
-                d += length;
-                if (d < 0)
-                    d = 0;
-            } else if (d > length) {
-                d = length;
-            }
-            end = (uint32_t)d;
+            end = NormalizeSliceTerm(d, length);
         }
     }
 
     if (begin > end)
         begin = end;
 
     if (!ObjectMayHaveExtraIndexedProperties(obj)) {
         size_t initlen = GetAnyBoxedOrUnboxedInitializedLength(obj);
@@ -2995,16 +2988,67 @@ js::array_slice(JSContext* cx, unsigned 
         if (!SliceSlowly(cx, obj, obj, begin, end, narr))
             return false;
     }
 
     args.rval().setObject(*narr);
     return true;
 }
 
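+// Fast path shared by boxed and unboxed arrays: copy the part of
+// [begin, end) that overlaps the initialized elements, then set the result's
+// length to the slice length.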
+template <JSValueType Type>
+DenseElementResult
+ArraySliceDenseKernel(JSContext* cx, JSObject* obj, int32_t beginArg, int32_t endArg, JSObject* result)
+{
+    int32_t length = GetAnyBoxedOrUnboxedArrayLength(obj);
+
+    uint32_t begin = NormalizeSliceTerm(beginArg, length);
+    uint32_t end = NormalizeSliceTerm(endArg, length);
+
+    if (begin > end)
+        begin = end;
+
+    size_t initlen = GetBoxedOrUnboxedInitializedLength<Type>(obj);
+    if (initlen > begin) {
+        size_t count = Min<size_t>(initlen - begin, end - begin);
+        if (count) {
+            if (!EnsureBoxedOrUnboxedDenseElements<Type>(cx, result, count))
+                return DenseElementResult::Failure;
+            CopyBoxedOrUnboxedDenseElements<Type>(cx, result, obj, 0, begin, count);
+        }
+    }
+
+    SetAnyBoxedOrUnboxedArrayLength(cx, result, end - begin);
+    return DenseElementResult::Success;
+}
+
+DefineBoxedOrUnboxedFunctor5(ArraySliceDenseKernel,
+                             JSContext*, JSObject*, int32_t, int32_t, JSObject*);
+
+JSObject*
+js::array_slice_dense(JSContext* cx, HandleObject obj, int32_t begin, int32_t end,
+                      HandleObject result)
+{
+    if (result) {
+        ArraySliceDenseKernelFunctor functor(cx, obj, begin, end, result);
+        DenseElementResult rv = CallBoxedOrUnboxedSpecialization(functor, result);
+        MOZ_ASSERT(rv != DenseElementResult::Incomplete);
+        return rv == DenseElementResult::Success ? result : nullptr;
+    }
+
+    // Slower path if the JIT wasn't able to allocate an object inline.
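+    // |argv| is laid out like a CallArgs vector: callee slot (doubling as the
+    // rval slot), |this| (the array), then the two slice arguments.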
+    JS::AutoValueArray<4> argv(cx);
+    argv[0].setUndefined();
+    argv[1].setObject(*obj);
+    argv[2].setInt32(begin);
+    argv[3].setInt32(end);
+    if (!array_slice(cx, 2, argv.begin()))
+        return nullptr;
+    return &argv[0].toObject();
+}
+
 /* ES5 15.4.4.20. */
 static bool
 array_filter(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
 
     /* Step 1. */
     RootedObject obj(cx, ToObject(cx, args.thisv()));
--- a/js/src/jsarray.h
+++ b/js/src/jsarray.h
@@ -175,16 +175,19 @@ extern bool
 array_shift(JSContext* cx, unsigned argc, js::Value* vp);
 
 extern bool
 array_unshift(JSContext* cx, unsigned argc, js::Value* vp);
 
 extern bool
 array_slice(JSContext* cx, unsigned argc, js::Value* vp);
 
+extern JSObject*
+array_slice_dense(JSContext* cx, HandleObject obj, int32_t begin, int32_t end, HandleObject result);
+
 /*
  * Append the given (non-hole) value to the end of an array.  The array must be
  * a newborn array -- that is, one which has not been exposed to script for
  * arbitrary manipulation.  (This method optimizes on the assumption that
  * extending the array to accommodate the element will never make the array
  * sparse, which requires that the array be completely filled.)
  */
 extern bool
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -7087,19 +7087,43 @@ JS::FinishIncrementalGC(JSRuntime* rt, g
 
 JS_PUBLIC_API(void)
 JS::AbortIncrementalGC(JSRuntime* rt)
 {
     rt->gc.abortGC();
 }
 
 char16_t*
-JS::GCDescription::formatMessage(JSRuntime* rt) const
-{
-    return rt->gc.stats.formatMessage();
+JS::GCDescription::formatSliceMessage(JSRuntime* rt) const
+{
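+    // Inflate the 8-bit stats message into a fresh null-terminated char16_t
+    // buffer that the caller takes ownership of.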
+    UniqueChars cstr = rt->gc.stats.formatCompactSliceMessage();
+
+    size_t nchars = strlen(cstr.get());
+    UniquePtr<char16_t, JS::FreePolicy> out(js_pod_malloc<char16_t>(nchars + 1));
+    if (!out)
+        return nullptr;
+    out.get()[nchars] = 0;
+
+    CopyAndInflateChars(out.get(), cstr.get(), nchars);
+    return out.release();
+}
+
+char16_t*
+JS::GCDescription::formatSummaryMessage(JSRuntime* rt) const
+{
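+    // Same inflation as in formatSliceMessage, for the summary message.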
+    UniqueChars cstr = rt->gc.stats.formatCompactSummaryMessage();
+
+    size_t nchars = strlen(cstr.get());
+    UniquePtr<char16_t, JS::FreePolicy> out(js_pod_malloc<char16_t>(nchars + 1));
+    if (!out)
+        return nullptr;
+    out.get()[nchars] = 0;
+
+    CopyAndInflateChars(out.get(), cstr.get(), nchars);
+    return out.release();
 }
 
 JS::dbg::GarbageCollectionEvent::Ptr
 JS::GCDescription::toGCEvent(JSRuntime* rt) const
 {
     return JS::dbg::GarbageCollectionEvent::Create(rt, rt->gc.stats, rt->gc.majorGCCount());
 }
 
--- a/js/src/vm/Interpreter.cpp
+++ b/js/src/vm/Interpreter.cpp
@@ -8,16 +8,17 @@
  * JavaScript bytecode interpreter.
  */
 
 #include "vm/Interpreter-inl.h"
 
 #include "mozilla/ArrayUtils.h"
 #include "mozilla/DebugOnly.h"
 #include "mozilla/FloatingPoint.h"
+#include "mozilla/Maybe.h"
 #include "mozilla/PodOperations.h"
 
 #include <string.h>
 
 #include "jsarray.h"
 #include "jsatom.h"
 #include "jscntxt.h"
 #include "jsfun.h"
@@ -1649,16 +1650,68 @@ SetObjectElementOperation(JSContext* cx,
         return false;
 
     RootedValue tmp(cx, value);
     ObjectOpResult result;
     return SetProperty(cx, obj, id, tmp, receiver, result) &&
            result.checkStrictErrorOrWarning(cx, obj, id, strict);
 }
 
+/*
+ * As an optimization, the interpreter creates a handful of reserved Rooted<T>
+ * variables at the beginning, thus inserting them into the Rooted list once
+ * upon entry. ReservedRooted "borrows" a reserved Rooted variable and uses it
+ * within a local scope, resetting the value to nullptr (or the appropriate
+ * equivalent for T) at scope end. This avoids inserting/removing the Rooted
+ * from the rooter list, while preventing stale values from being kept alive
+ * unnecessarily.
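+ *
+ * A typical use inside an opcode case looks like (cf. JSOP_ENTERWITH below;
+ * |someObject| is illustrative):
+ *
+ *   {
+ *       ReservedRooted<JSObject*> obj(&rootObject0, someObject);
+ *       // |obj| converts to Handle<JSObject*> where needed.
+ *   }   // rootObject0 is reset to its initial value here.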
+ */
+
+template<typename T>
+class ReservedRootedBase {
+};
+
+template<typename T>
+class ReservedRooted : public ReservedRootedBase<T>
+{
+    Rooted<T>* savedRoot;
+
+  public:
+    ReservedRooted(Rooted<T>* root, const T& ptr) : savedRoot(root) {
+        *root = ptr;
+    }
+
+    explicit ReservedRooted(Rooted<T>* root) : savedRoot(root) {
+        *root = js::GCMethods<T>::initial();
+    }
+
+    ~ReservedRooted() {
+        *savedRoot = js::GCMethods<T>::initial();
+    }
+
+    void set(const T& p) const { *savedRoot = p; }
+    operator Handle<T>() { return *savedRoot; }
+    operator Rooted<T>&() { return *savedRoot; }
+    MutableHandle<T> operator&() { return &*savedRoot; }
+
+    DECLARE_NONPOINTER_ACCESSOR_METHODS(savedRoot->get())
+    DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(savedRoot->get())
+    DECLARE_POINTER_CONSTREF_OPS(T)
+    DECLARE_POINTER_ASSIGN_OPS(ReservedRooted, T)
+};
+
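+// Hook ReservedRooted<Value> into ValueOperations so value queries such as
+// isObject() can be called directly on the reserved root.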
+template <>
+class ReservedRootedBase<Value> : public ValueOperations<ReservedRooted<Value>>
+{
+    friend class ValueOperations<ReservedRooted<Value>>;
+    const Value* extract() const {
+        return static_cast<const ReservedRooted<Value>*>(this)->address();
+    }
+};
+
 static MOZ_NEVER_INLINE bool
 Interpret(JSContext* cx, RunState& state)
 {
 /*
  * Define macros for an interpreter loop. Opcode dispatch may be either by a
  * switch statement or by indirect goto (aka a threaded interpreter), depending
  * on compiler support.
  *
@@ -2033,21 +2086,19 @@ CASE(JSOP_DUPAT)
 END_CASE(JSOP_DUPAT)
 
 CASE(JSOP_SETRVAL)
     POP_RETURN_VALUE();
 END_CASE(JSOP_SETRVAL)
 
 CASE(JSOP_ENTERWITH)
 {
-    RootedValue& val = rootValue0;
-    RootedObject& staticWith = rootObject0;
-    val = REGS.sp[-1];
+    ReservedRooted<Value> val(&rootValue0, REGS.sp[-1]);
     REGS.sp--;
-    staticWith = script->getObject(REGS.pc);
+    ReservedRooted<JSObject*> staticWith(&rootObject0, script->getObject(REGS.pc));
 
     if (!EnterWithOperation(cx, REGS.fp(), val, staticWith))
         goto error;
 }
 END_CASE(JSOP_ENTERWITH)
 
 CASE(JSOP_LEAVEWITH)
     REGS.fp()->popWith(cx);
@@ -2165,23 +2216,24 @@ END_CASE(JSOP_AND)
 
 CASE(JSOP_IN)
 {
     HandleValue rref = REGS.stackHandleAt(-1);
     if (!rref.isObject()) {
         ReportValueError(cx, JSMSG_IN_NOT_OBJECT, -1, rref, nullptr);
         goto error;
     }
-    RootedObject& obj = rootObject0;
-    obj = &rref.toObject();
-    RootedId& id = rootId0;
-    FETCH_ELEMENT_ID(-2, id);
     bool found;
-    if (!HasProperty(cx, obj, id, &found))
-        goto error;
+    {
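+        // Scope the reserved roots; rootObject0 and rootId0 are reset when
+        // this block ends.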
+        ReservedRooted<JSObject*> obj(&rootObject0, &rref.toObject());
+        ReservedRooted<jsid> id(&rootId0);
+        FETCH_ELEMENT_ID(-2, id);
+        if (!HasProperty(cx, obj, id, &found))
+            goto error;