Merge inbound to mozilla-central. a=merge
author Bogdan Tara <btara@mozilla.com>
Fri, 03 Aug 2018 13:16:27 +0300
changeset 485058 9ad7706def24
parent 485041 3c1c7f965f0f (current diff)
parent 485057 c6d07c3eec16 (diff)
child 485059 3d1078668254
child 485073 e61148f3a1c5
child 485142 d138efcfa006
push id 9719
push user ffxbld-merge
push date Fri, 24 Aug 2018 17:49:46 +0000
treeherder mozilla-beta@719ec98fba77
reviewers merge
milestone 63.0a1
first release with: nightly 63.0a1 / 9ad7706def24 / build 20180803104322 (linux32, linux64, mac, win32, win64)
--- a/browser/components/payments/res/containers/address-picker.js
+++ b/browser/components/payments/res/containers/address-picker.js
@@ -108,16 +108,18 @@ export default class AddressPicker exten
     // Update selectedness after the options are updated
     let selectedAddressGUID = state[this.selectedStateKey];
     this.dropdown.value = selectedAddressGUID;
 
     if (selectedAddressGUID && selectedAddressGUID !== this.dropdown.value) {
       throw new Error(`${this.selectedStateKey} option ${selectedAddressGUID} ` +
                       `does not exist in the address picker`);
     }
+
+    super.render(state);
   }
 
   get selectedStateKey() {
     return this.getAttribute("selected-state-key");
   }
 
   handleEvent(event) {
     switch (event.type) {
--- a/browser/components/payments/res/containers/payment-method-picker.js
+++ b/browser/components/payments/res/containers/payment-method-picker.js
@@ -60,16 +60,18 @@ export default class PaymentMethodPicker
     // Update selectedness after the options are updated
     let selectedPaymentCardGUID = state[this.selectedStateKey];
     this.dropdown.value = selectedPaymentCardGUID;
 
     if (selectedPaymentCardGUID && selectedPaymentCardGUID !== this.dropdown.value) {
       throw new Error(`The option ${selectedPaymentCardGUID} ` +
                       `does not exist in the payment method picker`);
     }
+
+    super.render(state);
   }
 
   get selectedStateKey() {
     return this.getAttribute("selected-state-key");
   }
 
   handleEvent(event) {
     switch (event.type) {
--- a/browser/components/payments/res/containers/rich-picker.js
+++ b/browser/components/payments/res/containers/rich-picker.js
@@ -44,13 +44,17 @@ export default class RichPicker extends 
   }
 
   attributeChangedCallback(name, oldValue, newValue) {
     if (name == "label") {
       this.labelElement.textContent = newValue;
     }
   }
 
+  render(state) {
+    this.editLink.hidden = !this.dropdown.value;
+  }
+
   get value() {
     return this.dropdown &&
            this.dropdown.selectedOption;
   }
 }
--- a/browser/components/payments/test/mochitest/test_address_picker.html
+++ b/browser/components/payments/test/mochitest/test_address_picker.html
@@ -33,16 +33,17 @@ Test the address-picker component
 import "../../res/containers/address-picker.js";
 
 let picker1 = document.getElementById("picker1");
 
 add_task(async function test_empty() {
   ok(picker1, "Check picker1 exists");
   let {savedAddresses} = picker1.requestStore.getState();
   is(Object.keys(savedAddresses).length, 0, "Check empty initial state");
+  is(picker1.editLink.hidden, true, "Check that picker edit link is hidden");
   is(picker1.dropdown.popupBox.children.length, 0, "Check dropdown is empty");
 });
 
 add_task(async function test_initialSet() {
   picker1.requestStore.setState({
     savedAddresses: {
       "48bnds6854t": {
         "address-level1": "MI",
@@ -107,16 +108,17 @@ add_task(async function test_update() {
   ok(options[1].textContent.includes("Mrs. Bar"), "Check that name is the same in second address");
   ok(options[1].getAttribute("street-address").includes("P.O. Box 123"),
      "Check second address is the same");
 });
 
 add_task(async function test_change_selected_address() {
   let options = picker1.dropdown.popupBox.children;
   is(picker1.dropdown.selectedOption, null, "Should default to no selected option");
+  is(picker1.editLink.hidden, true, "Picker edit link should be hidden when no option is selected");
   let {selectedShippingAddress} = picker1.requestStore.getState();
   is(selectedShippingAddress, null, "store should have no option selected");
 
   picker1.dropdown.popupBox.focus();
   synthesizeKey(options[1].getAttribute("name"), {});
   await asyncElementRendered();
 
   let selectedOption = picker1.dropdown.selectedOption;
--- a/browser/components/payments/test/mochitest/test_shipping_option_picker.html
+++ b/browser/components/payments/test/mochitest/test_shipping_option_picker.html
@@ -34,16 +34,18 @@ import "../../res/containers/shipping-op
 let picker1 = document.getElementById("picker1");
 
 add_task(async function test_empty() {
   ok(picker1, "Check picker1 exists");
   let state = picker1.requestStore.getState();
   let {shippingOptions} = state && state.request && state.request.paymentDetails;
   is(Object.keys(shippingOptions).length, 0, "Check empty initial state");
   is(picker1.dropdown.popupBox.children.length, 0, "Check dropdown is empty");
+  is(picker1.editLink.hidden, true, "Check that picker edit link is always hidden");
+  is(picker1.addLink.hidden, true, "Check that picker add link is always hidden");
 });
 
 add_task(async function test_initialSet() {
   picker1.requestStore.setState({
     request: {
       paymentDetails: {
         shippingOptions: [
           {
--- a/build/moz.configure/toolchain.configure
+++ b/build/moz.configure/toolchain.configure
@@ -584,16 +584,17 @@ def get_vc_paths(topsrcdir):
         tools_version = open(os.path.join(
             path, r'VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt'), 'rb').read().strip()
         tools_path = os.path.join(
             path, r'VC\Tools\MSVC', tools_version, r'bin\HostX64')
         yield (Version(install['installationVersion']), {
             'x64': [os.path.join(tools_path, 'x64')],
             # The x64->x86 cross toolchain requires DLLs from the native x64 toolchain.
             'x86': [os.path.join(tools_path, 'x86'), os.path.join(tools_path, 'x64')],
+            'arm64': [os.path.join(tools_path, 'x64')],
         })
 
 
 js_option('--with-visual-studio-version', nargs=1,
           choices=('2017',),
           help='Select a specific Visual Studio version to use')
 
 
@@ -609,16 +610,17 @@ def vs_major_version(value):
 @imports('platform')
 def vc_compiler_path(host, target, vs_major_version, env, vs_release_name):
     if host.kernel != 'WINNT':
         return
     vc_target = {
         'x86': 'x86',
         'x86_64': 'x64',
         'arm': 'arm',
+        'aarch64': 'arm64'
     }.get(target.cpu)
     if vc_target is None:
         return
 
     all_versions = sorted(get_vc_paths(env.topsrcdir), key=itemgetter(0))
     if not all_versions:
         return
     if vs_major_version:
--- a/build/moz.configure/windows.configure
+++ b/build/moz.configure/windows.configure
@@ -351,22 +351,24 @@ set_config('INCLUDE', include_path)
 @imports('os')
 def lib_path(target, c_compiler, vc_path, windows_sdk_dir, ucrt_sdk_dir, dia_sdk_dir):
     if not vc_path:
         return
     sdk_target = {
         'x86': 'x86',
         'x86_64': 'x64',
         'arm': 'arm',
+        'aarch64': 'arm64',
     }.get(target.cpu)
 
     old_target = {
         'x86': '',
         'x86_64': 'amd64',
         'arm': 'arm',
+        'aarch64': 'arm64'
     }.get(target.cpu)
     if old_target is None:
         return
     # As old_target can be '', and os.path.join will happily use the empty
     # string, leading to a string ending with a backslash, that Make will
     # interpret as a "string continues on next line" indicator, use variable
     # args.
     old_target = (old_target,) if old_target else ()
--- a/dom/base/NodeUbiReporting.cpp
+++ b/dom/base/NodeUbiReporting.cpp
@@ -45,17 +45,17 @@ JS::ubi::Concrete<nsINode>::edges(JSCont
     char16_t* edgeName = nullptr;
     if (wantNames) {
       edgeName = NS_strdup(u"Child Node");
     }
     if (!range->addEdge(JS::ubi::Edge(edgeName, curr))) {
       return nullptr;
     }
   }
-  return range;
+  return js::UniquePtr<EdgeRange>(range.release());
 }
 
 JS::ubi::Node::Size
 JS::ubi::Concrete<nsINode>::size(mozilla::MallocSizeOf mallocSizeOf) const
 {
   AutoSuppressGCAnalysis suppress;
   mozilla::SizeOfState sz(mallocSizeOf);
   nsWindowSizes wn(sz);
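The re-wrap above, rather than returning range directly, sidesteps the missing implicit conversion between differently-typed smart pointers. A minimal standalone sketch of the pattern, using std::unique_ptr as a stand-in for js::UniquePtr and an assumed SimpleEdgeRange concrete type:

#include <memory>

struct EdgeRange { virtual ~EdgeRange() = default; };
struct SimpleEdgeRange : EdgeRange {};  // assumed concrete type, per context

std::unique_ptr<EdgeRange> makeRange() {
    auto range = std::make_unique<SimpleEdgeRange>();
    // ... populate `range`, returning nullptr on failure ...
    // Release the derived-typed pointer and re-wrap it at the base type,
    // mirroring js::UniquePtr<EdgeRange>(range.release()) above.
    return std::unique_ptr<EdgeRange>(range.release());
}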
--- a/js/src/frontend/BytecodeEmitter.cpp
+++ b/js/src/frontend/BytecodeEmitter.cpp
@@ -1951,23 +1951,23 @@ BytecodeEmitter::emitPropOp(ParseNode* p
 
     if (op == JSOP_CALLPROP && !emit1(JSOP_SWAP))
         return false;
 
     return true;
 }
 
 bool
-BytecodeEmitter::emitSuperPropOp(ParseNode* pn, JSOp op, bool isCall)
+BytecodeEmitter::emitSuperGetProp(ParseNode* pn, bool isCall)
 {
     ParseNode* base = &pn->as<PropertyAccess>().expression();
     if (!emitSuperPropLHS(base, isCall))
         return false;
 
-    if (!emitAtomOp(pn, op))
+    if (!emitAtomOp(pn, JSOP_GETPROP_SUPER))
         return false;
 
     if (isCall && !emit1(JSOP_SWAP))
         return false;
 
     return true;
 }
 
@@ -2105,62 +2105,49 @@ BytecodeEmitter::emitElemOperands(ParseN
     } else if (opts == EmitElemOption::Call) {
         if (!emit1(JSOP_DUP))
             return false;
     }
 
     if (!emitTree(pn->pn_right))
         return false;
 
-    if (opts == EmitElemOption::Set) {
-        if (!emit2(JSOP_PICK, 2))
-            return false;
-    } else if (opts == EmitElemOption::IncDec || opts == EmitElemOption::CompoundAssign) {
+    if (opts == EmitElemOption::IncDec || opts == EmitElemOption::CompoundAssign) {
         if (!emit1(JSOP_TOID))
             return false;
     }
     return true;
 }
 
 bool
 BytecodeEmitter::emitSuperElemOperands(ParseNode* pn, EmitElemOption opts)
 {
     MOZ_ASSERT(pn->isKind(ParseNodeKind::Elem) && pn->as<PropertyByValue>().isSuper());
 
-    // The ordering here is somewhat screwy. We need to evaluate the propval
-    // first, by spec. Do a little dance to not emit more than one JSOP_THIS.
-    // Since JSOP_THIS might throw in derived class constructors, we cannot
-    // just push it earlier as the receiver. We have to swap it down instead.
-
-    if (!emitTree(pn->pn_right))
+    if (!emitGetThisForSuperBase(pn->pn_left))      // THIS
+        return false;
+
+    if (opts == EmitElemOption::Call) {
+        // We need a second |this| that will be consumed during computation of
+        // the property value. (The original |this| is passed to the call.)
+        if (!emit1(JSOP_DUP))                       // THIS THIS
+            return false;
+    }
+
+    if (!emitTree(pn->pn_right))                    // THIS? THIS KEY
         return false;
 
     // We need to convert the key to an object id first, so that we do not do
     // it inside both the GETELEM and the SETELEM.
     if (opts == EmitElemOption::IncDec || opts == EmitElemOption::CompoundAssign) {
-        if (!emit1(JSOP_TOID))
-            return false;
-    }
-
-    if (!emitGetThisForSuperBase(pn->pn_left))
-        return false;
-
-    if (opts == EmitElemOption::Call) {
-        if (!emit1(JSOP_SWAP))
-            return false;
-
-        // We need another |this| on top, also
-        if (!emitDupAt(1))
-            return false;
-    }
-
-    if (!emit1(JSOP_SUPERBASE))
-        return false;
-
-    if (opts == EmitElemOption::Set && !emit2(JSOP_PICK, 3))
+        if (!emit1(JSOP_TOID))                      // THIS? THIS KEY
+            return false;
+    }
+
+    if (!emit1(JSOP_SUPERBASE))                     // THIS? THIS KEY SUPERBASE
         return false;
 
     return true;
 }
 
 bool
 BytecodeEmitter::emitElemOpBase(JSOp op)
 {
@@ -2169,40 +2156,37 @@ BytecodeEmitter::emitElemOpBase(JSOp op)
 
     checkTypeSet(op);
     return true;
 }
 
 bool
 BytecodeEmitter::emitElemOp(ParseNode* pn, JSOp op)
 {
-    EmitElemOption opts = EmitElemOption::Get;
-    if (op == JSOP_CALLELEM)
-        opts = EmitElemOption::Call;
-    else if (op == JSOP_SETELEM || op == JSOP_STRICTSETELEM)
-        opts = EmitElemOption::Set;
+    MOZ_ASSERT(op == JSOP_GETELEM ||
+               op == JSOP_CALLELEM ||
+               op == JSOP_DELELEM ||
+               op == JSOP_STRICTDELELEM);
+
+    EmitElemOption opts = op == JSOP_CALLELEM ? EmitElemOption::Call : EmitElemOption::Get;
 
     return emitElemOperands(pn, opts) && emitElemOpBase(op);
 }
 
 bool
-BytecodeEmitter::emitSuperElemOp(ParseNode* pn, JSOp op, bool isCall)
-{
-    EmitElemOption opts = EmitElemOption::Get;
-    if (isCall)
-        opts = EmitElemOption::Call;
-    else if (op == JSOP_SETELEM_SUPER || op == JSOP_STRICTSETELEM_SUPER)
-        opts = EmitElemOption::Set;
-
-    if (!emitSuperElemOperands(pn, opts))
-        return false;
-    if (!emitElemOpBase(op))
-        return false;
-
-    if (isCall && !emit1(JSOP_SWAP))
+BytecodeEmitter::emitSuperGetElem(ParseNode* pn, bool isCall)
+{
+    EmitElemOption opts = isCall ? EmitElemOption::Call : EmitElemOption::Get;
+
+    if (!emitSuperElemOperands(pn, opts))           // THIS? THIS KEY SUPERBASE
+        return false;
+    if (!emitElemOpBase(JSOP_GETELEM_SUPER))        // THIS? VALUE
+        return false;
+
+    if (isCall && !emit1(JSOP_SWAP))                // VALUE THIS
         return false;
 
     return true;
 }
 
 bool
 BytecodeEmitter::emitElemIncDec(ParseNode* pn)
 {
@@ -2223,54 +2207,43 @@ BytecodeEmitter::emitElemIncDec(ParseNod
 
     bool post;
     JSOp binop = GetIncDecInfo(pn->getKind(), &post);
 
     JSOp getOp;
     if (isSuper) {
         // There's no such thing as JSOP_DUP3, so we have to be creative.
         // Note that pushing things again is no fewer JSOps.
-        if (!emitDupAt(2))                              // KEY THIS OBJ KEY
-            return false;
-        if (!emitDupAt(2))                              // KEY THIS OBJ KEY THIS
-            return false;
-        if (!emitDupAt(2))                              // KEY THIS OBJ KEY THIS OBJ
+        if (!emitDupAt(2))                              // THIS KEY OBJ THIS
+            return false;
+        if (!emitDupAt(2))                              // THIS KEY OBJ THIS KEY
+            return false;
+        if (!emitDupAt(2))                              // THIS KEY OBJ THIS KEY OBJ
             return false;
         getOp = JSOP_GETELEM_SUPER;
     } else {
                                                         // OBJ KEY
         if (!emit1(JSOP_DUP2))                          // OBJ KEY OBJ KEY
             return false;
         getOp = JSOP_GETELEM;
     }
     if (!emitElemOpBase(getOp))                         // OBJ KEY V
         return false;
     if (!emit1(JSOP_POS))                               // OBJ KEY N
         return false;
-    if (post && !emit1(JSOP_DUP))                       // OBJ KEY N? N
-        return false;
-    if (!emit1(JSOP_ONE))                               // OBJ KEY N? N 1
-        return false;
-    if (!emit1(binop))                                  // OBJ KEY N? N+1
-        return false;
-
     if (post) {
-        if (isSuper) {
-            // We have one more value to rotate around, because of |this|
-            // on the stack
-            if (!emit2(JSOP_PICK, 4))
-                return false;
-        }
-        if (!emit2(JSOP_PICK, 3 + isSuper))             // KEY N N+1 OBJ
-            return false;
-        if (!emit2(JSOP_PICK, 3 + isSuper))             // N N+1 OBJ KEY
-            return false;
-        if (!emit2(JSOP_PICK, 2 + isSuper))             // N OBJ KEY N+1
-            return false;
-    }
+        if (!emit1(JSOP_DUP))                           // OBJ KEY N N
+            return false;
+        if (!emit2(JSOP_UNPICK, 3 + isSuper))           // N OBJ KEY N
+            return false;
+    }
+    if (!emit1(JSOP_ONE))                               // N? OBJ KEY N 1
+        return false;
+    if (!emit1(binop))                                  // N? OBJ KEY N+1
+        return false;
 
     JSOp setOp = isSuper ? (sc->strict() ? JSOP_STRICTSETELEM_SUPER : JSOP_SETELEM_SUPER)
                          : (sc->strict() ? JSOP_STRICTSETELEM : JSOP_SETELEM);
     if (!emitElemOpBase(setOp))                         // N? N+1
         return false;
     if (post && !emit1(JSOP_POP))                       // RESULT
         return false;
 
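The post-increment path above now builds |N OBJ KEY N| with a single JSOP_UNPICK instead of a chain of JSOP_PICKs. As a reference for reading the stack annotations, here is a small sketch of the two shuffles, modeling the operand stack as a vector with the top at the back; this is an illustration, not VM code:

#include <cassert>
#include <string>
#include <vector>

using Stack = std::vector<std::string>;

// JSOP_PICK n: move the value n slots below the top up to the top.
void pick(Stack& s, size_t n) {
    auto it = s.end() - 1 - n;
    std::string v = *it;
    s.erase(it);
    s.push_back(std::move(v));
}

// JSOP_UNPICK n: move the top value down so that n values sit above it.
void unpick(Stack& s, size_t n) {
    std::string v = s.back();
    s.pop_back();
    s.insert(s.end() - n, std::move(v));
}

int main() {
    Stack s{"OBJ", "KEY", "N", "N"};  // after JSOP_DUP in the post case
    unpick(s, 3);                     // emit2(JSOP_UNPICK, 3 + isSuper), !isSuper
    assert((s == Stack{"N", "OBJ", "KEY", "N"}));
    return 0;
}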
@@ -6476,56 +6449,74 @@ BytecodeEmitter::emitDeleteName(ParseNod
 }
 
 bool
 BytecodeEmitter::emitDeleteProperty(ParseNode* node)
 {
     MOZ_ASSERT(node->isKind(ParseNodeKind::DeleteProp));
     MOZ_ASSERT(node->isArity(PN_UNARY));
 
-    ParseNode* propExpr = node->pn_kid;
-    MOZ_ASSERT(propExpr->isKind(ParseNodeKind::Dot));
-
-    if (propExpr->as<PropertyAccess>().isSuper()) {
-        // Still have to calculate the base, even though we are are going
-        // to throw unconditionally, as calculating the base could also
-        // throw.
+    PropertyAccess* propExpr = &node->pn_kid->as<PropertyAccess>();
+
+    if (propExpr->isSuper()) {
+        // The expression |delete super.foo;| has to evaluate |super.foo|,
+        // which could throw if |this| hasn't yet been set by a |super(...)|
+        // call or the super-base is not an object, before throwing a
+        // ReferenceError for attempting to delete a super-reference.
+        if (!emitGetThisForSuperBase(&propExpr->expression()))
+            return false;
+
         if (!emit1(JSOP_SUPERBASE))
             return false;
 
-        return emitUint16Operand(JSOP_THROWMSG, JSMSG_CANT_DELETE_SUPER);
+        // Unconditionally throw when attempting to delete a super-reference.
+        if (!emitUint16Operand(JSOP_THROWMSG, JSMSG_CANT_DELETE_SUPER))
+            return false;
+
+        // Another wrinkle: Balance the stack from the emitter's point of view.
+        // Execution will not reach here, as the last bytecode threw.
+        return emit1(JSOP_POP);
     }
 
     JSOp delOp = sc->strict() ? JSOP_STRICTDELPROP : JSOP_DELPROP;
     return emitPropOp(propExpr, delOp);
 }
 
 bool
 BytecodeEmitter::emitDeleteElement(ParseNode* node)
 {
     MOZ_ASSERT(node->isKind(ParseNodeKind::DeleteElem));
     MOZ_ASSERT(node->isArity(PN_UNARY));
 
-    ParseNode* elemExpr = node->pn_kid;
-    MOZ_ASSERT(elemExpr->isKind(ParseNodeKind::Elem));
-
-    if (elemExpr->as<PropertyByValue>().isSuper()) {
-        // Still have to calculate everything, even though we're gonna throw
-        // since it may have side effects
+    PropertyByValue* elemExpr = &node->pn_kid->as<PropertyByValue>();
+
+    if (elemExpr->isSuper()) {
+        // The expression |delete super[foo];| has to evaluate |super[foo]|,
+        // which could throw if |this| hasn't yet been set by a |super(...)|
+        // call, or trigger side-effects when evaluating ToPropertyKey(foo),
+        // or also throw when the super-base is not an object, before throwing
+        // a ReferenceError for attempting to delete a super-reference.
+        if (!emitGetThisForSuperBase(elemExpr->pn_left))
+            return false;
+
         if (!emitTree(elemExpr->pn_right))
             return false;
+        if (!emit1(JSOP_TOID))
+            return false;
 
         if (!emit1(JSOP_SUPERBASE))
             return false;
+
+        // Unconditionally throw when attempting to delete a super-reference.
         if (!emitUint16Operand(JSOP_THROWMSG, JSMSG_CANT_DELETE_SUPER))
             return false;
 
         // Another wrinkle: Balance the stack from the emitter's point of view.
         // Execution will not reach here, as the last bytecode threw.
-        return emit1(JSOP_POP);
+        return emitPopN(2);
     }
 
     JSOp delOp = sc->strict() ? JSOP_STRICTDELELEM : JSOP_DELELEM;
     return emitElemOp(elemExpr, delOp);
 }
 
 bool
 BytecodeEmitter::emitDeleteExpression(ParseNode* node)
@@ -6749,22 +6740,22 @@ BytecodeEmitter::emitSelfHostedGetProper
     }
 
     ParseNode* funNode = pn->pn_head;  // The getPropertySuper node.
 
     ParseNode* objNode = funNode->pn_next;
     ParseNode* idNode = objNode->pn_next;
     ParseNode* receiverNode = idNode->pn_next;
 
+    if (!emitTree(receiverNode))
+        return false;
+
     if (!emitTree(idNode))
         return false;
 
-    if (!emitTree(receiverNode))
-        return false;
-
     if (!emitTree(objNode))
         return false;
 
     return emitElemOpBase(JSOP_GETELEM_SUPER);
 }
 
 bool
 BytecodeEmitter::isRestParameter(ParseNode* pn)
@@ -6811,28 +6802,28 @@ BytecodeEmitter::emitCallee(ParseNode* c
     switch (callee->getKind()) {
       case ParseNodeKind::Name:
         if (!emitGetName(callee, *callop))
             return false;
         break;
       case ParseNodeKind::Dot:
         MOZ_ASSERT(emitterMode != BytecodeEmitter::SelfHosting);
         if (callee->as<PropertyAccess>().isSuper()) {
-            if (!emitSuperPropOp(callee, JSOP_GETPROP_SUPER, /* isCall = */ *callop))
+            if (!emitSuperGetProp(callee, /* isCall = */ *callop))
                 return false;
         } else {
             if (!emitPropOp(callee, *callop ? JSOP_CALLPROP : JSOP_GETPROP))
                 return false;
         }
 
         break;
       case ParseNodeKind::Elem:
         MOZ_ASSERT(emitterMode != BytecodeEmitter::SelfHosting);
         if (callee->as<PropertyByValue>().isSuper()) {
-            if (!emitSuperElemOp(callee, JSOP_GETELEM_SUPER, /* isCall = */ *callop))
+            if (!emitSuperGetElem(callee, /* isCall = */ *callop))
                 return false;
         } else {
             if (!emitElemOp(callee, *callop ? JSOP_CALLELEM : JSOP_GETELEM))
                 return false;
             if (*callop) {
                 if (!emit1(JSOP_SWAP))
                     return false;
             }
@@ -8541,27 +8532,27 @@ BytecodeEmitter::emitTree(ParseNode* pn,
 
       case ParseNodeKind::DeleteExpr:
         if (!emitDeleteExpression(pn))
             return false;
         break;
 
       case ParseNodeKind::Dot:
         if (pn->as<PropertyAccess>().isSuper()) {
-            if (!emitSuperPropOp(pn, JSOP_GETPROP_SUPER))
+            if (!emitSuperGetProp(pn))
                 return false;
         } else {
             if (!emitPropOp(pn, JSOP_GETPROP))
                 return false;
         }
         break;
 
       case ParseNodeKind::Elem:
         if (pn->as<PropertyByValue>().isSuper()) {
-            if (!emitSuperElemOp(pn, JSOP_GETELEM_SUPER))
+            if (!emitSuperGetElem(pn))
                 return false;
         } else {
             if (!emitElemOp(pn, JSOP_GETELEM))
                 return false;
         }
         break;
 
       case ParseNodeKind::New:
--- a/js/src/frontend/BytecodeEmitter.h
+++ b/js/src/frontend/BytecodeEmitter.h
@@ -668,17 +668,17 @@ struct MOZ_STACK_CLASS BytecodeEmitter
     MOZ_MUST_USE bool emitAsyncWrapper(unsigned index, bool needsHomeObject, bool isArrow,
                                        bool isGenerator);
 
     MOZ_MUST_USE bool emitComputedPropertyName(ParseNode* computedPropName);
 
     // Emit bytecode to put operands for a JSOP_GETELEM/CALLELEM/SETELEM/DELELEM
     // opcode onto the stack in the right order. In the case of SETELEM, the
     // value to be assigned must already be pushed.
-    enum class EmitElemOption { Get, Set, Call, IncDec, CompoundAssign, Ref };
+    enum class EmitElemOption { Get, Call, IncDec, CompoundAssign, Ref };
     MOZ_MUST_USE bool emitElemOperands(ParseNode* pn, EmitElemOption opts);
 
     MOZ_MUST_USE bool emitElemOpBase(JSOp op);
     MOZ_MUST_USE bool emitElemOp(ParseNode* pn, JSOp op);
     MOZ_MUST_USE bool emitElemIncDec(ParseNode* pn);
 
     MOZ_MUST_USE bool emitCatch(ParseNode* pn);
     MOZ_MUST_USE bool emitIf(ParseNode* pn);
@@ -841,20 +841,20 @@ struct MOZ_STACK_CLASS BytecodeEmitter
     // It will pop the iterator and I, then iterate over the iterator by calling
     // |.next()| and put the results into the I-th element of array with
     // incrementing I, then push the result I (it will be original I +
     // iteration count). The stack after iteration will look like |ARRAY INDEX|.
     MOZ_MUST_USE bool emitSpread(bool allowSelfHosted = false);
 
     MOZ_MUST_USE bool emitClass(ParseNode* pn);
     MOZ_MUST_USE bool emitSuperPropLHS(ParseNode* superBase, bool isCall = false);
-    MOZ_MUST_USE bool emitSuperPropOp(ParseNode* pn, JSOp op, bool isCall = false);
+    MOZ_MUST_USE bool emitSuperGetProp(ParseNode* pn, bool isCall = false);
     MOZ_MUST_USE bool emitSuperElemOperands(ParseNode* pn,
                                             EmitElemOption opts = EmitElemOption::Get);
-    MOZ_MUST_USE bool emitSuperElemOp(ParseNode* pn, JSOp op, bool isCall = false);
+    MOZ_MUST_USE bool emitSuperGetElem(ParseNode* pn, bool isCall = false);
 
     MOZ_MUST_USE bool emitCallee(ParseNode* callee, ParseNode* call, bool* callop);
 
     MOZ_MUST_USE bool emitPipeline(ParseNode* pn);
 
     MOZ_MUST_USE bool emitExportDefault(ParseNode* pn);
 };
 
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/auto-regress/bug1476417.js
@@ -0,0 +1,22 @@
+function f(x) {
+    var y, z;
+
+    arguments; // Force creation of mapped arguments, so modifying |x| writes to the arguments object.
+    Math; // Access a global variable to create a resume point.
+    z = x + 1; // Was executed twice because only the resume point for 'Math' was present before the fix.
+    x = z; // Modifying |x| writes into the arguments object. A resume point was missing here before the fix.
+    y = 2 * x; // Triggers a bailout when overflowing int32 boundaries.
+
+    return [x, y];
+}
+
+var x = [0, 0, 0x3FFFFFFF];
+
+for (var j = 0; j < 3; ++j) {
+    var value = x[j];
+    var expected = [value + 1, (value + 1) * 2];
+    var actual = f(value);
+
+    assertEq(actual[0], expected[0]);
+    assertEq(actual[1], expected[1]);
+}
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -2298,17 +2298,17 @@ BaselineCompiler::emit_JSOP_GETELEM()
 
 bool
 BaselineCompiler::emit_JSOP_GETELEM_SUPER()
 {
     // Store obj in the scratch slot.
     storeValue(frame.peek(-1), frame.addressOfScratchValue(), R2);
     frame.pop();
 
-    // Keep index and receiver in R0 and R1.
+    // Keep receiver and index in R0 and R1.
     frame.popRegsAndSync(2);
 
     // Keep obj on the stack.
     frame.pushScratchValue();
 
     ICGetElem_Fallback::Compiler stubCompiler(cx, /* hasReceiver = */ true);
     if (!emitOpIC(stubCompiler.getStub(&stubSpace_)))
         return false;
@@ -2351,31 +2351,31 @@ BaselineCompiler::emit_JSOP_STRICTSETELE
     return emit_JSOP_SETELEM();
 }
 
 bool
 BaselineCompiler::emit_JSOP_SETELEM_SUPER()
 {
     bool strict = IsCheckStrictOp(JSOp(*pc));
 
-    // Incoming stack is |propval, receiver, obj, rval|. We need to shuffle
+    // Incoming stack is |receiver, propval, obj, rval|. We need to shuffle
     // stack to leave rval when operation is complete.
 
-    // Pop rval into R0, then load propval into R1 and replace with rval.
+    // Pop rval into R0, then load receiver into R1 and replace with rval.
     frame.popRegsAndSync(1);
     masm.loadValue(frame.addressOfStackValue(frame.peek(-3)), R1);
     masm.storeValue(R0, frame.addressOfStackValue(frame.peek(-3)));
 
     prepareVMCall();
 
     pushArg(Imm32(strict));
-    masm.loadValue(frame.addressOfStackValue(frame.peek(-2)), R2);
-    pushArg(R2); // receiver
+    pushArg(R1); // receiver
     pushArg(R0); // rval
-    pushArg(R1); // propval
+    masm.loadValue(frame.addressOfStackValue(frame.peek(-2)), R0);
+    pushArg(R0); // propval
     masm.unboxObject(frame.addressOfStackValue(frame.peek(-1)), R0.scratchReg());
     pushArg(R0.scratchReg()); // obj
 
     if (!callVM(SetObjectElementInfo))
         return false;
 
     frame.popn(2);
     return true;
@@ -2722,17 +2722,16 @@ BaselineCompiler::getEnvironmentCoordina
 }
 
 Address
 BaselineCompiler::getEnvironmentCoordinateAddressFromObject(Register objReg, Register reg)
 {
     EnvironmentCoordinate ec(pc);
     Shape* shape = EnvironmentCoordinateToEnvironmentShape(script, pc);
 
-    Address addr;
     if (shape->numFixedSlots() <= ec.slot()) {
         masm.loadPtr(Address(objReg, NativeObject::offsetOfSlots()), reg);
         return Address(reg, (ec.slot() - shape->numFixedSlots()) * sizeof(Value));
     }
 
     return Address(objReg, NativeObject::getFixedSlotOffset(ec.slot()));
 }
 
--- a/js/src/jit/BaselineIC.cpp
+++ b/js/src/jit/BaselineIC.cpp
@@ -634,17 +634,17 @@ DoGetElemFallback(JSContext* cx, Baselin
     if (!attached && !isTemporarilyUnoptimizable)
         stub->noteUnoptimizableAccess();
 
     return true;
 }
 
 static bool
 DoGetElemSuperFallback(JSContext* cx, BaselineFrame* frame, ICGetElem_Fallback* stub_,
-                       HandleValue lhs, HandleValue receiver, HandleValue rhs,
+                       HandleValue lhs, HandleValue rhs, HandleValue receiver,
                        MutableHandleValue res)
 {
     // This fallback stub may trigger debug mode toggling.
     DebugModeOSRVolatileStub<ICGetElem_Fallback*> stub(frame, stub_);
 
     RootedScript script(cx, frame->script());
     jsbytecode* pc = stub->icEntry()->pc(frame->script());
     StackTypeSet* types = TypeScript::BytecodeTypes(script, pc);
@@ -729,27 +729,27 @@ ICGetElem_Fallback::Compiler::generateSt
     MOZ_ASSERT(engine_ == Engine::Baseline);
     MOZ_ASSERT(R0 == JSReturnOperand);
 
     // Restore the tail call register.
     EmitRestoreTailCallReg(masm);
 
     // Super property getters use a |this| that differs from base object
     if (hasReceiver_) {
-        // State: index in R0, receiver in R1, obj on the stack
+        // State: receiver in R0, index in R1, obj on the stack
 
         // Ensure stack is fully synced for the expression decompiler.
-        // We need: index, receiver, obj
+        // We need: receiver, index, obj
         masm.pushValue(R0);
         masm.pushValue(R1);
         masm.pushValue(Address(masm.getStackPointer(), sizeof(Value) * 2));
 
         // Push arguments.
-        masm.pushValue(R0); // Index
-        masm.pushValue(R1); // Reciver
+        masm.pushValue(R0); // Receiver
+        masm.pushValue(R1); // Index
         masm.pushValue(Address(masm.getStackPointer(), sizeof(Value) * 5)); // Obj
         masm.push(ICStubReg);
         pushStubPayload(masm, R0.scratchReg());
 
         return tailCallVM(DoGetElemSuperFallbackInfo, masm);
     }
 
     // Ensure stack is fully synced for the expression decompiler.
--- a/js/src/jit/CacheIRCompiler.cpp
+++ b/js/src/jit/CacheIRCompiler.cpp
@@ -1646,17 +1646,17 @@ CacheIRCompiler::emitGuardFunctionProtot
     FailurePath* failure;
     if (!addFailurePath(&failure))
         return false;
 
     // Guard on the .prototype object.
     StubFieldOffset slot(reader.stubOffset(), StubField::Type::RawWord);
     masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
     emitLoadStubField(slot, scratch2);
-    BaseValueIndex prototypeSlot(scratch1, scratch2);
+    BaseObjectSlotIndex prototypeSlot(scratch1, scratch2);
     masm.branchTestObject(Assembler::NotEqual, prototypeSlot, failure->label());
     masm.unboxObject(prototypeSlot, scratch1);
     masm.branchPtr(Assembler::NotEqual,
                    prototypeObject,
                    scratch1, failure->label());
 
     return true;
 }
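BaseObjectSlotIndex here (and BaseObjectElementIndex in the CodeGenerator changes below) replaces a raw BaseValueIndex/BaseIndex with an operand type that bakes in the Value-sized scale. A sketch of the address arithmetic these operands abbreviate, assuming 8-byte NaN-boxed Values, which is what the TimesEight scale being dropped elsewhere encoded:

#include <cassert>
#include <cstdint>

struct Value { uint64_t bits; };  // JS::Value is one 64-bit NaN-boxed word
static_assert(sizeof(Value) == 8, "the TimesEight scale matches sizeof(Value)");

// Slot/element i lives at base + i * sizeof(Value) + adjustment.
uintptr_t valueAddress(uintptr_t base, uint32_t index, int32_t adjustment = 0) {
    return base + uintptr_t(index) * sizeof(Value) + adjustment;
}

int main() {
    assert(valueAddress(0x1000, 3) == 0x1000 + 3 * 8);
    return 0;
}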
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -1481,32 +1481,48 @@ PrepareAndExecuteRegExp(JSContext* cx, M
         masm.branchLatin1String(input, &done);
 
         // Check if |lastIndex > 0 && lastIndex < input->length()|.
         // lastIndex should already have no sign here.
         masm.branchTest32(Assembler::Zero, lastIndex, lastIndex, &done);
         masm.loadStringLength(input, temp2);
         masm.branch32(Assembler::AboveOrEqual, lastIndex, temp2, &done);
 
+        // For TrailSurrogateMin ≤ x ≤ TrailSurrogateMax and
+        // LeadSurrogateMin ≤ x ≤ LeadSurrogateMax, the following
+        // equations hold.
+        //
+        //    SurrogateMin ≤ x ≤ SurrogateMax
+        // <> SurrogateMin ≤ x ≤ SurrogateMin + 2^10 - 1
+        // <> ((x - SurrogateMin) >>> 10) = 0    where >>> is an unsigned-shift
+        // See Hacker's Delight, section 4-1 for details.
+        //
+        //    ((x - SurrogateMin) >>> 10) = 0
+        // <> floor((x - SurrogateMin) / 1024) = 0
+        // <> floor((x / 1024) - (SurrogateMin / 1024)) = 0
+        // <> floor(x / 1024) = SurrogateMin / 1024
+        // <> floor(x / 1024) * 1024 = SurrogateMin
+        // <> (x >>> 10) << 10 = SurrogateMin
+        // <> x & ~(2^10 - 1) = SurrogateMin
+
+        constexpr char16_t SurrogateMask = 0xFC00;
+
         // Check if input[lastIndex] is trail surrogate.
         masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
-        masm.computeEffectiveAddress(BaseIndex(temp2, lastIndex, TimesTwo), temp3);
-        masm.load16ZeroExtend(Address(temp3, 0), temp3);
-
-        masm.branch32(Assembler::Below, temp3, Imm32(unicode::TrailSurrogateMin), &done);
-        masm.branch32(Assembler::Above, temp3, Imm32(unicode::TrailSurrogateMax), &done);
+        masm.load16ZeroExtend(BaseIndex(temp2, lastIndex, TimesTwo), temp3);
+
+        masm.and32(Imm32(SurrogateMask), temp3);
+        masm.branch32(Assembler::NotEqual, temp3, Imm32(unicode::TrailSurrogateMin), &done);
 
         // Check if input[lastIndex-1] is lead surrogate.
-        masm.move32(lastIndex, temp3);
-        masm.sub32(Imm32(1), temp3);
-        masm.computeEffectiveAddress(BaseIndex(temp2, temp3, TimesTwo), temp3);
-        masm.load16ZeroExtend(Address(temp3, 0), temp3);
-
-        masm.branch32(Assembler::Below, temp3, Imm32(unicode::LeadSurrogateMin), &done);
-        masm.branch32(Assembler::Above, temp3, Imm32(unicode::LeadSurrogateMax), &done);
+        masm.load16ZeroExtend(BaseIndex(temp2, lastIndex, TimesTwo, -int32_t(sizeof(char16_t))),
+                              temp3);
+
+        masm.and32(Imm32(SurrogateMask), temp3);
+        masm.branch32(Assembler::NotEqual, temp3, Imm32(unicode::LeadSurrogateMin), &done);
 
         // Move lastIndex to lead surrogate.
         masm.subPtr(Imm32(1), lastIndex);
 
         masm.bind(&done);
     }
 
     if (mode == RegExpShared::Normal) {
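The new mask check collapses each pair of range branches into one masked compare: both surrogate ranges span exactly 2^10 code units and start on a 2^10 boundary, so x lies in a range iff (x & 0xFC00) equals that range's minimum, as the comment derives. A standalone sketch that exhaustively verifies the equivalence (plain C++, not the masm code above):

#include <cassert>
#include <cstdint>

constexpr uint16_t SurrogateMask = 0xFC00;      // ~(2^10 - 1) over 16 bits
constexpr uint16_t LeadSurrogateMin = 0xD800;   // lead range: 0xD800..0xDBFF
constexpr uint16_t TrailSurrogateMin = 0xDC00;  // trail range: 0xDC00..0xDFFF

bool isLeadSurrogate(char16_t x) { return (x & SurrogateMask) == LeadSurrogateMin; }
bool isTrailSurrogate(char16_t x) { return (x & SurrogateMask) == TrailSurrogateMin; }

int main() {
    for (uint32_t x = 0; x <= 0xFFFF; x++) {
        assert(isLeadSurrogate(char16_t(x)) == (x >= 0xD800 && x <= 0xDBFF));
        assert(isTrailSurrogate(char16_t(x)) == (x >= 0xDC00 && x <= 0xDFFF));
    }
    return 0;
}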
@@ -3686,17 +3702,17 @@ CodeGenerator::visitSetPropertyPolymorph
 
 void
 CodeGenerator::visitElements(LElements* lir)
 {
     Address elements(ToRegister(lir->object()), NativeObject::offsetOfElements());
     masm.loadPtr(elements, ToRegister(lir->output()));
 }
 
-typedef bool (*ConvertElementsToDoublesFn)(JSContext*, uintptr_t);
+typedef void (*ConvertElementsToDoublesFn)(JSContext*, uintptr_t);
 static const VMFunction ConvertElementsToDoublesInfo =
     FunctionInfo<ConvertElementsToDoublesFn>(ObjectElements::ConvertElementsToDoubles,
                                              "ObjectElements::ConvertElementsToDoubles");
 
 void
 CodeGenerator::visitConvertElementsToDoubles(LConvertElementsToDoubles* lir)
 {
     Register elements = ToRegister(lir->elements());
@@ -7576,17 +7592,16 @@ CodeGenerator::visitSignDI(LSignDI* ins)
 
     // Bailout for NaN and negative zero.
     Label bailout;
     masm.bind(&zeroOrNaN);
     masm.branchDouble(Assembler::DoubleUnordered, input, input, &bailout);
 
     // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0
     // is -Infinity instead of Infinity.
-    Label isNegInf;
     masm.loadConstantDouble(1.0, temp);
     masm.divDouble(input, temp);
     masm.branchDouble(Assembler::DoubleLessThan, temp, input, &bailout);
     masm.move32(Imm32(0), output);
 
     bailoutFrom(&bailout, ins->snapshot());
 
     masm.bind(&done);
@@ -9257,17 +9272,17 @@ void
 CodeGenerator::emitStoreHoleCheck(Register elements, const LAllocation* index,
                                   int32_t offsetAdjustment, LSnapshot* snapshot)
 {
     Label bail;
     if (index->isConstant()) {
         Address dest(elements, ToInt32(index) * sizeof(js::Value) + offsetAdjustment);
         masm.branchTestMagic(Assembler::Equal, dest, &bail);
     } else {
-        BaseIndex dest(elements, ToRegister(index), TimesEight, offsetAdjustment);
+        BaseObjectElementIndex dest(elements, ToRegister(index), offsetAdjustment);
         masm.branchTestMagic(Assembler::Equal, dest, &bail);
     }
     bailoutFrom(&bail, snapshot);
 }
 
 static ConstantOrRegister
 ToConstantOrRegister(const LAllocation* value, MIRType valueType)
 {
@@ -9282,17 +9297,17 @@ CodeGenerator::emitStoreElementTyped(con
                                      Register elements, const LAllocation* index,
                                      int32_t offsetAdjustment)
 {
     ConstantOrRegister v = ToConstantOrRegister(value, valueType);
     if (index->isConstant()) {
         Address dest(elements, ToInt32(index) * sizeof(js::Value) + offsetAdjustment);
         masm.storeUnboxedValue(v, valueType, dest, elementType);
     } else {
-        BaseIndex dest(elements, ToRegister(index), TimesEight, offsetAdjustment);
+        BaseObjectElementIndex dest(elements, ToRegister(index), offsetAdjustment);
         masm.storeUnboxedValue(v, valueType, dest, elementType);
     }
 }
 
 void
 CodeGenerator::visitStoreElementT(LStoreElementT* store)
 {
     Register elements = ToRegister(store->elements());
@@ -9322,18 +9337,18 @@ CodeGenerator::visitStoreElementV(LStore
     if (lir->mir()->needsHoleCheck())
         emitStoreHoleCheck(elements, index, lir->mir()->offsetAdjustment(), lir->snapshot());
 
     if (lir->index()->isConstant()) {
         Address dest(elements,
                      ToInt32(lir->index()) * sizeof(js::Value) + lir->mir()->offsetAdjustment());
         masm.storeValue(value, dest);
     } else {
-        BaseIndex dest(elements, ToRegister(lir->index()), TimesEight,
-                       lir->mir()->offsetAdjustment());
+        BaseObjectElementIndex dest(elements, ToRegister(lir->index()),
+                                    lir->mir()->offsetAdjustment());
         masm.storeValue(value, dest);
     }
 }
 
 template <typename T> void
 CodeGenerator::emitStoreElementHoleT(T* lir)
 {
     static_assert(std::is_same<T, LStoreElementHoleT>::value || std::is_same<T, LFallibleStoreElementT>::value,
@@ -9355,17 +9370,17 @@ CodeGenerator::emitStoreElementHoleT(T* 
 
     if (std::is_same<T, LFallibleStoreElementT>::value) {
         // If the object might be non-extensible, check for frozen elements and
         // holes.
         Address flags(elements, ObjectElements::offsetOfFlags());
         masm.branchTest32(Assembler::NonZero, flags, Imm32(ObjectElements::FROZEN),
                           ool->callStub());
         if (lir->toFallibleStoreElementT()->mir()->needsHoleCheck()) {
-            masm.branchTestMagic(Assembler::Equal, BaseValueIndex(elements, index),
+            masm.branchTestMagic(Assembler::Equal, BaseObjectElementIndex(elements, index),
                                  ool->callStub());
         }
     }
 
     masm.bind(ool->rejoinStore());
     emitStoreElementTyped(lir->value(), lir->mir()->value()->type(), lir->mir()->elementType(),
                           elements, lir->index(), 0);
 
@@ -9398,26 +9413,26 @@ CodeGenerator::emitStoreElementHoleV(T* 
 
     if (std::is_same<T, LFallibleStoreElementV>::value) {
         // If the object might be non-extensible, check for frozen elements and
         // holes.
         Address flags(elements, ObjectElements::offsetOfFlags());
         masm.branchTest32(Assembler::NonZero, flags, Imm32(ObjectElements::FROZEN),
                           ool->callStub());
         if (lir->toFallibleStoreElementV()->mir()->needsHoleCheck()) {
-            masm.branchTestMagic(Assembler::Equal, BaseValueIndex(elements, index),
+            masm.branchTestMagic(Assembler::Equal, BaseObjectElementIndex(elements, index),
                                  ool->callStub());
         }
     }
 
     if (lir->mir()->needsBarrier())
         emitPreBarrier(elements, lir->index(), 0);
 
     masm.bind(ool->rejoinStore());
-    masm.storeValue(value, BaseIndex(elements, index, TimesEight));
+    masm.storeValue(value, BaseObjectElementIndex(elements, index));
 
     masm.bind(ool->rejoin());
 }
 
 void
 CodeGenerator::visitStoreElementHoleV(LStoreElementHoleV* lir)
 {
     emitStoreElementHoleV(lir);
@@ -9680,17 +9695,17 @@ CodeGenerator::emitArrayPopShift(LInstru
         masm.bind(&notEmpty);
     } else {
         masm.branchTest32(Assembler::Zero, lengthTemp, lengthTemp, ool->entry());
     }
 
     masm.sub32(Imm32(1), lengthTemp);
 
     if (mir->mode() == MArrayPopShift::Pop) {
-        BaseIndex addr(elementsTemp, lengthTemp, TimesEight);
+        BaseObjectElementIndex addr(elementsTemp, lengthTemp);
         masm.loadElementTypedOrValue(addr, out, mir->needsHoleCheck(), ool->entry());
     } else {
         MOZ_ASSERT(mir->mode() == MArrayPopShift::Shift);
         Address addr(elementsTemp, 0);
         masm.loadElementTypedOrValue(addr, out, mir->needsHoleCheck(), ool->entry());
     }
 
     // Handle the failure cases when the array length is non-writable or the
@@ -9763,17 +9778,17 @@ CodeGenerator::emitArrayPush(LInstructio
     Address initLength(elementsTemp, ObjectElements::offsetOfInitializedLength());
     masm.branch32(Assembler::NotEqual, initLength, length, ool->entry());
 
     // Guard length < capacity.
     Address capacity(elementsTemp, ObjectElements::offsetOfCapacity());
     masm.spectreBoundsCheck32(length, capacity, spectreTemp, ool->entry());
 
     // Do the store.
-    masm.storeConstantOrRegister(value, BaseIndex(elementsTemp, length, TimesEight));
+    masm.storeConstantOrRegister(value, BaseObjectElementIndex(elementsTemp, length));
 
     masm.add32(Imm32(1), length);
 
     // Update length and initialized length.
     masm.store32(length, Address(elementsTemp, ObjectElements::offsetOfLength()));
     masm.store32(length, Address(elementsTemp, ObjectElements::offsetOfInitializedLength()));
 
     masm.bind(ool->rejoin());
@@ -11418,18 +11433,18 @@ void
 CodeGenerator::visitLoadElementT(LLoadElementT* lir)
 {
     Register elements = ToRegister(lir->elements());
     const LAllocation* index = lir->index();
     if (index->isConstant()) {
         int32_t offset = ToInt32(index) * sizeof(js::Value) + lir->mir()->offsetAdjustment();
         emitLoadElementT(lir, Address(elements, offset));
     } else {
-        emitLoadElementT(lir, BaseIndex(elements, ToRegister(index), TimesEight,
-                                        lir->mir()->offsetAdjustment()));
+        emitLoadElementT(lir, BaseObjectElementIndex(elements, ToRegister(index),
+                                                     lir->mir()->offsetAdjustment()));
     }
 }
 
 void
 CodeGenerator::visitLoadElementV(LLoadElementV* load)
 {
     Register elements = ToRegister(load->elements());
     const ValueOperand out = ToOutValue(load);
@@ -11564,17 +11579,17 @@ CodeGenerator::visitLoadUnboxedScalar(LL
 {
     Register elements = ToRegister(lir->elements());
     Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
     AnyRegister out = ToAnyRegister(lir->output());
 
     const MLoadUnboxedScalar* mir = lir->mir();
 
     Scalar::Type readType = mir->readType();
-    int width = Scalar::byteSize(mir->storageType());
+    size_t width = Scalar::byteSize(mir->storageType());
     bool canonicalizeDouble = mir->canonicalizeDoubles();
 
     Label fail;
     if (lir->index()->isConstant()) {
         Address source(elements, ToInt32(lir->index()) * width + mir->offsetAdjustment());
         masm.loadFromTypedArray(readType, source, out, temp, &fail, canonicalizeDouble);
     } else {
         BaseIndex source(elements, ToRegister(lir->index()), ScaleFromElemWidth(width),
@@ -11601,17 +11616,17 @@ CodeGenerator::visitLoadTypedArrayElemen
     // Load undefined if index >= length.
     Label outOfBounds, done;
     masm.spectreBoundsCheck32(index, scratch, scratch2, &outOfBounds);
 
     // Load the elements vector.
     masm.loadPtr(Address(object, TypedArrayObject::dataOffset()), scratch);
 
     Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
+    size_t width = Scalar::byteSize(arrayType);
     Label fail;
     BaseIndex source(scratch, index, ScaleFromElemWidth(width));
     masm.loadFromTypedArray(arrayType, source, out, lir->mir()->allowDouble(),
                             out.scratchReg(), &fail);
     masm.jump(&done);
 
     masm.bind(&outOfBounds);
     masm.moveValue(UndefinedValue(), out);
@@ -11875,17 +11890,17 @@ CodeGenerator::visitStoreUnboxedScalar(L
 {
     Register elements = ToRegister(lir->elements());
     const LAllocation* value = lir->value();
 
     const MStoreUnboxedScalar* mir = lir->mir();
 
     Scalar::Type writeType = mir->writeType();
 
-    int width = Scalar::byteSize(mir->storageType());
+    size_t width = Scalar::byteSize(mir->storageType());
 
     if (lir->index()->isConstant()) {
         Address dest(elements, ToInt32(lir->index()) * width + mir->offsetAdjustment());
         StoreToTypedArray(masm, writeType, value, dest);
     } else {
         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width),
                        mir->offsetAdjustment());
         StoreToTypedArray(masm, writeType, value, dest);
@@ -11894,17 +11909,17 @@ CodeGenerator::visitStoreUnboxedScalar(L
 
 void
 CodeGenerator::visitStoreTypedArrayElementHole(LStoreTypedArrayElementHole* lir)
 {
     Register elements = ToRegister(lir->elements());
     const LAllocation* value = lir->value();
 
     Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
+    size_t width = Scalar::byteSize(arrayType);
 
     Register index = ToRegister(lir->index());
     const LAllocation* length = lir->length();
     Register spectreTemp = ToTempRegisterOrInvalid(lir->spectreTemp());
 
     Label skip;
     if (length->isRegister())
         masm.spectreBoundsCheck32(index, ToRegister(length), spectreTemp, &skip);
@@ -12048,17 +12063,17 @@ CodeGenerator::visitInArray(LInArray* li
         Label negativeIntCheck;
         Register index = ToRegister(lir->index());
 
         if (mir->needsNegativeIntCheck())
             failedInitLength = &negativeIntCheck;
 
         masm.branch32(Assembler::BelowOrEqual, initLength, index, failedInitLength);
         if (mir->needsHoleCheck()) {
-            BaseIndex address = BaseIndex(elements, ToRegister(lir->index()), TimesEight);
+            BaseObjectElementIndex address(elements, ToRegister(lir->index()));
             masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
         }
         masm.jump(&trueBranch);
 
         if (mir->needsNegativeIntCheck()) {
             masm.bind(&negativeIntCheck);
             ool = oolCallVM(OperatorInIInfo, lir,
                             ArgList(index, ToRegister(lir->object())),
@@ -12445,24 +12460,22 @@ CodeGenerator::emitIsCallableOrConstruct
     //   is<JSFunction>() || (getClass()->cOps && getClass()->cOps->call).
     // An object is constructor iff:
     //  ((is<JSFunction>() && as<JSFunction>().isConstructor) ||
     //   (getClass()->cOps && getClass()->cOps->construct)).
     masm.branchPtr(Assembler::NotEqual, output, ImmPtr(&JSFunction::class_), &notFunction);
     if (mode == Callable) {
         masm.move32(Imm32(1), output);
     } else {
-        Label notConstructor;
+        static_assert(mozilla::IsPowerOfTwo(unsigned(JSFunction::CONSTRUCTOR)),
+                      "JSFunction::CONSTRUCTOR has only one bit set");
+
         masm.load16ZeroExtend(Address(object, JSFunction::offsetOfFlags()), output);
-        masm.and32(Imm32(JSFunction::CONSTRUCTOR), output);
-        masm.branchTest32(Assembler::Zero, output, output, &notConstructor);
-        masm.move32(Imm32(1), output);
-        masm.jump(&done);
-        masm.bind(&notConstructor);
-        masm.move32(Imm32(0), output);
+        masm.rshift32(Imm32(mozilla::FloorLog2(JSFunction::CONSTRUCTOR)), output);
+        masm.and32(Imm32(1), output);
     }
     masm.jump(&done);
 
     masm.bind(&notFunction);
     masm.branchPtr(Assembler::NonZero, Address(output, offsetof(js::Class, cOps)),
                    ImmPtr(nullptr), &hasCOps);
     masm.move32(Imm32(0), output);
     masm.jump(&done);
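The rewritten constructor check above replaces a compare-and-branch with a shift-and-mask, which only works because CONSTRUCTOR is a single-bit flag, hence the new static_assert. A sketch of the same computation in plain C++ (the 0x0002 flag value is a placeholder assumption, not the real JSFunction layout):

#include <cassert>
#include <cstdint>

constexpr uint16_t CONSTRUCTOR = 0x0002;     // assumed single-bit flag value
constexpr unsigned FloorLog2(uint32_t x) {   // position of the highest set bit
    return x <= 1 ? 0 : 1 + FloorLog2(x >> 1);
}

uint32_t isConstructor(uint16_t flags) {
    // load16ZeroExtend; rshift32; and32 -- yields exactly 0 or 1, branch-free.
    return (uint32_t(flags) >> FloorLog2(CONSTRUCTOR)) & 1;
}

int main() {
    assert(isConstructor(CONSTRUCTOR | 0x0001) == 1);
    assert(isConstructor(0x0001) == 0);
    return 0;
}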
--- a/js/src/jit/IonBuilder.cpp
+++ b/js/src/jit/IonBuilder.cpp
@@ -9760,18 +9760,18 @@ IonBuilder::jsop_getprop_super(PropertyN
     TemporaryTypeSet* types = bytecodeTypes(pc);
     return pushTypeBarrier(ins, types, BarrierKind::TypeSet);
 }
 
 AbortReasonOr<Ok>
 IonBuilder::jsop_getelem_super()
 {
     MDefinition* obj = current->pop();
+    MDefinition* id = current->pop();
     MDefinition* receiver = current->pop();
-    MDefinition* id = current->pop();
 
 #if defined(JS_CODEGEN_X86)
     if (instrumentedProfiling())
         return abort(AbortReason::Disable, "profiling functions with GETELEM_SUPER is disabled on x86");
 #endif
 
     auto* ins = MGetPropSuperCache::New(alloc(), obj, receiver, id);
     current->add(ins);
@@ -12252,18 +12252,20 @@ IonBuilder::jsop_setarg(uint32_t arg)
                   script()->baselineScript()->modifiesArguments());
     MDefinition* val = current->peek(-1);
 
     // If an arguments object is in use, and it aliases formals, then all SETARGs
     // must go through the arguments object.
     if (info().argsObjAliasesFormals()) {
         if (needsPostBarrier(val))
             current->add(MPostWriteBarrier::New(alloc(), current->argumentsObject(), val));
-        current->add(MSetArgumentsObjectArg::New(alloc(), current->argumentsObject(),
-                                                 GET_ARGNO(pc), val));
+        auto* ins = MSetArgumentsObjectArg::New(alloc(), current->argumentsObject(),
+                                                GET_ARGNO(pc), val);
+        current->add(ins);
+        MOZ_TRY(resumeAfter(ins));
         return Ok();
     }
 
     // :TODO: if hasArguments() is true, and the script has a JSOP_SETARG, then
     // convert all arg accesses to go through the arguments object. (see Bug 957475)
     if (info().hasArguments())
         return abort(AbortReason::Disable, "NYI: arguments & setarg.");
 
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -3203,18 +3203,20 @@ MacroAssembler::linkExitFrame(Register c
 // ===============================================================
 // Branch functions
 
 void
 MacroAssembler::branchIfNotInterpretedConstructor(Register fun, Register scratch, Label* label)
 {
     // 16-bit loads are slow and unaligned 32-bit loads may be too so
     // perform an aligned 32-bit load and adjust the bitmask accordingly.
-    MOZ_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
-    MOZ_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
+    static_assert(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0,
+                  "JSFunction nargs are aligned to uint32_t");
+    static_assert(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2,
+                  "JSFunction nargs and flags are stored next to each other");
 
     // First, ensure it's a scripted function.
     load32(Address(fun, JSFunction::offsetOfNargs()), scratch);
     int32_t bits = IMM32_16ADJ(JSFunction::INTERPRETED);
     branchTest32(Assembler::Zero, scratch, Imm32(bits), label);
 
     // Check if the CONSTRUCTOR bit is set.
     bits = IMM32_16ADJ(JSFunction::CONSTRUCTOR);
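The static_asserts above pin down why the single 32-bit load is valid: nargs and flags are adjacent 16-bit fields in one aligned 32-bit word, so a flag mask only needs to be shifted into the right half. A sketch of the idea (the little-endian layout and the 0x0001 flag value are assumptions; the real adjustment is the IMM32_16ADJ macro):

#include <cassert>
#include <cstdint>
#include <cstring>

struct Fun { uint16_t nargs; uint16_t flags; };  // flags at nargs offset + 2

constexpr uint32_t Imm32_16Adj(uint32_t x) { return x << 16; }  // little-endian

int main() {
    Fun f{2, 0x0001};  // 0x0001 stands in for an INTERPRETED-style bit
    uint32_t word;
    std::memcpy(&word, &f, sizeof(word));  // one aligned 32-bit load
    assert((word & Imm32_16Adj(0x0001)) != 0);  // flag bit set
    assert((word & Imm32_16Adj(0x0002)) == 0);  // flag bit clear
    return 0;
}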
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -1331,27 +1331,27 @@ class Assembler : public AssemblerShared
     static uint32_t NopFill;
     static uint32_t GetNopFill();
     static uint32_t AsmPoolMaxOffset;
     static uint32_t GetPoolMaxOffset();
 
   protected:
     // Structure for fixing up pc-relative loads/jumps when the machine code
     // gets moved (executable copy, gc, etc.).
-    struct RelativePatch
+    class RelativePatch
     {
         void* target_;
-        Relocation::Kind kind_;
+        RelocationKind kind_;
 
       public:
-        RelativePatch(void* target, Relocation::Kind kind)
+        RelativePatch(void* target, RelocationKind kind)
           : target_(target), kind_(kind)
         { }
         void* target() const { return target_; }
-        Relocation::Kind kind() const { return kind_; }
+        RelocationKind kind() const { return kind_; }
     };
 
     // TODO: this should actually be a pool-like object. It is currently a big
     // hack, and probably shouldn't exist.
     js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
 
     CompactBufferWriter jumpRelocations_;
     CompactBufferWriter dataRelocations_;
@@ -1771,51 +1771,51 @@ class Assembler : public AssemblerShared
     bool nextLink(BufferOffset b, BufferOffset* next);
     void bind(Label* label, BufferOffset boff = BufferOffset());
     void bind(RepatchLabel* label);
     uint32_t currentOffset() {
         return nextOffset().getOffset();
     }
     void retarget(Label* label, Label* target);
     // I'm going to pretend this doesn't exist for now.
-    void retarget(Label* label, void* target, Relocation::Kind reloc);
+    void retarget(Label* label, void* target, RelocationKind reloc);
 
     static void Bind(uint8_t* rawCode, const CodeLabel& label);
 
     void as_bkpt();
     BufferOffset as_illegal_trap();
 
   public:
     static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
     static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
 
     void assertNoGCThings() const {
 #ifdef DEBUG
         MOZ_ASSERT(dataRelocations_.length() == 0);
         for (auto& j : jumps_)
-            MOZ_ASSERT(j.kind() == Relocation::HARDCODED);
+            MOZ_ASSERT(j.kind() == RelocationKind::HARDCODED);
 #endif
     }
 
     static bool SupportsFloatingPoint() {
         return HasVFP();
     }
     static bool SupportsUnalignedAccesses() {
         return HasARMv7();
     }
     static bool SupportsSimd() {
         return js::jit::SupportsSimd;
     }
 
     static bool HasRoundInstruction(RoundingMode mode) { return false; }
 
   protected:
-    void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind) {
+    void addPendingJump(BufferOffset src, ImmPtr target, RelocationKind kind) {
         enoughMemory_ &= jumps_.append(RelativePatch(target.value, kind));
-        if (kind == Relocation::JITCODE)
+        if (kind == RelocationKind::JITCODE)
             writeRelocation(src);
     }
 
   public:
     // The buffer is about to be linked, make sure any constant pools or excess
     // bookkeeping has been flushed to the instruction stream.
     void flush() {
         MOZ_ASSERT(!isFinished);
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -1801,17 +1801,17 @@ CodeGenerator::visitCompareExchangeTyped
     Register elements = ToRegister(lir->elements());
     AnyRegister output = ToAnyRegister(lir->output());
     Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
 
     Register oldval = ToRegister(lir->oldval());
     Register newval = ToRegister(lir->newval());
 
     Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
+    size_t width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address dest(elements, ToInt32(lir->index()) * width);
         masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval, temp, output);
     } else {
         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
         masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval, temp, output);
     }
@@ -1822,17 +1822,17 @@ CodeGenerator::visitAtomicExchangeTypedA
 {
     Register elements = ToRegister(lir->elements());
     AnyRegister output = ToAnyRegister(lir->output());
     Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
 
     Register value = ToRegister(lir->value());
 
     Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
+    size_t width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address dest(elements, ToInt32(lir->index()) * width);
         masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp, output);
     } else {
         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
         masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp, output);
     }
@@ -1845,17 +1845,17 @@ CodeGenerator::visitAtomicTypedArrayElem
 
     AnyRegister output = ToAnyRegister(lir->output());
     Register elements = ToRegister(lir->elements());
     Register flagTemp = ToRegister(lir->temp1());
     Register outTemp = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
     Register value = ToRegister(lir->value());
 
     Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
+    size_t width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address mem(elements, ToInt32(lir->index()) * width);
         masm.atomicFetchOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
                              mem, flagTemp, outTemp, output);
     } else {
         BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
         masm.atomicFetchOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
@@ -1867,17 +1867,17 @@ void
 CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir)
 {
     MOZ_ASSERT(!lir->mir()->hasUses());
 
     Register elements = ToRegister(lir->elements());
     Register flagTemp = ToRegister(lir->flagTemp());
     Register value = ToRegister(lir->value());
     Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
+    size_t width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address mem(elements, ToInt32(lir->index()) * width);
         masm.atomicEffectOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
                               mem, flagTemp);
     } else {
         BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
         masm.atomicEffectOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -3963,17 +3963,17 @@ MacroAssemblerARMCompat::toggledJump(Lab
     CodeOffset ret(b.getOffset());
     return ret;
 }
 
 CodeOffset
 MacroAssemblerARMCompat::toggledCall(JitCode* target, bool enabled)
 {
     BufferOffset bo = nextOffset();
-    addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
+    addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
     ScratchRegisterScope scratch(asMasm());
     ma_movPatchable(ImmPtr(target->raw()), scratch, Always);
     if (enabled)
         ma_blx(scratch);
     else
         ma_nop();
     return CodeOffset(bo.getOffset());
 }
@@ -4536,17 +4536,17 @@ MacroAssembler::call(ImmWord imm)
 {
     call(ImmPtr((void*)imm.value));
 }
 
 void
 MacroAssembler::call(ImmPtr imm)
 {
     BufferOffset bo = m_buffer.nextOffset();
-    addPendingJump(bo, imm, Relocation::HARDCODED);
+    addPendingJump(bo, imm, RelocationKind::HARDCODED);
     ma_call(imm);
 }
 
 void
 MacroAssembler::call(wasm::SymbolicAddress imm)
 {
     movePtr(imm, CallReg);
     call(CallReg);
@@ -4558,17 +4558,17 @@ MacroAssembler::call(const Address& addr
     loadPtr(addr, CallReg);
     call(CallReg);
 }
 
 void
 MacroAssembler::call(JitCode* c)
 {
     BufferOffset bo = m_buffer.nextOffset();
-    addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
+    addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
     ScratchRegisterScope scratch(*this);
     ma_movPatchable(ImmPtr(c->raw()), scratch, Always);
     callJitNoProfiler(scratch);
 }
 
 CodeOffset
 MacroAssembler::callWithPatch()
 {
@@ -5018,17 +5018,17 @@ MacroAssembler::storeUnboxedValue(const 
         storePayload(value.reg().typedReg().gpr(), dest);
 }
 
 template void
 MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
                                   const Address& dest, MIRType slotType);
 template void
 MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
-                                  const BaseIndex& dest, MIRType slotType);
+                                  const BaseObjectElementIndex& dest, MIRType slotType);
 
 CodeOffset
 MacroAssembler::wasmTrapInstruction()
 {
     return CodeOffset(as_illegal_trap().getOffset());
 }
 
 void
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -567,17 +567,17 @@ class MacroAssemblerARMCompat : public M
         ma_mov(Imm32(imm.value), dest);
     }
     void mov(ImmPtr imm, Register dest) {
         mov(ImmWord(uintptr_t(imm.value)), dest);
     }
 
     void branch(JitCode* c) {
         BufferOffset bo = m_buffer.nextOffset();
-        addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
+        addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
         ScratchRegisterScope scratch(asMasm());
         ma_movPatchable(ImmPtr(c->raw()), scratch, Always);
         ma_bx(scratch);
     }
     void branch(const Register reg) {
         ma_bx(reg);
     }
     void nop() {
--- a/js/src/jit/arm64/Assembler-arm64.cpp
+++ b/js/src/jit/arm64/Assembler-arm64.cpp
@@ -310,52 +310,52 @@ Assembler::bind(RepatchLabel* label)
         return;
     }
     int branchOffset = label->offset();
     Instruction* inst = getInstructionAt(BufferOffset(branchOffset));
     inst->SetImmPCOffsetTarget(inst + nextOffset().getOffset() - branchOffset);
 }
 
 void
-Assembler::addJumpRelocation(BufferOffset src, Relocation::Kind reloc)
+Assembler::addJumpRelocation(BufferOffset src, RelocationKind reloc)
 {
     // Only JITCODE relocations are patchable at runtime.
-    MOZ_ASSERT(reloc == Relocation::JITCODE);
+    MOZ_ASSERT(reloc == RelocationKind::JITCODE);
 
     // The jump relocation table starts with a fixed-width integer pointing
     // to the start of the extended jump table. But, we don't know the
     // actual extended jump table offset yet, so write a 0 which we'll
     // patch later in Assembler::finish().
     if (!jumpRelocations_.length())
         jumpRelocations_.writeFixedUint32_t(0);
 
     // Each entry in the table is an (offset, extendedTableIndex) pair.
     jumpRelocations_.writeUnsigned(src.getOffset());
     jumpRelocations_.writeUnsigned(pendingJumps_.length());
 }
 
 void
-Assembler::addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind reloc)
+Assembler::addPendingJump(BufferOffset src, ImmPtr target, RelocationKind reloc)
 {
     MOZ_ASSERT(target.value != nullptr);
 
-    if (reloc == Relocation::JITCODE)
+    if (reloc == RelocationKind::JITCODE)
         addJumpRelocation(src, reloc);
 
     // This jump is not patchable at runtime. Extended jump table entry requirements
     // cannot be known until finalization, so to be safe, give each jump an entry.
     // This also causes GC tracing of the target.
     enoughMemory_ &= pendingJumps_.append(RelativePatch(src, target.value, reloc));
 }
 
 size_t
-Assembler::addPatchableJump(BufferOffset src, Relocation::Kind reloc)
+Assembler::addPatchableJump(BufferOffset src, RelocationKind reloc)
 {
     MOZ_CRASH("TODO: This is currently unused (and untested)");
-    if (reloc == Relocation::JITCODE)
+    if (reloc == RelocationKind::JITCODE)
         addJumpRelocation(src, reloc);
 
     size_t extendedTableIndex = pendingJumps_.length();
     enoughMemory_ &= pendingJumps_.append(RelativePatch(src, nullptr, reloc));
     return extendedTableIndex;
 }
 
 void
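
A sketch of the jump relocation table layout these arm64 routines describe: a fixed-width uint32 header holding the extended-jump-table offset (written as 0 first, patched at finalization), followed by one (offset, extendedTableIndex) pair per JITCODE jump. This stand-in uses std::vector and fixed-width entries in place of the real CompactBufferWriter's variable-length encoding:

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

class JumpRelocationTable {
    std::vector<uint8_t> bytes_;

    void writeUint32(uint32_t v) {
        uint8_t buf[sizeof(v)];
        std::memcpy(buf, &v, sizeof(v));
        bytes_.insert(bytes_.end(), buf, buf + sizeof(v));
    }

  public:
    // Mirrors addJumpRelocation(): lazily emit the 0 placeholder, then
    // record one (jump offset, extended table index) pair per JITCODE jump.
    void addEntry(uint32_t jumpOffset, uint32_t extendedTableIndex) {
        if (bytes_.empty())
            writeUint32(0); // Header, patched later in finish().
        writeUint32(jumpOffset);
        writeUint32(extendedTableIndex);
    }

    // Mirrors the fixup done at finalization, once the extended jump
    // table's offset is finally known.
    void finish(uint32_t extendedJumpTableOffset) {
        if (!bytes_.empty())
            std::memcpy(bytes_.data(), &extendedJumpTableOffset,
                        sizeof(extendedJumpTableOffset));
    }
};
```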
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -270,26 +270,26 @@ class Assembler : public vixl::Assembler
 
     static bool SupportsFloatingPoint() { return true; }
     static bool SupportsUnalignedAccesses() { return true; }
     static bool SupportsSimd() { return js::jit::SupportsSimd; }
 
     static bool HasRoundInstruction(RoundingMode mode) { return false; }
 
     // Tracks a jump that is patchable after finalization.
-    void addJumpRelocation(BufferOffset src, Relocation::Kind reloc);
+    void addJumpRelocation(BufferOffset src, RelocationKind reloc);
 
   protected:
     // Add a jump whose target is unknown until finalization.
     // The jump may not be patched at runtime.
-    void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind);
+    void addPendingJump(BufferOffset src, ImmPtr target, RelocationKind kind);
 
     // Add a jump whose target is unknown until finalization, and may change
     // thereafter. The jump is patchable at runtime.
-    size_t addPatchableJump(BufferOffset src, Relocation::Kind kind);
+    size_t addPatchableJump(BufferOffset src, RelocationKind kind);
 
   public:
     static uint32_t PatchWrite_NearCallSize() {
         return 4;
     }
 
     static uint32_t NopSize() {
         return 4;
@@ -332,17 +332,17 @@ class Assembler : public vixl::Assembler
 
     static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
     static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
 
     void assertNoGCThings() const {
 #ifdef DEBUG
         MOZ_ASSERT(dataRelocations_.length() == 0);
         for (auto& j : pendingJumps_)
-            MOZ_ASSERT(j.kind == Relocation::HARDCODED);
+            MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
 #endif
     }
 
   public:
     // A jump table entry is 2 instructions, with 8 bytes of raw data.
     static const size_t SizeOfJumpTableEntry = 16;
 
     struct JumpTableEntry
@@ -371,36 +371,36 @@ class Assembler : public vixl::Assembler
                                      const Disassembler::HeapAccess& heapAccess)
     {
         MOZ_CRASH("verifyHeapAccessDisassembly");
     }
 
   protected:
     // Because jumps may be relocated to a target inaccessible by a short jump,
     // each relocatable jump must have a unique entry in the extended jump table.
-    // Valid relocatable targets are of type Relocation::JITCODE.
+    // Valid relocatable targets are of type RelocationKind::JITCODE.
     struct JumpRelocation
     {
         BufferOffset jump; // Offset to the short jump, from the start of the code buffer.
         uint32_t extendedTableIndex; // Unique index within the extended jump table.
 
         JumpRelocation(BufferOffset jump, uint32_t extendedTableIndex)
           : jump(jump), extendedTableIndex(extendedTableIndex)
         { }
     };
 
     // Structure for fixing up pc-relative loads/jumps when the machine
     // code gets moved (executable copy, gc, etc.).
     struct RelativePatch
     {
         BufferOffset offset;
         void* target;
-        Relocation::Kind kind;
+        RelocationKind kind;
 
-        RelativePatch(BufferOffset offset, void* target, Relocation::Kind kind)
+        RelativePatch(BufferOffset offset, void* target, RelocationKind kind)
           : offset(offset), target(target), kind(kind)
         { }
     };
 
     // List of jumps for which the target is either unknown until finalization,
     // or cannot be known due to GC. Each entry here requires a unique entry
     // in the extended jump table, and is patched at finalization.
     js::Vector<RelativePatch, 8, SystemAllocPolicy> pendingJumps_;
--- a/js/src/jit/arm64/CodeGenerator-arm64.cpp
+++ b/js/src/jit/arm64/CodeGenerator-arm64.cpp
@@ -810,17 +810,17 @@ CodeGenerator::visitCompareExchangeTyped
     Register elements = ToRegister(lir->elements());
     AnyRegister output = ToAnyRegister(lir->output());
     Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
 
     Register oldval = ToRegister(lir->oldval());
     Register newval = ToRegister(lir->newval());
 
     Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
+    size_t width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address dest(elements, ToInt32(lir->index()) * width);
         masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval, temp, output);
     } else {
         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
         masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval, temp, output);
     }
@@ -831,17 +831,17 @@ CodeGenerator::visitAtomicExchangeTypedA
 {
     Register elements = ToRegister(lir->elements());
     AnyRegister output = ToAnyRegister(lir->output());
     Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
 
     Register value = ToRegister(lir->value());
 
     Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
+    size_t width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address dest(elements, ToInt32(lir->index()) * width);
         masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp, output);
     } else {
         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
         masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp, output);
     }
--- a/js/src/jit/arm64/MacroAssembler-arm64.cpp
+++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp
@@ -618,17 +618,17 @@ MacroAssembler::call(const Address& addr
 
 void
 MacroAssembler::call(JitCode* c)
 {
     vixl::UseScratchRegisterScope temps(this);
     const ARMRegister scratch64 = temps.AcquireX();
     syncStackPtr();
     BufferOffset off = immPool64(scratch64, uint64_t(c->raw()));
-    addPendingJump(off, ImmPtr(c->raw()), Relocation::JITCODE);
+    addPendingJump(off, ImmPtr(c->raw()), RelocationKind::JITCODE);
     blr(scratch64);
 }
 
 CodeOffset
 MacroAssembler::callWithPatch()
 {
     bl(0, LabelDoc());
     return CodeOffset(currentOffset());
@@ -1041,17 +1041,17 @@ MacroAssembler::storeUnboxedValue(const 
 
 }
 
 template void
 MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
                                   const Address& dest, MIRType slotType);
 template void
 MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
-                                  const BaseIndex& dest, MIRType slotType);
+                                  const BaseObjectElementIndex& dest, MIRType slotType);
 
 void
 MacroAssembler::comment(const char* msg)
 {
     Assembler::comment(msg);
 }
 
 // ========================================================================
--- a/js/src/jit/arm64/MacroAssembler-arm64.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64.h
@@ -671,17 +671,17 @@ class MacroAssemblerCompat : public vixl
         B(label);
     }
     void jump(JitCode* code) {
         branch(code);
     }
     void jump(TrampolinePtr code) {
         syncStackPtr();
         BufferOffset loc = b(-1, LabelDoc()); // The jump target will be patched by executableCopy().
-        addPendingJump(loc, ImmPtr(code.value), Relocation::HARDCODED);
+        addPendingJump(loc, ImmPtr(code.value), RelocationKind::HARDCODED);
     }
     void jump(RepatchLabel* label) {
         MOZ_CRASH("jump (repatchlabel)");
     }
     void jump(Register reg) {
         Br(ARMRegister(reg, 64));
     }
     void jump(const Address& addr) {
@@ -1225,17 +1225,17 @@ class MacroAssemblerCompat : public vixl
     }
 
     void branch(Condition cond, Label* label) {
         B(label, cond);
     }
     void branch(JitCode* target) {
         syncStackPtr();
         BufferOffset loc = b(-1, LabelDoc()); // The jump target will be patched by executableCopy().
-        addPendingJump(loc, ImmPtr(target->raw()), Relocation::JITCODE);
+        addPendingJump(loc, ImmPtr(target->raw()), RelocationKind::JITCODE);
     }
 
     CodeOffsetJump jumpWithPatch(RepatchLabel* label)
     {
         ARMBuffer::PoolEntry pe;
         BufferOffset load_bo;
 
         // Does not overwrite condition codes from the caller.
@@ -1891,17 +1891,17 @@ class MacroAssemblerCompat : public vixl
             loadOffset = immPool64(ScratchReg2_64, uint64_t(target->raw()));
 
             if (enabled)
                 blr(ScratchReg2_64);
             else
                 nop();
         }
 
-        addPendingJump(loadOffset, ImmPtr(target->raw()), Relocation::JITCODE);
+        addPendingJump(loadOffset, ImmPtr(target->raw()), RelocationKind::JITCODE);
         CodeOffset ret(offset.getOffset());
         return ret;
     }
 
     static size_t ToggledCallSize(uint8_t* code) {
         static const uint32_t syncStackInstruction = 0x9100039f; // mov sp, x28
 
         // start it off as an 8 byte sequence
--- a/js/src/jit/mips-shared/Assembler-mips-shared.h
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -856,19 +856,19 @@ class AssemblerMIPSShared : public Assem
     // Structure for fixing up pc-relative loads/jumps when the machine code
     // gets moved (executable copy, gc, etc.)
     struct RelativePatch
     {
         // The offset within the code buffer where the value we want to
         // fix up is loaded.
         BufferOffset offset;
         void* target;
-        Relocation::Kind kind;
+        RelocationKind kind;
 
-        RelativePatch(BufferOffset offset, void* target, Relocation::Kind kind)
+        RelativePatch(BufferOffset offset, void* target, RelocationKind kind)
           : offset(offset),
             target(target),
             kind(kind)
         { }
     };
 
     js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
 
@@ -906,17 +906,17 @@ class AssemblerMIPSShared : public Assem
             dataRelocations_.writeUnsigned(nextOffset().getOffset());
         }
     }
 
     void assertNoGCThings() const {
 #ifdef DEBUG
         MOZ_ASSERT(dataRelocations_.length() == 0);
         for (auto& j : jumps_)
-            MOZ_ASSERT(j.kind == Relocation::HARDCODED);
+            MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
 #endif
     }
 
   public:
     bool oom() const;
 
     void setPrinter(Sprinter* sp) {
 #ifdef JS_JITSPEW
@@ -1280,19 +1280,19 @@ class AssemblerMIPSShared : public Assem
     }
 
     static bool HasRoundInstruction(RoundingMode mode) {
         return false;
     }
 
   protected:
     InstImm invertBranch(InstImm branch, BOffImm16 skipOffset);
-    void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind) {
+    void addPendingJump(BufferOffset src, ImmPtr target, RelocationKind kind) {
         enoughMemory_ &= jumps_.append(RelativePatch(src, target.value, kind));
-        if (kind == Relocation::JITCODE)
+        if (kind == RelocationKind::JITCODE)
             writeRelocation(src);
     }
 
     void addLongJump(BufferOffset src, BufferOffset dst) {
         CodeLabel cl;
         cl.patchAt()->bind(src.getOffset());
         cl.target()->bind(dst.getOffset());
         cl.setLinkMode(CodeLabel::JumpImmediate);
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -2412,17 +2412,17 @@ CodeGenerator::visitAtomicTypedArrayElem
     Register elements = ToRegister(lir->elements());
     Register outTemp = ToTempRegisterOrInvalid(lir->temp2());
     Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
     Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
     Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
     Register value = ToRegister(lir->value());
 
     Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
+    size_t width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address mem(elements, ToInt32(lir->index()) * width);
         masm.atomicFetchOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
                              mem, valueTemp, offsetTemp, maskTemp, outTemp, output);
     } else {
         BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
         masm.atomicFetchOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
@@ -2436,17 +2436,17 @@ CodeGenerator::visitAtomicTypedArrayElem
     MOZ_ASSERT(!lir->mir()->hasUses());
 
     Register elements = ToRegister(lir->elements());
     Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
     Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
     Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
     Register value = ToRegister(lir->value());
     Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
+    size_t width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address mem(elements, ToInt32(lir->index()) * width);
         masm.atomicEffectOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
                              mem, valueTemp, offsetTemp, maskTemp);
     } else {
         BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
         masm.atomicEffectOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
@@ -2463,17 +2463,17 @@ CodeGenerator::visitCompareExchangeTyped
 
     Register oldval = ToRegister(lir->oldval());
     Register newval = ToRegister(lir->newval());
     Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
     Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
     Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
 
     Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
+    size_t width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address dest(elements, ToInt32(lir->index()) * width);
         masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval,
                                valueTemp, offsetTemp, maskTemp, outTemp, output);
     } else {
         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
         masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval,
@@ -2489,17 +2489,17 @@ CodeGenerator::visitAtomicExchangeTypedA
     Register outTemp = ToTempRegisterOrInvalid(lir->temp());
 
     Register value = ToRegister(lir->value());
     Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
     Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
     Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
 
     Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
+    size_t width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address dest(elements, ToInt32(lir->index()) * width);
         masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, valueTemp,
                               offsetTemp, maskTemp, outTemp, output);
     } else {
         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
         masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, valueTemp,
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -1608,25 +1608,25 @@ MacroAssembler::call(ImmWord target)
 {
     call(ImmPtr((void*)target.value));
 }
 
 void
 MacroAssembler::call(ImmPtr target)
 {
     BufferOffset bo = m_buffer.nextOffset();
-    addPendingJump(bo, target, Relocation::HARDCODED);
+    addPendingJump(bo, target, RelocationKind::HARDCODED);
     ma_call(target);
 }
 
 void
 MacroAssembler::call(JitCode* c)
 {
     BufferOffset bo = m_buffer.nextOffset();
-    addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
+    addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
     ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
     callJitNoProfiler(ScratchRegister);
 }
 
 CodeOffset
 MacroAssembler::nopPatchableToCall(const wasm::CallSiteDesc& desc)
 {
     CodeOffset offset(currentOffset());
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -2018,17 +2018,17 @@ MacroAssemblerMIPSCompat::toggledJump(La
     return ret;
 }
 
 CodeOffset
 MacroAssemblerMIPSCompat::toggledCall(JitCode* target, bool enabled)
 {
     BufferOffset bo = nextOffset();
     CodeOffset offset(bo.getOffset());
-    addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
+    addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
     ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
     if (enabled) {
         as_jalr(ScratchRegister);
         as_nop();
     } else {
         as_nop();
         as_nop();
     }
@@ -2424,17 +2424,17 @@ MacroAssembler::storeUnboxedValue(const 
         storePayload(value.reg().typedReg().gpr(), dest);
 }
 
 template void
 MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
                                   const Address& dest, MIRType slotType);
 template void
 MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
-                                  const BaseIndex& dest, MIRType slotType);
+                                  const BaseObjectElementIndex& dest, MIRType slotType);
 
 
 void
 MacroAssembler::wasmBoundsCheck(Condition cond, Register index, Register boundsCheckLimit,
                                 Label* label)
 {
      ma_b(index, boundsCheckLimit, label, cond);
 }
--- a/js/src/jit/mips32/MacroAssembler-mips32.h
+++ b/js/src/jit/mips32/MacroAssembler-mips32.h
@@ -239,17 +239,17 @@ class MacroAssemblerMIPSCompat : public 
         MOZ_CRASH("NYI-IC");
     }
     void mov(Address src, Register dest) {
         MOZ_CRASH("NYI-IC");
     }
 
     void branch(JitCode* c) {
         BufferOffset bo = m_buffer.nextOffset();
-        addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
+        addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
         ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
         as_jr(ScratchRegister);
         as_nop();
     }
     void branch(const Register reg) {
         as_jr(reg);
         as_nop();
     }
@@ -342,17 +342,17 @@ class MacroAssemblerMIPSCompat : public 
     void jump(JitCode* code) {
         branch(code);
     }
 
     void jump(TrampolinePtr code)
     {
         auto target = ImmPtr(code.value);
         BufferOffset bo = m_buffer.nextOffset();
-        addPendingJump(bo, target, Relocation::HARDCODED);
+        addPendingJump(bo, target, RelocationKind::HARDCODED);
         ma_jump(target);
     }
 
     void negl(Register reg) {
         ma_negu(reg, reg);
     }
 
     void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) {
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -1885,17 +1885,17 @@ MacroAssemblerMIPS64Compat::toggledJump(
     return ret;
 }
 
 CodeOffset
 MacroAssemblerMIPS64Compat::toggledCall(JitCode* target, bool enabled)
 {
     BufferOffset bo = nextOffset();
     CodeOffset offset(bo.getOffset());
-    addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
+    addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
     ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
     if (enabled) {
         as_jalr(ScratchRegister);
         as_nop();
     } else {
         as_nop();
         as_nop();
     }
@@ -2252,17 +2252,17 @@ MacroAssembler::storeUnboxedValue(const 
         storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(), dest);
 }
 
 template void
 MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
                                   const Address& dest, MIRType slotType);
 template void
 MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
-                                  const BaseIndex& dest, MIRType slotType);
+                                  const BaseObjectElementIndex& dest, MIRType slotType);
 
 
 void
 MacroAssembler::wasmBoundsCheck(Condition cond, Register index, Register boundsCheckLimit,
                                 Label* label)
 {
     ma_b(index, boundsCheckLimit, label, cond);
 }
--- a/js/src/jit/mips64/MacroAssembler-mips64.h
+++ b/js/src/jit/mips64/MacroAssembler-mips64.h
@@ -250,17 +250,17 @@ class MacroAssemblerMIPS64Compat : publi
             if (cell && gc::IsInsideNursery(cell))
                 embedsNurseryPointers_ = true;
             dataRelocations_.writeUnsigned(currentOffset());
         }
     }
 
     void branch(JitCode* c) {
         BufferOffset bo = m_buffer.nextOffset();
-        addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
+        addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
         ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
         as_jr(ScratchRegister);
         as_nop();
     }
     void branch(const Register reg) {
         as_jr(reg);
         as_nop();
     }
@@ -357,17 +357,17 @@ class MacroAssemblerMIPS64Compat : publi
     void jump(JitCode* code) {
         branch(code);
     }
 
     void jump(TrampolinePtr code)
     {
         auto target = ImmPtr(code.value);
         BufferOffset bo = m_buffer.nextOffset();
-        addPendingJump(bo, target, Relocation::HARDCODED);
+        addPendingJump(bo, target, RelocationKind::HARDCODED);
         ma_jump(target);
     }
 
     void splitTag(Register src, Register dest) {
         ma_dsrl(dest, src, Imm32(JSVAL_TAG_SHIFT));
     }
 
     void splitTag(const ValueOperand& operand, Register dest) {
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -317,18 +317,17 @@ struct Address
     Address(Register base, int32_t offset) : base(RegisterOrSP(base)), offset(offset)
     { }
 
 #ifdef JS_HAS_HIDDEN_SP
     Address(RegisterOrSP base, int32_t offset) : base(base), offset(offset)
     { }
 #endif
 
-    Address() : base(RegisterOrSP(Registers::Invalid)), offset(0)
-    { }
+    Address() = delete;
 };
 
 #if JS_BITS_PER_WORD == 32
 
 static inline Address
 LowWord(const Address& address) {
     CheckedInt<int32_t> offset = CheckedInt<int32_t>(address.offset) + INT64LOW_OFFSET;
     MOZ_ALWAYS_TRUE(offset.isValid());
@@ -358,22 +357,17 @@ struct BaseIndex
     { }
 
 #ifdef JS_HAS_HIDDEN_SP
     BaseIndex(RegisterOrSP base, Register index, Scale scale, int32_t offset = 0)
       : base(base), index(index), scale(scale), offset(offset)
     { }
 #endif
 
-    BaseIndex()
-      : base(RegisterOrSP(Registers::Invalid))
-      , index(Registers::Invalid)
-      , scale(TimesOne)
-      , offset(0)
-    {}
+    BaseIndex() = delete;
 };
 
 #if JS_BITS_PER_WORD == 32
 
 static inline BaseIndex
 LowWord(const BaseIndex& address) {
     CheckedInt<int32_t> offset = CheckedInt<int32_t>(address.offset) + INT64LOW_OFFSET;
     MOZ_ALWAYS_TRUE(offset.isValid());
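
Replacing the `Registers::Invalid` default constructors of `Address` and `BaseIndex` with `= delete` turns an accidentally default-constructed operand from a silently-invalid runtime value into a compile error. A simplified stand-in illustrating the effect (assumed, simplified types):

```cpp
#include <cstdint>

struct Register { int code; };

struct Address {
    Register base;
    int32_t offset;
    Address(Register base, int32_t offset) : base(base), offset(offset) {}
    Address() = delete; // Was: base = Registers::Invalid, offset = 0.
};

int main() {
    Address ok(Register{3}, 8); // Fine: fully specified.
    // Address bad;             // Error: use of deleted function.
    (void)ok;
    return 0;
}
```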
@@ -440,27 +434,24 @@ struct BaseObjectSlotIndex : BaseValueIn
     BaseObjectSlotIndex(RegisterOrSP base, Register index)
       : BaseValueIndex(base, index)
     {
         NativeObject::slotsSizeMustNotOverflow();
     }
 #endif
 };
 
-class Relocation {
-  public:
-    enum Kind {
-        // The target is immovable, so patching is only needed if the source
-        // buffer is relocated and the reference is relative.
-        HARDCODED,
+enum class RelocationKind {
+    // The target is immovable, so patching is only needed if the source
+    // buffer is relocated and the reference is relative.
+    HARDCODED,
 
-        // The target is the start of a JitCode buffer, which must be traced
-        // during garbage collection. Relocations and patching may be needed.
-        JITCODE
-    };
+    // The target is the start of a JitCode buffer, which must be traced
+    // during garbage collection. Relocations and patching may be needed.
+    JITCODE
 };
 
 class RepatchLabel
 {
     static const int32_t INVALID_OFFSET = 0xC0000000;
     int32_t offset_ : 31;
     uint32_t bound_ : 1;
   public:
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -1492,23 +1492,24 @@ CodeGeneratorShared::omitOverRecursedChe
     // and it uses only a small amount of stack space, it doesn't need a
     // stack overflow check. Note that the actual number here is somewhat
     // arbitrary, and codegen actually uses small bounded amounts of
     // additional stack space in some cases too.
     return frameSize() < MAX_UNCHECKED_LEAF_FRAME_SIZE && !gen->needsOverrecursedCheck();
 }
 
 void
-CodeGeneratorShared::emitPreBarrier(Register base, const LAllocation* index, int32_t offsetAdjustment)
+CodeGeneratorShared::emitPreBarrier(Register elements, const LAllocation* index,
+                                    int32_t offsetAdjustment)
 {
     if (index->isConstant()) {
-        Address address(base, ToInt32(index) * sizeof(Value) + offsetAdjustment);
+        Address address(elements, ToInt32(index) * sizeof(Value) + offsetAdjustment);
         masm.guardedCallPreBarrier(address, MIRType::Value);
     } else {
-        BaseIndex address(base, ToRegister(index), TimesEight, offsetAdjustment);
+        BaseObjectElementIndex address(elements, ToRegister(index), offsetAdjustment);
         masm.guardedCallPreBarrier(address, MIRType::Value);
     }
 }
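
Several `storeUnboxedValue` instantiations and `emitPreBarrier` above switch from a raw `BaseIndex` with a hand-written `TimesEight` scale to `BaseObjectElementIndex`. Judging from the `BaseObjectSlotIndex` definition earlier in this patch, the wrapper bakes the Value-sized scale (and a debug overflow check) into the type; a simplified sketch of that shape:

```cpp
#include <cstdint>

enum Scale { TimesOne, TimesTwo, TimesFour, TimesEight };
struct Register { int code; };

struct BaseIndex {
    Register base, index;
    Scale scale;
    int32_t offset;
    BaseIndex(Register base, Register index, Scale scale, int32_t offset = 0)
      : base(base), index(index), scale(scale), offset(offset) {}
};

// Assumed shape: the element variant fixes the scale to sizeof(Value),
// so callers cannot pair an elements pointer with the wrong scale.
struct BaseObjectElementIndex : BaseIndex {
    BaseObjectElementIndex(Register elements, Register index, int32_t offset = 0)
      : BaseIndex(elements, index, TimesEight /* = ValueScale */, offset) {}
};
```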
 
 void
 CodeGeneratorShared::emitPreBarrier(Address address)
 {
     masm.guardedCallPreBarrier(address, MIRType::Value);
--- a/js/src/jit/shared/CodeGenerator-shared.h
+++ b/js/src/jit/shared/CodeGenerator-shared.h
@@ -312,17 +312,17 @@ class CodeGeneratorShared : public LElem
     //      an invalidation marker.
     void ensureOsiSpace();
 
     OutOfLineCode* oolTruncateDouble(FloatRegister src, Register dest, MInstruction* mir,
                                      wasm::BytecodeOffset callOffset = wasm::BytecodeOffset());
     void emitTruncateDouble(FloatRegister src, Register dest, MTruncateToInt32* mir);
     void emitTruncateFloat32(FloatRegister src, Register dest, MTruncateToInt32* mir);
 
-    void emitPreBarrier(Register base, const LAllocation* index, int32_t offsetAdjustment);
+    void emitPreBarrier(Register elements, const LAllocation* index, int32_t offsetAdjustment);
     void emitPreBarrier(Address address);
 
     // We don't emit code for trivial blocks, so if we want to branch to the
     // given block, and it's trivial, return the ultimate block we should
     // actually branch directly to.
     MBasicBlock* skipTrivialBlocks(MBasicBlock* block) {
         while (block->lir()->isTrivial()) {
             LGoto* ins = block->lir()->rbegin()->toGoto();
--- a/js/src/jit/x64/Assembler-x64.cpp
+++ b/js/src/jit/x64/Assembler-x64.cpp
@@ -113,45 +113,45 @@ ABIArgGenerator::next(MIRType type)
       default:
         MOZ_CRASH("Unexpected argument type");
     }
     return current_;
 #endif
 }
 
 void
-Assembler::writeRelocation(JmpSrc src, Relocation::Kind reloc)
+Assembler::writeRelocation(JmpSrc src, RelocationKind reloc)
 {
     if (!jumpRelocations_.length()) {
         // The jump relocation table starts with a fixed-width integer pointing
         // to the start of the extended jump table. But, we don't know the
         // actual extended jump table offset yet, so write a 0 which we'll
         // patch later.
         jumpRelocations_.writeFixedUint32_t(0);
     }
-    if (reloc == Relocation::JITCODE) {
+    if (reloc == RelocationKind::JITCODE) {
         jumpRelocations_.writeUnsigned(src.offset());
         jumpRelocations_.writeUnsigned(jumps_.length());
     }
 }
 
 void
-Assembler::addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind reloc)
+Assembler::addPendingJump(JmpSrc src, ImmPtr target, RelocationKind reloc)
 {
     MOZ_ASSERT(target.value != nullptr);
 
     // Emit reloc before modifying the jump table, since it computes a 0-based
     // index. This jump is not patchable at runtime.
-    if (reloc == Relocation::JITCODE)
+    if (reloc == RelocationKind::JITCODE)
         writeRelocation(src, reloc);
     enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, reloc));
 }
 
 size_t
-Assembler::addPatchableJump(JmpSrc src, Relocation::Kind reloc)
+Assembler::addPatchableJump(JmpSrc src, RelocationKind reloc)
 {
     // This jump is patchable at runtime so we always need to make sure the
     // jump table is emitted.
     writeRelocation(src, reloc);
 
     size_t index = jumps_.length();
     enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), nullptr, reloc));
     return index;
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -283,21 +283,21 @@ class Assembler : public AssemblerX86Sha
     static const uint32_t SizeOfExtendedJump = 1 + 1 + 4 + 2 + 8;
     static const uint32_t SizeOfJumpTableEntry = 16;
 
     uint32_t extendedJumpTable_;
 
     static JitCode* CodeFromJump(JitCode* code, uint8_t* jump);
 
   private:
-    void writeRelocation(JmpSrc src, Relocation::Kind reloc);
-    void addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind reloc);
+    void writeRelocation(JmpSrc src, RelocationKind reloc);
+    void addPendingJump(JmpSrc src, ImmPtr target, RelocationKind reloc);
 
   protected:
-    size_t addPatchableJump(JmpSrc src, Relocation::Kind reloc);
+    size_t addPatchableJump(JmpSrc src, RelocationKind reloc);
 
   public:
     using AssemblerX86Shared::j;
     using AssemblerX86Shared::jmp;
     using AssemblerX86Shared::push;
     using AssemblerX86Shared::pop;
     using AssemblerX86Shared::vmovq;
 
@@ -1055,50 +1055,49 @@ class Assembler : public AssemblerX86Sha
             masm.testq_i32m(rhs.value, lhs.disp(), lhs.base());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
             break;
         }
     }
 
-    void jmp(ImmPtr target, Relocation::Kind reloc = Relocation::HARDCODED) {
+    void jmp(ImmPtr target, RelocationKind reloc = RelocationKind::HARDCODED) {
         JmpSrc src = masm.jmp();
         addPendingJump(src, target, reloc);
     }
-    void j(Condition cond, ImmPtr target,
-           Relocation::Kind reloc = Relocation::HARDCODED) {
+    void j(Condition cond, ImmPtr target, RelocationKind reloc = RelocationKind::HARDCODED) {
         JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
         addPendingJump(src, target, reloc);
     }
 
     void jmp(JitCode* target) {
-        jmp(ImmPtr(target->raw()), Relocation::JITCODE);
+        jmp(ImmPtr(target->raw()), RelocationKind::JITCODE);
     }
     void j(Condition cond, JitCode* target) {
-        j(cond, ImmPtr(target->raw()), Relocation::JITCODE);
+        j(cond, ImmPtr(target->raw()), RelocationKind::JITCODE);
     }
     void call(JitCode* target) {
         JmpSrc src = masm.call();
-        addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
+        addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
     }
     void call(ImmWord target) {
         call(ImmPtr((void*)target.value));
     }
     void call(ImmPtr target) {
         JmpSrc src = masm.call();
-        addPendingJump(src, target, Relocation::HARDCODED);
+        addPendingJump(src, target, RelocationKind::HARDCODED);
     }
 
     // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
     // this instruction.
     CodeOffset toggledCall(JitCode* target, bool enabled) {
         CodeOffset offset(size());
         JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
-        addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
+        addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
         MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
         return offset;
     }
 
     static size_t ToggledCallSize(uint8_t* code) {
         // Size of a call instruction.
         return 5;
     }
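
`toggledCall()` above works because `call rel32` (opcode 0xE8) and `cmp eax, imm32` (opcode 0x3D) are both five bytes on x86/x64, which is why `ToggledCallSize()` returns 5: later ToggleCall patching can flip one opcode byte to switch the site between a live call and an effect-free instruction without moving any code. A hedged sketch of that toggle:

```cpp
#include <cassert>
#include <cstdint>

// Sketch only: flip a five-byte call site between `call rel32` and
// `cmp eax, imm32`. The four payload bytes stay in place either way.
static void toggleCallSite(uint8_t* site, bool enabled) {
    assert(site[0] == 0xE8 || site[0] == 0x3D);
    site[0] = enabled ? 0xE8 : 0x3D;
}
```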
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -579,17 +579,17 @@ MacroAssembler::storeUnboxedValue(const 
         storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(), dest);
 }
 
 template void
 MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
                                   const Address& dest, MIRType slotType);
 template void
 MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
-                                  const BaseIndex& dest, MIRType slotType);
+                                  const BaseObjectElementIndex& dest, MIRType slotType);
 
 // ========================================================================
 // wasm support
 
 void
 MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr, AnyRegister out)
 {
     memoryBarrierBefore(access.sync());
--- a/js/src/jit/x64/MacroAssembler-x64.h
+++ b/js/src/jit/x64/MacroAssembler-x64.h
@@ -541,17 +541,17 @@ class MacroAssemblerX64 : public MacroAs
     }
 
     /////////////////////////////////////////////////////////////////
     // Common interface.
     /////////////////////////////////////////////////////////////////
 
     CodeOffsetJump jumpWithPatch(RepatchLabel* label) {
         JmpSrc src = jmpSrc(label);
-        return CodeOffsetJump(size(), addPatchableJump(src, Relocation::HARDCODED));
+        return CodeOffsetJump(size(), addPatchableJump(src, RelocationKind::HARDCODED));
     }
 
     void movePtr(Register src, Register dest) {
         movq(src, dest);
     }
     void movePtr(Register src, const Operand& dest) {
         movq(src, dest);
     }
--- a/js/src/jit/x64/Trampoline-x64.cpp
+++ b/js/src/jit/x64/Trampoline-x64.cpp
@@ -452,17 +452,17 @@ JitRuntime::generateArgumentsRectifier(M
         masm.j(Assembler::NonZero, &undefLoopTop);
     }
 
     // Get the topmost argument.
     static_assert(sizeof(Value) == 8, "TimesEight is used to skip arguments");
 
     // The |- sizeof(Value)| adjustment positions rcx so that we read the
     // last argument, and not the value that comes after it.
-    BaseIndex b = BaseIndex(r9, r8, TimesEight, sizeof(RectifierFrameLayout) - sizeof(Value));
+    BaseIndex b(r9, r8, TimesEight, sizeof(RectifierFrameLayout) - sizeof(Value));
     masm.lea(Operand(b), rcx);
 
     // Copy & Push arguments, |nargs| + 1 times (to include |this|).
     {
         Label copyLoopTop;
 
         masm.bind(&copyLoopTop);
         masm.push(Operand(rcx, 0x0));
--- a/js/src/jit/x86-shared/Assembler-x86-shared.h
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.h
@@ -250,19 +250,19 @@ class CPUInfo
 };
 
 class AssemblerX86Shared : public AssemblerShared
 {
   protected:
     struct RelativePatch {
         int32_t offset;
         void* target;
-        Relocation::Kind kind;
+        RelocationKind kind;
 
-        RelativePatch(int32_t offset, void* target, Relocation::Kind kind)
+        RelativePatch(int32_t offset, void* target, RelocationKind kind)
           : offset(offset),
             target(target),
             kind(kind)
         { }
     };
 
     Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
     CompactBufferWriter jumpRelocations_;
@@ -394,17 +394,17 @@ class AssemblerX86Shared : public Assemb
     }
 
     static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
 
     void assertNoGCThings() const {
 #ifdef DEBUG
         MOZ_ASSERT(dataRelocations_.length() == 0);
         for (auto& j : jumps_)
-            MOZ_ASSERT(j.kind == Relocation::HARDCODED);
+            MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
 #endif
     }
 
     bool oom() const {
         return AssemblerShared::oom() ||
                masm.oom() ||
                jumpRelocations_.oom() ||
                dataRelocations_.oom();
--- a/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h
+++ b/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h
@@ -32,17 +32,17 @@
 
 #include <stdarg.h>
 #include <string.h>
 
 #include "jit/ExecutableAllocator.h"
 #include "jit/JitSpewer.h"
 
 // Spew formatting helpers.
-#define PRETTYHEX(x)                       (((x)<0)?"-":""),(((x)<0)?-(x):(x))
+#define PRETTYHEX(x)                       (((x)<0)?"-":""),((unsigned)((x)^((x)>>31))+((unsigned)(x)>>31))
 
 #define MEM_o     "%s0x%x"
 #define MEM_os    MEM_o   "(,%s,%d)"
 #define MEM_ob    MEM_o   "(%s)"
 #define MEM_obs   MEM_o   "(%s,%s,%d)"
 
 #define MEM_o32   "%s0x%04x"
 #define MEM_o32s  MEM_o32 "(,%s,%d)"
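
The PRETTYHEX rewrite removes the undefined behaviour hiding in the old `-(x)` arm: negating INT32_MIN overflows a signed int. The new expression computes |x| entirely in unsigned arithmetic: for negative x (with the usual arithmetic right shift), `x ^ (x >> 31)` is `~x`, and adding the sign bit yields `~x + 1 = -x` mod 2^32. A small check of that identity:

```cpp
#include <cassert>
#include <climits>

// Mirrors the new PRETTYHEX magnitude expression. Assumes >> on a
// negative int is an arithmetic shift, as on all tier-1 compilers.
static unsigned prettyMagnitude(int x) {
    return (unsigned)(x ^ (x >> 31)) + ((unsigned)x >> 31);
}

int main() {
    assert(prettyMagnitude(0) == 0u);
    assert(prettyMagnitude(5) == 5u);
    assert(prettyMagnitude(-5) == 5u);
    assert(prettyMagnitude(INT_MIN) == 0x80000000u); // No signed overflow.
    return 0;
}
```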
--- a/js/src/jit/x86-shared/BaseAssembler-x86-shared.h
+++ b/js/src/jit/x86-shared/BaseAssembler-x86-shared.h
@@ -1797,17 +1797,17 @@ public:
     {
         spew("cmpb       $0x%x, " MEM_obs, rhs, ADDR_obs(offset, base, index, scale));
         m_formatter.oneByteOp(OP_GROUP1_EbIb, offset, base, index, scale, GROUP1_OP_CMP);
         m_formatter.immediate8(rhs);
     }
 
     void cmpl_im(int32_t rhs, int32_t offset, RegisterID base, RegisterID index, int scale)
     {
-        spew("cmpl       $0x%x, " MEM_o32b, rhs, ADDR_o32b(offset, base));
+        spew("cmpl       $0x%x, " MEM_obs, rhs, ADDR_obs(offset, base, index, scale));
         if (CAN_SIGN_EXTEND_8_32(rhs)) {
             m_formatter.oneByteOp(OP_GROUP1_EvIb, offset, base, index, scale, GROUP1_OP_CMP);
             m_formatter.immediate8s(rhs);
         } else {
             m_formatter.oneByteOp(OP_GROUP1_EvIz, offset, base, index, scale, GROUP1_OP_CMP);
             m_formatter.immediate32(rhs);
         }
     }
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -494,33 +494,33 @@ CodeGeneratorX86Shared::generateOutOfLin
 class BailoutJump {
     Assembler::Condition cond_;
 
   public:
     explicit BailoutJump(Assembler::Condition cond) : cond_(cond)
     { }
 #ifdef JS_CODEGEN_X86
     void operator()(MacroAssembler& masm, uint8_t* code) const {
-        masm.j(cond_, ImmPtr(code), Relocation::HARDCODED);
+        masm.j(cond_, ImmPtr(code), RelocationKind::HARDCODED);
     }
 #endif
     void operator()(MacroAssembler& masm, Label* label) const {
         masm.j(cond_, label);
     }
 };
 
 class BailoutLabel {
     Label* label_;
 
   public:
     explicit BailoutLabel(Label* label) : label_(label)
     { }
 #ifdef JS_CODEGEN_X86
     void operator()(MacroAssembler& masm, uint8_t* code) const {
-        masm.retarget(label_, ImmPtr(code), Relocation::HARDCODED);
+        masm.retarget(label_, ImmPtr(code), RelocationKind::HARDCODED);
     }
 #endif
     void operator()(MacroAssembler& masm, Label* label) const {
         masm.retarget(label_, label);
     }
 };
 
 template <typename T> void
@@ -2485,17 +2485,17 @@ CodeGenerator::visitCompareExchangeTyped
     Register elements = ToRegister(lir->elements());
     AnyRegister output = ToAnyRegister(lir->output());
     Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
 
     Register oldval = ToRegister(lir->oldval());
     Register newval = ToRegister(lir->newval());
 
     Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
+    size_t width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address dest(elements, ToInt32(lir->index()) * width);
         masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval, temp, output);
     } else {
         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
         masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval, temp, output);
     }
@@ -2506,17 +2506,17 @@ CodeGenerator::visitAtomicExchangeTypedA
 {
     Register elements = ToRegister(lir->elements());
     AnyRegister output = ToAnyRegister(lir->output());
     Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
 
     Register value = ToRegister(lir->value());
 
     Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
+    size_t width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address dest(elements, ToInt32(lir->index()) * width);
         masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp, output);
     } else {
         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
         masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp, output);
     }
@@ -2544,17 +2544,17 @@ CodeGenerator::visitAtomicTypedArrayElem
 
     AnyRegister output = ToAnyRegister(lir->output());
     Register elements = ToRegister(lir->elements());
     Register temp1 = lir->temp1()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp1());
     Register temp2 = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
     const LAllocation* value = lir->value();
 
     Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
+    size_t width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address mem(elements, ToInt32(lir->index()) * width);
         AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
     } else {
         BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
         AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
     }
@@ -2577,17 +2577,17 @@ AtomicBinopToTypedArray(MacroAssembler& 
 void
 CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir)
 {
     MOZ_ASSERT(!lir->mir()->hasUses());
 
     Register elements = ToRegister(lir->elements());
     const LAllocation* value = lir->value();
     Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
+    size_t width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address mem(elements, ToInt32(lir->index()) * width);
         AtomicBinopToTypedArray(masm, arrayType, lir->mir()->operation(), value, mem);
     } else {
         BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
         AtomicBinopToTypedArray(masm, arrayType, lir->mir()->operation(), value, mem);
     }
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -217,19 +217,19 @@ HighWord(const Operand& op) {
 // Return operand from a JS -> JS call.
 static constexpr ValueOperand JSReturnOperand{JSReturnReg_Type, JSReturnReg_Data};
 
 class Assembler : public AssemblerX86Shared
 {
     void writeRelocation(JmpSrc src) {
         jumpRelocations_.writeUnsigned(src.offset());
     }
-    void addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind kind) {
+    void addPendingJump(JmpSrc src, ImmPtr target, RelocationKind kind) {
         enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, kind));
-        if (kind == Relocation::JITCODE)
+        if (kind == RelocationKind::JITCODE)
             writeRelocation(src);
     }
 
   public:
     using AssemblerX86Shared::movl;
     using AssemblerX86Shared::j;
     using AssemblerX86Shared::jmp;
     using AssemblerX86Shared::vmovsd;
@@ -501,62 +501,61 @@ class Assembler : public AssemblerX86Sha
           case Operand::MEM_REG_DISP:
             masm.fild_m(src.disp(), src.base());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
     }
 
-    void jmp(ImmPtr target, Relocation::Kind reloc = Relocation::HARDCODED) {
+    void jmp(ImmPtr target, RelocationKind reloc = RelocationKind::HARDCODED) {
         JmpSrc src = masm.jmp();
         addPendingJump(src, target, reloc);
     }
-    void j(Condition cond, ImmPtr target,
-           Relocation::Kind reloc = Relocation::HARDCODED) {
+    void j(Condition cond, ImmPtr target, RelocationKind reloc = RelocationKind::HARDCODED) {
         JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
         addPendingJump(src, target, reloc);
     }
 
     void jmp(JitCode* target) {
-        jmp(ImmPtr(target->raw()), Relocation::JITCODE);
+        jmp(ImmPtr(target->raw()), RelocationKind::JITCODE);
     }
     void j(Condition cond, JitCode* target) {
-        j(cond, ImmPtr(target->raw()), Relocation::JITCODE);
+        j(cond, ImmPtr(target->raw()), RelocationKind::JITCODE);
     }
     void call(JitCode* target) {
         JmpSrc src = masm.call();
-        addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
+        addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
     }
     void call(ImmWord target) {
         call(ImmPtr((void*)target.value));
     }
     void call(ImmPtr target) {
         JmpSrc src = masm.call();
-        addPendingJump(src, target, Relocation::HARDCODED);
+        addPendingJump(src, target, RelocationKind::HARDCODED);
     }
 
     // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
     // this instruction.
     CodeOffset toggledCall(JitCode* target, bool enabled) {
         CodeOffset offset(size());
         JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
-        addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
+        addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
         MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
         return offset;
     }
 
     static size_t ToggledCallSize(uint8_t* code) {
         // Size of a call instruction.
         return 5;
     }
 
     // Re-routes pending jumps to an external target, flushing the label in the
     // process.
-    void retarget(Label* label, ImmPtr target, Relocation::Kind reloc) {
+    void retarget(Label* label, ImmPtr target, RelocationKind reloc) {
         if (label->used()) {
             bool more;
             X86Encoding::JmpSrc jmp(label->offset());
             do {
                 X86Encoding::JmpSrc next;
                 more = masm.nextJump(jmp, &next);
                 addPendingJump(jmp, target, reloc);
                 jmp = next;
--- a/js/src/jit/x86/MacroAssembler-x86.cpp
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -587,17 +587,17 @@ MacroAssembler::storeUnboxedValue(const 
         storePayload(value.reg().typedReg().gpr(), Operand(dest));
 }
 
 template void
 MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
                                   const Address& dest, MIRType slotType);
 template void
 MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
-                                  const BaseIndex& dest, MIRType slotType);
+                                  const BaseObjectElementIndex& dest, MIRType slotType);
 
 // wasm specific methods, used in both the wasm baseline compiler and ion.
 
 void
 MacroAssembler::wasmBoundsCheck(Condition cond, Register index, Register boundsCheckLimit, Label* label)
 {
     cmp32(index, boundsCheckLimit);
     j(cond, label);
--- a/js/src/jit/x86/Trampoline-x86.cpp
+++ b/js/src/jit/x86/Trampoline-x86.cpp
@@ -445,18 +445,17 @@ JitRuntime::generateArgumentsRectifier(M
         masm.push(ebx); // type(undefined);
         masm.push(edi); // payload(undefined);
         masm.subl(Imm32(1), ecx);
         masm.j(Assembler::NonZero, &undefLoopTop);
     }
 
     // Get the topmost argument. We did a push of %ebp earlier, so be sure to
     // account for this in the offset.
-    BaseIndex b = BaseIndex(FramePointer, esi, TimesEight,
-                            sizeof(RectifierFrameLayout) + sizeof(void*));
+    BaseIndex b(FramePointer, esi, TimesEight, sizeof(RectifierFrameLayout) + sizeof(void*));
     masm.lea(Operand(b), ecx);
 
     // Push arguments, |nargs| + 1 times (to include |this|).
     masm.addl(Imm32(1), esi);
     {
         Label copyLoopTop;
 
         masm.bind(&copyLoopTop);
@@ -509,17 +508,17 @@ JitRuntime::generateArgumentsRectifier(M
 
     // Remove the rectifier frame.
     masm.pop(ebx);            // ebx <- descriptor with FrameType.
     masm.shrl(Imm32(FRAMESIZE_SHIFT), ebx); // ebx <- descriptor.
     masm.pop(edi);            // Discard calleeToken.
     masm.pop(edi);            // Discard number of actual arguments.
 
     // Discard pushed arguments, but not the pushed frame pointer.
-    BaseIndex unwind = BaseIndex(esp, ebx, TimesOne, -int32_t(sizeof(void*)));
+    BaseIndex unwind(esp, ebx, TimesOne, -int32_t(sizeof(void*)));
     masm.lea(Operand(unwind), esp);
 
     masm.pop(FramePointer);
     masm.ret();
 }
 
 static void
 PushBailoutFrame(MacroAssembler& masm, uint32_t frameClass, Register spArg)
--- a/js/src/tests/jstests.list
+++ b/js/src/tests/jstests.list
@@ -431,19 +431,16 @@ skip script test262/built-ins/Function/p
 skip script test262/built-ins/Function/prototype/toString/well-known-intrinsic-object-functions.js
 
 # https://bugzilla.mozilla.org/show_bug.cgi?id=1462745
 skip script test262/annexB/language/function-code/block-decl-nested-blocks-with-fun-decl.js
 
 # https://bugzilla.mozilla.org/show_bug.cgi?id=1406171
 skip script test262/built-ins/Reflect/ownKeys/return-on-corresponding-order-large-index.js
 
-# https://bugzilla.mozilla.org/show_bug.cgi?id=1472211
-skip script test262/language/statements/class/super/in-constructor-superproperty-evaluation.js
-
 # https://bugzilla.mozilla.org/show_bug.cgi?id=1473228
 skip script test262/intl402/RelativeTimeFormat/prototype/toStringTag/toStringTag.js
 
 # https://bugzilla.mozilla.org/show_bug.cgi?id=1473229
 skip script test262/intl402/RelativeTimeFormat/prototype/formatToParts/length.js
 skip script test262/intl402/RelativeTimeFormat/prototype/formatToParts/name.js
 skip script test262/intl402/RelativeTimeFormat/prototype/formatToParts/prop-desc.js
 
new file mode 100644
--- /dev/null
+++ b/js/src/tests/non262/class/superElemDelete.js
@@ -0,0 +1,68 @@
+// Make sure we get the proper side effects.
+// |delete super[expr]| applies ToPropertyKey on |expr| before throwing.
+
+class base {
+    constructor() { }
+}
+
+class derived extends base {
+    constructor() { super(); }
+    testDeleteElem() {
+        let sideEffect = 0;
+        let key = {
+            toString() {
+                sideEffect++;
+                return "";
+            }
+        };
+        assertThrowsInstanceOf(() => delete super[key], ReferenceError);
+        assertEq(sideEffect, 1);
+    }
+    testDeleteElemPropValFirst() {
+        // The deletion error is a reference error, but by munging the prototype
+        // chain, we can force a type error from JSOP_SUPERBASE.
+        let key = {
+            toString() {
+                Object.setPrototypeOf(derived.prototype, null);
+                return "";
+            }
+        };
+        delete super[key];
+    }
+}
+
+class derivedTestDeleteElem extends base {
+    constructor() {
+        let sideEffect = 0;
+        let key = {
+            toString() {
+                sideEffect++;
+                return "";
+            }
+        };
+
+        assertThrowsInstanceOf(() => delete super[key], ReferenceError);
+        assertEq(sideEffect, 0);
+
+        super();
+
+        assertThrowsInstanceOf(() => delete super[key], ReferenceError);
+        assertEq(sideEffect, 1);
+
+        Object.setPrototypeOf(derivedTestDeleteElem.prototype, null);
+
+        assertThrowsInstanceOf(() => delete super[key], TypeError);
+        assertEq(sideEffect, 2);
+
+        return {};
+    }
+}
+
+var d = new derived();
+d.testDeleteElem();
+assertThrowsInstanceOf(() => d.testDeleteElemPropValFirst(), TypeError);
+
+new derivedTestDeleteElem();
+
+if (typeof reportCompare === 'function')
+    reportCompare(0,0,"OK");
--- a/js/src/tests/non262/class/superPropDelete.js
+++ b/js/src/tests/non262/class/superPropDelete.js
@@ -37,10 +37,28 @@ assertEq(Object.prototype.toString, save
 var thing2 = {
     go() { delete super.prop; }
 };
 Object.setPrototypeOf(thing2, new Proxy({}, {
     deleteProperty(x) { throw "FAIL"; }
 }));
 assertThrowsInstanceOf(() => thing2.go(), ReferenceError);
 
+class derivedTestDeleteProp extends base {
+    constructor() {
+        // The deletion error is a reference error, but by munging the prototype
+        // chain, we can force a type error from JSOP_SUPERBASE.
+        Object.setPrototypeOf(derivedTestDeleteProp.prototype, null);
+
+        assertThrowsInstanceOf(() => delete super.prop, ReferenceError);
+
+        super();
+
+        assertThrowsInstanceOf(() => delete super.prop, TypeError);
+
+        return {};
+    }
+}
+
+new derivedTestDeleteProp();
+
 if (typeof reportCompare === 'function')
     reportCompare(0,0,"OK");
--- a/js/src/vm/BytecodeUtil.cpp
+++ b/js/src/vm/BytecodeUtil.cpp
@@ -1794,17 +1794,17 @@ ExpressionDecompiler::decompilePC(jsbyte
                write("[") &&
                decompilePCForStackOperand(pc, -1) &&
                write("]") &&
                (hasDelete ? write(")") : true);
       }
 
       case JSOP_GETELEM_SUPER:
         return write("super[") &&
-               decompilePCForStackOperand(pc, -3) &&
+               decompilePCForStackOperand(pc, -2) &&
                write("]");
       case JSOP_NULL:
         return write(js_null_str);
       case JSOP_TRUE:
         return write(js_true_str);
       case JSOP_FALSE:
         return write(js_false_str);
       case JSOP_ZERO:
--- a/js/src/vm/Interpreter.cpp
+++ b/js/src/vm/Interpreter.cpp
@@ -3078,18 +3078,18 @@ CASE(JSOP_CALLELEM)
 
     TypeScript::Monitor(cx, script, REGS.pc, res);
     REGS.sp--;
 }
 END_CASE(JSOP_GETELEM)
 
 CASE(JSOP_GETELEM_SUPER)
 {
-    ReservedRooted<Value> rval(&rootValue0, REGS.sp[-3]);
-    ReservedRooted<Value> receiver(&rootValue1, REGS.sp[-2]);
+    ReservedRooted<Value> receiver(&rootValue1, REGS.sp[-3]);
+    ReservedRooted<Value> rval(&rootValue0, REGS.sp[-2]);
     ReservedRooted<JSObject*> obj(&rootObject1, &REGS.sp[-1].toObject());
 
     MutableHandleValue res = REGS.stackHandleAt(-3);
 
     // Since we have asserted that obj has to be an object, it cannot be
     // either optimized arguments, or indeed any primitive. This simplifies
     // our task some.
     if (!GetObjectElementOperation(cx, JSOp(*REGS.pc), obj, receiver, rval, res))
@@ -3121,18 +3121,18 @@ CASE(JSOP_STRICTSETELEM)
 END_CASE(JSOP_SETELEM)
 
 CASE(JSOP_SETELEM_SUPER)
 CASE(JSOP_STRICTSETELEM_SUPER)
 {
     static_assert(JSOP_SETELEM_SUPER_LENGTH == JSOP_STRICTSETELEM_SUPER_LENGTH,
                   "setelem-super and strictsetelem-super must be the same size");
 
-    ReservedRooted<Value> index(&rootValue1, REGS.sp[-4]);
-    ReservedRooted<Value> receiver(&rootValue0, REGS.sp[-3]);
+    ReservedRooted<Value> receiver(&rootValue0, REGS.sp[-4]);
+    ReservedRooted<Value> index(&rootValue1, REGS.sp[-3]);
     ReservedRooted<JSObject*> obj(&rootObject1, &REGS.sp[-2].toObject());
     HandleValue value = REGS.stackHandleAt(-1);
 
     bool strict = JSOp(*REGS.pc) == JSOP_STRICTSETELEM_SUPER;
     if (!SetObjectElement(cx, obj, index, value, receiver, strict))
         goto error;
     REGS.sp[-4] = value;
     REGS.sp -= 3;
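These interpreter hunks and the BytecodeUtil.cpp change above implement the
same operand reorder: the receiver now sits below the property value, which is
why the decompiler reads the propval at depth -2. The new stack layouts,
matching the Opcodes.h documentation updated below (sp[-1] is the top of the
stack):

    // JSOP_GETELEM_SUPER:
    //   sp[-3] receiver, sp[-2] propval, sp[-1] obj             => obj[propval]
    // JSOP_SETELEM_SUPER / JSOP_STRICTSETELEM_SUPER:
    //   sp[-4] receiver, sp[-3] propval, sp[-2] obj, sp[-1] val => val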
--- a/js/src/vm/NativeObject.cpp
+++ b/js/src/vm/NativeObject.cpp
@@ -71,21 +71,21 @@ HeapSlot* const js::emptyObjectElementsS
 bool
 NativeObject::canHaveNonEmptyElements()
 {
     return !this->is<TypedArrayObject>();
 }
 
 #endif // DEBUG
 
-/* static */ bool
+/* static */ void
 ObjectElements::ConvertElementsToDoubles(JSContext* cx, uintptr_t elementsPtr)
 {
     /*
-     * This function is infallible, but has a fallible interface so that it can
+     * This function has an otherwise unused JSContext argument so that it can
      * be called directly from Ion code. Only arrays can have their dense
      * elements converted to doubles, and arrays never have empty elements.
      */
     HeapSlot* elementsHeapPtr = (HeapSlot*) elementsPtr;
     MOZ_ASSERT(elementsHeapPtr != emptyObjectElements &&
                elementsHeapPtr != emptyObjectElementsShared);
 
     ObjectElements* header = ObjectElements::fromElements(elementsHeapPtr);
@@ -95,17 +95,16 @@ ObjectElements::ConvertElementsToDoubles
     // arrays. See comment on ObjectElements.
     Value* vp = (Value*) elementsPtr;
     for (size_t i = 0; i < header->initializedLength; i++) {
         if (vp[i].isInt32())
             vp[i].setDouble(vp[i].toInt32());
     }
 
     header->setShouldConvertDoubleElements();
-    return true;
 }
 
 /* static */ bool
 ObjectElements::MakeElementsCopyOnWrite(JSContext* cx, NativeObject* obj)
 {
     static_assert(sizeof(HeapSlot) >= sizeof(GCPtrObject),
                   "there must be enough room for the owner object pointer at "
                   "the end of the elements");
--- a/js/src/vm/NativeObject.h
+++ b/js/src/vm/NativeObject.h
@@ -362,17 +362,17 @@ class ObjectElements
     }
     static int offsetOfCapacity() {
         return int(offsetof(ObjectElements, capacity)) - int(sizeof(ObjectElements));
     }
     static int offsetOfLength() {
         return int(offsetof(ObjectElements, length)) - int(sizeof(ObjectElements));
     }
 
-    static bool ConvertElementsToDoubles(JSContext* cx, uintptr_t elements);
+    static void ConvertElementsToDoubles(JSContext* cx, uintptr_t elements);
     static bool MakeElementsCopyOnWrite(JSContext* cx, NativeObject* obj);
 
     static MOZ_MUST_USE bool PreventExtensions(JSContext* cx, NativeObject* obj);
     static void FreezeOrSeal(JSContext* cx, NativeObject* obj, IntegrityLevel level);
 
     bool isSealed() const {
         return flags & SEALED;
     }
--- a/js/src/vm/Opcodes.h
+++ b/js/src/vm/Opcodes.h
@@ -533,27 +533,27 @@ 1234567890123456789012345678901234567890
      *   Category: Literals
      *   Type: Object
      *   Operands:
      *   Stack: obj, propval => obj[propval]
      */ \
     macro(JSOP_GETELEM,   55, "getelem",    NULL,         1,  2,  1, JOF_BYTE |JOF_ELEM|JOF_TYPESET|JOF_LEFTASSOC) \
     /*
      * Pops the top three values on the stack as 'val', 'propval' and 'obj',
-     * sets 'propval' property of 'obj' as 'val', pushes 'obj' onto the
+     * sets 'propval' property of 'obj' as 'val', pushes 'val' onto the
      * stack.
      *   Category: Literals
      *   Type: Object
      *   Operands:
      *   Stack: obj, propval, val => val
      */ \
     macro(JSOP_SETELEM,   56, "setelem",    NULL,         1,  3,  1, JOF_BYTE |JOF_ELEM|JOF_PROPSET|JOF_DETECTING|JOF_CHECKSLOPPY) \
     /*
      * Pops the top three values on the stack as 'val', 'propval' and 'obj',
-     * sets 'propval' property of 'obj' as 'val', pushes 'obj' onto the
+     * sets 'propval' property of 'obj' as 'val', pushes 'val' onto the
      * stack. Throws a TypeError if the set fails, per strict mode
      * semantics.
      *   Category: Literals
      *   Type: Object
      *   Operands:
      *   Stack: obj, propval, val => val
      */ \
     macro(JSOP_STRICTSETELEM,   57, "strict-setelem",    NULL,         1,  3,  1, JOF_BYTE |JOF_ELEM|JOF_PROPSET|JOF_DETECTING|JOF_CHECKSTRICT) \
@@ -1276,17 +1276,17 @@ 1234567890123456789012345678901234567890
      */ \
     macro(JSOP_STRICTEVAL,       124, "strict-eval",       NULL,         3, -1,  1, JOF_UINT16|JOF_INVOKE|JOF_TYPESET|JOF_CHECKSTRICT) \
     /*
      * LIKE JSOP_GETELEM but takes receiver on stack, and the propval is
      * evaluated before the obj.
      *   Category: Literals
      *   Type: Object
      *   Operands:
-     *   Stack: propval, receiver, obj => obj[propval]
+     *   Stack: receiver, propval, obj => obj[propval]
      */ \
     macro(JSOP_GETELEM_SUPER, 125, "getelem-super", NULL, 1,  3,  1, JOF_BYTE|JOF_ELEM|JOF_TYPESET|JOF_LEFTASSOC) \
     macro(JSOP_UNUSED126, 126, "unused126", NULL, 5,  0,  1, JOF_UINT32) \
     \
     /*
      * Defines the given function on the current scope.
      *
      * This is used for global scripts and also in some cases for function
@@ -1634,26 +1634,26 @@ 1234567890123456789012345678901234567890
      */                                                                 \
     macro(JSOP_GIMPLICITTHIS, 157, "gimplicitthis", "",      5,  0,  1,  JOF_ATOM) \
     /*
      * LIKE JSOP_SETELEM, but takes receiver on the stack, and the propval is
      * evaluated before the base.
      *   Category: Literals
      *   Type: Object
      *   Operands:
-     *   Stack: propval, receiver, obj, val => val
+     *   Stack: receiver, propval, obj, val => val
      */ \
     macro(JSOP_SETELEM_SUPER,   158, "setelem-super", NULL, 1,  4,  1, JOF_BYTE |JOF_ELEM|JOF_PROPSET|JOF_DETECTING|JOF_CHECKSLOPPY) \
     /*
      * LIKE JSOP_STRICTSETELEM, but takes receiver on the stack, and the
      * propval is evaluated before the base.
      *   Category: Literals
      *   Type: Object
      *   Operands:
-     *   Stack: propval, receiver, obj, val => val
+     *   Stack: receiver, propval, obj, val => val
      */ \
     macro(JSOP_STRICTSETELEM_SUPER, 159, "strict-setelem-super", NULL, 1,  4, 1, JOF_BYTE |JOF_ELEM|JOF_PROPSET|JOF_DETECTING|JOF_CHECKSTRICT) \
     \
     /*
      * Pushes a regular expression literal onto the stack.
      * It requires special "clone on exec" handling.
      *   Category: Literals
      *   Type: RegExp
--- a/js/xpconnect/src/XPCJSRuntime.cpp
+++ b/js/xpconnect/src/XPCJSRuntime.cpp
@@ -2874,17 +2874,17 @@ ReflectorNode::edges(JSContext* cx, bool
             if (wantNames) {
                 edgeName = NS_strdup(u"Reflected Node");
             }
             if (!range->addEdge(Edge(edgeName, node.get()))){
                 return nullptr;
             }
         }
     }
-    return range;
+    return js::UniquePtr<EdgeRange>(range.release());
 }
 
 } // Namespace ubi
 } // Namespace JS
 
 void
 ConstructUbiNode(void* storage, JSObject* ptr)
 {
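The explicit release()-and-rewrap is presumably needed because |range| is a
different js::UniquePtr instantiation (a derived EdgeRange type) that does not
convert implicitly to js::UniquePtr<EdgeRange>. The general pattern, with
assumed type names:

    // Move ownership across distinct UniquePtr instantiations, e.g.
    // derived-to-base, when no implicit conversion is available.
    js::UniquePtr<JS::ubi::EdgeRange>
    AsEdgeRange(js::UniquePtr<JS::ubi::SimpleEdgeRange> aRange) {
        return js::UniquePtr<JS::ubi::EdgeRange>(aRange.release());
    }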
--- a/mfbt/HashTable.h
+++ b/mfbt/HashTable.h
@@ -1,33 +1,73 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-// A note on the differences between mozilla::HashTable and PLDHashTable (and
-// its subclasses, such as nsTHashtable).
+//---------------------------------------------------------------------------
+// Overview
+//---------------------------------------------------------------------------
+//
+// This file defines HashMap<Key, Value> and HashSet<T>, hash tables that are
+// fast and have a nice API.
+//
+// Both hash tables have two optional template parameters.
+//
+// - HashPolicy. This defines the operations for hashing and matching keys. The
+//   default HashPolicy is appropriate when both of the following two
+//   conditions are true.
+//
+//   - The key type stored in the table (|Key| for |HashMap<Key, Value>|, |T|
+//     for |HashSet<T>|) is an integer, pointer, UniquePtr, float, double, or
+//     char*.
+//
+//   - The type used for lookups (|Lookup|) is the same as the key type. This
+//     is usually the case, but not always.
+//
+//   Otherwise, you must provide your own hash policy; see the "Hash Policy"
+//   section below.
+//
+// - AllocPolicy. This defines how allocations are done by the table.
+//
+//   - |MallocAllocPolicy| is the default and is usually appropriate; note that
+//     operations (such as insertions) that might cause allocations are
+//     fallible and must be checked for OOM. These checks are enforced by the
+//     use of MOZ_MUST_USE.
+//
+//   - |InfallibleAllocPolicy| is another possibility; it allows the
+//     abovementioned OOM checks to be done with MOZ_ALWAYS_TRUE().
+//
+//   See AllocPolicy.h for more details.
+//
+// Documentation on how to use HashMap and HashSet, including examples, is
+// present within those classes. Search for "class HashMap" and "class
+// HashSet".
+//
+// Both HashMap and HashSet are implemented on top of a third class, HashTable.
+// You only need to look at HashTable if you want to understand the
+// implementation.
+//
+// How does mozilla::HashTable (this file) compare with PLDHashTable (and its
+// subclasses, such as nsTHashtable)?
 //
 // - mozilla::HashTable is a lot faster, largely because it uses templates
 //   throughout *and* inlines everything. PLDHashTable inlines operations much
 //   less aggressively, and also uses "virtual ops" for operations like hashing
 //   and matching entries that require function calls.
 //
 // - Correspondingly, mozilla::HashTable use is likely to increase executable
 //   size much more than PLDHashTable.
 //
 // - mozilla::HashTable has a nicer API, with a proper HashSet vs. HashMap
 //   distinction.
 //
-// - mozilla::HashTable requires more explicit OOM checking. Use
-//   mozilla::InfallibleAllocPolicy to make allocations infallible; note that
-//   return values of possibly-allocating methods such as add() will still need
-//   checking in some fashion -- e.g. with MOZ_ALWAYS_TRUE() -- due to the use
-//   of MOZ_MUST_USE.
+// - mozilla::HashTable requires more explicit OOM checking. As mentioned
+//   above, the use of |InfallibleAllocPolicy| can simplify things.
 //
 // - mozilla::HashTable has a default capacity on creation of 32 and a minimum
 //   capacity of 4. PLDHashTable has a default capacity on creation of 8 and a
 //   minimum capacity of 8.
 //
 // - mozilla::HashTable allocates memory eagerly. PLDHashTable delays
 //   allocating until the first element is inserted.
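A minimal usage sketch of the fallible API described above (it assumes nothing
beyond what this header documents; error handling reduced to early returns):

    #include "mozilla/HashTable.h"

    static bool Example() {
        mozilla::HashMap<uint32_t, const char*> map;
        if (!map.init()) {
            return false;                 // init() is fallible (OOM).
        }
        if (!map.put(42, "answer")) {
            return false;                 // Insertion is fallible too.
        }
        if (auto p = map.lookup(42)) {
            return p->value() != nullptr; // p->key() == 42 here.
        }
        return false;
    }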
 
@@ -62,45 +102,44 @@ namespace detail {
 template<typename T>
 class HashTableEntry;
 
 template<class T, class HashPolicy, class AllocPolicy>
 class HashTable;
 
 } // namespace detail
 
-/*****************************************************************************/
-
 // The "generation" of a hash table is an opaque value indicating the state of
 // modification of the hash table through its lifetime.  If the generation of
 // a hash table compares equal at times T1 and T2, then lookups in the hash
 // table, pointers to (or into) hash table entries, etc. at time T1 are valid
 // at time T2.  If the generation compares unequal, these computations are all
 // invalid and must be performed again to be used.
 //
 // Generations are meaningfully comparable only with respect to a single hash
 // table.  It's always nonsensical to compare the generation of distinct hash
 // tables H1 and H2.
 using Generation = Opaque<uint64_t>;
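A sketch of how the generation can guard a cached lookup (|map|, |key|, and
|use| are placeholders):

    mozilla::Generation gen = map.generation();
    auto p = map.lookup(key);
    // ... other work that may or may not mutate |map| ...
    if (map.generation() == gen && p.found()) {
        use(p->value());                  // |p| is still valid.
    }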
 
-// A performant, STL-like container providing a hash-based map from keys to
-// values. In particular, HashMap calls constructors and destructors of all
-// objects added so non-PODs may be used safely.
+//---------------------------------------------------------------------------
+// HashMap
+//---------------------------------------------------------------------------
+
+// HashMap is a fast hash-based map from keys to values.
 //
-// Key/Value requirements:
-//  - movable, destructible, assignable
-// HashPolicy requirements:
-//  - see Hash Policy section below
-// AllocPolicy:
-//  - see AllocPolicy.h
+// Template parameter requirements:
+// - Key/Value: movable, destructible, assignable.
+// - HashPolicy: see the "Hash Policy" section below.
+// - AllocPolicy: see AllocPolicy.h.
 //
 // Note:
 // - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members
 //   called by HashMap must not call back into the same HashMap object.
 // - Due to the lack of exception handling, the user must call |init()|.
+//
 template<class Key,
          class Value,
          class HashPolicy = DefaultHasher<Key>,
          class AllocPolicy = MallocAllocPolicy>
 class HashMap
 {
   using TableEntry = HashMapEntry<Key, Value>;
 
@@ -119,109 +158,111 @@ class HashMap
 
   using Impl = detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy>;
   Impl mImpl;
 
 public:
   using Lookup = typename HashPolicy::Lookup;
   using Entry = TableEntry;
 
-  // HashMap construction is fallible (due to OOM); thus the user must call
-  // init after constructing a HashMap and check the return value.
+  // HashMap construction is fallible (due to possible OOM). The user must
+  // call init() after construction and check the return value.
   explicit HashMap(AllocPolicy aPolicy = AllocPolicy())
     : mImpl(aPolicy)
   {
   }
 
+  // Initialize the map for use. Must be called after construction, before
+  // any other operations (other than initialized()).
   MOZ_MUST_USE bool init(uint32_t aLen = 16) { return mImpl.init(aLen); }
 
+  // Has the map been initialized?
   bool initialized() const { return mImpl.initialized(); }
 
-  // Return whether the given lookup value is present in the map. E.g.:
+  // Return a Ptr indicating whether a key/value matching |aLookup| is
+  // present in the map. E.g.:
   //
   //   using HM = HashMap<int,char>;
   //   HM h;
   //   if (HM::Ptr p = h.lookup(3)) {
-  //     const HM::Entry& e = *p; // p acts like a pointer to Entry
-  //     assert(p->key == 3);     // Entry contains the key
-  //     char val = p->value;     // and value
+  //     assert(p->key() == 3);
+  //     char val = p->value();
   //   }
   //
-  // Also see the definition of Ptr in HashTable above (with T = Entry).
   using Ptr = typename Impl::Ptr;
   MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const
   {
     return mImpl.lookup(aLookup);
   }
 
-  // Like lookup, but does not assert if two threads call lookup at the same
+  // Like lookup(), but does not assert if two threads call it at the same
   // time. Only use this method when none of the threads will modify the map.
   MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const
   {
     return mImpl.readonlyThreadsafeLookup(aLookup);
   }
 
-  // Assuming |p.found()|, remove |*p|.
+  // Remove a previously found key/value (assuming aPtr.found()). The map
+  // must not have been mutated in the interim.
   void remove(Ptr aPtr) { mImpl.remove(aPtr); }
 
   // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
   // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
-  // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new Entry. E.g.:
+  // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new key/value. E.g.:
   //
   //   using HM = HashMap<int,char>;
   //   HM h;
   //   HM::AddPtr p = h.lookupForAdd(3);
   //   if (!p) {
   //     if (!h.add(p, 3, 'a')) {
   //       return false;
   //     }
   //   }
-  //   const HM::Entry& e = *p;   // p acts like a pointer to Entry
-  //   assert(p->key == 3);       // Entry contains the key
-  //   char val = p->value;       // and value
-  //
-  // Also see the definition of AddPtr in HashTable above (with T = Entry).
+  //   assert(p->key() == 3);
+  //   char val = p->value();
   //
-  // N.B. The caller must ensure that no mutating hash table operations
-  // occur between a pair of |lookupForAdd| and |add| calls. To avoid
-  // looking up the key a second time, the caller may use the more efficient
-  // relookupOrAdd method. This method reuses part of the hashing computation
-  // to more efficiently insert the key if it has not been added. For
-  // example, a mutation-handling version of the previous example:
+  // N.B. The caller must ensure that no mutating hash table operations occur
+  // between a pair of lookupForAdd() and add() calls. To avoid looking up the
+  // key a second time, the caller may use the more efficient relookupOrAdd()
+  // method. This method reuses part of the hashing computation to more
+  // efficiently insert the key if it has not been added. For example, a
+  // mutation-handling version of the previous example:
   //
   //    HM::AddPtr p = h.lookupForAdd(3);
   //    if (!p) {
   //      call_that_may_mutate_h();
   //      if (!h.relookupOrAdd(p, 3, 'a')) {
   //        return false;
   //      }
   //    }
-  //    const HM::Entry& e = *p;
-  //    assert(p->key == 3);
-  //    char val = p->value;
+  //    assert(p->key() == 3);
+  //    char val = p->value();
   //
   using AddPtr = typename Impl::AddPtr;
   MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) const
   {
     return mImpl.lookupForAdd(aLookup);
   }
 
+  // Add a key/value. Returns false on OOM.
   template<typename KeyInput, typename ValueInput>
   MOZ_MUST_USE bool add(AddPtr& aPtr, KeyInput&& aKey, ValueInput&& aValue)
   {
     return mImpl.add(
       aPtr, std::forward<KeyInput>(aKey), std::forward<ValueInput>(aValue));
   }
 
+  // Add a given key and a default value. Returns false on OOM.
   template<typename KeyInput>
   MOZ_MUST_USE bool add(AddPtr& aPtr, KeyInput&& aKey)
   {
     return mImpl.add(aPtr, std::forward<KeyInput>(aKey), Value());
   }
 
+  // See the comment above lookupForAdd() for details.
   template<typename KeyInput, typename ValueInput>
   MOZ_MUST_USE bool relookupOrAdd(AddPtr& aPtr,
                                   KeyInput&& aKey,
                                   ValueInput&& aValue)
   {
     return mImpl.relookupOrAdd(aPtr,
                                aKey,
                                std::forward<KeyInput>(aKey),
@@ -230,138 +271,136 @@ public:
 
   // |iter()| returns an Iterator:
   //
   //   HashMap<int, char> h;
   //   for (auto iter = h.iter(); !iter.done(); iter.next()) {
   //     char c = iter.get().value();
   //   }
   //
-  // Also see the definition of Iterator in HashTable above (with T = Entry).
   using Iterator = typename Impl::Iterator;
   Iterator iter() const { return mImpl.iter(); }
 
   // |modIter()| returns a ModIterator:
   //
   //   HashMap<int, char> h;
   //   for (auto iter = h.modIter(); !iter.done(); iter.next()) {
   //     if (iter.get().value() == 'l') {
   //       iter.remove();
   //     }
   //   }
   //
-  // Table resize may occur in ModIterator's destructor. Also see the
-  // definition of ModIterator in HashTable above (with T = Entry).
+  // Table resize may occur in ModIterator's destructor.
   using ModIterator = typename Impl::ModIterator;
   ModIterator modIter() { return mImpl.modIter(); }
 
-  // These are similar to Iterator/ModIterator/iter(), but use less common
+  // These are similar to Iterator/ModIterator/iter(), but use different
   // terminology.
   using Range = typename Impl::Range;
   using Enum = typename Impl::Enum;
   Range all() const { return mImpl.all(); }
 
-  // Remove all entries. This does not shrink the table. For that consider
-  // using the finish() method.
+  // Remove all keys/values without changing the capacity.
   void clear() { mImpl.clear(); }
 
-  // Remove all entries. Unlike clear() this method tries to shrink the table.
-  // Unlike finish() it does not require the map to be initialized again.
+  // Remove all keys/values and attempt to minimize the capacity.
   void clearAndShrink() { mImpl.clearAndShrink(); }
 
-  // Remove all the entries and release all internal buffers. The map must
-  // be initialized again before any use.
+  // Remove all keys/values and release entry storage. The map must be
+  // initialized via init() again before further use.
   void finish() { mImpl.finish(); }
 
-  // Does the table contain any entries?
+  // Is the map empty?
   bool empty() const { return mImpl.empty(); }
 
-  // Number of live elements in the map.
+  // Number of keys/values in the map.
   uint32_t count() const { return mImpl.count(); }
 
-  // Total number of allocation in the dynamic table. Note: resize will
-  // happen well before count() == capacity().
+  // Number of key/value slots in the map. Note: resize will happen well before
+  // count() == capacity().
   size_t capacity() const { return mImpl.capacity(); }
 
-  // Measure the size of the HashMap's entry storage. If the entries contain
-  // pointers to other heap blocks, you must iterate over the table and measure
+  // The size of the map's entry storage, in bytes. If the keys/values contain
+  // pointers to other heap blocks, you must iterate over the map and measure
   // them separately; hence the "shallow" prefix.
   size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
   {
     return mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
   }
   size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
   {
     return aMallocSizeOf(this) +
            mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
   }
 
+  // The map's current generation.
   Generation generation() const { return mImpl.generation(); }
 
   /************************************************** Shorthand operations */
 
+  // Does the map contain a key/value matching |aLookup|?
   bool has(const Lookup& aLookup) const
   {
     return mImpl.lookup(aLookup).found();
   }
 
-  // Overwrite existing value with aValue. Return false on oom.
+  // Overwrite existing value with |aValue|, or add it if not present. Returns
+  // false on OOM.
   template<typename KeyInput, typename ValueInput>
   MOZ_MUST_USE bool put(KeyInput&& aKey, ValueInput&& aValue)
   {
     AddPtr p = lookupForAdd(aKey);
     if (p) {
       p->value() = std::forward<ValueInput>(aValue);
       return true;
     }
     return add(
       p, std::forward<KeyInput>(aKey), std::forward<ValueInput>(aValue));
   }
 
-  // Like put, but assert that the given key is not already present.
+  // Like put(), but asserts that the given key is not already present.
   template<typename KeyInput, typename ValueInput>
   MOZ_MUST_USE bool putNew(KeyInput&& aKey, ValueInput&& aValue)
   {
     return mImpl.putNew(
       aKey, std::forward<KeyInput>(aKey), std::forward<ValueInput>(aValue));
   }
 
   // Only call this to populate an empty map after reserving space with init().
   template<typename KeyInput, typename ValueInput>
   void putNewInfallible(KeyInput&& aKey, ValueInput&& aValue)
   {
     mImpl.putNewInfallible(
       aKey, std::forward<KeyInput>(aKey), std::forward<ValueInput>(aValue));
   }
 
   // Add (aKey,aDefaultValue) if |aKey| is not found. Return a false-y Ptr on
-  // oom.
+  // OOM.
   Ptr lookupWithDefault(const Key& aKey, const Value& aDefaultValue)
   {
     AddPtr p = lookupForAdd(aKey);
     if (p) {
       return p;
     }
     bool ok = add(p, aKey, aDefaultValue);
-    MOZ_ASSERT_IF(!ok, !p); // p is left false-y on oom.
+    MOZ_ASSERT_IF(!ok, !p); // p is left false-y on OOM.
     (void)ok;
     return p;
   }
 
-  // Remove if present.
+  // Lookup and remove the key/value matching |aLookup|, if present.
   void remove(const Lookup& aLookup)
   {
     if (Ptr p = lookup(aLookup)) {
       remove(p);
     }
   }
 
-  // Infallibly rekey one entry, if necessary.
-  // Requires template parameters Key and HashPolicy::Lookup to be the same
-  // type.
+  // Infallibly rekey one entry, if necessary. Requires that template
+  // parameters Key and HashPolicy::Lookup are the same type.
   void rekeyIfMoved(const Key& aOldKey, const Key& aNewKey)
   {
     if (aOldKey != aNewKey) {
       rekeyAs(aOldKey, aNewKey, aNewKey);
     }
   }
 
   // Infallibly rekey one entry if present, and return whether that happened.
@@ -371,131 +410,133 @@ public:
   {
     if (Ptr p = lookup(aOldLookup)) {
       mImpl.rekeyAndMaybeRehash(p, aNewLookup, aNewKey);
       return true;
     }
     return false;
   }
 
-  // HashMap is movable
+  // HashMap is movable.
   HashMap(HashMap&& aRhs)
     : mImpl(std::move(aRhs.mImpl))
   {
   }
   void operator=(HashMap&& aRhs)
   {
     MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited");
     mImpl = std::move(aRhs.mImpl);
   }
 
 private:
-  // HashMap is not copyable or assignable
+  // HashMap is not copyable or assignable.
   HashMap(const HashMap& hm) = delete;
   HashMap& operator=(const HashMap& hm) = delete;
 
   friend class Impl::Enum;
 };
 
-/*****************************************************************************/
+//---------------------------------------------------------------------------
+// HashSet
+//---------------------------------------------------------------------------
 
-// A performant, STL-like container providing a hash-based set of values. In
-// particular, HashSet calls constructors and destructors of all objects added
-// so non-PODs may be used safely.
+// HashSet is a fast hash-based set of values.
 //
-// T requirements:
-//  - movable, destructible, assignable
-// HashPolicy requirements:
-//  - see Hash Policy section below
-// AllocPolicy:
-//  - see AllocPolicy.h
+// Template parameter requirements:
+// - T: movable, destructible, assignable.
+// - HashPolicy: see the "Hash Policy" section below.
+// - AllocPolicy: see AllocPolicy.h.
 //
 // Note:
 // - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by
 //   HashSet must not call back into the same HashSet object.
 // - Due to the lack of exception handling, the user must call |init()|.
+//
 template<class T,
          class HashPolicy = DefaultHasher<T>,
          class AllocPolicy = MallocAllocPolicy>
 class HashSet
 {
-  struct SetOps : HashPolicy
+  struct SetHashPolicy : HashPolicy
   {
     using Base = HashPolicy;
     using KeyType = T;
 
     static const KeyType& getKey(const T& aT) { return aT; }
+
     static void setKey(T& aT, KeyType& aKey) { HashPolicy::rekey(aT, aKey); }
   };
 
-  using Impl = detail::HashTable<const T, SetOps, AllocPolicy>;
+  using Impl = detail::HashTable<const T, SetHashPolicy, AllocPolicy>;
   Impl mImpl;
 
 public:
   using Lookup = typename HashPolicy::Lookup;
   using Entry = T;
 
-  // HashSet construction is fallible (due to OOM); thus the user must call
-  // init after constructing a HashSet and check the return value.
+  // HashSet construction is fallible (due to possible OOM). The user must call
+  // init() after construction and check the return value.
   explicit HashSet(AllocPolicy a = AllocPolicy())
     : mImpl(a)
   {
   }
 
+  // Initialize the set for use. Must be called after construction, before
+  // any other operations (other than initialized()).
   MOZ_MUST_USE bool init(uint32_t aLen = 16) { return mImpl.init(aLen); }
 
+  // Has the set been initialized?
   bool initialized() const { return mImpl.initialized(); }
 
-  // Return whether the given lookup value is present in the map. E.g.:
+  // Return a Ptr indicating whether an element matching |aLookup| is present
+  // in the set. E.g.:
   //
   //   using HS = HashSet<int>;
   //   HS h;
   //   if (HS::Ptr p = h.lookup(3)) {
   //     assert(*p == 3);   // p acts like a pointer to int
   //   }
   //
-  // Also see the definition of Ptr in HashTable above.
   using Ptr = typename Impl::Ptr;
   MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const
   {
     return mImpl.lookup(aLookup);
   }
 
-  // Like lookup, but does not assert if two threads call lookup at the same
-  // time. Only use this method when none of the threads will modify the map.
+  // Like lookup(), but does not assert if two threads call it at the same
+  // time. Only use this method when none of the threads will modify the set.
   MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const
   {
     return mImpl.readonlyThreadsafeLookup(aLookup);
   }
 
-  // Assuming |aPtr.found()|, remove |*aPtr|.
+  // Remove a previously found element (assuming aPtr.found()). The set must
+  // not have been mutated in the interim.
   void remove(Ptr aPtr) { mImpl.remove(aPtr); }
 
   // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
   // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using
   // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.:
   //
   //   using HS = HashSet<int>;
   //   HS h;
   //   HS::AddPtr p = h.lookupForAdd(3);
   //   if (!p) {
   //     if (!h.add(p, 3)) {
   //       return false;
   //     }
   //   }
   //   assert(*p == 3);   // p acts like a pointer to int
   //
-  // Also see the definition of AddPtr in HashTable above.
-  //
-  // N.B. The caller must ensure that no mutating hash table operations
-  // occur between a pair of |lookupForAdd| and |add| calls. To avoid
-  // looking up the key a second time, the caller may use the more efficient
-  // relookupOrAdd method. This method reuses part of the hashing computation
-  // to more efficiently insert the key if it has not been added. For
-  // example, a mutation-handling version of the previous example:
+  // N.B. The caller must ensure that no mutating hash table operations occur
+  // between a pair of lookupForAdd() and add() calls. To avoid looking up the
+  // key a second time, the caller may use the more efficient relookupOrAdd()
+  // method. This method reuses part of the hashing computation to more
+  // efficiently insert the key if it has not been added. For example, a
+  // mutation-handling version of the previous example:
   //
   //    HS::AddPtr p = h.lookupForAdd(3);
   //    if (!p) {
   //      call_that_may_mutate_h();
   //      if (!h.relookupOrAdd(p, 3, 3)) {
   //        return false;
   //      }
   //    }
@@ -504,140 +545,142 @@ public:
   // Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the
   // entry |t|, where the caller ensures match(l,t).
   using AddPtr = typename Impl::AddPtr;
   MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) const
   {
     return mImpl.lookupForAdd(aLookup);
   }
 
+  // Add an element. Returns false on OOM.
   template<typename U>
   MOZ_MUST_USE bool add(AddPtr& aPtr, U&& aU)
   {
     return mImpl.add(aPtr, std::forward<U>(aU));
   }
 
+  // See the comment above lookupForAdd() for details.
   template<typename U>
   MOZ_MUST_USE bool relookupOrAdd(AddPtr& aPtr, const Lookup& aLookup, U&& aU)
   {
     return mImpl.relookupOrAdd(aPtr, aLookup, std::forward<U>(aU));
   }
 
   // |iter()| returns an Iterator:
   //
   //   HashSet<int> h;
   //   for (auto iter = h.iter(); !iter.done(); iter.next()) {
   //     int i = iter.get();
   //   }
   //
-  // Also see the definition of Iterator in HashTable above.
   typedef typename Impl::Iterator Iterator;
   Iterator iter() const { return mImpl.iter(); }
 
   // |modIter()| returns a ModIterator:
   //
   //   HashSet<int> h;
   //   for (auto iter = h.modIter(); !iter.done(); iter.next()) {
   //     if (iter.get() == 42) {
   //       iter.remove();
   //     }
   //   }
   //
-  // Table resize may occur in ModIterator's destructor. Also see the
-  // definition of ModIterator in HashTable above.
+  // Table resize may occur in ModIterator's destructor.
   typedef typename Impl::ModIterator ModIterator;
   ModIterator modIter() { return mImpl.modIter(); }
 
   // These are similar to Iterator/ModIterator/iter(), but use different
   // terminology.
   using Range = typename Impl::Range;
   using Enum = typename Impl::Enum;
   Range all() const { return mImpl.all(); }
 
-  // Remove all entries. This does not shrink the table. For that consider
-  // using the finish() method.
+  // Remove all elements without changing the capacity.
   void clear() { mImpl.clear(); }
 
-  // Remove all entries. Unlike clear() this method tries to shrink the table.
-  // Unlike finish() it does not require the set to be initialized again.
+  // Remove all elements and attempt to minimize the capacity.
   void clearAndShrink() { mImpl.clearAndShrink(); }
 
-  // Remove all the entries and release all internal buffers. The set must
-  // be initialized again before any use.
+  // Remove all elements and release entry storage. The set must be
+  // initialized via init() again before further use.
   void finish() { mImpl.finish(); }
 
-  // Does the table contain any entries?
+  // Is the set empty?
   bool empty() const { return mImpl.empty(); }
 
-  // Number of live elements in the map.
+  // Number of elements in the set.
   uint32_t count() const { return mImpl.count(); }
 
-  // Total number of allocation in the dynamic table. Note: resize will
-  // happen well before count() == capacity().
+  // Number of element slots in the set. Note: resize will happen well before
+  // count() == capacity().
   size_t capacity() const { return mImpl.capacity(); }
 
-  // Measure the size of the HashSet's entry storage. If the entries contain
-  // pointers to other heap blocks, you must iterate over the table and measure
+  // The size of the HashSet's entry storage, in bytes. If the elements contain
+  // pointers to other heap blocks, you must iterate over the set and measure
   // them separately; hence the "shallow" prefix.
   size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
   {
     return mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
   }
   size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
   {
     return aMallocSizeOf(this) +
            mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
   }
 
+  // The set's current generation.
   Generation generation() const { return mImpl.generation(); }
 
   /************************************************** Shorthand operations */
 
+  // Does the set contain an element matching |aLookup|?
   bool has(const Lookup& aLookup) const
   {
     return mImpl.lookup(aLookup).found();
   }
 
-  // Add |aU| if it is not present already. Return false on oom.
+  // Add |aU| if it is not present already. Returns false on OOM.
   template<typename U>
   MOZ_MUST_USE bool put(U&& aU)
   {
     AddPtr p = lookupForAdd(aU);
     return p ? true : add(p, std::forward<U>(aU));
   }
 
-  // Like put, but assert that the given key is not already present.
+  // Like put(), but asserts that the given key is not already present.
   template<typename U>
   MOZ_MUST_USE bool putNew(U&& aU)
   {
     return mImpl.putNew(aU, std::forward<U>(aU));
   }
 
+  // Like the other putNew(), but for when |Lookup| differs from |T|.
   template<typename U>
   MOZ_MUST_USE bool putNew(const Lookup& aLookup, U&& aU)
   {
     return mImpl.putNew(aLookup, std::forward<U>(aU));
   }
 
   // Only call this to populate an empty set after reserving space with init().
   template<typename U>
   void putNewInfallible(const Lookup& aLookup, U&& aU)
   {
     mImpl.putNewInfallible(aLookup, std::forward<U>(aU));
   }
 
+  // Lookup and remove the element matching |aLookup|, if present.
   void remove(const Lookup& aLookup)
   {
     if (Ptr p = lookup(aLookup)) {
       remove(p);
     }
   }
 
-  // Infallibly rekey one entry, if present.
-  // Requires template parameters T and HashPolicy::Lookup to be the same type.
+  // Infallibly rekey one entry, if present. Requires that template parameters
+  // T and HashPolicy::Lookup are the same type.
   void rekeyIfMoved(const Lookup& aOldValue, const T& aNewValue)
   {
     if (aOldValue != aNewValue) {
       rekeyAs(aOldValue, aNewValue, aNewValue);
     }
   }
 
   // Infallibly rekey one entry if present, and return whether that happened.
@@ -647,30 +690,30 @@ public:
   {
     if (Ptr p = lookup(aOldLookup)) {
       mImpl.rekeyAndMaybeRehash(p, aNewLookup, aNewValue);
       return true;
     }
     return false;
   }
 
-  // Infallibly replace the current key at |p| with an equivalent key.
+  // Infallibly replace the current key at |aPtr| with an equivalent key.
   // Specifically, both HashPolicy::hash and HashPolicy::match must return
   // identical results for the new and old key when applied against all
   // possible matching values.
   void replaceKey(Ptr aPtr, const T& aNewValue)
   {
     MOZ_ASSERT(aPtr.found());
     MOZ_ASSERT(*aPtr != aNewValue);
     MOZ_ASSERT(HashPolicy::hash(*aPtr) == HashPolicy::hash(aNewValue));
     MOZ_ASSERT(HashPolicy::match(*aPtr, aNewValue));
     const_cast<T&>(*aPtr) = aNewValue;
   }
 
-  // HashSet is movable
+  // HashSet is movable.
   HashSet(HashSet&& aRhs)
     : mImpl(std::move(aRhs.mImpl))
   {
   }
   void operator=(HashSet&& aRhs)
   {
     MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited");
     mImpl = std::move(aRhs.mImpl);
@@ -679,46 +722,47 @@ public:
 private:
   // HashSet is not copyable or assignable.
   HashSet(const HashSet& hs) = delete;
   HashSet& operator=(const HashSet& hs) = delete;
 
   friend class Impl::Enum;
 };
 
-/*****************************************************************************/
+//---------------------------------------------------------------------------
+// Hash Policy
+//---------------------------------------------------------------------------
 
-// Hash Policy
+// A hash policy |HP| for a hash table with key-type |Key| must provide:
 //
-// A hash policy P for a hash table with key-type Key must provide:
-//  - a type |P::Lookup| to use to lookup table entries;
-//  - a static member function |P::hash| with signature
+//  - a type |HP::Lookup| to use to lookup table entries;
 //
-//      static mozilla::HashNumber hash(Lookup)
+//  - a static member function |HP::hash| that hashes lookup values:
+//
+//      static mozilla::HashNumber hash(const Lookup&);
 //
-//    to use to hash the lookup type; and
-//  - a static member function |P::match| with signature
+//  - a static member function |HP::match| that tests equality of key and
+//    lookup values:
 //
-//      static bool match(Key, Lookup)
-//
-//    to use to test equality of key and lookup values.
+//      static bool match(const Key&, const Lookup&);
 //
 // Normally, Lookup = Key. In general, though, different values and types of
-// values can be used to lookup and store. If a Lookup value |l| is != to the
-// added Key value |k|, the user must ensure that |P::match(k,l)|. E.g.:
+// values can be used to look up and store. If a Lookup value |l| is not equal
+// to the added Key value |k|, the user must ensure that |HP::match(k,l)| is
+// true. E.g.:
 //
-//   mozilla::HashSet<Key, P>::AddPtr p = h.lookup(l);
+//   mozilla::HashSet<Key, HP>::AddPtr p = h.lookupForAdd(l);
 //   if (!p) {
-//     assert(P::match(k, l));  // must hold
+//     assert(HP::match(k, l));  // must hold
 //     h.add(p, k);
 //   }
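An illustrative custom policy meeting these requirements (|TwoWords| and its
fields are hypothetical; HashGeneric() comes from mozilla/HashFunctions.h):

    struct TwoWords {
        uintptr_t mFirst;
        uintptr_t mSecond;
    };

    // Keys are TwoWords values, but lookups use only the first word.
    struct TwoWordsHasher {
        using Lookup = uintptr_t;

        static mozilla::HashNumber hash(const Lookup& aLookup) {
            return mozilla::HashGeneric(aLookup);
        }
        static bool match(const TwoWords& aKey, const Lookup& aLookup) {
            return aKey.mFirst == aLookup;
        }
    };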
 
-// Pointer hashing policy that uses HashGeneric() to create good hashes for
-// pointers.  Note that we don't shift out the lowest k bits to generate a
-// good distribution for arena allocated pointers.
+// A pointer hashing policy that uses HashGeneric() to create good hashes for
+// pointers. Note that we don't shift out the lowest k bits because we don't
+// want to assume anything about the alignment of the pointers.
 template<typename Key>
 struct PointerHasher
 {
   using Lookup = Key;
 
   static HashNumber hash(const Lookup& aLookup)
   {
     size_t word = reinterpret_cast<size_t>(aLookup);
@@ -728,124 +772,132 @@ struct PointerHasher
   static bool match(const Key& aKey, const Lookup& aLookup)
   {
     return aKey == aLookup;
   }
 
   static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
 };
 
-// Default hash policy: just use the 'lookup' value. This of course only
-// works if the lookup value is integral. HashTable applies ScrambleHashCode to
-// the result of the 'hash' which means that it is 'ok' if the lookup value is
-// not well distributed over the HashNumber domain.
+// The default hash policy, which only works with integers.
 template<class Key>
 struct DefaultHasher
 {
   using Lookup = Key;
 
   static HashNumber hash(const Lookup& aLookup)
   {
-    // Hash if can implicitly cast to hash number type.
+    // Just convert the integer to a HashNumber and use it as-is. (This
+    // discards the high 32 bits of 64-bit integers!) ScrambleHashCode() is
+    // subsequently called on the value to improve the distribution.
     return aLookup;
   }
 
   static bool match(const Key& aKey, const Lookup& aLookup)
   {
     // Use builtin or overloaded operator==.
     return aKey == aLookup;
   }
 
   static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
 };
 
-// Specialize hashing policy for pointer types. It assumes that the type is
-// at least word-aligned. For types with smaller size use PointerHasher.
+// A DefaultHasher specialization for pointers.
 template<class T>
 struct DefaultHasher<T*> : PointerHasher<T*>
 {
 };
 
-// Specialize hashing policy for mozilla::UniquePtr to proxy the UniquePtr's
-// raw pointer to PointerHasher.
+// A DefaultHasher specialization for mozilla::UniquePtr.
 template<class T, class D>
 struct DefaultHasher<UniquePtr<T, D>>
 {
-  using Lookup = UniquePtr<T, D>;
+  using Key = UniquePtr<T, D>;
+  using Lookup = Key;
   using PtrHasher = PointerHasher<T*>;
 
   static HashNumber hash(const Lookup& aLookup)
   {
     return PtrHasher::hash(aLookup.get());
   }
 
-  static bool match(const UniquePtr<T, D>& aKey, const Lookup& aLookup)
+  static bool match(const Key& aKey, const Lookup& aLookup)
   {
     return PtrHasher::match(aKey.get(), aLookup.get());
   }
 
   static void rekey(UniquePtr<T, D>& aKey, UniquePtr<T, D>&& aNewKey)
   {
     aKey = std::move(aNewKey);
   }
 };
 
-// For doubles, we can xor the two uint32s.
+// A DefaultHasher specialization for doubles.
 template<>
 struct DefaultHasher<double>
 {
-  using Lookup = double;
+  using Key = double;
+  using Lookup = Key;
 
-  static HashNumber hash(double aVal)
+  static HashNumber hash(const Lookup& aLookup)
   {
+    // Just xor the high bits with the low bits, and then treat the bits of the
+    // result as a uint32_t.
     static_assert(sizeof(HashNumber) == 4,
                   "subsequent code assumes a four-byte hash");
-    uint64_t u = BitwiseCast<uint64_t>(aVal);
+    uint64_t u = BitwiseCast<uint64_t>(aLookup);
     return HashNumber(u ^ (u >> 32));
   }
 
-  static bool match(double aLhs, double aRhs)
+  static bool match(const Key& aKey, const Lookup& aLookup)
   {
-    return BitwiseCast<uint64_t>(aLhs) == BitwiseCast<uint64_t>(aRhs);
+    return BitwiseCast<uint64_t>(aKey) == BitwiseCast<uint64_t>(aLookup);
   }
 };
 
+// A DefaultHasher specialization for floats.
 template<>
 struct DefaultHasher<float>
 {
-  using Lookup = float;
+  using Key = float;
+  using Lookup = Key;
 
-  static HashNumber hash(float aVal)
+  static HashNumber hash(const Lookup& aLookup)
   {
+    // Just use the value as if its bits form an integer. ScrambleHashCode() is
+    // subsequently called on the value to improve the distribution.
     static_assert(sizeof(HashNumber) == 4,
                   "subsequent code assumes a four-byte hash");
-    return HashNumber(BitwiseCast<uint32_t>(aVal));
+    return HashNumber(BitwiseCast<uint32_t>(aLookup));
   }
 
-  static bool match(float aLhs, float aRhs)
+  static bool match(const Key& aKey, const Lookup& aLookup)
   {
-    return BitwiseCast<uint32_t>(aLhs) == BitwiseCast<uint32_t>(aRhs);
+    return BitwiseCast<uint32_t>(aKey) == BitwiseCast<uint32_t>(aLookup);
   }
 };
 
-// A hash policy that compares C strings.
+// A hash policy for C strings.
 struct CStringHasher
 {
+  using Key = const char*;
   using Lookup = const char*;
 
-  static HashNumber hash(Lookup aLookup) { return HashString(aLookup); }
+  static HashNumber hash(const Lookup& aLookup) { return HashString(aLookup); }
 
-  static bool match(const char* key, Lookup lookup)
+  static bool match(const Key& aKey, const Lookup& aLookup)
   {
-    return strcmp(key, lookup) == 0;
+    return strcmp(aKey, aLookup) == 0;
   }
 };
 
-// Fallible hashing interface.
-//
+//---------------------------------------------------------------------------
+// Fallible Hashing Interface
+//---------------------------------------------------------------------------
+
 // Most of the time generating a hash code is infallible, so this class provides
 // default methods that always succeed.  Specialize this class for your own hash
 // policy to provide fallible hashing.
 //
 // This is used by MovableCellHasher to handle the fact that generating a unique
 // ID for a cell pointer may fail due to OOM.
 template<typename HashPolicy>
 struct FallibleHashMethods
@@ -878,17 +930,19 @@ HasHash(Lookup&& aLookup)
 template<typename HashPolicy, typename Lookup>
 static bool
 EnsureHash(Lookup&& aLookup)
 {
   return FallibleHashMethods<typename HashPolicy::Base>::ensureHash(
     std::forward<Lookup>(aLookup));
 }
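An illustrative specialization, assuming the hasHash()/ensureHash() members
that HasHash()/EnsureHash() above forward to (|MyCellHasher| and its accessors
are hypothetical):

    template<>
    struct FallibleHashMethods<MyCellHasher> {
        // Report whether a hash code is already available for this lookup.
        template<typename Lookup>
        static bool hasHash(Lookup&& aLookup) {
            return aLookup->hasCachedUniqueId();
        }
        // Try to create one; may fail, e.g. due to OOM.
        template<typename Lookup>
        static bool ensureHash(Lookup&& aLookup) {
            return aLookup->ensureUniqueId();
        }
    };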
 
-/*****************************************************************************/
+//---------------------------------------------------------------------------
+// Implementation Details (HashMapEntry, HashTableEntry, HashTable)
+//---------------------------------------------------------------------------
 
 // Both HashMap and HashSet are implemented by a single HashTable that is even
 // more heavily parameterized than the other two. This leaves HashTable gnarly
 // and extremely coupled to HashMap and HashSet; thus code should not use
 // HashTable directly.
 
 template<class Key, class Value>
 class HashMapEntry
@@ -922,16 +976,19 @@ public:
     key_ = std::move(aRhs.key_);
     value_ = std::move(aRhs.value_);
   }
 
   using KeyType = Key;
   using ValueType = Value;
 
   const Key& key() const { return key_; }
+
+  // Use this method with caution! If the key is changed such that its hash
+  // value also changes, the map will be left in an invalid state.
   Key& mutableKey() { return key_; }
 
   const Value& value() const { return value_; }
   Value& value() { return value_; }
 
 private:
   HashMapEntry(const HashMapEntry&) = delete;
   void operator=(const HashMapEntry&) = delete;
--- a/mobile/android/geckoview/src/androidTest/java/org/mozilla/geckoview/test/ProgressDelegateTest.kt
+++ b/mobile/android/geckoview/src/androidTest/java/org/mozilla/geckoview/test/ProgressDelegateTest.kt
@@ -18,16 +18,40 @@ import org.junit.Assume.assumeThat
 import org.junit.Ignore
 import org.junit.Test
 import org.junit.runner.RunWith
 
 @RunWith(AndroidJUnit4::class)
 @MediumTest
 class ProgressDelegateTest : BaseSessionTest() {
 
+    @Test fun loadProgress() {
+        sessionRule.session.loadTestPath(HELLO_HTML_PATH)
+        sessionRule.waitForPageStop()
+
+        var counter = 0
+        var lastProgress = -1
+
+        sessionRule.forCallbacksDuringWait(object : Callbacks.ProgressDelegate {
+            @AssertCalled
+            override fun onProgressChange(session: GeckoSession, progress: Int) {
+                assertThat("Progress must be strictly increasing", progress,
+                           greaterThan(lastProgress))
+                lastProgress = progress
+                counter++
+            }
+        })
+
+        assertThat("Callback should be called at least twice", counter,
+                   greaterThanOrEqualTo(2))
+        assertThat("Last progress value should be 100", lastProgress,
+                   equalTo(100))
+    }
+
     @Test fun load() {
         sessionRule.session.loadTestPath(HELLO_HTML_PATH)
         sessionRule.waitForPageStop()
 
         sessionRule.forCallbacksDuringWait(object : Callbacks.ProgressDelegate {
             @AssertCalled(count = 1, order = [1])
             override fun onPageStart(session: GeckoSession, url: String) {
                 assertThat("Session should not be null", session, notNullValue())
--- a/mobile/android/geckoview_example/src/main/java/org/mozilla/geckoview_example/GeckoViewActivity.java
+++ b/mobile/android/geckoview_example/src/main/java/org/mozilla/geckoview_example/GeckoViewActivity.java
@@ -29,17 +29,19 @@ import android.support.v4.app.ActivityCo
 import android.support.v4.content.ContextCompat;
 import android.support.v7.app.ActionBar;
 import android.support.v7.app.AppCompatActivity;
 import android.support.v7.widget.Toolbar;
 import android.util.Log;
 import android.view.Menu;
 import android.view.MenuInflater;
 import android.view.MenuItem;
+import android.view.View;
 import android.view.WindowManager;
+import android.widget.ProgressBar;
 
 import java.util.LinkedList;
 import java.util.Locale;
 
 public class GeckoViewActivity extends AppCompatActivity {
     private static final String LOGTAG = "GeckoViewActivity";
     private static final String DEFAULT_URL = "https://mozilla.org";
     private static final String USE_MULTIPROCESS_EXTRA = "use_multiprocess";
@@ -58,16 +60,18 @@ public class GeckoViewActivity extends A
     private boolean mKillProcessOnDestroy;
 
     private LocationView mLocationView;
     private String mCurrentUri;
     private boolean mCanGoBack;
     private boolean mCanGoForward;
     private boolean mFullScreen;
 
+    private ProgressBar mProgressView;
+
     private LinkedList<GeckoSession.WebResponseInfo> mPendingDownloads = new LinkedList<>();
 
     private LocationView.CommitListener mCommitListener = new LocationView.CommitListener() {
         @Override
         public void onCommit(String text) {
             if ((text.contains(".") || text.contains(":")) && !text.contains(" ")) {
                 mGeckoSession.loadUri(text);
             } else {
@@ -90,16 +94,17 @@ public class GeckoViewActivity extends A
 
         mLocationView = new LocationView(this);
         getSupportActionBar().setCustomView(mLocationView,
                 new ActionBar.LayoutParams(ActionBar.LayoutParams.MATCH_PARENT,
                         ActionBar.LayoutParams.WRAP_CONTENT));
         getSupportActionBar().setDisplayOptions(ActionBar.DISPLAY_SHOW_CUSTOM);
 
         mUseMultiprocess = getIntent().getBooleanExtra(USE_MULTIPROCESS_EXTRA, true);
+        mProgressView = (ProgressBar) findViewById(R.id.page_progress);
 
         if (sGeckoRuntime == null) {
             final GeckoRuntimeSettings.Builder runtimeSettingsBuilder =
                 new GeckoRuntimeSettings.Builder();
 
             if (BuildConfig.DEBUG) {
                 // In debug builds, we want to load JavaScript resources fresh with
                 // each build.
@@ -431,16 +436,24 @@ public class GeckoViewActivity extends A
             Log.i(LOGTAG, "zerdatime " + SystemClock.elapsedRealtime() +
                   " - page load stop");
             mTp.logCounters();
         }
 
         @Override
         public void onProgressChange(GeckoSession session, int progress) {
             Log.i(LOGTAG, "onProgressChange " + progress);
+
+            mProgressView.setProgress(progress, true);
+
+            if (progress > 0 && progress < 100) {
+                mProgressView.setVisibility(View.VISIBLE);
+            } else {
+                mProgressView.setVisibility(View.GONE);
+            }
         }
 
         @Override
         public void onSecurityChange(GeckoSession session, SecurityInformation securityInfo) {
             Log.i(LOGTAG, "Security status changed to " + securityInfo.securityMode);
         }
     }
 
--- a/mobile/android/geckoview_example/src/main/res/layout/geckoview_activity.xml
+++ b/mobile/android/geckoview_example/src/main/res/layout/geckoview_activity.xml
@@ -12,9 +12,17 @@
         android:scrollbars="none"
         />
 
     <android.support.v7.widget.Toolbar
             android:id="@+id/toolbar"
             android:layout_width="match_parent"
             android:layout_height="?android:actionBarSize"
             android:layout_alignParentBottom="true"/>
+
+    <ProgressBar
+            android:id="@+id/page_progress"
+            style="@style/Base.Widget.AppCompat.ProgressBar.Horizontal"
+            android:layout_width="match_parent"
+            android:layout_height="3dp"
+            android:layout_alignTop="@id/gecko_view"
+            android:progress="70" />
 </RelativeLayout>
--- a/old-configure.in
+++ b/old-configure.in
@@ -269,16 +269,19 @@ case "$target" in
         # Set midl environment
         case "$target" in
         i*86-*)
             MIDL_FLAGS="${MIDL_FLAGS} -env win32"
             ;;
         x86_64-*)
             MIDL_FLAGS="${MIDL_FLAGS} -env x64"
             ;;
+        aarch64-*)
+            MIDL_FLAGS="${MIDL_FLAGS} -env arm64"
+            ;;
         esac
 
         unset _MSVC_VER_FILTER
 
         AC_CACHE_CHECK(for overridable _RAISE,
                        ac_cv_have__RAISE,
             [
                 AC_LANG_SAVE
--- a/python/mozbuild/mozbuild/configure/constants.py
+++ b/python/mozbuild/mozbuild/configure/constants.py
@@ -71,17 +71,17 @@ WindowsBinaryType = EnumString.subclass(
     'win64',
 )
 
 # The order of those checks matter
 CPU_preprocessor_checks = OrderedDict((
     ('x86', '__i386__ || _M_IX86'),
     ('x86_64', '__x86_64__ || _M_X64'),
     ('arm', '__arm__ || _M_ARM'),
-    ('aarch64', '__aarch64__'),
+    ('aarch64', '__aarch64__ || _M_ARM64'),
     ('ia64', '__ia64__'),
     ('s390x', '__s390x__'),
     ('s390', '__s390__'),
     ('ppc64', '__powerpc64__'),
     ('ppc', '__powerpc__'),
     ('Alpha', '__alpha__'),
     ('hppa', '__hppa__'),
     ('sparc64', '__sparc__ && __arch64__'),