Backed out 5 changesets (bug 1639153) for landing the wrong stack of patches. CLOSED TREE
author: Butkovits Atila <abutkovits@mozilla.com>
Fri, 11 Sep 2020 12:23:15 +0300
changeset 548291 1fc282e54b7a584087712a870dce62e0bf4c942b
parent 548290 691a86eef68649b434b921080f5b55960b3c9f91
child 548292 5dd737d76eada2a34f24244b30141f01033ea259
push id: 37776
push user: btara@mozilla.com
push date: Fri, 11 Sep 2020 15:10:42 +0000
treeherder: mozilla-central@b133e2d673e8 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
bugs: 1639153
milestone: 82.0a1
backs out: 62480de389ffba353998de24760993b2e1531232
f2e486f1be1760e1751cf50acea1ab50cb3cfa90
b79c89e6ac82e947d6805ce7fb4ee52f28c55a5a
ab5825b43bb56cdb7b50e536e4cab3734c35088f
1b94af3458ce8bf47b776e03ac74f0c1b5a72c65
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out 5 changesets (bug 1639153) for landing the wrong stack of patches. CLOSED TREE Backed out changeset 62480de389ff (bug 1639153) Backed out changeset f2e486f1be17 (bug 1639153) Backed out changeset b79c89e6ac82 (bug 1639153) Backed out changeset ab5825b43bb5 (bug 1639153) Backed out changeset 1b94af3458ce (bug 1639153)
js/src/jit/CodeGenerator.cpp
js/src/jit/MacroAssembler.cpp
js/src/jit/MacroAssembler.h
js/src/jit/arm/Assembler-arm.h
js/src/jit/arm/MacroAssembler-arm.cpp
js/src/jit/arm64/Assembler-arm64.h
js/src/jit/arm64/MacroAssembler-arm64.cpp
js/src/jit/mips32/Assembler-mips32.h
js/src/jit/mips32/MacroAssembler-mips32.cpp
js/src/jit/mips64/Assembler-mips64.h
js/src/jit/mips64/MacroAssembler-mips64.cpp
js/src/jit/none/MacroAssembler-none.h
js/src/jit/x64/Assembler-x64.h
js/src/jit/x64/MacroAssembler-x64.cpp
js/src/jit/x86/Assembler-x86.h
js/src/jit/x86/MacroAssembler-x86.cpp
js/src/vm/JitActivation.cpp
js/src/vm/JitActivation.h
js/src/wasm/WasmBaselineCompile.cpp
js/src/wasm/WasmBaselineCompile.h
js/src/wasm/WasmBuiltins.cpp
js/src/wasm/WasmFrameIter.cpp
js/src/wasm/WasmFrameIter.h
js/src/wasm/WasmGC.cpp
js/src/wasm/WasmGC.h
js/src/wasm/WasmIonCompile.cpp
js/src/wasm/WasmSignalHandlers.cpp
js/src/wasm/WasmStubs.cpp
js/src/wasm/WasmTypes.cpp
js/src/wasm/WasmTypes.h
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -8464,18 +8464,17 @@ void CodeGenerator::visitWasmCall(LWasmC
 
   // Now that all the outbound in-memory args are on the stack, note the
   // required lower boundary point of the associated StackMap.
   lir->safepoint()->setFramePushedAtStackMapBase(
       masm.framePushed() - mir->stackArgAreaSizeUnaligned());
   MOZ_ASSERT(!lir->safepoint()->isWasmTrap());
 
   if (reloadRegs) {
-    masm.loadPtr(Address(masm.getStackPointer(), WasmCallerTLSOffsetBeforeCall),
-                 WasmTlsReg);
+    masm.loadWasmTlsRegFromFrame();
     masm.loadWasmPinnedRegsFromTls();
     if (switchRealm) {
       masm.switchToWasmTlsRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
     }
   } else {
     MOZ_ASSERT(!switchRealm);
   }
 }
@@ -15022,17 +15021,17 @@ void CodeGenerator::emitIonToWasmCallBas
   masm.propagateOOM(stackArgs.reserve(lir->numOperands()));
   if (masm.oom()) {
     return;
   }
 
   const wasm::FuncExport& funcExport = lir->mir()->funcExport();
   const wasm::FuncType& sig = funcExport.funcType();
 
-  WasmABIArgGenerator abi;
+  ABIArgGenerator abi;
   for (size_t i = 0; i < lir->numOperands(); i++) {
     MIRType argMir;
     switch (sig.args()[i].kind()) {
       case wasm::ValType::I32:
       case wasm::ValType::I64:
       case wasm::ValType::F32:
       case wasm::ValType::F64:
         argMir = ToMIRType(sig.args()[i]);
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -2989,29 +2989,29 @@ void MacroAssembler::freeStack(uint32_t 
   }
   framePushed_ -= amount;
 }
 
 void MacroAssembler::freeStack(Register amount) { addToStackPtr(amount); }
 
 // ===============================================================
 // ABI function calls.
-template <class ABIArgGeneratorT>
-void MacroAssembler::setupABICallHelper() {
+
+void MacroAssembler::setupABICall() {
 #ifdef DEBUG
   MOZ_ASSERT(!inCall_);
   inCall_ = true;
 #endif
 
 #ifdef JS_SIMULATOR
   signature_ = 0;
 #endif
 
   // Reinitialize the ABIArg generator.
-  abiArgs_ = ABIArgGeneratorT();
+  abiArgs_ = ABIArgGenerator();
 
 #if defined(JS_CODEGEN_ARM)
   // On ARM, we need to know what ABI we are using, either in the
   // simulator, or based on the configure flags.
 #  if defined(JS_SIMULATOR_ARM)
   abiArgs_.setUseHardFp(UseHardFpABI());
 #  elif defined(JS_CODEGEN_ARM_HARDFP)
   abiArgs_.setUseHardFp(true);
@@ -3024,35 +3024,31 @@ void MacroAssembler::setupABICallHelper(
   // On MIPS, the system ABI use general registers pairs to encode double
   // arguments, after one or 2 integer-like arguments. Unfortunately, the
   // Lowering phase is not capable to express it at the moment. So we enforce
   // the system ABI here.
   abiArgs_.enforceO32ABI();
 #endif
 }
 
-void MacroAssembler::setupNativeABICall() {
-  setupABICallHelper<ABIArgGenerator>();
-}
-
 void MacroAssembler::setupWasmABICall() {
   MOZ_ASSERT(IsCompilingWasm(), "non-wasm should use setupAlignedABICall");
-  setupABICallHelper<WasmABIArgGenerator>();
+  setupABICall();
 
 #if defined(JS_CODEGEN_ARM)
   // The builtin thunk does the FP -> GPR moving on soft-FP, so
   // use hard fp unconditionally.
   abiArgs_.setUseHardFp(true);
 #endif
   dynamicAlignment_ = false;
 }
 
 void MacroAssembler::setupAlignedABICall() {
   MOZ_ASSERT(!IsCompilingWasm(), "wasm should use setupWasmABICall");
-  setupNativeABICall();
+  setupABICall();
   dynamicAlignment_ = false;
 
 #if defined(JS_CODEGEN_ARM64)
   MOZ_CRASH("Not supported on arm64");
 #endif
 }
 
 void MacroAssembler::passABIArg(const MoveOperand& from, MoveOp::Type type) {
@@ -3683,19 +3679,16 @@ std::pair<CodeOffset, uint32_t> MacroAss
   wasmTrap(wasm::Trap::StackOverflow, trapOffset);
   CodeOffset trapInsnOffset = CodeOffset(currentOffset());
   bind(&ok);
   return std::pair<CodeOffset, uint32_t>(trapInsnOffset, amount);
 }
 
 CodeOffset MacroAssembler::wasmCallImport(const wasm::CallSiteDesc& desc,
                                           const wasm::CalleeDesc& callee) {
-  storePtr(WasmTlsReg,
-           Address(getStackPointer(), WasmCallerTLSOffsetBeforeCall));
-
   // Load the callee, before the caller's registers are clobbered.
   uint32_t globalDataOffset = callee.importGlobalDataOffset();
   loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, code),
                     ABINonArgReg0);
 
 #ifndef JS_CODEGEN_NONE
   static_assert(ABINonArgReg0 != WasmTlsReg, "by constraint");
 #endif
@@ -3704,34 +3697,26 @@ CodeOffset MacroAssembler::wasmCallImpor
   loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, realm),
                     ABINonArgReg1);
   loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, cx)), ABINonArgReg2);
   storePtr(ABINonArgReg1, Address(ABINonArgReg2, JSContext::offsetOfRealm()));
 
   // Switch to the callee's TLS and pinned registers and make the call.
   loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, tls),
                     WasmTlsReg);
-
-  storePtr(WasmTlsReg,
-           Address(getStackPointer(), WasmCalleeTLSOffsetBeforeCall));
   loadWasmPinnedRegsFromTls();
 
   return call(desc, ABINonArgReg0);
 }
 
 CodeOffset MacroAssembler::wasmCallBuiltinInstanceMethod(
     const wasm::CallSiteDesc& desc, const ABIArg& instanceArg,
     wasm::SymbolicAddress builtin, wasm::FailureMode failureMode) {
   MOZ_ASSERT(instanceArg != ABIArg());
 
-  storePtr(WasmTlsReg,
-           Address(getStackPointer(), WasmCallerTLSOffsetBeforeCall));
-  storePtr(WasmTlsReg,
-           Address(getStackPointer(), WasmCalleeTLSOffsetBeforeCall));
-
   if (instanceArg.kind() == ABIArg::GPR) {
     loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, instance)),
             instanceArg.gpr());
   } else if (instanceArg.kind() == ABIArg::Stack) {
     // Safe to use ABINonArgReg0 since it's the last thing before the call.
     Register scratch = ABINonArgReg0;
     loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, instance)), scratch);
     storePtr(scratch,
@@ -3787,20 +3772,16 @@ CodeOffset MacroAssembler::wasmCallIndir
     loadWasmGlobalPtr(callee.tableFunctionBaseGlobalDataOffset(), scratch);
     if (sizeof(wasm::FunctionTableElem) == 8) {
       computeEffectiveAddress(BaseIndex(scratch, index, TimesEight), scratch);
     } else {
       lshift32(Imm32(4), index);
       addPtr(index, scratch);
     }
     loadPtr(Address(scratch, offsetof(wasm::FunctionTableElem, code)), scratch);
-    storePtr(WasmTlsReg,
-             Address(getStackPointer(), WasmCallerTLSOffsetBeforeCall));
-    storePtr(WasmTlsReg,
-             Address(getStackPointer(), WasmCallerTLSOffsetBeforeCall));
     return call(desc, scratch);
   }
 
   MOZ_ASSERT(callee.which() == wasm::CalleeDesc::WasmTable);
 
   // Write the functype-id into the ABI functype-id register.
   wasm::FuncTypeIdDesc funcTypeId = callee.wasmTableSigId();
   switch (funcTypeId.kind()) {
@@ -3832,21 +3813,17 @@ CodeOffset MacroAssembler::wasmCallIndir
   // Load the callee from the table.
   if (sizeof(wasm::FunctionTableElem) == 8) {
     computeEffectiveAddress(BaseIndex(scratch, index, TimesEight), scratch);
   } else {
     lshift32(Imm32(4), index);
     addPtr(index, scratch);
   }
 
-  storePtr(WasmTlsReg,
-           Address(getStackPointer(), WasmCallerTLSOffsetBeforeCall));
   loadPtr(Address(scratch, offsetof(wasm::FunctionTableElem, tls)), WasmTlsReg);
-  storePtr(WasmTlsReg,
-           Address(getStackPointer(), WasmCalleeTLSOffsetBeforeCall));
 
   Label nonNull;
   branchTest32(Assembler::NonZero, WasmTlsReg, WasmTlsReg, &nonNull);
   wasmTrap(wasm::Trap::IndirectCallToNull, trapOffset);
   bind(&nonNull);
 
   loadWasmPinnedRegsFromTls();
   switchToWasmTlsRealm(index, WasmTableCallScratchReg1);
@@ -4530,24 +4507,9 @@ void AutoGenericRegisterScope<RegisterTy
 }
 
 template void AutoGenericRegisterScope<Register>::reacquire();
 template void AutoGenericRegisterScope<FloatRegister>::reacquire();
 
 #endif  // DEBUG
 
 }  // namespace jit
-
-namespace wasm {
-const TlsData* ExtractCallerTlsFromFrameWithTls(const Frame* fp) {
-  return *reinterpret_cast<TlsData* const*>(
-      reinterpret_cast<const uint8_t*>(fp) + sizeof(Frame) + ShadowStackSpace +
-      FrameWithTls::callerTLSOffset());
-}
-
-const TlsData* ExtractCalleeTlsFromFrameWithTls(const Frame* fp) {
-  return *reinterpret_cast<TlsData* const*>(
-      reinterpret_cast<const uint8_t*>(fp) + sizeof(Frame) + ShadowStackSpace +
-      FrameWithTls::calleeTLSOffset());
-}
-}  // namespace wasm
-
 }  // namespace js
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -227,21 +227,16 @@ enum class CheckUnsafeCallWithABI {
   // Don't check this callWithABI uses AutoUnsafeCallWithABI, for instance
   // because we're calling a simple helper function (like malloc or js_free)
   // that we can't change and/or that we know won't GC.
   DontCheckOther,
 };
 
 enum class CharEncoding { Latin1, TwoByte };
 
-constexpr uint32_t WasmCallerTLSOffsetBeforeCall =
-    wasm::FrameWithTls::callerTLSOffset() + ShadowStackSpace;
-constexpr uint32_t WasmCalleeTLSOffsetBeforeCall =
-    wasm::FrameWithTls::calleeTLSOffset() + ShadowStackSpace;
-
 // The public entrypoint for emitting assembly. Note that a MacroAssembler can
 // use cx->lifoAlloc, so take care not to interleave masm use with other
 // lifoAlloc use if one will be destroyed before the other.
 class MacroAssembler : public MacroAssemblerSpecific {
   MacroAssembler* thisFromCtor() { return this; }
 
  public:
   /*
@@ -633,22 +628,17 @@ class MacroAssembler : public MacroAssem
                          mozilla::Maybe<int32_t> tlsOffset,
                          MoveOp::Type result = MoveOp::GENERAL);
   void callDebugWithABI(wasm::SymbolicAddress fun,
                         MoveOp::Type result = MoveOp::GENERAL);
 
  private:
   // Reinitialize the variables which have to be cleared before making a call
   // with callWithABI.
-  template <class ABIArgGeneratorT>
-  void setupABICallHelper();
-
-  // Reinitialize the variables which have to be cleared before making a call
-  // with native abi.
-  void setupNativeABICall();
+  void setupABICall();
 
   // Reserve the stack and resolve the arguments move.
   void callWithABIPre(uint32_t* stackAdjust,
                       bool callFromWasm = false) PER_ARCH;
 
   // Emits a call to a C/C++ function, resolving all argument moves.
   void callWithABINoProfiler(void* fun, MoveOp::Type result,
                              CheckUnsafeCallWithABI check);
@@ -4218,30 +4208,28 @@ static inline MIRType ToMIRType(ABIArgTy
     case ArgType_Int64:
       return MIRType::Int64;
     default:
       break;
   }
   MOZ_CRASH("unexpected argType");
 }
 
-template <class VecT, class ABIArgGeneratorT>
-class ABIArgIterBase {
-  ABIArgGeneratorT gen_;
+template <class VecT>
+class ABIArgIter {
+  ABIArgGenerator gen_;
   const VecT& types_;
   unsigned i_;
 
   void settle() {
     if (!done()) gen_.next(ToMIRType(types_[i_]));
   }
 
  public:
-  explicit ABIArgIterBase(const VecT& types) : types_(types), i_(0) {
-    settle();
-  }
+  explicit ABIArgIter(const VecT& types) : types_(types), i_(0) { settle(); }
   void operator++(int) {
     MOZ_ASSERT(!done());
     i_++;
     settle();
   }
   bool done() const { return i_ == types_.length(); }
 
   ABIArg* operator->() {
@@ -4261,40 +4249,12 @@ class ABIArgIterBase {
     MOZ_ASSERT(!done());
     return ToMIRType(types_[i_]);
   }
   uint32_t stackBytesConsumedSoFar() const {
     return gen_.stackBytesConsumedSoFar();
   }
 };
 
-// This is not an alias because we want to allow class template argument
-// deduction.
-template <class VecT>
-class ABIArgIter : public ABIArgIterBase<VecT, ABIArgGenerator> {
- public:
-  explicit ABIArgIter(const VecT& types)
-      : ABIArgIterBase<VecT, ABIArgGenerator>(types) {}
-};
-
-class WasmABIArgGenerator : public ABIArgGenerator {
- public:
-  WasmABIArgGenerator() {
-    increaseStackOffset(wasm::FrameWithTls::sizeWithoutFrame());
-  }
-};
-
-template <class VecT>
-class WasmABIArgIter : public ABIArgIterBase<VecT, WasmABIArgGenerator> {
- public:
-  explicit WasmABIArgIter(const VecT& types)
-      : ABIArgIterBase<VecT, WasmABIArgGenerator>(types) {}
-};
 }  // namespace jit
-
-namespace wasm {
-const TlsData* ExtractCalleeTlsFromFrameWithTls(const Frame* fp);
-const TlsData* ExtractCallerTlsFromFrameWithTls(const Frame* fp);
-}  // namespace wasm
-
 }  // namespace js
 
 #endif /* jit_MacroAssembler_h */
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -142,17 +142,16 @@ class ABIArgGenerator {
 
   void setUseHardFp(bool useHardFp) {
     MOZ_ASSERT(intRegIndex_ == 0 && floatRegIndex_ == 0);
     useHardFp_ = useHardFp;
   }
   ABIArg next(MIRType argType);
   ABIArg& current() { return current_; }
   uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
-  void increaseStackOffset(uint32_t bytes) { stackOffset_ += bytes; }
 };
 
 bool IsUnaligned(const wasm::MemoryAccessDesc& access);
 
 // These registers may be volatile or nonvolatile.
 static constexpr Register ABINonArgReg0 = r4;
 static constexpr Register ABINonArgReg1 = r5;
 static constexpr Register ABINonArgReg2 = r6;
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -4349,17 +4349,17 @@ void MacroAssembler::patchCallToNop(uint
 void MacroAssembler::pushReturnAddress() { push(lr); }
 
 void MacroAssembler::popReturnAddress() { pop(lr); }
 
 // ===============================================================
 // ABI function calls.
 
 void MacroAssembler::setupUnalignedABICall(Register scratch) {
-  setupNativeABICall();
+  setupABICall();
   dynamicAlignment_ = true;
 
   ma_mov(sp, scratch);
   // Force sp to be aligned.
   as_bic(sp, sp, Imm8(ABIStackAlignment - 1));
   ma_push(scratch);
 }
 
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -390,17 +390,16 @@ static const uint32_t NumFloatArgRegs = 
 class ABIArgGenerator {
  public:
   ABIArgGenerator()
       : intRegIndex_(0), floatRegIndex_(0), stackOffset_(0), current_() {}
 
   ABIArg next(MIRType argType);
   ABIArg& current() { return current_; }
   uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
-  void increaseStackOffset(uint32_t bytes) { stackOffset_ += bytes; }
 
  protected:
   unsigned intRegIndex_;
   unsigned floatRegIndex_;
   uint32_t stackOffset_;
   ABIArg current_;
 };
 
--- a/js/src/jit/arm64/MacroAssembler-arm64.cpp
+++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp
@@ -790,17 +790,17 @@ void MacroAssembler::popReturnAddress() 
   MOZ_RELEASE_ASSERT(!sp.Is(GetStackPointer64()), "Not valid");
   pop(lr);
 }
 
 // ===============================================================
 // ABI function calls.
 
 void MacroAssembler::setupUnalignedABICall(Register scratch) {
-  setupNativeABICall();
+  setupABICall();
   dynamicAlignment_ = true;
 
   int64_t alignment = ~(int64_t(ABIStackAlignment) - 1);
   ARMRegister scratch64(scratch, 64);
 
   // Always save LR -- Baseline ICs assume that LR isn't modified.
   push(lr);
 
--- a/js/src/jit/mips32/Assembler-mips32.h
+++ b/js/src/jit/mips32/Assembler-mips32.h
@@ -38,18 +38,16 @@ class ABIArgGenerator {
 
   uint32_t stackBytesConsumedSoFar() const {
     if (usedArgSlots_ <= 4) {
       return ShadowStackSpace;
     }
 
     return usedArgSlots_ * sizeof(intptr_t);
   }
-
-  void increaseStackOffset(uint32_t bytes) { MOZ_CRASH("NYI"); }
 };
 
 // These registers may be volatile or nonvolatile.
 static constexpr Register ABINonArgReg0 = t0;
 static constexpr Register ABINonArgReg1 = t1;
 static constexpr Register ABINonArgReg2 = t2;
 static constexpr Register ABINonArgReg3 = t3;
 
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -2025,17 +2025,17 @@ void MacroAssembler::storeRegsInMask(Liv
     MOZ_ASSERT(diffF == 0);
   }
 }
 // ===============================================================
 // ABI function calls.
 
 void MacroAssembler::setupUnalignedABICall(Register scratch) {
   MOZ_ASSERT(!IsCompilingWasm(), "wasm should only use aligned ABI calls");
-  setupNativeABICall();
+  setupABICall();
   dynamicAlignment_ = true;
 
   ma_move(scratch, StackPointer);
 
   // Force sp to be aligned
   asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
   ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
   storePtr(scratch, Address(StackPointer, 0));
--- a/js/src/jit/mips64/Assembler-mips64.h
+++ b/js/src/jit/mips64/Assembler-mips64.h
@@ -33,17 +33,16 @@ class ABIArgGenerator {
 
   uint32_t stackBytesConsumedSoFar() const {
     if (usedArgSlots_ <= 8) {
       return 0;
     }
 
     return (usedArgSlots_ - 8) * sizeof(int64_t);
   }
-  void increaseStackOffset(uint32_t bytes) { MOZ_CRASH("NYI"); }
 };
 
 // These registers may be volatile or nonvolatile.
 static constexpr Register ABINonArgReg0 = t0;
 static constexpr Register ABINonArgReg1 = t1;
 static constexpr Register ABINonArgReg2 = t2;
 static constexpr Register ABINonArgReg3 = t3;
 
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -1867,17 +1867,17 @@ void MacroAssembler::storeRegsInMask(Liv
   diffF -= diffF % sizeof(uintptr_t);
   MOZ_ASSERT(diffF == 0);
 }
 // ===============================================================
 // ABI function calls.
 
 void MacroAssembler::setupUnalignedABICall(Register scratch) {
   MOZ_ASSERT(!IsCompilingWasm(), "wasm should only use aligned ABI calls");
-  setupNativeABICall();
+  setupABICall();
   dynamicAlignment_ = true;
 
   ma_move(scratch, StackPointer);
 
   // Force sp to be aligned
   asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
   ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
   storePtr(scratch, Address(StackPointer, 0));
--- a/js/src/jit/none/MacroAssembler-none.h
+++ b/js/src/jit/none/MacroAssembler-none.h
@@ -612,17 +612,16 @@ class MacroAssemblerNone : public Assemb
 typedef MacroAssemblerNone MacroAssemblerSpecific;
 
 class ABIArgGenerator {
  public:
   ABIArgGenerator() { MOZ_CRASH(); }
   ABIArg next(MIRType) { MOZ_CRASH(); }
   ABIArg& current() { MOZ_CRASH(); }
   uint32_t stackBytesConsumedSoFar() const { MOZ_CRASH(); }
-  void increaseStackOffset(uint32_t) { MOZ_CRASH(); }
 };
 
 static inline bool GetTempRegForIntArg(uint32_t, uint32_t, Register*) {
   MOZ_CRASH();
 }
 
 }  // namespace jit
 }  // namespace js
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -187,17 +187,16 @@ class ABIArgGenerator {
   uint32_t stackOffset_;
   ABIArg current_;
 
  public:
   ABIArgGenerator();
   ABIArg next(MIRType argType);
   ABIArg& current() { return current_; }
   uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
-  void increaseStackOffset(uint32_t bytes) { stackOffset_ += bytes; }
 };
 
 // These registers may be volatile or nonvolatile.
 // Avoid r11, which is the MacroAssembler's ScratchReg.
 static constexpr Register ABINonArgReg0 = rax;
 static constexpr Register ABINonArgReg1 = rbx;
 static constexpr Register ABINonArgReg2 = r10;
 static constexpr Register ABINonArgReg3 = r12;
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -309,17 +309,17 @@ void MacroAssembler::subFromStackPtr(Imm
   }
 }
 
 //{{{ check_macroassembler_style
 // ===============================================================
 // ABI function calls.
 
 void MacroAssembler::setupUnalignedABICall(Register scratch) {
-  setupNativeABICall();
+  setupABICall();
   dynamicAlignment_ = true;
 
   movq(rsp, scratch);
   andq(Imm32(~(ABIStackAlignment - 1)), rsp);
   push(scratch);
 }
 
 void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -83,17 +83,16 @@ class ABIArgGenerator {
   uint32_t stackOffset_;
   ABIArg current_;
 
  public:
   ABIArgGenerator();
   ABIArg next(MIRType argType);
   ABIArg& current() { return current_; }
   uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
-  void increaseStackOffset(uint32_t bytes) { stackOffset_ += bytes; }
 };
 
 // These registers may be volatile or nonvolatile.
 static constexpr Register ABINonArgReg0 = eax;
 static constexpr Register ABINonArgReg1 = ebx;
 static constexpr Register ABINonArgReg2 = ecx;
 static constexpr Register ABINonArgReg3 = edx;
 
--- a/js/src/jit/x86/MacroAssembler-x86.cpp
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -307,17 +307,17 @@ void MacroAssembler::subFromStackPtr(Imm
   }
 }
 
 //{{{ check_macroassembler_style
 // ===============================================================
 // ABI function calls.
 
 void MacroAssembler::setupUnalignedABICall(Register scratch) {
-  setupNativeABICall();
+  setupABICall();
   dynamicAlignment_ = true;
 
   movl(esp, scratch);
   andl(Imm32(~(ABIStackAlignment - 1)), esp);
   push(scratch);
 }
 
 void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
--- a/js/src/vm/JitActivation.cpp
+++ b/js/src/vm/JitActivation.cpp
@@ -228,17 +228,17 @@ void js::jit::JitActivation::startWasmTr
   bool unwound;
   wasm::UnwindState unwindState;
   MOZ_RELEASE_ASSERT(wasm::StartUnwinding(state, &unwindState, &unwound));
   MOZ_ASSERT(unwound == (trap == wasm::Trap::IndirectCallBadSig));
 
   void* pc = unwindState.pc;
   const wasm::Frame* fp = wasm::Frame::fromUntaggedWasmExitFP(unwindState.fp);
 
-  const wasm::Code& code = wasm::GetNearestEffectiveTls(fp)->instance->code();
+  const wasm::Code& code = fp->instance()->code();
   MOZ_RELEASE_ASSERT(&code == wasm::LookupCode(pc));
 
   // If the frame was unwound, the bytecodeOffset must be recovered from the
   // callsite so that it is accurate.
   if (unwound) {
     bytecodeOffset = code.lookupCallSite(pc)->lineOrBytecode();
   }
 
--- a/js/src/vm/JitActivation.h
+++ b/js/src/vm/JitActivation.h
@@ -209,19 +209,16 @@ class JitActivation : public Activation 
   bool hasWasmExitFP() const {
     return wasm::Frame::isExitOrJitEntryFP(packedExitFP_);
   }
   wasm::Frame* wasmExitFP() const {
     MOZ_ASSERT(hasWasmExitFP());
     return reinterpret_cast<wasm::Frame*>(
         wasm::Frame::toJitEntryCaller(packedExitFP_));
   }
-  wasm::TlsData* wasmExitTls() const {
-    return wasm::GetNearestEffectiveTls(wasmExitFP());
-  }
   void setWasmExitFP(const wasm::Frame* fp) {
     if (fp) {
       MOZ_ASSERT(!wasm::Frame::isExitOrJitEntryFP(fp));
       packedExitFP_ = wasm::Frame::addExitOrJitEntryFPTag(fp);
       MOZ_ASSERT(hasWasmExitFP());
     } else {
       packedExitFP_ = nullptr;
     }
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -5148,17 +5148,17 @@ class BaseCompiler final : public BaseCo
 
     MOZ_ASSERT(stackMapGenerator_.machineStackTracker.length() == 0);
     if (!stackMapGenerator_.machineStackTracker.pushNonGCPointers(
             stackMapGenerator_.numStackArgWords)) {
       return false;
     }
 
     // Identify GC-managed pointers passed on the stack.
-    for (WasmABIArgIter i(args); !i.done(); i++) {
+    for (ABIArgIter i(args); !i.done(); i++) {
       ABIArg argLoc = *i;
       if (argLoc.kind() == ABIArg::Stack &&
           args[i.index()] == MIRType::RefOrNull) {
         uint32_t offset = argLoc.offsetFromArgBase();
         MOZ_ASSERT(offset < inboundStackArgBytes);
         MOZ_ASSERT(offset % sizeof(void*) == 0);
         stackMapGenerator_.machineStackTracker.setGCPointer(offset /
                                                             sizeof(void*));
@@ -5233,17 +5233,17 @@ class BaseCompiler final : public BaseCo
         uint32_t offs = fr.localOffsetFromSp(l);
         MOZ_ASSERT(0 == (offs % sizeof(void*)));
         stackMapGenerator_.machineStackTracker.setGCPointer(offs /
                                                             sizeof(void*));
       }
     }
 
     // Copy arguments from registers to stack.
-    for (WasmABIArgIter i(args); !i.done(); i++) {
+    for (ABIArgIter i(args); !i.done(); i++) {
       if (args.isSyntheticStackResultPointerArg(i.index())) {
         // If there are stack results and the pointer to stack results
         // was passed in a register, store it to the stack.
         if (i->argInRegister()) {
           fr.storeIncomingStackResultAreaPtr(RegPtr(i->gpr()));
         }
         // If we're in a debug frame, copy the stack result pointer arg
         // to a well-known place.
@@ -5498,17 +5498,17 @@ class BaseCompiler final : public BaseCo
 #ifdef JS_CODEGEN_ARM
           hardFP(true),
 #endif
           frameAlignAdjustment(0),
           stackArgAreaSize(0) {
     }
 
     uint32_t lineOrBytecode;
-    WasmABIArgGenerator abi;
+    ABIArgGenerator abi;
     bool isInterModule;
     bool usesSystemAbi;
 #ifdef JS_CODEGEN_ARM
     bool hardFP;
 #endif
     size_t frameAlignAdjustment;
     size_t stackArgAreaSize;
   };
--- a/js/src/wasm/WasmBaselineCompile.h
+++ b/js/src/wasm/WasmBaselineCompile.h
@@ -37,17 +37,17 @@ MOZ_MUST_USE bool BaselineCompileFunctio
                                            UniqueChars* error);
 
 class BaseLocalIter {
  private:
   using ConstValTypeRange = mozilla::Range<const ValType>;
 
   const ValTypeVector& locals_;
   const ArgTypeVector& args_;
-  jit::WasmABIArgIter<ArgTypeVector> argsIter_;
+  jit::ABIArgIter<ArgTypeVector> argsIter_;
   size_t index_;
   int32_t frameSize_;
   int32_t nextFrameSize_;
   int32_t frameOffset_;
   int32_t stackResultPointerOffset_;
   jit::MIRType mirType_;
   bool done_;
 
--- a/js/src/wasm/WasmBuiltins.cpp
+++ b/js/src/wasm/WasmBuiltins.cpp
@@ -297,17 +297,17 @@ static JitActivation* CallingActivation(
   MOZ_ASSERT(act->asJit()->hasWasmExitFP());
   return act->asJit();
 }
 
 static bool WasmHandleDebugTrap() {
   JitActivation* activation = CallingActivation();
   JSContext* cx = activation->cx();
   Frame* fp = activation->wasmExitFP();
-  Instance* instance = GetNearestEffectiveTls(fp)->instance;
+  Instance* instance = fp->instance();
   const Code& code = instance->code();
   MOZ_ASSERT(code.metadata().debugEnabled);
 
   // The debug trap stub is the innermost frame. It's return address is the
   // actual trap site.
   const CallSite* site = code.lookupCallSite(fp->returnAddress());
   MOZ_ASSERT(site);
 
@@ -502,17 +502,17 @@ static void* WasmHandleTrap() {
       // TlsData::setInterrupt() causes a fake stack overflow. Since
       // TlsData::setInterrupt() is called racily, it's possible for a real
       // stack overflow to trap, followed by a racy call to setInterrupt().
       // Thus, we must check for a real stack overflow first before we
       // CheckInterrupt() and possibly resume execution.
       if (!CheckRecursionLimit(cx)) {
         return nullptr;
       }
-      if (activation->wasmExitTls()->isInterrupted()) {
+      if (activation->wasmExitFP()->tls()->isInterrupted()) {
         return CheckInterrupt(cx, activation);
       }
       return ReportError(cx, JSMSG_OVER_RECURSED);
     case Trap::ThrowReported:
       // Error was already reported under another name.
       return nullptr;
     case Trap::Limit:
       break;
--- a/js/src/wasm/WasmFrameIter.cpp
+++ b/js/src/wasm/WasmFrameIter.cpp
@@ -35,36 +35,34 @@ using mozilla::Maybe;
 // WasmFrameIter implementation
 
 WasmFrameIter::WasmFrameIter(JitActivation* activation, wasm::Frame* fp)
     : activation_(activation),
       code_(nullptr),
       codeRange_(nullptr),
       lineOrBytecode_(0),
       fp_(fp ? fp : activation->wasmExitFP()),
-      tls_(nullptr),
       unwoundIonCallerFP_(nullptr),
       unwoundIonFrameType_(jit::FrameType(-1)),
       unwind_(Unwind::False),
       unwoundAddressOfReturnAddress_(nullptr),
       resumePCinCurrentFrame_(nullptr) {
   MOZ_ASSERT(fp_);
-  tls_ = GetNearestEffectiveTls(fp_);
 
   // When the stack is captured during a trap (viz., to create the .stack
   // for an Error object), use the pc/bytecode information captured by the
   // signal handler in the runtime. Take care not to use this trap unwind
   // state for wasm frames in the middle of a JitActivation, i.e., wasm frames
   // that called into JIT frames before the trap.
 
   if (activation->isWasmTrapping() && fp_ == activation->wasmExitFP()) {
     const TrapData& trapData = activation->wasmTrapData();
     void* unwoundPC = trapData.unwoundPC;
 
-    code_ = &tls_->instance->code();
+    code_ = &fp_->instance()->code();
     MOZ_ASSERT(code_ == LookupCode(unwoundPC));
 
     codeRange_ = code_->lookupFuncRange(unwoundPC);
     MOZ_ASSERT(codeRange_);
 
     lineOrBytecode_ = trapData.bytecodeOffset;
 
     MOZ_ASSERT(!done());
@@ -191,26 +189,22 @@ void WasmFrameIter::popFrame() {
       activation_->setJSExitFP(unwoundIonCallerFP());
       unwoundAddressOfReturnAddress_ = prevFP->addressOfReturnAddress();
     }
 
     MOZ_ASSERT(done());
     return;
   }
 
+  MOZ_ASSERT(code_ == &fp_->instance()->code());
   MOZ_ASSERT(codeRange_->kind() == CodeRange::Function);
 
   const CallSite* callsite = code_->lookupCallSite(returnAddress);
   MOZ_ASSERT(callsite);
 
-  if (callsite->mightBeCrossInstance()) {
-    tls_ = ExtractCallerTlsFromFrameWithTls(prevFP);
-  }
-
-  MOZ_ASSERT(code_ == &tls()->instance->code());
   lineOrBytecode_ = callsite->lineOrBytecode();
 
   MOZ_ASSERT(!done());
 }
 
 const char* WasmFrameIter::filename() const {
   MOZ_ASSERT(!done());
   return code_->metadata().filename.get();
@@ -273,17 +267,17 @@ unsigned WasmFrameIter::computeLine(uint
   if (column) {
     *column = codeRange_->funcIndex() | ColumnBit;
   }
   return lineOrBytecode_;
 }
 
 Instance* WasmFrameIter::instance() const {
   MOZ_ASSERT(!done());
-  return tls_->instance;
+  return fp_->instance();
 }
 
 void** WasmFrameIter::unwoundAddressOfReturnAddress() const {
   MOZ_ASSERT(done());
   MOZ_ASSERT(unwind_ == Unwind::True);
   MOZ_ASSERT(unwoundAddressOfReturnAddress_);
   return unwoundAddressOfReturnAddress_;
 }
@@ -919,49 +913,16 @@ static bool isSignatureCheckFail(uint32_
   //                        4. jump 7
   // unchecked call entry:  5. push Frame
   //                        6. set FP
   //                        7. function's code
   return offsetInCode < codeRange->funcUncheckedCallEntry() &&
          (offsetInCode - codeRange->funcCheckedCallEntry()) > SetFP;
 }
 
-const TlsData* js::wasm::GetNearestEffectiveTls(const Frame* fp) {
-  while (true) {
-    if (fp->callerIsExitOrJitEntryFP()) {
-      // It is a direct call from JIT.
-      MOZ_ASSERT(!LookupCode(fp->returnAddress()));
-      return ExtractCalleeTlsFromFrameWithTls(fp);
-    }
-
-    uint8_t* returnAddress = fp->returnAddress();
-    const CodeRange* codeRange = nullptr;
-    const Code* code = LookupCode(returnAddress, &codeRange);
-    MOZ_ASSERT(codeRange);
-
-    if (codeRange->isEntry()) {
-      return ExtractCalleeTlsFromFrameWithTls(fp);
-    }
-
-    MOZ_ASSERT(codeRange->kind() == CodeRange::Function);
-    MOZ_ASSERT(code);
-    const CallSite* callsite = code->lookupCallSite(returnAddress);
-    if (callsite->mightBeCrossInstance()) {
-      return ExtractCalleeTlsFromFrameWithTls(fp);
-    }
-
-    fp = fp->wasmCaller();
-  }
-}
-
-TlsData* js::wasm::GetNearestEffectiveTls(Frame* fp) {
-  return const_cast<TlsData*>(
-      GetNearestEffectiveTls(const_cast<const Frame*>(fp)));
-}
-
 bool js::wasm::StartUnwinding(const RegisterState& registers,
                               UnwindState* unwindState, bool* unwoundCaller) {
   // Shorthands.
   uint8_t* const pc = (uint8_t*)registers.pc;
   void** const sp = (void**)registers.sp;
 
   // The frame pointer might be:
   // - in the process of tagging/untagging when calling into the JITs;
@@ -1323,18 +1284,17 @@ void ProfilingFrameIterator::operator++(
 
   if (codeRange_->isJitEntry()) {
     unwoundIonCallerFP_ = callerFP_;
     MOZ_ASSERT(!done());
     return;
   }
 
   MOZ_ASSERT(code_ ==
-             &GetNearestEffectiveTls(Frame::fromUntaggedWasmExitFP(callerFP_))
-                  ->instance->code());
+             &Frame::fromUntaggedWasmExitFP(callerFP_)->instance()->code());
 
   switch (codeRange_->kind()) {
     case CodeRange::Function:
     case CodeRange::ImportJitExit:
     case CodeRange::ImportInterpExit:
     case CodeRange::BuiltinThunk:
     case CodeRange::TrapExit:
     case CodeRange::DebugTrap:
--- a/js/src/wasm/WasmFrameIter.h
+++ b/js/src/wasm/WasmFrameIter.h
@@ -60,17 +60,16 @@ class WasmFrameIter {
   static constexpr uint32_t ColumnBit = 1u << 31;
 
  private:
   jit::JitActivation* activation_;
   const Code* code_;
   const CodeRange* codeRange_;
   unsigned lineOrBytecode_;
   Frame* fp_;
-  const TlsData* tls_;
   uint8_t* unwoundIonCallerFP_;
   jit::FrameType unwoundIonFrameType_;
   Unwind unwind_;
   void** unwoundAddressOfReturnAddress_;
   uint8_t* resumePCinCurrentFrame_;
 
   void popFrame();
 
@@ -91,17 +90,16 @@ class WasmFrameIter {
   const CodeRange* codeRange() const { return codeRange_; }
   Instance* instance() const;
   void** unwoundAddressOfReturnAddress() const;
   bool debugEnabled() const;
   DebugFrame* debugFrame() const;
   jit::FrameType unwoundIonFrameType() const;
   uint8_t* unwoundIonCallerFP() const { return unwoundIonCallerFP_; }
   Frame* frame() const { return fp_; }
-  const TlsData* tls() const { return tls_; }
 
   // Returns the address of the next instruction that will execute in this
   // frame, once control returns to this frame.
   uint8_t* resumePCinCurrentFrame() const;
 };
 
 enum class SymbolicAddress;
 
@@ -230,21 +228,16 @@ void GenerateJitEntryPrologue(jit::Macro
 
 void GenerateFunctionPrologue(jit::MacroAssembler& masm,
                               const FuncTypeIdDesc& funcTypeId,
                               const mozilla::Maybe<uint32_t>& tier1FuncIndex,
                               FuncOffsets* offsets);
 void GenerateFunctionEpilogue(jit::MacroAssembler& masm, unsigned framePushed,
                               FuncOffsets* offsets);
 
-// Iterates through frames for either possible cross-instance call or an entry
-// stub to obtain tls that corresponds to the passed fp.
-const TlsData* GetNearestEffectiveTls(const Frame* fp);
-TlsData* GetNearestEffectiveTls(Frame* fp);
-
 // Describes register state and associated code at a given call frame.
 
 struct UnwindState {
   uint8_t* fp;
   void* pc;
   const Code* code;
   const CodeRange* codeRange;
   UnwindState() : fp(nullptr), pc(nullptr), code(nullptr), codeRange(nullptr) {}
--- a/js/src/wasm/WasmGC.cpp
+++ b/js/src/wasm/WasmGC.cpp
@@ -123,17 +123,17 @@ bool CreateStackMapForFunctionEntryTrap(
   MOZ_ASSERT(nInboundStackArgBytes % sizeof(void*) == 0);
   const size_t numStackArgWords = nInboundStackArgBytes / sizeof(void*);
 
   const size_t wordsSoFar = vec.length();
   if (!vec.appendN(false, numStackArgWords)) {
     return false;
   }
 
-  for (WasmABIArgIter i(argTypes); !i.done(); i++) {
+  for (ABIArgIter i(argTypes); !i.done(); i++) {
     ABIArg argLoc = *i;
     if (argLoc.kind() == ABIArg::Stack &&
         argTypes[i.index()] == MIRType::RefOrNull) {
       uint32_t offset = argLoc.offsetFromArgBase();
       MOZ_ASSERT(offset < nInboundStackArgBytes);
       MOZ_ASSERT(offset % sizeof(void*) == 0);
       vec[wordsSoFar + offset / sizeof(void*)] = true;
       hasRefs = true;
@@ -178,17 +178,17 @@ bool GenerateStackmapEntriesForTrapExit(
   // If this doesn't hold, we can't distinguish saved and not-saved
   // registers in the MachineState.  See MachineState::MachineState().
   MOZ_ASSERT(trapExitLayoutNumWords < 0x100);
 
   if (!extras->appendN(false, trapExitLayoutNumWords)) {
     return false;
   }
 
-  for (WasmABIArgIter i(args); !i.done(); i++) {
+  for (ABIArgIter i(args); !i.done(); i++) {
     if (!i->argInRegister() || i.mirType() != MIRType::RefOrNull) {
       continue;
     }
 
     size_t offsetFromTop =
         reinterpret_cast<size_t>(trapExitLayout.address(i->gpr()));
 
     // If this doesn't hold, the associated register wasn't saved by
--- a/js/src/wasm/WasmGC.h
+++ b/js/src/wasm/WasmGC.h
@@ -247,26 +247,26 @@ class StackMaps {
 // byte boundary.
 //
 // Note, StackArgAreaSize{Unaligned,Aligned}() must process all the arguments
 // in order to take into account all necessary alignment constraints.  The
 // signature must include any receiver argument -- in other words, it must be
 // the complete native-ABI-level call signature.
 template <class T>
 static inline size_t StackArgAreaSizeUnaligned(const T& argTypes) {
-  WasmABIArgIter<const T> i(argTypes);
+  ABIArgIter<const T> i(argTypes);
   while (!i.done()) {
     i++;
   }
   return i.stackBytesConsumedSoFar();
 }
 
 static inline size_t StackArgAreaSizeUnaligned(
     const SymbolicAddressSignature& saSig) {
-  // WasmABIArgIter::ABIArgIter wants the items to be iterated over to be
+  // ABIArgIter::ABIArgIter wants the items to be iterated over to be
   // presented in some type that has methods length() and operator[].  So we
   // have to wrap up |saSig|'s array of types in this API-matching class.
   class MOZ_STACK_CLASS ItemsAndLength {
     const MIRType* items_;
     size_t length_;
 
    public:
     ItemsAndLength(const MIRType* items, size_t length)
--- a/js/src/wasm/WasmIonCompile.cpp
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -59,17 +59,17 @@ struct IonCompilePolicy {
 using IonOpIter = OpIter<IonCompilePolicy>;
 
 class FunctionCompiler;
 
 // CallCompileState describes a call that is being compiled.
 
 class CallCompileState {
   // A generator object that is passed each argument as it is compiled.
-  WasmABIArgGenerator abi_;
+  ABIArgGenerator abi_;
 
   // Accumulates the register arguments while compiling arguments.
   MWasmCall::Args regArgs_;
 
   // Reserved argument for passing Instance* to builtin instance method calls.
   ABIArg instanceArg_;
 
   // The stack area in which the callee will write stack return values, or
@@ -156,17 +156,17 @@ class FunctionCompiler {
 
     if (!mirGen_.ensureBallast()) {
       return false;
     }
     if (!newBlock(/* prev */ nullptr, &curBlock_)) {
       return false;
     }
 
-    for (WasmABIArgIter i(args); !i.done(); i++) {
+    for (ABIArgIter i(args); !i.done(); i++) {
       MWasmParameter* ins = MWasmParameter::New(alloc(), *i, i.mirType());
       curBlock_->add(ins);
       if (args.isSyntheticStackResultPointerArg(i.index())) {
         MOZ_ASSERT(stackResultPointer_ == nullptr);
         stackResultPointer_ = ins;
       } else {
         curBlock_->initSlot(info().localSlot(args.naturalIndex(i.index())),
                             ins);
--- a/js/src/wasm/WasmSignalHandlers.cpp
+++ b/js/src/wasm/WasmSignalHandlers.cpp
@@ -712,19 +712,17 @@ static MOZ_MUST_USE bool HandleTrap(CONT
     return false;
   }
 
   // We have a safe, expected wasm trap, so fp is well-defined to be a Frame*.
   // For the first sanity check, the Trap::IndirectCallBadSig special case is
   // due to this trap occurring in the indirect call prologue, while fp points
   // to the caller's Frame which can be in a different Module. In any case,
   // though, the containing JSContext is the same.
-
-  auto* frame = reinterpret_cast<Frame*>(ContextToFP(context));
-  Instance* instance = GetNearestEffectiveTls(frame)->instance;
+  Instance* instance = ((Frame*)ContextToFP(context))->instance();
   MOZ_RELEASE_ASSERT(&instance->code() == &segment.code() ||
                      trap == Trap::IndirectCallBadSig);
 
   if (isUnalignedSignal) {
     if (trap != Trap::OutOfBounds) {
       return false;
     }
     if (HandleUnalignedTrap(context, pc, instance)) {
@@ -1179,18 +1177,17 @@ bool wasm::MemoryAccessTraps(const Regis
 
   Trap trap;
   BytecodeOffset bytecode;
   if (!segment.code().lookupTrap(regs.pc, &trap, &bytecode) ||
       trap != Trap::OutOfBounds) {
     return false;
   }
 
-  Instance& instance =
-      *GetNearestEffectiveTls(Frame::fromUntaggedWasmExitFP(regs.fp))->instance;
+  Instance& instance = *Frame::fromUntaggedWasmExitFP(regs.fp)->instance();
   MOZ_ASSERT(&instance.code() == &segment.code());
 
   if (!instance.memoryAccessInGuardRegion((uint8_t*)addr, numBytes)) {
     return false;
   }
 
   jit::JitActivation* activation = TlsContext.get()->activation()->asJit();
   activation->startWasmTrap(Trap::OutOfBounds, bytecode.offset(), regs);
--- a/js/src/wasm/WasmStubs.cpp
+++ b/js/src/wasm/WasmStubs.cpp
@@ -287,38 +287,28 @@ static bool FinishOffsets(MacroAssembler
 
 static void AssertStackAlignment(MacroAssembler& masm, uint32_t alignment,
                                  uint32_t addBeforeAssert = 0) {
   MOZ_ASSERT(
       (sizeof(Frame) + masm.framePushed() + addBeforeAssert) % alignment == 0);
   masm.assertStackAlignment(alignment, addBeforeAssert);
 }
 
-template <class VectorT, template <class VecT> class ABIArgIterT>
-static unsigned StackArgBytesHelper(const VectorT& args) {
-  ABIArgIterT<VectorT> iter(args);
+template <class VectorT>
+static unsigned StackArgBytes(const VectorT& args) {
+  ABIArgIter<VectorT> iter(args);
   while (!iter.done()) {
     iter++;
   }
   return iter.stackBytesConsumedSoFar();
 }
 
-template <class VectorT>
-static unsigned StackArgBytesForNativeABI(const VectorT& args) {
-  return StackArgBytesHelper<VectorT, ABIArgIter>(args);
-}
-
-template <class VectorT>
-static unsigned StackArgBytesForWasmABI(const VectorT& args) {
-  return StackArgBytesHelper<VectorT, WasmABIArgIter>(args);
-}
-
-static unsigned StackArgBytesForWasmABI(const FuncType& funcType) {
+static unsigned StackArgBytes(const FuncType& funcType) {
   ArgTypeVector args(funcType);
-  return StackArgBytesForWasmABI(args);
+  return StackArgBytes(args);
 }
 
 static void Move64(MacroAssembler& masm, const Address& src,
                    const Address& dest, Register scratch) {
 #if JS_BITS_PER_WORD == 32
   masm.load32(LowWord(src), scratch);
   masm.store32(scratch, LowWord(dest));
   masm.load32(HighWord(src), scratch);
@@ -328,22 +318,22 @@ static void Move64(MacroAssembler& masm,
   masm.load64(src, scratch64);
   masm.store64(scratch64, dest);
 #endif
 }
 
 static void SetupABIArguments(MacroAssembler& masm, const FuncExport& fe,
                               Register argv, Register scratch) {
   // Copy parameters out of argv and into the registers/stack-slots specified by
-  // the wasm ABI.
+  // the system ABI.
   //
   // SetupABIArguments are only used for C++ -> wasm calls through callExport(),
   // and V128 and Ref types (other than externref) are not currently allowed.
   ArgTypeVector args(fe.funcType());
-  for (WasmABIArgIter iter(args); !iter.done(); iter++) {
+  for (ABIArgIter iter(args); !iter.done(); iter++) {
     unsigned argOffset = iter.index() * sizeof(ExportArg);
     Address src(argv, argOffset);
     MIRType type = iter.mirType();
     switch (iter->kind()) {
       case ABIArg::GPR:
         if (type == MIRType::Int32) {
           masm.load32(src, iter->gpr());
         } else if (type == MIRType::Int64) {
@@ -765,33 +755,29 @@ static bool GenerateInterpEntry(MacroAss
 #ifdef JS_CODEGEN_ARM64
   static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
 #else
   masm.moveStackPtrTo(scratch);
   masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1)));
   masm.Push(scratch);
 #endif
 
-  // Reserve stack space for the wasm call.
-  unsigned argDecrement =
-      StackDecrementForCall(WasmStackAlignment, masm.framePushed(),
-                            StackArgBytesForWasmABI(fe.funcType()));
+  // Reserve stack space for the call.
+  unsigned argDecrement = StackDecrementForCall(
+      WasmStackAlignment, masm.framePushed(), StackArgBytes(fe.funcType()));
   masm.reserveStack(argDecrement);
 
   // Copy parameters out of argv and into the wasm ABI registers/stack-slots.
   SetupABIArguments(masm, fe, argv, scratch);
 
   // Setup wasm register state. The nullness of the frame pointer is used to
   // determine whether the call ended in success or failure.
   masm.movePtr(ImmWord(0), FramePointer);
   masm.loadWasmPinnedRegsFromTls();
 
-  masm.storePtr(WasmTlsReg,
-                Address(masm.getStackPointer(), WasmCalleeTLSOffsetBeforeCall));
-
   // Call into the real function. Note that, due to the throw stub, fp, tls
   // and pinned registers may be clobbered.
   masm.assertStackAlignment(WasmStackAlignment);
   CallFuncExport(masm, fe, funcPtr);
   masm.assertStackAlignment(WasmStackAlignment);
 
   // Pop the arguments pushed after the dynamic alignment.
   masm.freeStack(argDecrement);
@@ -964,23 +950,23 @@ static bool GenerateJitEntry(MacroAssemb
   RegisterOrSP sp = masm.getStackPointer();
 
   GenerateJitEntryPrologue(masm, offsets);
 
   // The jit caller has set up the following stack layout (sp grows to the
   // left):
   // <-- retAddr | descriptor | callee | argc | this | arg1..N
 
-  unsigned normalBytesNeeded = StackArgBytesForWasmABI(fe.funcType());
+  unsigned normalBytesNeeded = StackArgBytes(fe.funcType());
 
   MIRTypeVector coerceArgTypes;
   MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Int32));
   MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
   MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
-  unsigned oolBytesNeeded = StackArgBytesForWasmABI(coerceArgTypes);
+  unsigned oolBytesNeeded = StackArgBytes(coerceArgTypes);
 
   unsigned bytesNeeded = std::max(normalBytesNeeded, oolBytesNeeded);
 
   // Note the jit caller ensures the stack is aligned *after* the call
   // instruction.
   unsigned frameSize = StackDecrementForCall(WasmStackAlignment,
                                              masm.framePushed(), bytesNeeded);
 
@@ -1161,17 +1147,17 @@ static bool GenerateJitEntry(MacroAssemb
     masm.bind(&next);
   }
 
   Label rejoinBeforeCall;
   masm.bind(&rejoinBeforeCall);
 
   // Convert all the expected values to unboxed values on the stack.
   ArgTypeVector args(fe.funcType());
-  for (WasmABIArgIter iter(args); !iter.done(); iter++) {
+  for (ABIArgIter iter(args); !iter.done(); iter++) {
     unsigned jitArgOffset =
         frameSize + JitFrameLayout::offsetOfActualArg(iter.index());
     Address argv(sp, jitArgOffset);
     bool isStackArg = iter->kind() == ABIArg::Stack;
     switch (iter.mirType()) {
       case MIRType::Int32: {
         Register target = isStackArg ? ScratchIonEntry : iter->gpr();
         masm.unboxInt32(argv, target);
@@ -1239,19 +1225,16 @@ static bool GenerateJitEntry(MacroAssemb
     }
   }
 
   GenPrintf(DebugChannel::Function, masm, "\n");
 
   // Setup wasm register state.
   masm.loadWasmPinnedRegsFromTls();
 
-  masm.storePtr(WasmTlsReg,
-                Address(masm.getStackPointer(), WasmCalleeTLSOffsetBeforeCall));
-
   // Call into the real function. Note that, due to the throw stub, fp, tls
   // and pinned registers may be clobbered.
   masm.assertStackAlignment(WasmStackAlignment);
   CallFuncExport(masm, fe, funcPtr);
   masm.assertStackAlignment(WasmStackAlignment);
 
   // If fp is equal to the FailFP magic value (set by the throw stub), then
   // report the exception to the JIT caller by jumping into the exception
@@ -1334,19 +1317,17 @@ static bool GenerateJitEntry(MacroAssemb
   masm.ret();
 #endif
 
   // Generate an OOL call to the C++ conversion path.
   if (fe.funcType().args().length()) {
     masm.bind(&oolCall);
     masm.setFramePushed(frameSize);
 
-    // Baseline and Ion call C++ runtime via BuiltinThunk with wasm abi, so to
-    // unify the BuiltinThunk's interface we call it here with wasm abi.
-    jit::WasmABIArgIter<MIRTypeVector> argsIter(coerceArgTypes);
+    ABIArgMIRTypeIter argsIter(coerceArgTypes);
 
     // argument 0: function export index.
     if (argsIter->kind() == ABIArg::GPR) {
       masm.movePtr(ImmWord(funcExportIndex), argsIter->gpr());
     } else {
       masm.storePtr(ImmWord(funcExportIndex),
                     Address(sp, argsIter->offsetFromArgBase()));
     }
@@ -1425,28 +1406,28 @@ void wasm::GenerateDirectCallFromJit(Mac
   *callOffset = masm.buildFakeExitFrame(scratch);
   masm.loadJSContext(scratch);
 
   masm.moveStackPtrTo(FramePointer);
   masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::DirectWasmJitCall);
   masm.orPtr(Imm32(ExitOrJitEntryFPTag), FramePointer);
 
   // Move stack arguments to their final locations.
-  unsigned bytesNeeded = StackArgBytesForWasmABI(fe.funcType());
+  unsigned bytesNeeded = StackArgBytes(fe.funcType());
   bytesNeeded = StackDecrementForCall(WasmStackAlignment, masm.framePushed(),
                                       bytesNeeded);
   if (bytesNeeded) {
     masm.reserveStack(bytesNeeded);
   }
 
   GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
             fe.funcIndex());
 
   ArgTypeVector args(fe.funcType());
-  for (WasmABIArgIter iter(args); !iter.done(); iter++) {
+  for (ABIArgIter iter(args); !iter.done(); iter++) {
     MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != scratch);
     MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != FramePointer);
     if (iter->kind() != ABIArg::Stack) {
       switch (iter.mirType()) {
         case MIRType::Int32:
           GenPrintIsize(DebugChannel::Function, masm, iter->gpr());
           break;
         case MIRType::Int64:
@@ -1545,18 +1526,16 @@ void wasm::GenerateDirectCallFromJit(Mac
       }
     }
   }
 
   GenPrintf(DebugChannel::Function, masm, "\n");
 
   // Load tls; from now on, WasmTlsReg is live.
   masm.movePtr(ImmPtr(inst.tlsData()), WasmTlsReg);
-  masm.storePtr(WasmTlsReg,
-                Address(masm.getStackPointer(), WasmCalleeTLSOffsetBeforeCall));
   masm.loadWasmPinnedRegsFromTls();
 
   // Actual call.
   const CodeTier& codeTier = inst.code().codeTier(inst.code().bestTier());
   const MetadataTier& metadata = codeTier.metadata();
   const CodeRange& codeRange = metadata.codeRange(fe);
   void* callee = codeTier.segment().base() + codeRange.funcUncheckedCallEntry();
 
@@ -1822,18 +1801,19 @@ static void FillArgumentArrayForExit(
           } else if (type == MIRType::Int64) {
 #if JS_BITS_PER_WORD == 64
             Register64 scratch64(scratch2);
 #else
             Register64 scratch64(scratch2, scratch3);
 #endif
             masm.load64(src, scratch64);
             GenPrintI64(DebugChannel::Import, masm, scratch64);
-            GenerateBigIntInitialization(masm, sizeof(Frame), scratch64,
-                                         scratch, nullptr, throwLabel);
+            GenerateBigIntInitialization(masm, offsetFromFPToCallerStackArgs,
+                                         scratch64, scratch, nullptr,
+                                         throwLabel);
             masm.storeValue(JSVAL_TYPE_BIGINT, scratch, dst);
           } else if (type == MIRType::RefOrNull) {
             // This works also for FuncRef because it is distinguishable from a
             // boxed AnyRef.
             masm.loadPtr(src, scratch);
             UnboxAnyrefIntoValue(masm, tls, scratch, dst, scratch2);
           } else if (IsFloatingPointType(type)) {
             ScratchDoubleScope dscratch(masm);
@@ -1892,34 +1872,34 @@ static bool GenerateImportFunction(jit::
                                    FuncTypeIdDesc funcTypeId,
                                    FuncOffsets* offsets) {
   AssertExpectedSP(masm);
 
   GenerateFunctionPrologue(masm, funcTypeId, Nothing(), offsets);
 
   MOZ_ASSERT(masm.framePushed() == 0);
   const unsigned sizeOfTlsSlot = sizeof(void*);
-  unsigned framePushed = StackDecrementForCall(
-      WasmStackAlignment,
-      sizeof(Frame),  // pushed by prologue
-      StackArgBytesForWasmABI(fi.funcType()) + sizeOfTlsSlot);
+  unsigned framePushed =
+      StackDecrementForCall(WasmStackAlignment,
+                            sizeof(Frame),  // pushed by prologue
+                            StackArgBytes(fi.funcType()) + sizeOfTlsSlot);
   masm.wasmReserveStackChecked(framePushed, BytecodeOffset(0));
   MOZ_ASSERT(masm.framePushed() == framePushed);
 
   masm.storePtr(WasmTlsReg,
                 Address(masm.getStackPointer(), framePushed - sizeOfTlsSlot));
 
   // The argument register state is already setup by our caller. We just need
   // to be sure not to clobber it before the call.
   Register scratch = ABINonArgReg0;
 
   // Copy our frame's stack arguments to the callee frame's stack argument.
   unsigned offsetFromFPToCallerStackArgs = sizeof(Frame);
   ArgTypeVector args(fi.funcType());
-  for (WasmABIArgIter i(args); !i.done(); i++) {
+  for (ABIArgIter i(args); !i.done(); i++) {
     if (i->kind() != ABIArg::Stack) {
       continue;
     }
 
     Address src(FramePointer,
                 offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
     Address dst(masm.getStackPointer(), i->offsetFromArgBase());
     GenPrintf(DebugChannel::Import, masm,
@@ -1994,30 +1974,30 @@ static bool GenerateImportInterpExit(Mac
   MIRTypeVector invokeArgTypes;
   MOZ_ALWAYS_TRUE(invokeArgTypes.append(typeArray, ArrayLength(typeArray)));
 
   // At the point of the call, the stack layout shall be (sp grows to the left):
   //  | stack args | padding | argv[] | padding | retaddr | caller stack args |
   // The padding between stack args and argv ensures that argv is aligned. The
   // padding between argv and retaddr ensures that sp is aligned.
   unsigned argOffset =
-      AlignBytes(StackArgBytesForNativeABI(invokeArgTypes), sizeof(double));
+      AlignBytes(StackArgBytes(invokeArgTypes), sizeof(double));
   // The abiArgCount includes a stack result pointer argument if needed.
   unsigned abiArgCount = ArgTypeVector(fi.funcType()).lengthWithStackResults();
   unsigned argBytes = std::max<size_t>(1, abiArgCount) * sizeof(Value);
   unsigned framePushed =
       StackDecrementForCall(ABIStackAlignment,
                             sizeof(Frame),  // pushed by prologue
                             argOffset + argBytes);
 
   GenerateExitPrologue(masm, framePushed, ExitReason::Fixed::ImportInterp,
                        offsets);
 
   // Fill the argument array.
-  unsigned offsetFromFPToCallerStackArgs = sizeof(FrameWithTls);
+  unsigned offsetFromFPToCallerStackArgs = sizeof(Frame);
   Register scratch = ABINonArgReturnReg0;
   Register scratch2 = ABINonArgReturnReg1;
   // The scratch3 reg does not need to be non-volatile, but has to be
   // distinct from scratch & scratch2.
   Register scratch3 = ABINonVolatileReg;
   FillArgumentArrayForExit(masm, WasmTlsReg, funcImportIndex, fi.funcType(),
                            argOffset, offsetFromFPToCallerStackArgs, scratch,
                            scratch2, scratch3, ToValue(false), throwLabel);
@@ -2228,17 +2208,17 @@ static bool GenerateImportJitExit(MacroA
   argOffset += sizeof(size_t);
   MOZ_ASSERT(argOffset == sizeOfPreFrame + frameAlignExtra);
 
   // 4. |this| value.
   masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), argOffset));
   argOffset += sizeof(Value);
 
   // 5. Fill the arguments.
-  const uint32_t offsetFromFPToCallerStackArgs = sizeof(FrameWithTls);
+  const uint32_t offsetFromFPToCallerStackArgs = sizeof(Frame);
   Register scratch = ABINonArgReturnReg1;   // Repeatedly clobbered
   Register scratch2 = ABINonArgReturnReg0;  // Reused as callee below
   // The scratch3 reg does not need to be non-volatile, but has to be
   // distinct from scratch & scratch2.
   Register scratch3 = ABINonVolatileReg;
   FillArgumentArrayForExit(masm, WasmTlsReg, funcImportIndex, fi.funcType(),
                            argOffset, offsetFromFPToCallerStackArgs, scratch,
                            scratch2, scratch3, ToValue(true), throwLabel);
@@ -2406,17 +2386,17 @@ static bool GenerateImportJitExit(MacroA
     masm.bind(&oolConvert);
     masm.setFramePushed(nativeFramePushed);
 
     // Coercion calls use the following stack layout (sp grows to the left):
     //   | args | padding | Value argv[1] | padding | exit Frame |
     MIRTypeVector coerceArgTypes;
     MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
     unsigned offsetToCoerceArgv =
-        AlignBytes(StackArgBytesForWasmABI(coerceArgTypes), sizeof(Value));
+        AlignBytes(StackArgBytes(coerceArgTypes), sizeof(Value));
     MOZ_ASSERT(nativeFramePushed >= offsetToCoerceArgv + sizeof(Value));
     AssertStackAlignment(masm, ABIStackAlignment);
 
     // Store return value into argv[0].
     masm.storeValue(JSReturnOperand,
                     Address(masm.getStackPointer(), offsetToCoerceArgv));
 
     // From this point, it's safe to reuse the scratch register (which
@@ -2537,24 +2517,24 @@ bool wasm::GenerateBuiltinThunk(MacroAss
                                 CallableOffsets* offsets) {
   AssertExpectedSP(masm);
   masm.setFramePushed(0);
 
   ABIFunctionArgs args(abiType);
   uint32_t framePushed =
       StackDecrementForCall(ABIStackAlignment,
                             sizeof(Frame),  // pushed by prologue
-                            StackArgBytesForNativeABI(args));
+                            StackArgBytes(args));
 
   GenerateExitPrologue(masm, framePushed, exitReason, offsets);
 
   // Copy out and convert caller arguments, if needed.
-  unsigned offsetFromFPToCallerStackArgs = sizeof(FrameWithTls);
+  unsigned offsetFromFPToCallerStackArgs = sizeof(Frame);
   Register scratch = ABINonArgReturnReg0;
-  for (ABIArgIter i(args); !i.done(); i++) {
+  for (ABIArgIter<ABIFunctionArgs> i(args); !i.done(); i++) {
     if (i->argInRegister()) {
 #ifdef JS_CODEGEN_ARM
       // Non hard-fp passes the args values in GPRs.
       if (!UseHardFpABI() && IsFloatingPointType(i.mirType())) {
         FloatRegister input = i->fpu();
         if (i.mirType() == MIRType::Float32) {
           masm.ma_vxfer(input, Register::FromCode(input.id()));
         } else if (i.mirType() == MIRType::Double) {
--- a/js/src/wasm/WasmTypes.cpp
+++ b/js/src/wasm/WasmTypes.cpp
@@ -629,21 +629,20 @@ size_t wasm::ComputeMappedSize(uint64_t 
 
   MOZ_ASSERT(boundsCheckLimit % gc::SystemPageSize() == 0);
   MOZ_ASSERT(GuardSize % gc::SystemPageSize() == 0);
   return boundsCheckLimit + GuardSize;
 }
 
 /* static */
 DebugFrame* DebugFrame::from(Frame* fp) {
-  MOZ_ASSERT(
-      GetNearestEffectiveTls(fp)->instance->code().metadata().debugEnabled);
+  MOZ_ASSERT(fp->instance()->code().metadata().debugEnabled);
   auto* df =
       reinterpret_cast<DebugFrame*>((uint8_t*)fp - DebugFrame::offsetOfFrame());
-  MOZ_ASSERT(GetNearestEffectiveTls(fp)->instance == df->instance());
+  MOZ_ASSERT(fp->instance() == df->instance());
   return df;
 }
 
 void DebugFrame::alignmentStaticAsserts() {
   // VS2017 doesn't consider offsetOfFrame() to be a constexpr, so we have
   // to use offsetof directly. These asserts can't be at class-level
   // because the type is incomplete.
 
@@ -654,20 +653,16 @@ void DebugFrame::alignmentStaticAsserts(
 #ifdef JS_CODEGEN_ARM64
   // This constraint may or may not be necessary.  If you hit this because
   // you've changed the frame size then feel free to remove it, but be extra
   // aware of possible problems.
   static_assert(sizeof(DebugFrame) % 16 == 0, "ARM64 SP alignment");
 #endif
 }
 
-Instance* DebugFrame::instance() const {
-  return GetNearestEffectiveTls(&frame_)->instance;
-}
-
 GlobalObject* DebugFrame::global() const {
   return &instance()->object()->global();
 }
 
 bool DebugFrame::hasGlobal(const GlobalObject* global) const {
   return global == &instance()->objectUnbarriered()->global();
 }
 
--- a/js/src/wasm/WasmTypes.h
+++ b/js/src/wasm/WasmTypes.h
@@ -45,18 +45,18 @@
 #include "wasm/WasmConstants.h"
 #include "wasm/WasmUtility.h"
 
 namespace js {
 
 namespace jit {
 class JitScript;
 enum class RoundingMode;
-template <class VecT, class ABIArgGeneratorT>
-class ABIArgIterBase;
+template <class VecT>
+class ABIArgIter;
 }  // namespace jit
 
 // This is a widespread header, so lets keep out the core wasm impl types.
 
 typedef GCVector<JSFunction*, 0, SystemAllocPolicy> JSFunctionVector;
 
 class WasmMemoryObject;
 using GCPtrWasmMemoryObject = GCPtr<WasmMemoryObject*>;
@@ -1297,23 +1297,23 @@ struct FuncTypeHashPolicy {
 // non-synthetic arguments.  We call those "natural" arguments.
 
 enum class StackResults { HasStackResults, NoStackResults };
 
 class ArgTypeVector {
   const ValTypeVector& args_;
   bool hasStackResults_;
 
-  // To allow ABIArgIterBase<VecT, ABIArgGeneratorT>, we define a private
-  // length() method.  To prevent accidental errors, other users need to be
+  // To allow ABIArgIter<ArgTypeVector>, we define a private length()
+  // method.  To prevent accidental errors, other users need to be
   // explicit and call lengthWithStackResults() or
   // lengthWithoutStackResults().
   size_t length() const { return args_.length() + size_t(hasStackResults_); }
-  template <class VecT, class ABIArgGeneratorT>
-  friend class jit::ABIArgIterBase;
+  friend jit::ABIArgIter<ArgTypeVector>;
+  friend jit::ABIArgIter<const ArgTypeVector>;
 
  public:
   ArgTypeVector(const ValTypeVector& args, StackResults stackResults)
       : args_(args),
         hasStackResults_(stackResults == StackResults::HasStackResults) {}
   explicit ArgTypeVector(const FuncType& funcType);
 
   bool hasSyntheticStackResultPointerArg() const { return hasStackResults_; }
@@ -2571,17 +2571,16 @@ class CallSiteDesc {
   }
   CallSiteDesc(uint32_t lineOrBytecode, Kind kind)
       : lineOrBytecode_(lineOrBytecode), kind_(kind) {
     MOZ_ASSERT(kind == Kind(kind_));
     MOZ_ASSERT(lineOrBytecode == lineOrBytecode_);
   }
   uint32_t lineOrBytecode() const { return lineOrBytecode_; }
   Kind kind() const { return Kind(kind_); }
-  bool mightBeCrossInstance() const { return kind() == CallSiteDesc::Dynamic; }
 };
 
 class CallSite : public CallSiteDesc {
   uint32_t returnAddressOffset_;
 
  public:
   CallSite() : returnAddressOffset_(0) {}
 
@@ -3253,16 +3252,17 @@ class Frame {
   }
 
   void** addressOfReturnAddress() {
     return reinterpret_cast<void**>(&returnAddress_);
   }
 
   uint8_t* rawCaller() const { return callerFP_; }
   TlsData* tls() const { return tls_; }
+  Instance* instance() const { return tls()->instance; }
 
   Frame* wasmCaller() const {
     MOZ_ASSERT(!callerIsExitOrJitEntryFP());
     return reinterpret_cast<Frame*>(callerFP_);
   }
 
   bool callerIsExitOrJitEntryFP() const {
     return isExitOrJitEntryFP(callerFP_);
@@ -3289,45 +3289,16 @@ class Frame {
     MOZ_ASSERT(!isExitOrJitEntryFP(fp));
     return reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(fp) |
                                       ExitOrJitEntryFPTag);
   }
 };
 
 static_assert(!std::is_polymorphic_v<Frame>, "Frame doesn't need a vtable.");
 
-class FrameWithTls : public Frame {
-  TlsData* calleeTls_;
-  TlsData* callerTls_;
-
- public:
-  TlsData* calleeTls() { return calleeTls_; }
-  TlsData* callerTls() { return callerTls_; }
-
-  constexpr static uint32_t sizeWithoutFrame() {
-    return sizeof(wasm::FrameWithTls) - sizeof(wasm::Frame);
-  }
-
-  constexpr static uint32_t calleeTLSOffset() {
-    return offsetof(FrameWithTls, calleeTls_) - sizeof(wasm::Frame);
-  }
-
-  constexpr static uint32_t callerTLSOffset() {
-    return offsetof(FrameWithTls, callerTls_) - sizeof(wasm::Frame);
-  }
-};
-
-static_assert(FrameWithTls::calleeTLSOffset() == 0u,
-              "Callee tls stored right above the return address.");
-static_assert(FrameWithTls::callerTLSOffset() == sizeof(void*),
-              "Caller tls stored right above the callee tls.");
-
-static_assert(FrameWithTls::sizeWithoutFrame() == 2 * sizeof(void*),
-              "There are only two additional slots");
-
 #if defined(JS_CODEGEN_ARM64)
 static_assert(sizeof(Frame) % 16 == 0, "frame is aligned");
 #endif
 
 // A DebugFrame is a Frame with additional fields that are added after the
 // normal function prologue by the baseline compiler. If a Module is compiled
 // with debugging enabled, then all its code creates DebugFrames on the stack
 // instead of just Frames. These extra fields are used by the Debugger API.
@@ -3408,17 +3379,17 @@ class DebugFrame {
  private:
   // The Frame goes at the end since the stack grows down.
   Frame frame_;
 
  public:
   static DebugFrame* from(Frame* fp);
   Frame& frame() { return frame_; }
   uint32_t funcIndex() const { return funcIndex_; }
-  Instance* instance() const;
+  Instance* instance() const { return frame_.instance(); }
   GlobalObject* global() const;
   bool hasGlobal(const GlobalObject* global) const;
   JSObject* environmentChain() const;
   bool getLocal(uint32_t localIndex, MutableHandleValue vp);
 
   // The return value must be written from the unboxed representation in the
   // results union into cachedReturnJSValue_ by updateReturnJSValue() before
   // returnValue() can return a Handle to it.