Bug 1658279 - Part 1: Optimise ArraySlice in CacheIR and Warp. r=jandem
author André Bargull <andre.bargull@gmail.com>
Fri, 14 Aug 2020 08:03:27 +0000
changeset 544652 75d3ba32d90a90a39d9fb188a6d41209e93fbef7
parent 544651 2420dc816657755690aa835e081e33c431d883e1
child 544653 5e227e32ff865a2dd22d411666fd9ef5e623a14a
push id 124179
push user cbrindusan@mozilla.com
push date Fri, 14 Aug 2020 10:49:10 +0000
treeherder autoland@37746b10f75c [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers jandem
bugs 1658279
milestone 81.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1658279 - Part 1: Optimise ArraySlice in CacheIR and Warp. r=jandem Optimise Array.prototype.slice for packed arrays in CacheIR and Warp. Ion also optimises `slice()` for non-packed arrays, but unless non-packed arrays actually appear in user code, only optimising packed arrays should be okay for now. The second argument to `slice()` defaults to the array length, which means we need a non-result version of `LoadInt32ArrayLength`. Drive-by change: - Move `IonCacheIRCompiler::emitArrayPush` to the bottom of the file, next to the other non-implemented methods. Also change from `MOZ_ASSERT_UNREACHABLE` to `MOZ_CRASH` for consistency. Differential Revision: https://phabricator.services.mozilla.com/D86532
js/src/builtin/Array.cpp
js/src/jit-test/tests/cacheir/array-slice.js
js/src/jit/BaselineBailouts.cpp
js/src/jit/BaselineCacheIRCompiler.cpp
js/src/jit/CacheIR.cpp
js/src/jit/CacheIR.h
js/src/jit/CacheIRCompiler.cpp
js/src/jit/CacheIROps.yaml
js/src/jit/CodeGenerator.cpp
js/src/jit/IonCacheIRCompiler.cpp
js/src/jit/IonTypes.h
js/src/jit/Lowering.cpp
js/src/jit/MacroAssembler.cpp
js/src/jit/MacroAssembler.h
js/src/jit/WarpCacheIRTranspiler.cpp
--- a/js/src/builtin/Array.cpp
+++ b/js/src/builtin/Array.cpp
@@ -3608,16 +3608,18 @@ static bool ArraySliceDenseKernel(JSCont
   MOZ_ASSERT(count >= result->length());
   result->setLength(cx, count);
 
   return true;
 }
 
 JSObject* js::ArraySliceDense(JSContext* cx, HandleObject obj, int32_t begin,
                               int32_t end, HandleObject result) {
+  MOZ_ASSERT_IF(jit::JitOptions.warpBuilder, IsPackedArray(obj));
+
   if (result && IsArraySpecies(cx, obj)) {
     if (!ArraySliceDenseKernel(cx, &obj->as<ArrayObject>(), begin, end,
                                &result->as<ArrayObject>())) {
       return nullptr;
     }
     return result;
   }
 
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/cacheir/array-slice.js
@@ -0,0 +1,39 @@
+function packed() {
+  var a = [0, 1, 2, 3];
+  for (var i = 0; i <= 100; ++i) {
+    var r = a.slice(0);
+    assertEq(r.length, 4);
+  }
+}
+
+for (var i = 0; i < 2; ++i) {
+  packed();
+}
+
+function packedThenUnpacked() {
+  var a = [0, 1, 2, 3];
+  var q = 0;
+  for (var i = 0; i <= 100; ++i) {
+    if (i === 100) a[10] = 0;
+
+    var r = a.slice(0);
+    assertEq(r.length, i < 100 ? 4 : 11);
+  }
+}
+
+for (var i = 0; i < 2; ++i) {
+  packedThenUnpacked();
+}
+
+function unpacked() {
+  var a = [0, 1, /* hole */ , 3];
+  for (var i = 0; i <= 100; ++i) {
+    var r = a.slice(0);
+    assertEq(r.length, 4);
+    assertEq(2 in r, false);
+  }
+}
+
+for (var i = 0; i < 2; ++i) {
+  unpacked();
+}
--- a/js/src/jit/BaselineBailouts.cpp
+++ b/js/src/jit/BaselineBailouts.cpp
@@ -2028,16 +2028,17 @@ bool jit::FinishBailoutToBaseline(Baseli
     case BailoutKind::SpecificSymbolGuard:
     case BailoutKind::NonInt32ArrayLength:
     case BailoutKind::ProtoGuard:
     case BailoutKind::ProxyGuard:
     case BailoutKind::NotProxyGuard:
     case BailoutKind::NotDOMProxyGuard:
     case BailoutKind::NotArrayBufferMaybeSharedGuard:
     case BailoutKind::ArrayPopShift:
+    case BailoutKind::ArraySlice:
     case BailoutKind::TagNotEqualGuard:
       // Do nothing.
       break;
 
     case BailoutKind::FirstExecution:
       // Do not return directly, as this was not frequent in the first place,
       // thus rely on the check for frequent bailouts to recompile the current
       // script.
--- a/js/src/jit/BaselineCacheIRCompiler.cpp
+++ b/js/src/jit/BaselineCacheIRCompiler.cpp
@@ -1459,16 +1459,60 @@ bool BaselineCacheIRCompiler::emitArrayP
 
   // Return value is new length.
   masm.add32(Imm32(1), scratchLength);
   masm.tagValue(JSVAL_TYPE_INT32, scratchLength, val);
 
   return true;
 }
 
+bool BaselineCacheIRCompiler::emitPackedArraySliceResult(
+    uint32_t templateObjectOffset, ObjOperandId arrayId, Int32OperandId beginId,
+    Int32OperandId endId) {
+  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+
+  AutoOutputRegister output(*this);
+  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
+  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
+
+  Register array = allocator.useRegister(masm, arrayId);
+  Register begin = allocator.useRegister(masm, beginId);
+  Register end = allocator.useRegister(masm, endId);
+
+  FailurePath* failure;
+  if (!addFailurePath(&failure)) {
+    return false;
+  }
+
+  masm.branchArrayIsNotPacked(array, scratch1, scratch2, failure->label());
+
+  allocator.discardStack(masm);
+
+  AutoStubFrame stubFrame(*this);
+  stubFrame.enter(masm, scratch1);
+
+  // Don't attempt to pre-allocate the object, instead always use the slow
+  // path.
+  ImmPtr result(nullptr);
+
+  masm.Push(result);
+  masm.Push(end);
+  masm.Push(begin);
+  masm.Push(array);
+
+  using Fn =
+      JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
+  callVM<Fn, ArraySliceDense>(masm);
+
+  stubFrame.leave(masm);
+
+  masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, output.valueReg());
+  return true;
+}
+
 bool BaselineCacheIRCompiler::emitIsArrayResult(ValOperandId inputId) {
   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
 
   AutoOutputRegister output(*this);
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
 
   ValueOperand val = allocator.useValueRegister(masm, inputId);
--- a/js/src/jit/CacheIR.cpp
+++ b/js/src/jit/CacheIR.cpp
@@ -5223,16 +5223,94 @@ AttachDecision CallIRGenerator::tryAttac
 
   // Set the stub kind to Regular
   cacheIRStubKind_ = BaselineCacheIRStubKind::Regular;
 
   trackAttached("ArrayJoin");
   return AttachDecision::Attach;
 }
 
+AttachDecision CallIRGenerator::tryAttachArraySlice(HandleFunction callee) {
+  // Only handle argc <= 2.
+  if (argc_ > 2) {
+    return AttachDecision::NoAction;
+  }
+
+  // Only optimize if |this| is a packed array.
+  if (!thisval_.isObject() || !IsPackedArray(&thisval_.toObject())) {
+    return AttachDecision::NoAction;
+  }
+
+  // Arguments for the sliced region must be integers.
+  if (argc_ > 0 && !args_[0].isInt32()) {
+    return AttachDecision::NoAction;
+  }
+  if (argc_ > 1 && !args_[1].isInt32()) {
+    return AttachDecision::NoAction;
+  }
+
+  RootedArrayObject arr(cx_, &thisval_.toObject().as<ArrayObject>());
+
+  // The group of the result will be dynamically fixed up to match the input
+  // object, allowing us to handle 'this' objects that might have more than one
+  // group. Make sure that no singletons can be sliced here.
+  if (arr->isSingleton()) {
+    return AttachDecision::NoAction;
+  }
+
+  JSObject* templateObj =
+      NewFullyAllocatedArrayTryReuseGroup(cx_, arr, 0, TenuredObject);
+  if (!templateObj) {
+    cx_->recoverFromOutOfMemory();
+    return AttachDecision::NoAction;
+  }
+
+  // Initialize the input operand.
+  Int32OperandId argcId(writer.setInputOperandId(0));
+
+  // Guard callee is the 'slice' native function.
+  emitNativeCalleeGuard(callee);
+
+  ValOperandId thisValId =
+      writer.loadArgumentFixedSlot(ArgumentKind::This, argc_);
+  ObjOperandId objId = writer.guardToObject(thisValId);
+  writer.guardClass(objId, GuardClassKind::Array);
+
+  Int32OperandId int32BeginId;
+  if (argc_ > 0) {
+    ValOperandId beginId =
+        writer.loadArgumentFixedSlot(ArgumentKind::Arg0, argc_);
+    int32BeginId = writer.guardToInt32(beginId);
+  } else {
+    int32BeginId = writer.loadInt32Constant(0);
+  }
+
+  Int32OperandId int32EndId;
+  if (argc_ > 1) {
+    ValOperandId endId =
+        writer.loadArgumentFixedSlot(ArgumentKind::Arg1, argc_);
+    int32EndId = writer.guardToInt32(endId);
+  } else {
+    int32EndId = writer.loadInt32ArrayLength(objId);
+  }
+
+  writer.packedArraySliceResult(templateObj, objId, int32BeginId, int32EndId);
+
+  if (!JitOptions.warpBuilder) {
+    // Store the template object for BaselineInspector.
+    writer.metaNativeTemplateObject(callee, templateObj);
+  }
+
+  writer.typeMonitorResult();
+  cacheIRStubKind_ = BaselineCacheIRStubKind::Monitored;
+
+  trackAttached("ArraySlice");
+  return AttachDecision::Attach;
+}
+
 AttachDecision CallIRGenerator::tryAttachArrayIsArray(HandleFunction callee) {
   // Need a single argument.
   if (argc_ != 1) {
     return AttachDecision::NoAction;
   }
 
   // Initialize the input operand.
   Int32OperandId argcId(writer.setInputOperandId(0));
@@ -8222,16 +8300,18 @@ AttachDecision CallIRGenerator::tryAttac
       return tryAttachArrayConstructor(callee);
     case InlinableNative::ArrayPush:
       return tryAttachArrayPush(callee);
     case InlinableNative::ArrayPop:
     case InlinableNative::ArrayShift:
       return tryAttachArrayPopShift(callee, native);
     case InlinableNative::ArrayJoin:
       return tryAttachArrayJoin(callee);
+    case InlinableNative::ArraySlice:
+      return tryAttachArraySlice(callee);
     case InlinableNative::ArrayIsArray:
       return tryAttachArrayIsArray(callee);
 
     // DataView natives.
     case InlinableNative::DataViewGetInt8:
       return tryAttachDataViewGet(callee, Scalar::Int8);
     case InlinableNative::DataViewGetUint8:
       return tryAttachDataViewGet(callee, Scalar::Uint8);
@@ -8759,16 +8839,27 @@ bool CallIRGenerator::getTemplateObjectF
         return true;
       }
 
       RootedObject obj(cx_, &thisval_.toObject());
       if (obj->isSingleton()) {
         return true;
       }
 
+      if (IsPackedArray(obj)) {
+        // This case is handled by tryAttachArraySlice.
+        return true;
+      }
+
+      // TODO(Warp): Support non-packed arrays in tryAttachArraySlice if they're
+      // common in user code.
+      if (JitOptions.warpBuilder) {
+        return true;
+      }
+
       res.set(NewFullyAllocatedArrayTryReuseGroup(cx_, obj, 0, TenuredObject));
       return !!res;
     }
 
     case InlinableNative::String: {
       RootedString emptyString(cx_, cx_->runtime()->emptyString);
       res.set(StringObject::create(cx_, emptyString, /* proto = */ nullptr,
                                    TenuredObject));
--- a/js/src/jit/CacheIR.h
+++ b/js/src/jit/CacheIR.h
@@ -1631,16 +1631,17 @@ class MOZ_RAII CallIRGenerator : public 
 
   AtomicsReadWriteModifyOperands emitAtomicsReadWriteModifyOperands(
       HandleFunction callee);
 
   AttachDecision tryAttachArrayPush(HandleFunction callee);
   AttachDecision tryAttachArrayPopShift(HandleFunction callee,
                                         InlinableNative native);
   AttachDecision tryAttachArrayJoin(HandleFunction callee);
+  AttachDecision tryAttachArraySlice(HandleFunction callee);
   AttachDecision tryAttachArrayIsArray(HandleFunction callee);
   AttachDecision tryAttachDataViewGet(HandleFunction callee, Scalar::Type type);
   AttachDecision tryAttachDataViewSet(HandleFunction callee, Scalar::Type type);
   AttachDecision tryAttachUnsafeGetReservedSlot(HandleFunction callee,
                                                 InlinableNative native);
   AttachDecision tryAttachUnsafeSetReservedSlot(HandleFunction callee);
   AttachDecision tryAttachIsSuspendedGenerator(HandleFunction callee);
   AttachDecision tryAttachToObject(HandleFunction callee,
--- a/js/src/jit/CacheIRCompiler.cpp
+++ b/js/src/jit/CacheIRCompiler.cpp
@@ -2474,16 +2474,35 @@ bool CacheIRCompiler::emitLoadInt32Array
   masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch);
 
   // Guard length fits in an int32.
   masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
   EmitStoreResult(masm, scratch, JSVAL_TYPE_INT32, output);
   return true;
 }
 
+bool CacheIRCompiler::emitLoadInt32ArrayLength(ObjOperandId objId,
+                                               Int32OperandId resultId) {
+  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
+  Register obj = allocator.useRegister(masm, objId);
+  Register res = allocator.defineRegister(masm, resultId);
+
+  FailurePath* failure;
+  if (!addFailurePath(&failure)) {
+    return false;
+  }
+
+  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), res);
+  masm.load32(Address(res, ObjectElements::offsetOfLength()), res);
+
+  // Guard length fits in an int32.
+  masm.branchTest32(Assembler::Signed, res, res, failure->label());
+  return true;
+}
+
 bool CacheIRCompiler::emitDoubleAddResult(NumberOperandId lhsId,
                                           NumberOperandId rhsId) {
   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   AutoOutputRegister output(*this);
 
   // Float register must be preserved. The BinaryArith ICs use
   // the fact that baseline has them available, as well as fixed temps on
   // LBinaryCache.
@@ -8024,16 +8043,20 @@ template <>
 struct ReturnTypeToJSValueType<JSString*> {
   static constexpr JSValueType result = JSVAL_TYPE_STRING;
 };
 template <>
 struct ReturnTypeToJSValueType<BigInt*> {
   static constexpr JSValueType result = JSVAL_TYPE_BIGINT;
 };
 template <>
+struct ReturnTypeToJSValueType<JSObject*> {
+  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
+};
+template <>
 struct ReturnTypeToJSValueType<ArrayIteratorObject*> {
   static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
 };
 template <>
 struct ReturnTypeToJSValueType<StringIteratorObject*> {
   static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
 };
 template <>
--- a/js/src/jit/CacheIROps.yaml
+++ b/js/src/jit/CacheIROps.yaml
@@ -917,16 +917,26 @@
 
 - name: PackedArrayShiftResult
   shared: true
   transpile: true
   cost_estimate: 4
   args:
     array: ObjId
 
+- name: PackedArraySliceResult
+  shared: false
+  transpile: true
+  cost_estimate: 5
+  args:
+    templateObject: ObjectField
+    array: ObjId
+    begin: Int32Id
+    end: Int32Id
+
 - name: IsArrayResult
   shared: false
   transpile: true
   cost_estimate: 5
   args:
     input: ValId
 
 - name: StoreFixedSlotUndefinedResult
@@ -1657,16 +1667,24 @@
 
 - name: LoadInt32ArrayLengthResult
   shared: true
   transpile: true
   cost_estimate: 1
   args:
     obj: ObjId
 
+- name: LoadInt32ArrayLength
+  shared: true
+  transpile: true
+  cost_estimate: 1
+  args:
+    obj: ObjId
+    result: Int32Id
+
 - name: LoadArgumentsObjectArgResult
   shared: true
   transpile: false
   cost_estimate: 2
   args:
     obj: ObjId
     index: Int32Id
 
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -10529,16 +10529,23 @@ void CodeGenerator::visitArraySlice(LArr
   Register object = ToRegister(lir->object());
   Register begin = ToRegister(lir->begin());
   Register end = ToRegister(lir->end());
   Register temp1 = ToRegister(lir->temp1());
   Register temp2 = ToRegister(lir->temp2());
 
   Label call, fail;
 
+  if (JitOptions.warpBuilder) {
+    Label bail;
+    masm.branchArrayIsNotPacked(object, temp1, temp2, &bail);
+
+    bailoutFrom(&bail, lir->snapshot());
+  }
+
   // Try to allocate an object.
   TemplateObject templateObject(lir->mir()->templateObj());
   masm.createGCObject(temp1, temp2, templateObject, lir->mir()->initialHeap(),
                       &fail);
 
   // Fixup the group of the result in case it doesn't match the template object.
   masm.copyObjGroupNoPreBarrier(object, temp1, temp2);
 
--- a/js/src/jit/IonCacheIRCompiler.cpp
+++ b/js/src/jit/IonCacheIRCompiler.cpp
@@ -1861,21 +1861,16 @@ bool IonCacheIRCompiler::emitStoreDenseE
   masm.bind(&doStore);
   EmitStoreDenseElement(masm, val, scratch1, element);
   if (needsPostBarrier()) {
     emitPostBarrierElement(obj, val, scratch1, index);
   }
   return true;
 }
 
-bool IonCacheIRCompiler::emitArrayPush(ObjOperandId objId, ValOperandId rhsId) {
-  MOZ_ASSERT_UNREACHABLE("emitArrayPush not supported for IonCaches.");
-  return false;
-}
-
 bool IonCacheIRCompiler::emitCallNativeSetter(ObjOperandId objId,
                                               uint32_t setterOffset,
                                               ValOperandId rhsId) {
   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
 
   Register obj = allocator.useRegister(masm, objId);
   JSFunction* target = &objectStubField(setterOffset)->as<JSFunction>();
@@ -2434,16 +2429,26 @@ bool IonCacheIRCompiler::emitLoadArgumen
   MOZ_CRASH("Call ICs not used in ion");
 }
 
 bool IonCacheIRCompiler::emitGuardFunApply(Int32OperandId argcId,
                                            CallFlags flags) {
   MOZ_CRASH("Call ICs not used in ion");
 }
 
+bool IonCacheIRCompiler::emitArrayPush(ObjOperandId objId, ValOperandId rhsId) {
+  MOZ_CRASH("Call ICs not used in ion");
+}
+
+bool IonCacheIRCompiler::emitPackedArraySliceResult(
+    uint32_t templateObjectOffset, ObjOperandId arrayId, Int32OperandId beginId,
+    Int32OperandId endId) {
+  MOZ_CRASH("Call ICs not used in ion");
+}
+
 bool IonCacheIRCompiler::emitIsArrayResult(ValOperandId inputId) {
   MOZ_CRASH("Call ICs not used in ion");
 }
 
 bool IonCacheIRCompiler::emitIsTypedArrayResult(ObjOperandId objId,
                                                 bool isPossiblyWrapped) {
   MOZ_CRASH("Call ICs not used in ion");
 }
--- a/js/src/jit/IonTypes.h
+++ b/js/src/jit/IonTypes.h
@@ -180,16 +180,19 @@ enum class BailoutKind : uint8_t {
   NotDOMProxyGuard,
 
   // Bailout triggered by MGuardIsNotArrayBufferMaybeShared.
   NotArrayBufferMaybeSharedGuard,
 
   // Bailout triggered by MArrayPopShift.
   ArrayPopShift,
 
+  // Bailout triggered by MArraySlice.
+  ArraySlice,
+
   // Bailout triggered by MGuardValue.
   ValueGuard,
 
   // Bailout triggered by MGuardNotOptimizedArguments.
   NotOptimizedArgumentsGuard,
 
   // Bailout triggered by MGuardNullOrUndefined.
   NullOrUndefinedGuard,
@@ -282,16 +285,18 @@ inline const char* BailoutKindString(Bai
     case BailoutKind::NotProxyGuard:
       return "BailoutKind::NotProxyGuard";
     case BailoutKind::NotDOMProxyGuard:
       return "BailoutKind::NotDOMProxyGuard";
     case BailoutKind::NotArrayBufferMaybeSharedGuard:
       return "BailoutKind::NotArrayBufferMaybeSharedGuard";
     case BailoutKind::ArrayPopShift:
       return "BailoutKind::ArrayPopShift";
+    case BailoutKind::ArraySlice:
+      return "BailoutKind::ArraySlice";
     case BailoutKind::ValueGuard:
       return "BailoutKind::ValueGuard";
     case BailoutKind::NotOptimizedArgumentsGuard:
       return "BailoutKind::NotOptimizedArgumentsGuard";
     case BailoutKind::NullOrUndefinedGuard:
       return "BailoutKind::NullOrUndefinedGuard";
     case BailoutKind::TagNotEqualGuard:
       return "BailoutKind::TagNotEqualGuard";
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -3531,16 +3531,19 @@ void LIRGenerator::visitArraySlice(MArra
   MOZ_ASSERT(ins->begin()->type() == MIRType::Int32);
   MOZ_ASSERT(ins->end()->type() == MIRType::Int32);
 
   LArraySlice* lir = new (alloc())
       LArraySlice(useFixedAtStart(ins->object(), CallTempReg0),
                   useFixedAtStart(ins->begin(), CallTempReg1),
                   useFixedAtStart(ins->end(), CallTempReg2),
                   tempFixed(CallTempReg3), tempFixed(CallTempReg4));
+  if (JitOptions.warpBuilder) {
+    assignSnapshot(lir, BailoutKind::ArraySlice);
+  }
   defineReturn(lir, ins);
   assignSafepoint(lir, ins);
 }
 
 void LIRGenerator::visitArrayJoin(MArrayJoin* ins) {
   MOZ_ASSERT(ins->type() == MIRType::String);
   MOZ_ASSERT(ins->array()->type() == MIRType::Object);
   MOZ_ASSERT(ins->sep()->type() == MIRType::String);
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -3913,36 +3913,42 @@ void MacroAssembler::debugAssertObjHasFi
   branchTest32(Assembler::NonZero,
                Address(scratch, Shape::offsetOfImmutableFlags()),
                Imm32(Shape::fixedSlotsMask()), &hasFixedSlots);
   assumeUnreachable("Expected a fixed slot");
   bind(&hasFixedSlots);
 #endif
 }
 
+void MacroAssembler::branchArrayIsNotPacked(Register array, Register temp1,
+                                            Register temp2, Label* label) {
+  loadPtr(Address(array, NativeObject::offsetOfElements()), temp1);
+
+  // Test length == initializedLength.
+  Label done;
+  Address initLength(temp1, ObjectElements::offsetOfInitializedLength());
+  load32(Address(temp1, ObjectElements::offsetOfLength()), temp2);
+  branch32(Assembler::NotEqual, initLength, temp2, label);
+
+  // Test the NON_PACKED flag.
+  Address flags(temp1, ObjectElements::offsetOfFlags());
+  branchTest32(Assembler::NonZero, flags, Imm32(ObjectElements::NON_PACKED),
+               label);
+}
+
 void MacroAssembler::setIsPackedArray(Register obj, Register output,
                                       Register temp) {
   // Ensure it's an ArrayObject.
   Label notPackedArray;
   branchTestObjClass(Assembler::NotEqual, obj, &ArrayObject::class_, temp, obj,
                      &notPackedArray);
 
-  loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);
-
-  // Test length == initializedLength.
+  branchArrayIsNotPacked(obj, temp, output, &notPackedArray);
+
   Label done;
-  Address initLength(temp, ObjectElements::offsetOfInitializedLength());
-  load32(Address(temp, ObjectElements::offsetOfLength()), output);
-  branch32(Assembler::NotEqual, initLength, output, &notPackedArray);
-
-  // Test the NON_PACKED flag.
-  Address flags(temp, ObjectElements::offsetOfFlags());
-  branchTest32(Assembler::NonZero, flags, Imm32(ObjectElements::NON_PACKED),
-               &notPackedArray);
-
   move32(Imm32(1), output);
   jump(&done);
 
   bind(&notPackedArray);
   move32(Imm32(0), output);
 
   bind(&done);
 }
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -3608,16 +3608,19 @@ class MacroAssembler : public MacroAssem
                                const Address& dest);
 
   void memoryBarrierBefore(const Synchronization& sync);
   void memoryBarrierAfter(const Synchronization& sync);
 
   void debugAssertIsObject(const ValueOperand& val);
   void debugAssertObjHasFixedSlots(Register obj, Register scratch);
 
+  void branchArrayIsNotPacked(Register array, Register temp1, Register temp2,
+                              Label* label);
+
   void setIsPackedArray(Register obj, Register output, Register temp);
 
   void packedArrayPop(Register array, ValueOperand output, Register temp1,
                       Register temp2, Label* fail);
   void packedArrayShift(Register array, ValueOperand output, Register temp1,
                         Register temp2, LiveRegisterSet volatileRegs,
                         Label* fail);
 
--- a/js/src/jit/WarpCacheIRTranspiler.cpp
+++ b/js/src/jit/WarpCacheIRTranspiler.cpp
@@ -971,16 +971,29 @@ bool WarpCacheIRTranspiler::emitLoadInt3
 
   auto* length = MArrayLength::New(alloc(), elements);
   add(length);
 
   pushResult(length);
   return true;
 }
 
+bool WarpCacheIRTranspiler::emitLoadInt32ArrayLength(ObjOperandId objId,
+                                                     Int32OperandId resultId) {
+  MDefinition* obj = getOperand(objId);
+
+  auto* elements = MElements::New(alloc(), obj);
+  add(elements);
+
+  auto* length = MArrayLength::New(alloc(), elements);
+  add(length);
+
+  return defineOperand(resultId, length);
+}
+
 bool WarpCacheIRTranspiler::emitLoadTypedArrayLengthResult(
     ObjOperandId objId, uint32_t getterOffset) {
   MDefinition* obj = getOperand(objId);
 
   auto* length = MArrayBufferViewLength::New(alloc(), obj);
   add(length);
 
   pushResult(length);
@@ -1976,16 +1989,35 @@ bool WarpCacheIRTranspiler::emitPackedAr
   auto* ins = MArrayPopShift::New(alloc(), array, MArrayPopShift::Shift,
                                   needsHoleCheck, maybeUndefined);
   addEffectful(ins);
 
   pushResult(ins);
   return resumeAfter(ins);
 }
 
+bool WarpCacheIRTranspiler::emitPackedArraySliceResult(
+    uint32_t templateObjectOffset, ObjOperandId arrayId, Int32OperandId beginId,
+    Int32OperandId endId) {
+  JSObject* templateObj = tenuredObjectStubField(templateObjectOffset);
+
+  MDefinition* array = getOperand(arrayId);
+  MDefinition* begin = getOperand(beginId);
+  MDefinition* end = getOperand(endId);
+
+  // TODO: support pre-tenuring.
+  gc::InitialHeap heap = gc::DefaultHeap;
+
+  auto* ins = MArraySlice::New(alloc(), array, begin, end, templateObj, heap);
+  addEffectful(ins);
+
+  pushResult(ins);
+  return resumeAfter(ins);
+}
+
 bool WarpCacheIRTranspiler::emitHasClassResult(ObjOperandId objId,
                                                uint32_t claspOffset) {
   MDefinition* obj = getOperand(objId);
   const JSClass* clasp = classStubField(claspOffset);
 
   auto* hasClass = MHasClass::New(alloc(), obj, clasp);
   add(hasClass);