Bug 1176214 - Part 5: Ion changes. r=h4writer
author Lars T Hansen <lhansen@mozilla.com>
Mon, 30 Nov 2015 11:27:48 +0100
changeset 309419 36df961c4bdc55165397639f999cce8075af3326
parent 309418 97827dc3e6c4c374b03900feecdffa2dc9698609
child 309420 3b97e81dcbd768972a85bd4f8bb96d6f12ee3828
push id 5513
push user raliiev@mozilla.com
push date Mon, 25 Jan 2016 13:55:34 +0000
reviewers h4writer
bugs 1176214
milestone 45.0a1
js/src/jit/BaselineBailouts.cpp
js/src/jit/BaselineIC.cpp
js/src/jit/CodeGenerator.cpp
js/src/jit/CodeGenerator.h
js/src/jit/IonBuilder.cpp
js/src/jit/IonBuilder.h
js/src/jit/IonCaches.cpp
js/src/jit/IonCaches.h
js/src/jit/IonTypes.h
js/src/jit/Lowering.cpp
js/src/jit/Lowering.h
js/src/jit/MCallOptimize.cpp
js/src/jit/MIR.cpp
js/src/jit/MIR.h
js/src/jit/MOpcodes.h
js/src/jit/MacroAssembler.cpp
js/src/jit/SharedIC.cpp
js/src/jit/shared/LIR-shared.h
js/src/jit/shared/LOpcodes-shared.h
js/src/vm/ObjectGroup.cpp
js/src/vm/TypeInference.cpp
js/src/vm/TypeInference.h
js/src/vm/UnboxedObject.cpp
--- a/js/src/jit/BaselineBailouts.cpp
+++ b/js/src/jit/BaselineBailouts.cpp
@@ -1890,16 +1890,17 @@ jit::FinishBailoutToBaseline(BaselineBai
       case Bailout_NonInt32Input:
       case Bailout_NonNumericInput:
       case Bailout_NonBooleanInput:
       case Bailout_NonObjectInput:
       case Bailout_NonStringInput:
       case Bailout_NonSymbolInput:
       case Bailout_NonSimdInt32x4Input:
       case Bailout_NonSimdFloat32x4Input:
+      case Bailout_NonSharedTypedArrayInput:
       case Bailout_InitialState:
       case Bailout_Debugger:
       case Bailout_UninitializedThis:
       case Bailout_BadDerivedConstructorReturn:
         // Do nothing.
         break;
 
       case Bailout_FirstExecution:
--- a/js/src/jit/BaselineIC.cpp
+++ b/js/src/jit/BaselineIC.cpp
@@ -2350,17 +2350,17 @@ ICGetElem_UnboxedArray::Compiler::genera
 // GetElem_TypedArray
 //
 
 static void
 LoadTypedThingLength(MacroAssembler& masm, TypedThingLayout layout, Register obj, Register result)
 {
     switch (layout) {
       case Layout_TypedArray:
-        masm.unboxInt32(Address(obj, TypedArrayLayout::lengthOffset()), result);
+        masm.unboxInt32(Address(obj, TypedArrayObject::lengthOffset()), result);
         break;
       case Layout_OutlineTypedObject:
       case Layout_InlineTypedObject:
         masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), result);
         masm.loadPtr(Address(result, ObjectGroup::offsetOfAddendum()), result);
         masm.unboxInt32(Address(result, ArrayTypeDescr::offsetOfLength()), result);
         break;
       default:
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -5237,25 +5237,25 @@ CodeGenerator::visitSetArrayLength(LSetA
     masm.bumpKey(&newLength, -1);
 }
 
 void
 CodeGenerator::visitTypedArrayLength(LTypedArrayLength* lir)
 {
     Register obj = ToRegister(lir->object());
     Register out = ToRegister(lir->output());
-    masm.unboxInt32(Address(obj, TypedArrayLayout::lengthOffset()), out);
+    masm.unboxInt32(Address(obj, TypedArrayObject::lengthOffset()), out);
 }
 
 void
 CodeGenerator::visitTypedArrayElements(LTypedArrayElements* lir)
 {
     Register obj = ToRegister(lir->object());
     Register out = ToRegister(lir->output());
-    masm.loadPtr(Address(obj, TypedArrayLayout::dataOffset()), out);
+    masm.loadPtr(Address(obj, TypedArrayObject::dataOffset()), out);
 }
 
 void
 CodeGenerator::visitSetDisjointTypedElements(LSetDisjointTypedElements* lir)
 {
     Register target = ToRegister(lir->target());
     Register targetOffset = ToRegister(lir->targetOffset());
     Register source = ToRegister(lir->source());
@@ -7505,16 +7505,31 @@ void
 CodeGenerator::visitCallIteratorStart(LCallIteratorStart* lir)
 {
     pushArg(Imm32(lir->mir()->flags()));
     pushArg(ToRegister(lir->object()));
     callVM(GetIteratorObjectInfo, lir);
 }
 
 void
+CodeGenerator::branchIfNotEmptyObjectElements(Register obj, Label* target)
+{
+    Label emptyObj;
+    masm.branchPtr(Assembler::Equal,
+                   Address(obj, NativeObject::offsetOfElements()),
+                   ImmPtr(js::emptyObjectElements),
+                   &emptyObj);
+    masm.branchPtr(Assembler::NotEqual,
+                   Address(obj, NativeObject::offsetOfElements()),
+                   ImmPtr(js::emptyObjectElementsShared),
+                   target);
+    masm.bind(&emptyObj);
+}
+
+void
 CodeGenerator::visitIteratorStart(LIteratorStart* lir)
 {
     const Register obj = ToRegister(lir->object());
     const Register output = ToRegister(lir->output());
 
     uint32_t flags = lir->mir()->flags();
 
     OutOfLineCode* ool = oolCallVM(GetIteratorObjectInfo, lir,
@@ -7547,46 +7562,37 @@ CodeGenerator::visitIteratorStart(LItera
         Address groupAddr(temp2, offsetof(ReceiverGuard, group));
         Address shapeAddr(temp2, offsetof(ReceiverGuard, shape));
         Label guardDone, shapeMismatch, noExpando;
         masm.loadObjShape(obj, temp1);
         masm.branchPtr(Assembler::NotEqual, shapeAddr, temp1, &shapeMismatch);
 
         // Ensure the object does not have any elements. The presence of dense
         // elements is not captured by the shape tests above.
-        masm.branchPtr(Assembler::NotEqual,
-                       Address(obj, NativeObject::offsetOfElements()),
-                       ImmPtr(js::emptyObjectElements),
-                       ool->entry());
+        branchIfNotEmptyObjectElements(obj, ool->entry());
         masm.jump(&guardDone);
 
         masm.bind(&shapeMismatch);
         masm.loadObjGroup(obj, temp1);
         masm.branchPtr(Assembler::NotEqual, groupAddr, temp1, ool->entry());
         masm.loadPtr(Address(obj, UnboxedPlainObject::offsetOfExpando()), temp1);
         masm.branchTestPtr(Assembler::Zero, temp1, temp1, &noExpando);
-        masm.branchPtr(Assembler::NotEqual,
-                       Address(temp1, NativeObject::offsetOfElements()),
-                       ImmPtr(js::emptyObjectElements),
-                       ool->entry());
+        branchIfNotEmptyObjectElements(temp1, ool->entry());
         masm.loadObjShape(temp1, temp1);
         masm.bind(&noExpando);
         masm.branchPtr(Assembler::NotEqual, shapeAddr, temp1, ool->entry());
         masm.bind(&guardDone);
     }
 
     // Compare shape of object's prototype with the second shape. The prototype
     // must be native, as unboxed objects cannot be prototypes (they cannot
     // have the delegate flag set). Also check for the absence of dense elements.
     Address prototypeShapeAddr(temp2, sizeof(ReceiverGuard) + offsetof(ReceiverGuard, shape));
     masm.loadObjProto(obj, temp1);
-    masm.branchPtr(Assembler::NotEqual,
-                   Address(temp1, NativeObject::offsetOfElements()),
-                   ImmPtr(js::emptyObjectElements),
-                   ool->entry());
+    branchIfNotEmptyObjectElements(temp1, ool->entry());
     masm.loadObjShape(temp1, temp1);
     masm.branchPtr(Assembler::NotEqual, prototypeShapeAddr, temp1, ool->entry());
 
     // Ensure the object's prototype's prototype is nullptr. The last native
     // iterator will always have a prototype chain length of one (i.e. it must
     // be a plain object), so we do not need to generate a loop here.
     masm.loadObjProto(obj, temp1);
     masm.loadObjProto(temp1, temp1);
@@ -9308,27 +9314,27 @@ void
 CodeGenerator::visitLoadTypedArrayElementHole(LLoadTypedArrayElementHole* lir)
 {
     Register object = ToRegister(lir->object());
     const ValueOperand out = ToOutValue(lir);
 
     // Load the length.
     Register scratch = out.scratchReg();
     Int32Key key = ToInt32Key(lir->index());
-    masm.unboxInt32(Address(object, TypedArrayLayout::lengthOffset()), scratch);
+    masm.unboxInt32(Address(object, TypedArrayObject::lengthOffset()), scratch);
 
     // Load undefined unless length > key.
     Label inbounds, done;
     masm.branchKey(Assembler::Above, scratch, key, &inbounds);
     masm.moveValue(UndefinedValue(), out);
     masm.jump(&done);
 
     // Load the elements vector.
     masm.bind(&inbounds);
-    masm.loadPtr(Address(object, TypedArrayLayout::dataOffset()), scratch);
+    masm.loadPtr(Address(object, TypedArrayObject::dataOffset()), scratch);
 
     Scalar::Type arrayType = lir->mir()->arrayType();
     int width = Scalar::byteSize(arrayType);
 
     Label fail;
     if (key.isConstant()) {
         Address source(scratch, key.constant() * width);
         masm.loadFromTypedArray(arrayType, source, out, lir->mir()->allowDouble(),
@@ -9443,16 +9449,30 @@ CodeGenerator::visitAtomicIsLockFree(LAt
     masm.branch32(Assembler::Equal, value, Imm32(1), &Ldone);
     if (!AtomicOperations::isLockfree8())
         masm.bind(&Lfailed);
     masm.move32(Imm32(0), output);
     masm.bind(&Ldone);
 }
 
 void
+CodeGenerator::visitGuardSharedTypedArray(LGuardSharedTypedArray* guard)
+{
+    Register obj = ToRegister(guard->input());
+    Register tmp = ToRegister(guard->tempInt());
+
+    // The shared-memory flag is a bit in the ObjectElements header
+    // that is set if the TypedArray is mapping a SharedArrayBuffer.
+    // The flag is set at construction and does not change subsequently.
+    masm.loadPtr(Address(obj, TypedArrayObject::offsetOfElements()), tmp);
+    masm.load32(Address(tmp, ObjectElements::offsetOfFlags()), tmp);
+    bailoutTest32(Assembler::Zero, tmp, Imm32(ObjectElements::SHARED_MEMORY), guard->snapshot());
+}
+
+void
 CodeGenerator::visitClampIToUint8(LClampIToUint8* lir)
 {
     Register output = ToRegister(lir->output());
     MOZ_ASSERT(output == ToRegister(lir->input()));
     masm.clampIntToUint8(output);
 }
 
 void
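
The guard emitted by visitGuardSharedTypedArray above boils down to one flag test on the object's elements header. A minimal sketch of that predicate in plain C++, relying only on the ObjectElements::SHARED_MEMORY flag referenced in the generated code; the helper name is illustrative and not part of the patch:

static bool
FlagsIndicateSharedMemory(uint32_t elementsFlags)
{
    // The generated code loads this word from the object's elements pointer
    // at ObjectElements::offsetOfFlags(); the flag is set at construction
    // when the view maps a SharedArrayBuffer and never changes afterwards.
    return (elementsFlags & ObjectElements::SHARED_MEMORY) != 0;
}

Bailout_NonSharedTypedArrayInput corresponds to this predicate being false at run time.
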
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -288,16 +288,17 @@ class CodeGenerator : public CodeGenerat
     void visitArrayConcat(LArrayConcat* lir);
     void visitArraySlice(LArraySlice* lir);
     void visitArrayJoin(LArrayJoin* lir);
     void visitLoadUnboxedScalar(LLoadUnboxedScalar* lir);
     void visitLoadTypedArrayElementHole(LLoadTypedArrayElementHole* lir);
     void visitStoreUnboxedScalar(LStoreUnboxedScalar* lir);
     void visitStoreTypedArrayElementHole(LStoreTypedArrayElementHole* lir);
     void visitAtomicIsLockFree(LAtomicIsLockFree* lir);
+    void visitGuardSharedTypedArray(LGuardSharedTypedArray* lir);
     void visitClampIToUint8(LClampIToUint8* lir);
     void visitClampDToUint8(LClampDToUint8* lir);
     void visitClampVToUint8(LClampVToUint8* lir);
     void visitCallIteratorStart(LCallIteratorStart* lir);
     void visitIteratorStart(LIteratorStart* lir);
     void visitIteratorMore(LIteratorMore* lir);
     void visitIsNoIterAndBranch(LIsNoIterAndBranch* lir);
     void visitIteratorEnd(LIteratorEnd* lir);
@@ -462,16 +463,20 @@ class CodeGenerator : public CodeGenerat
     //
     // This method should be used when subsequent code can't be laid out in a
     // straight line; if it can, branchTest* should be used instead.
     void testObjectEmulatesUndefined(Register objreg,
                                      Label* ifEmulatesUndefined,
                                      Label* ifDoesntEmulateUndefined,
                                      Register scratch, OutOfLineTestObject* ool);
 
+    // Branch to target unless obj has an emptyObjectElements or emptyObjectElementsShared
+    // elements pointer.
+    void branchIfNotEmptyObjectElements(Register obj, Label* target);
+
     // Get a label for the start of block which can be used for jumping, in
     // place of jumpToBlock.
     Label* getJumpLabelForBranch(MBasicBlock* block);
 
     void emitStoreElementTyped(const LAllocation* value, MIRType valueType, MIRType elementType,
                                Register elements, const LAllocation* index,
                                int32_t offsetAdjustment);
 
--- a/js/src/jit/IonBuilder.cpp
+++ b/js/src/jit/IonBuilder.cpp
@@ -13569,16 +13569,24 @@ IonBuilder::addGuardReceiverPolymorphic(
     for (size_t i = 0; i < receivers.length(); i++) {
         if (!guard->addReceiver(receivers[i]))
             return nullptr;
     }
 
     return guard;
 }
 
+MInstruction*
+IonBuilder::addSharedTypedArrayGuard(MDefinition* obj)
+{
+    MGuardSharedTypedArray* guard = MGuardSharedTypedArray::New(alloc(), obj);
+    current->add(guard);
+    return guard;
+}
+
 TemporaryTypeSet*
 IonBuilder::bytecodeTypes(jsbytecode* pc)
 {
     return TypeScript::BytecodeTypes(script(), pc, bytecodeTypeMap, &typeArrayHint, typeArray);
 }
 
 TypedObjectPrediction
 IonBuilder::typedObjectPrediction(MDefinition* typedObj)
--- a/js/src/jit/IonBuilder.h
+++ b/js/src/jit/IonBuilder.h
@@ -394,16 +394,17 @@ class IonBuilder
     MDefinition* walkScopeChain(unsigned hops);
 
     MInstruction* addConvertElementsToDoubles(MDefinition* elements);
     MDefinition* addMaybeCopyElementsForWrite(MDefinition* object, bool checkNative);
     MInstruction* addBoundsCheck(MDefinition* index, MDefinition* length);
     MInstruction* addShapeGuard(MDefinition* obj, Shape* const shape, BailoutKind bailoutKind);
     MInstruction* addGroupGuard(MDefinition* obj, ObjectGroup* group, BailoutKind bailoutKind);
     MInstruction* addUnboxedExpandoGuard(MDefinition* obj, bool hasExpando, BailoutKind bailoutKind);
+    MInstruction* addSharedTypedArrayGuard(MDefinition* obj);
 
     MInstruction*
     addGuardReceiverPolymorphic(MDefinition* obj, const BaselineInspector::ReceiverVector& receivers);
 
     MDefinition* convertShiftToMaskForStaticTypedArray(MDefinition* id,
                                                        Scalar::Type viewType);
 
     bool invalidatedIdempotentCache();
@@ -926,16 +927,17 @@ class IonBuilder
                                    MBasicBlock** fallbackTarget);
 
     enum AtomicCheckResult {
         DontCheckAtomicResult,
         DoCheckAtomicResult
     };
 
     bool atomicsMeetsPreconditions(CallInfo& callInfo, Scalar::Type* arrayElementType,
+                                   bool* requiresDynamicCheck,
                                    AtomicCheckResult checkResult=DoCheckAtomicResult);
     void atomicsCheckBounds(CallInfo& callInfo, MInstruction** elements, MDefinition** index);
 
     bool testNeedsArgumentCheck(JSFunction* target, CallInfo& callInfo);
 
     MCall* makeCallHelper(JSFunction* target, CallInfo& callInfo);
     bool makeCall(JSFunction* target, CallInfo& callInfo);
 
--- a/js/src/jit/IonCaches.cpp
+++ b/js/src/jit/IonCaches.cpp
@@ -32,17 +32,16 @@
 #include "vm/Shape-inl.h"
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::tl::FloorLog2;
 
 typedef Rooted<TypedArrayObject*> RootedTypedArrayObject;
-typedef Rooted<SharedTypedArrayObject*> RootedSharedTypedArrayObject;
 
 void
 CodeLocationJump::repoint(JitCode* code, MacroAssembler* masm)
 {
     MOZ_ASSERT(state_ == Relative);
     size_t new_off = (size_t)raw_;
 #ifdef JS_SMALL_BRANCH
     size_t jumpTableEntryOffset = reinterpret_cast<size_t>(jumpTableEntry_);
@@ -1203,38 +1202,37 @@ GenerateUnboxedArrayLength(JSContext* cx
     attacher.jumpNextStub(masm);
 }
 
 // In this case, the code for TypedArray and SharedTypedArray is not the same,
 // because the code embeds pointers to the respective class arrays.  Code that
 // caches the stub code must distinguish between the two cases.
 static void
 GenerateTypedArrayLength(JSContext* cx, MacroAssembler& masm, IonCache::StubAttacher& attacher,
-                         const TypedArrayLayout& layout, Register object, TypedOrValueRegister output,
-                         Label* failures)
+                         Register object, TypedOrValueRegister output, Label* failures)
 {
     Register tmpReg;
     if (output.hasValue()) {
         tmpReg = output.valueReg().scratchReg();
     } else {
         MOZ_ASSERT(output.type() == MIRType_Int32);
         tmpReg = output.typedReg().gpr();
     }
     MOZ_ASSERT(object != tmpReg);
 
     // Implement the negated version of JSObject::isTypedArray predicate.
     masm.loadObjClass(object, tmpReg);
-    masm.branchPtr(Assembler::Below, tmpReg, ImmPtr(layout.addressOfFirstClass()),
+    masm.branchPtr(Assembler::Below, tmpReg, ImmPtr(&TypedArrayObject::classes[0]),
                    failures);
     masm.branchPtr(Assembler::AboveOrEqual, tmpReg,
-                   ImmPtr(layout.addressOfMaxClass()),
+                   ImmPtr(&TypedArrayObject::classes[Scalar::MaxTypedArrayViewType]),
                    failures);
 
     // Load length.
-    masm.loadTypedOrValue(Address(object, TypedArrayLayout::lengthOffset()), output);
+    masm.loadTypedOrValue(Address(object, TypedArrayObject::lengthOffset()), output);
 
     /* Success. */
     attacher.jumpRejoin(masm);
 
     /* Failure. */
     masm.bind(failures);
     attacher.jumpNextStub(masm);
 }
@@ -1619,17 +1617,17 @@ GetPropertyIC::tryAttachTypedArrayLength
     MOZ_ASSERT(!*emitted);
 
     if (!IsAnyTypedArray(obj))
         return true;
 
     if (!JSID_IS_ATOM(id, cx->names().length))
         return true;
 
-    if (hasAnyTypedArrayLengthStub(obj))
+    if (hasTypedArrayLengthStub(obj))
         return true;
 
     if (output().type() != MIRType_Value && output().type() != MIRType_Int32) {
         // The next execution should cause an invalidation because the type
         // does not fit.
         return true;
     }
 
@@ -1639,18 +1637,17 @@ GetPropertyIC::tryAttachTypedArrayLength
     *emitted = true;
 
     MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
     StubAttacher attacher(*this);
 
     Label failures;
     emitIdGuard(masm, id, &failures);
 
-    GenerateTypedArrayLength(cx, masm, attacher, AnyTypedArrayLayout(obj), object(), output(),
-                             &failures);
+    GenerateTypedArrayLength(cx, masm, attacher, object(), output(), &failures);
 
     setHasTypedArrayLengthStub(obj);
     return linkAndAttachStub(cx, masm, attacher, ion, "typed array length",
                              JS::TrackedOutcome::ICGetPropStub_TypedArrayLength);
 }
 
 static void
 PushObjectOpResult(MacroAssembler& masm)
@@ -2282,17 +2279,16 @@ GetPropertyIC::update(JSContext* cx, Han
     return true;
 }
 
 void
 GetPropertyIC::reset(ReprotectCode reprotect)
 {
     IonCache::reset(reprotect);
     hasTypedArrayLengthStub_ = false;
-    hasSharedTypedArrayLengthStub_ = false;
     hasMappedArgumentsLengthStub_ = false;
     hasUnmappedArgumentsLengthStub_ = false;
     hasMappedArgumentsElementStub_ = false;
     hasUnmappedArgumentsElementStub_ = false;
     hasGenericProxyStub_ = false;
     hasDenseStub_ = false;
 }
 
@@ -4088,25 +4084,25 @@ GenerateGetTypedOrUnboxedArrayElement(JS
             indexReg = index.reg().typedReg().gpr();
         }
     }
 
     Label popObjectAndFail;
 
     if (IsAnyTypedArray(array)) {
         // Guard on the initialized length.
-        Address length(object, TypedArrayLayout::lengthOffset());
+        Address length(object, TypedArrayObject::lengthOffset());
         masm.branch32(Assembler::BelowOrEqual, length, indexReg, &failures);
 
         // Save the object register on the stack in case of failure.
         Register elementReg = object;
         masm.push(object);
 
         // Load elements vector.
-        masm.loadPtr(Address(object, TypedArrayLayout::dataOffset()), elementReg);
+        masm.loadPtr(Address(object, TypedArrayObject::dataOffset()), elementReg);
 
         // Load the value. We use an invalid register because the destination
         // register is necessarily a non-double register.
         Scalar::Type arrayType = AnyTypedArrayType(array);
         int width = Scalar::byteSize(arrayType);
         BaseIndex source(elementReg, indexReg, ScaleFromElemWidth(width));
         if (output.hasValue()) {
             masm.loadFromTypedArray(arrayType, source, output.valueReg(), allowDoubleResult,
@@ -4537,23 +4533,23 @@ GenerateSetTypedArrayElement(JSContext* 
 
         indexReg = masm.extractInt32(val, tempUnbox);
     } else {
         MOZ_ASSERT(!index.typedReg().isFloat());
         indexReg = index.typedReg().gpr();
     }
 
     // Guard on the length.
-    Address length(object, TypedArrayLayout::lengthOffset());
+    Address length(object, TypedArrayObject::lengthOffset());
     masm.unboxInt32(length, temp);
     masm.branch32(Assembler::BelowOrEqual, temp, indexReg, &done);
 
     // Load the elements vector.
     Register elements = temp;
-    masm.loadPtr(Address(object, TypedArrayLayout::dataOffset()), elements);
+    masm.loadPtr(Address(object, TypedArrayObject::dataOffset()), elements);
 
     // Set the value.
     Scalar::Type arrayType = AnyTypedArrayType(tarr);
     int width = Scalar::byteSize(arrayType);
     BaseIndex target(elements, indexReg, ScaleFromElemWidth(width));
 
     if (arrayType == Scalar::Float32) {
         MOZ_ASSERT_IF(hasUnaliasedDouble(), tempFloat32 != InvalidFloatReg);
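
With SharedTypedArrayObject gone, GenerateTypedArrayLength keys its class check directly off TypedArrayObject::classes. A minimal sketch of the predicate that the Below/AboveOrEqual branch pair above implements, assuming (as the branch bounds themselves do) that the classes array is indexed contiguously by Scalar::Type; the helper name is illustrative:

static bool
ClassIsTypedArray(const Class* clasp)
{
    // The stub's failure paths fire when the class pointer falls outside the
    // TypedArrayObject::classes array; success is the half-open range test
    // below, matching IsTypedArrayClass.
    return clasp >= &TypedArrayObject::classes[0] &&
           clasp < &TypedArrayObject::classes[Scalar::MaxTypedArrayViewType];
}
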
--- a/js/src/jit/IonCaches.h
+++ b/js/src/jit/IonCaches.h
@@ -390,17 +390,16 @@ class GetPropertyIC : public IonCache
     size_t numLocations_;
 
     static const size_t MAX_FAILED_UPDATES = 16;
     uint16_t failedUpdates_;
 
     bool monitoredResult_ : 1;
     bool allowDoubleResult_ : 1;
     bool hasTypedArrayLengthStub_ : 1;
-    bool hasSharedTypedArrayLengthStub_ : 1;
     bool hasMappedArgumentsLengthStub_ : 1;
     bool hasUnmappedArgumentsLengthStub_ : 1;
     bool hasMappedArgumentsElementStub_ : 1;
     bool hasUnmappedArgumentsElementStub_ : 1;
     bool hasGenericProxyStub_ : 1;
     bool hasDenseStub_ : 1;
 
     void emitIdGuard(MacroAssembler& masm, jsid id, Label* fail);
@@ -415,17 +414,16 @@ class GetPropertyIC : public IonCache
         id_(id),
         output_(output),
         locationsIndex_(0),
         numLocations_(0),
         failedUpdates_(0),
         monitoredResult_(monitoredResult),
         allowDoubleResult_(allowDoubleResult),
         hasTypedArrayLengthStub_(false),
-        hasSharedTypedArrayLengthStub_(false),
         hasMappedArgumentsLengthStub_(false),
         hasUnmappedArgumentsLengthStub_(false),
         hasMappedArgumentsElementStub_(false),
         hasUnmappedArgumentsElementStub_(false),
         hasGenericProxyStub_(false),
         hasDenseStub_(false)
     {
     }
@@ -441,18 +439,18 @@ class GetPropertyIC : public IonCache
         return id_;
     }
     TypedOrValueRegister output() const {
         return output_;
     }
     bool monitoredResult() const {
         return monitoredResult_;
     }
-    bool hasAnyTypedArrayLengthStub(HandleObject obj) const {
-        return obj->is<TypedArrayObject>() ? hasTypedArrayLengthStub_ : hasSharedTypedArrayLengthStub_;
+    bool hasTypedArrayLengthStub(HandleObject obj) const {
+        return hasTypedArrayLengthStub_;
     }
     bool hasArgumentsLengthStub(bool mapped) const {
         return mapped ? hasMappedArgumentsLengthStub_ : hasUnmappedArgumentsLengthStub_;
     }
     bool hasArgumentsElementStub(bool mapped) const {
         return mapped ? hasMappedArgumentsElementStub_ : hasUnmappedArgumentsElementStub_;
     }
     bool hasGenericProxyStub() const {
@@ -463,23 +461,19 @@ class GetPropertyIC : public IonCache
         return hasDenseStub_;
     }
     void setHasDenseStub() {
         MOZ_ASSERT(!hasDenseStub());
         hasDenseStub_ = true;
     }
 
     void setHasTypedArrayLengthStub(HandleObject obj) {
-        if (obj->is<TypedArrayObject>()) {
-            MOZ_ASSERT(!hasTypedArrayLengthStub_);
-            hasTypedArrayLengthStub_ = true;
-        } else {
-            MOZ_ASSERT(!hasSharedTypedArrayLengthStub_);
-            hasSharedTypedArrayLengthStub_ = true;
-        }
+        MOZ_ASSERT(obj->is<TypedArrayObject>());
+        MOZ_ASSERT(!hasTypedArrayLengthStub_);
+        hasTypedArrayLengthStub_ = true;
     }
 
     void setLocationInfo(size_t locationsIndex, size_t numLocations) {
         MOZ_ASSERT(idempotent());
         MOZ_ASSERT(!numLocations_);
         MOZ_ASSERT(numLocations);
         locationsIndex_ = locationsIndex;
         numLocations_ = numLocations;
--- a/js/src/jit/IonTypes.h
+++ b/js/src/jit/IonTypes.h
@@ -100,16 +100,20 @@ enum BailoutKind
     Bailout_NonObjectInput,
     Bailout_NonStringInput,
     Bailout_NonSymbolInput,
 
     // SIMD Unbox expects a given type, bails out if it doesn't match.
     Bailout_NonSimdInt32x4Input,
     Bailout_NonSimdFloat32x4Input,
 
+    // Atomic operations require shared memory; bail out if the typed array
+    // maps unshared memory.
+    Bailout_NonSharedTypedArrayInput,
+
     // For the initial snapshot when entering a function.
     Bailout_InitialState,
 
     // We hit a |debugger;| statement.
     Bailout_Debugger,
 
     // |this| used uninitialized in a derived constructor
     Bailout_UninitializedThis,
@@ -208,16 +212,18 @@ BailoutKindString(BailoutKind kind)
       case Bailout_NonStringInput:
         return "Bailout_NonStringInput";
       case Bailout_NonSymbolInput:
         return "Bailout_NonSymbolInput";
       case Bailout_NonSimdInt32x4Input:
         return "Bailout_NonSimdInt32x4Input";
       case Bailout_NonSimdFloat32x4Input:
         return "Bailout_NonSimdFloat32x4Input";
+      case Bailout_NonSharedTypedArrayInput:
+        return "Bailout_NonSharedTypedArrayInput";
       case Bailout_InitialState:
         return "Bailout_InitialState";
       case Bailout_Debugger:
         return "Bailout_Debugger";
       case Bailout_UninitializedThis:
         return "Bailout_UninitializedThis";
       case Bailout_BadDerivedConstructorReturn:
         return "Bailout_BadDerivedConstructorReturn";
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -3395,16 +3395,26 @@ LIRGenerator::visitGuardString(MGuardStr
 {
     // The type policy does all the work, so at this point the input
     // is guaranteed to be a string.
     MOZ_ASSERT(ins->input()->type() == MIRType_String);
     redefine(ins, ins->input());
 }
 
 void
+LIRGenerator::visitGuardSharedTypedArray(MGuardSharedTypedArray* ins)
+{
+    MOZ_ASSERT(ins->input()->type() == MIRType_Object);
+    LGuardSharedTypedArray* guard =
+        new(alloc()) LGuardSharedTypedArray(useRegister(ins->obj()), temp());
+    assignSnapshot(guard, Bailout_NonSharedTypedArrayInput);
+    add(guard, ins);
+}
+
+void
 LIRGenerator::visitPolyInlineGuard(MPolyInlineGuard* ins)
 {
     MOZ_ASSERT(ins->input()->type() == MIRType_Object);
     redefine(ins, ins->input());
 }
 
 void
 LIRGenerator::visitGuardReceiverPolymorphic(MGuardReceiverPolymorphic* ins)
--- a/js/src/jit/Lowering.h
+++ b/js/src/jit/Lowering.h
@@ -302,15 +302,16 @@ class LIRGenerator : public LIRGenerator
     void visitUnknownValue(MUnknownValue* ins);
     void visitLexicalCheck(MLexicalCheck* ins);
     void visitThrowRuntimeLexicalError(MThrowRuntimeLexicalError* ins);
     void visitGlobalNameConflictsCheck(MGlobalNameConflictsCheck* ins);
     void visitDebugger(MDebugger* ins);
     void visitNewTarget(MNewTarget* ins);
     void visitArrowNewTarget(MArrowNewTarget* ins);
     void visitAtomicIsLockFree(MAtomicIsLockFree* ins);
+    void visitGuardSharedTypedArray(MGuardSharedTypedArray* ins);
     void visitCheckReturn(MCheckReturn* ins);
 };
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_Lowering_h */
--- a/js/src/jit/MCallOptimize.cpp
+++ b/js/src/jit/MCallOptimize.cpp
@@ -301,40 +301,28 @@ IonBuilder::inlineNativeGetter(CallInfo&
     JSNative native = target->native();
 
     if (!optimizationInfo().inlineNative())
         return InliningStatus_NotInlined;
 
     TemporaryTypeSet* thisTypes = callInfo.thisArg()->resultTypeSet();
     MOZ_ASSERT(callInfo.argc() == 0);
 
-    // Try to optimize typed array lengths. There is one getter on
-    // %TypedArray%.prototype for typed arrays and one getter on
-    // SharedTypedArray.prototype for shared typed arrays.  Make sure we're
-    // accessing the right one for the type of the instance object.
+    // Try to optimize typed array lengths.
     if (thisTypes) {
         Scalar::Type type;
 
         type = thisTypes->getTypedArrayType(constraints());
         if (type != Scalar::MaxTypedArrayViewType &&
             TypedArrayObject::isOriginalLengthGetter(native))
         {
             MInstruction* length = addTypedArrayLength(callInfo.thisArg());
             current->push(length);
             return InliningStatus_Inlined;
         }
-
-        type = thisTypes->getSharedTypedArrayType(constraints());
-        if (type != Scalar::MaxTypedArrayViewType &&
-            SharedTypedArrayObject::isOriginalLengthGetter(type, native))
-        {
-            MInstruction* length = addTypedArrayLength(callInfo.thisArg());
-            current->push(length);
-            return InliningStatus_Inlined;
-        }
     }
 
     return InliningStatus_NotInlined;
 }
 
 IonBuilder::InliningStatus
 IonBuilder::inlineNonFunctionCall(CallInfo& callInfo, JSObject* target)
 {
@@ -2699,25 +2687,29 @@ IonBuilder::inlineAtomicsCompareExchange
     if (oldval->mightBeType(MIRType_Object) || oldval->mightBeType(MIRType_Symbol))
         return InliningStatus_NotInlined;
 
     MDefinition* newval = callInfo.getArg(3);
     if (newval->mightBeType(MIRType_Object) || newval->mightBeType(MIRType_Symbol))
         return InliningStatus_NotInlined;
 
     Scalar::Type arrayType;
-    if (!atomicsMeetsPreconditions(callInfo, &arrayType))
+    bool requiresCheck = false;
+    if (!atomicsMeetsPreconditions(callInfo, &arrayType, &requiresCheck))
         return InliningStatus_NotInlined;
 
     callInfo.setImplicitlyUsedUnchecked();
 
     MInstruction* elements;
     MDefinition* index;
     atomicsCheckBounds(callInfo, &elements, &index);
 
+    if (requiresCheck)
+        addSharedTypedArrayGuard(callInfo.getArg(0));
+
     MCompareExchangeTypedArrayElement* cas =
         MCompareExchangeTypedArrayElement::New(alloc(), elements, index, arrayType, oldval, newval);
     cas->setResultType(getInlineReturnType());
     current->add(cas);
     current->push(cas);
 
     if (!resumeAfter(cas))
         return InliningStatus_Error;
@@ -2733,25 +2725,29 @@ IonBuilder::inlineAtomicsExchange(CallIn
         return InliningStatus_NotInlined;
     }
 
     MDefinition* value = callInfo.getArg(2);
     if (value->mightBeType(MIRType_Object) || value->mightBeType(MIRType_Symbol))
         return InliningStatus_NotInlined;
 
     Scalar::Type arrayType;
-    if (!atomicsMeetsPreconditions(callInfo, &arrayType))
+    bool requiresCheck = false;
+    if (!atomicsMeetsPreconditions(callInfo, &arrayType, &requiresCheck))
         return InliningStatus_NotInlined;
 
     callInfo.setImplicitlyUsedUnchecked();
 
     MInstruction* elements;
     MDefinition* index;
     atomicsCheckBounds(callInfo, &elements, &index);
 
+    if (requiresCheck)
+        addSharedTypedArrayGuard(callInfo.getArg(0));
+
     MInstruction* exchange =
         MAtomicExchangeTypedArrayElement::New(alloc(), elements, index, value, arrayType);
     exchange->setResultType(getInlineReturnType());
     current->add(exchange);
     current->push(exchange);
 
     if (!resumeAfter(exchange))
         return InliningStatus_Error;
@@ -2763,25 +2759,29 @@ IonBuilder::InliningStatus
 IonBuilder::inlineAtomicsLoad(CallInfo& callInfo)
 {
     if (callInfo.argc() != 2 || callInfo.constructing()) {
         trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
         return InliningStatus_NotInlined;
     }
 
     Scalar::Type arrayType;
-    if (!atomicsMeetsPreconditions(callInfo, &arrayType))
+    bool requiresCheck = false;
+    if (!atomicsMeetsPreconditions(callInfo, &arrayType, &requiresCheck))
         return InliningStatus_NotInlined;
 
     callInfo.setImplicitlyUsedUnchecked();
 
     MInstruction* elements;
     MDefinition* index;
     atomicsCheckBounds(callInfo, &elements, &index);
 
+    if (requiresCheck)
+        addSharedTypedArrayGuard(callInfo.getArg(0));
+
     MLoadUnboxedScalar* load =
         MLoadUnboxedScalar::New(alloc(), elements, index, arrayType,
                                 DoesRequireMemoryBarrier);
     load->setResultType(getInlineReturnType());
     current->add(load);
     current->push(load);
 
     // Loads are considered effectful (they execute a memory barrier).
@@ -2799,25 +2799,29 @@ IonBuilder::inlineAtomicsStore(CallInfo&
         return InliningStatus_NotInlined;
     }
 
     MDefinition* value = callInfo.getArg(2);
     if (value->mightBeType(MIRType_Object) || value->mightBeType(MIRType_Symbol))
         return InliningStatus_NotInlined;
 
     Scalar::Type arrayType;
-    if (!atomicsMeetsPreconditions(callInfo, &arrayType, DontCheckAtomicResult))
+    bool requiresCheck = false;
+    if (!atomicsMeetsPreconditions(callInfo, &arrayType, &requiresCheck, DontCheckAtomicResult))
         return InliningStatus_NotInlined;
 
     callInfo.setImplicitlyUsedUnchecked();
 
     MInstruction* elements;
     MDefinition* index;
     atomicsCheckBounds(callInfo, &elements, &index);
 
+    if (requiresCheck)
+        addSharedTypedArrayGuard(callInfo.getArg(0));
+
     MDefinition* toWrite = value;
     if (value->type() != MIRType_Int32) {
         toWrite = MTruncateToInt32::New(alloc(), value);
         current->add(toWrite->toInstruction());
     }
     MStoreUnboxedScalar* store =
         MStoreUnboxedScalar::New(alloc(), elements, index, toWrite, arrayType,
                                  MStoreUnboxedScalar::TruncateInput, DoesRequireMemoryBarrier);
@@ -2862,21 +2866,25 @@ IonBuilder::inlineAtomicsBinop(CallInfo&
         return InliningStatus_NotInlined;
     }
 
     MDefinition* value = callInfo.getArg(2);
     if (value->mightBeType(MIRType_Object) || value->mightBeType(MIRType_Symbol))
         return InliningStatus_NotInlined;
 
     Scalar::Type arrayType;
-    if (!atomicsMeetsPreconditions(callInfo, &arrayType))
+    bool requiresCheck = false;
+    if (!atomicsMeetsPreconditions(callInfo, &arrayType, &requiresCheck))
         return InliningStatus_NotInlined;
 
     callInfo.setImplicitlyUsedUnchecked();
 
+    if (requiresCheck)
+        addSharedTypedArrayGuard(callInfo.getArg(0));
+
     MInstruction* elements;
     MDefinition* index;
     atomicsCheckBounds(callInfo, &elements, &index);
 
     AtomicOp k = AtomicFetchAddOp;
     switch (target) {
       case InlinableNative::AtomicsAdd:
         k = AtomicFetchAddOp;
@@ -2924,38 +2932,41 @@ IonBuilder::inlineAtomicsIsLockFree(Call
     current->add(ilf);
     current->push(ilf);
 
     return InliningStatus_Inlined;
 }
 
 bool
 IonBuilder::atomicsMeetsPreconditions(CallInfo& callInfo, Scalar::Type* arrayType,
-                                      AtomicCheckResult checkResult)
+                                      bool* requiresTagCheck, AtomicCheckResult checkResult)
 {
     if (!JitSupportsAtomics())
         return false;
 
     if (callInfo.getArg(0)->type() != MIRType_Object)
         return false;
 
     if (callInfo.getArg(1)->type() != MIRType_Int32)
         return false;
 
-    // Ensure that the first argument is a valid SharedTypedArray.
+    // Ensure that the first argument is a TypedArray that maps shared
+    // memory.
     //
     // Then check both that the element type is something we can
     // optimize and that the return type is suitable for that element
     // type.
 
     TemporaryTypeSet* arg0Types = callInfo.getArg(0)->resultTypeSet();
     if (!arg0Types)
         return false;
 
-    *arrayType = arg0Types->getSharedTypedArrayType(constraints());
+    TemporaryTypeSet::TypedArraySharedness sharedness;
+    *arrayType = arg0Types->getTypedArrayType(constraints(), &sharedness);
+    *requiresTagCheck = sharedness != TemporaryTypeSet::KnownShared;
     switch (*arrayType) {
       case Scalar::Int8:
       case Scalar::Uint8:
       case Scalar::Int16:
       case Scalar::Uint16:
       case Scalar::Int32:
         return checkResult == DontCheckAtomicResult || getInlineReturnType() == MIRType_Int32;
       case Scalar::Uint32:
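
The requiresTagCheck out-parameter added to atomicsMeetsPreconditions reduces to one comparison against the sharedness reported by type inference. A minimal sketch of that decision, using the names introduced by this patch; the helper itself is illustrative:

static bool
NeedsSharedMemoryGuard(TemporaryTypeSet::TypedArraySharedness sharedness)
{
    // Only a type set proven to be shared lets Ion skip the runtime check;
    // UnknownSharedness and KnownUnshared both make the inlined atomic emit
    // an MGuardSharedTypedArray, which bails out with
    // Bailout_NonSharedTypedArrayInput when the array is not shared.
    return sharedness != TemporaryTypeSet::KnownShared;
}

Because getTypedArraySharedness currently always reports UnknownSharedness (see TypeInference.cpp below), every inlined atomic carries the guard for now.
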
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -4957,19 +4957,16 @@ jit::ElementAccessIsAnyTypedArray(Compil
     if (id->type() != MIRType_Int32 && id->type() != MIRType_Double)
         return false;
 
     TemporaryTypeSet* types = obj->resultTypeSet();
     if (!types)
         return false;
 
     *arrayType = types->getTypedArrayType(constraints);
-    if (*arrayType != Scalar::MaxTypedArrayViewType)
-        return true;
-    *arrayType = types->getSharedTypedArrayType(constraints);
     return *arrayType != Scalar::MaxTypedArrayViewType;
 }
 
 bool
 jit::ElementAccessIsPacked(CompilerConstraintList* constraints, MDefinition* obj)
 {
     TemporaryTypeSet* types = obj->resultTypeSet();
     return types && !types->hasObjectFlags(constraints, OBJECT_FLAG_NON_PACKED);
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -13133,16 +13133,44 @@ class MAtomicIsLockFree
     bool writeRecoverData(CompactBufferWriter& writer) const override;
     bool canRecoverOnBailout() const override {
         return true;
     }
 
     ALLOW_CLONE(MAtomicIsLockFree)
 };
 
+// This applies to an object that is known to be a TypedArray; it bails out
+// if the object does not map a SharedArrayBuffer.
+
+class MGuardSharedTypedArray
+  : public MUnaryInstruction,
+    public SingleObjectPolicy::Data
+{
+    explicit MGuardSharedTypedArray(MDefinition* obj)
+      : MUnaryInstruction(obj)
+    {
+        setGuard();
+        setMovable();
+    }
+
+public:
+    INSTRUCTION_HEADER(GuardSharedTypedArray)
+
+    static MGuardSharedTypedArray* New(TempAllocator& alloc, MDefinition* obj) {
+        return new(alloc) MGuardSharedTypedArray(obj);
+    }
+    MDefinition* obj() const {
+        return getOperand(0);
+    }
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+};
+
 class MCompareExchangeTypedArrayElement
   : public MAryInstruction<4>,
     public Mix4Policy<ObjectPolicy<0>, IntPolicy<1>, TruncateToInt32Policy<2>, TruncateToInt32Policy<3>>::Data
 {
     Scalar::Type arrayType_;
 
     explicit MCompareExchangeTypedArrayElement(MDefinition* elements, MDefinition* index,
                                                Scalar::Type arrayType, MDefinition* oldval,
--- a/js/src/jit/MOpcodes.h
+++ b/js/src/jit/MOpcodes.h
@@ -203,16 +203,17 @@ namespace jit {
     _(ArrayConcat)                                                          \
     _(ArraySlice)                                                           \
     _(ArrayJoin)                                                            \
     _(LoadTypedArrayElementHole)                                            \
     _(LoadTypedArrayElementStatic)                                          \
     _(StoreTypedArrayElementHole)                                           \
     _(StoreTypedArrayElementStatic)                                         \
     _(AtomicIsLockFree)                                                     \
+    _(GuardSharedTypedArray)                                                \
     _(CompareExchangeTypedArrayElement)                                     \
     _(AtomicExchangeTypedArrayElement)                                      \
     _(AtomicTypedArrayElementBinop)                                         \
     _(EffectiveAddress)                                                     \
     _(ClampToUint8)                                                         \
     _(LoadFixedSlot)                                                        \
     _(LoadFixedSlotAndUnbox)                                                \
     _(StoreFixedSlot)                                                       \
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -1095,16 +1095,20 @@ MacroAssembler::initGCThing(Register obj
             store32(Imm32(ntemplate->as<ArrayObject>().length()),
                     Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
             store32(Imm32(convertDoubleElements
                           ? ObjectElements::CONVERT_DOUBLE_ELEMENTS
                           : 0),
                     Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
             MOZ_ASSERT(!ntemplate->hasPrivate());
         } else {
+            // If the target type could be a TypedArray that maps shared memory,
+            // this would need to store emptyObjectElementsShared instead.
+            // That cannot happen at present; TypedArray allocation is always
+            // a VM call.
             storePtr(ImmPtr(emptyObjectElements), Address(obj, NativeObject::offsetOfElements()));
 
             initGCSlots(obj, temp, ntemplate, initContents);
 
             if (ntemplate->hasPrivate()) {
                 uint32_t nfixed = ntemplate->numFixedSlots();
                 storePtr(ImmPtr(ntemplate->getPrivate()),
                          Address(obj, NativeObject::getPrivateDataOffset(nfixed)));
--- a/js/src/jit/SharedIC.cpp
+++ b/js/src/jit/SharedIC.cpp
@@ -4231,17 +4231,17 @@ CheckForNeuteredTypedObject(JSContext* c
     masm.branch32(Assembler::NotEqual, AbsoluteAddress(address), Imm32(0), failure);
 }
 
 void
 LoadTypedThingData(MacroAssembler& masm, TypedThingLayout layout, Register obj, Register result)
 {
     switch (layout) {
       case Layout_TypedArray:
-        masm.loadPtr(Address(obj, TypedArrayLayout::dataOffset()), result);
+        masm.loadPtr(Address(obj, TypedArrayObject::dataOffset()), result);
         break;
       case Layout_OutlineTypedObject:
         masm.loadPtr(Address(obj, OutlineTypedObject::offsetOfData()), result);
         break;
       case Layout_InlineTypedObject:
         masm.computeEffectiveAddress(Address(obj, InlineTypedObject::offsetOfDataStart()), result);
         break;
       default:
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -6518,16 +6518,34 @@ class LGuardClass : public LInstructionH
     const MGuardClass* mir() const {
         return mir_->toGuardClass();
     }
     const LDefinition* tempInt() {
         return getTemp(0);
     }
 };
 
+// Guard that a TypedArray's memory is shared; bail out if it is not.
+class LGuardSharedTypedArray : public LInstructionHelper<0, 1, 1>
+{
+  public:
+    LIR_HEADER(GuardSharedTypedArray)
+
+    LGuardSharedTypedArray(const LAllocation& in, const LDefinition& temp) {
+        setOperand(0, in);
+        setTemp(0, temp);
+    }
+    const MGuardSharedTypedArray* mir() const {
+        return mir_->toGuardSharedTypedArray();
+    }
+    const LDefinition* tempInt() {
+        return getTemp(0);
+    }
+};
+
 class LIn : public LCallInstructionHelper<1, BOX_PIECES+1, 0>
 {
   public:
     LIR_HEADER(In)
     explicit LIn(const LAllocation& rhs) {
         setOperand(RHS, rhs);
     }
 
--- a/js/src/jit/shared/LOpcodes-shared.h
+++ b/js/src/jit/shared/LOpcodes-shared.h
@@ -252,16 +252,17 @@
     _(ArrayJoin)                    \
     _(StoreElementHoleV)            \
     _(StoreElementHoleT)            \
     _(LoadTypedArrayElementHole)    \
     _(LoadTypedArrayElementStatic)  \
     _(StoreTypedArrayElementHole)   \
     _(StoreTypedArrayElementStatic) \
     _(AtomicIsLockFree)             \
+    _(GuardSharedTypedArray)        \
     _(CompareExchangeTypedArrayElement) \
     _(AtomicExchangeTypedArrayElement) \
     _(AtomicTypedArrayElementBinop) \
     _(AtomicTypedArrayElementBinopForEffect) \
     _(EffectiveAddress)             \
     _(ClampIToUint8)                \
     _(ClampDToUint8)                \
     _(ClampVToUint8)                \
--- a/js/src/vm/ObjectGroup.cpp
+++ b/js/src/vm/ObjectGroup.cpp
@@ -205,18 +205,17 @@ ObjectGroup::useSingletonForAllocationSi
      * singleton types. For now this is only done for plain objects and typed
      * arrays, but not normal arrays.
      */
 
     if (script->functionNonDelazifying() && !script->treatAsRunOnce())
         return GenericObject;
 
     if (key != JSProto_Object &&
-        !(key >= JSProto_Int8Array && key <= JSProto_Uint8ClampedArray) &&
-        !(key >= JSProto_SharedInt8Array && key <= JSProto_SharedUint8ClampedArray))
+        !(key >= JSProto_Int8Array && key <= JSProto_Uint8ClampedArray))
     {
         return GenericObject;
     }
 
     // All loops in the script will have a try note indicating their boundary.
 
     if (!script->hasTrynotes())
         return SingletonObject;
@@ -700,27 +699,16 @@ GetClassForProtoKey(JSProtoKey key)
       case JSProto_Uint16Array:
       case JSProto_Int32Array:
       case JSProto_Uint32Array:
       case JSProto_Float32Array:
       case JSProto_Float64Array:
       case JSProto_Uint8ClampedArray:
         return &TypedArrayObject::classes[key - JSProto_Int8Array];
 
-      case JSProto_SharedInt8Array:
-      case JSProto_SharedUint8Array:
-      case JSProto_SharedInt16Array:
-      case JSProto_SharedUint16Array:
-      case JSProto_SharedInt32Array:
-      case JSProto_SharedUint32Array:
-      case JSProto_SharedFloat32Array:
-      case JSProto_SharedFloat64Array:
-      case JSProto_SharedUint8ClampedArray:
-        return &SharedTypedArrayObject::classes[key - JSProto_SharedInt8Array];
-
       case JSProto_ArrayBuffer:
         return &ArrayBufferObject::class_;
 
       case JSProto_SharedArrayBuffer:
         return &SharedArrayBufferObject::class_;
 
       case JSProto_DataView:
         return &DataViewObject::class_;
--- a/js/src/vm/TypeInference.cpp
+++ b/js/src/vm/TypeInference.cpp
@@ -1857,36 +1857,36 @@ class ConstraintDataFreezeObjectForInlin
 };
 
 // Constraint which triggers recompilation when a typed array's data becomes
 // invalid.
 class ConstraintDataFreezeObjectForTypedArrayData
 {
     NativeObject* obj;
 
-    void* viewData;
+    uintptr_t viewData;
     uint32_t length;
 
   public:
     explicit ConstraintDataFreezeObjectForTypedArrayData(TypedArrayObject& tarray)
       : obj(&tarray),
-        viewData(tarray.viewData()),
+        viewData(tarray.viewDataEither().unwrapValue()),
         length(tarray.length())
     {
         MOZ_ASSERT(tarray.isSingleton());
     }
 
     const char* kind() { return "freezeObjectForTypedArrayData"; }
 
     bool invalidateOnNewType(TypeSet::Type type) { return false; }
     bool invalidateOnNewPropertyState(TypeSet* property) { return false; }
     bool invalidateOnNewObjectState(ObjectGroup* group) {
         MOZ_ASSERT(obj->group() == group);
         TypedArrayObject& tarr = obj->as<TypedArrayObject>();
-        return tarr.viewData() != viewData || tarr.length() != length;
+        return tarr.viewDataEither().unwrapValue() != viewData || tarr.length() != length;
     }
 
     bool constraintHolds(JSContext* cx,
                          const HeapTypeSetKey& property, TemporaryTypeSet* expected)
     {
         return !invalidateOnNewObjectState(property.object()->maybeGroup());
     }
 
@@ -2200,17 +2200,18 @@ TemporaryTypeSet::convertDoubleElements(
         }
 
         HeapTypeSetKey property = key->property(JSID_VOID);
         property.freeze(constraints);
 
         // We can't convert to double elements for objects which do not have
         // double in their element types (as the conversion may render the type
         // information incorrect), nor for non-array objects (as their elements
-        // may point to emptyObjectElements, which cannot be converted).
+        // may point to emptyObjectElements or emptyObjectElementsShared, which
+        // cannot be converted).
         if (!property.maybeTypes() ||
             !property.maybeTypes()->hasType(DoubleType()) ||
             key->clasp() != &ArrayObject::class_)
         {
             dontConvert = true;
             alwaysConvert = false;
             continue;
         }
@@ -2266,16 +2267,24 @@ TemporaryTypeSet::getKnownClass(Compiler
             if (key && !key->hasStableClassAndProto(constraints))
                 return nullptr;
         }
     }
 
     return clasp;
 }
 
+void
+TemporaryTypeSet::getTypedArraySharedness(CompilerConstraintList* constraints,
+                                          TypedArraySharedness* sharedness)
+{
+    // In the future this will inspect the object set.
+    *sharedness = UnknownSharedness;
+}
+
 TemporaryTypeSet::ForAllResult
 TemporaryTypeSet::forAllClasses(CompilerConstraintList* constraints,
                                 bool (*func)(const Class* clasp))
 {
     if (unknownObject())
         return ForAllResult::MIXED;
 
     unsigned count = getObjectCount();
@@ -2303,32 +2312,26 @@ TemporaryTypeSet::forAllClasses(Compiler
     }
 
     MOZ_ASSERT(true_results != false_results);
 
     return true_results ? ForAllResult::ALL_TRUE : ForAllResult::ALL_FALSE;
 }
 
 Scalar::Type
-TemporaryTypeSet::getTypedArrayType(CompilerConstraintList* constraints)
+TemporaryTypeSet::getTypedArrayType(CompilerConstraintList* constraints,
+                                    TypedArraySharedness* sharedness)
 {
     const Class* clasp = getKnownClass(constraints);
 
-    if (clasp && IsTypedArrayClass(clasp))
+    if (clasp && IsTypedArrayClass(clasp)) {
+        if (sharedness)
+            getTypedArraySharedness(constraints, sharedness);
         return (Scalar::Type) (clasp - &TypedArrayObject::classes[0]);
-    return Scalar::MaxTypedArrayViewType;
-}
-
-Scalar::Type
-TemporaryTypeSet::getSharedTypedArrayType(CompilerConstraintList* constraints)
-{
-    const Class* clasp = getKnownClass(constraints);
-
-    if (clasp && IsSharedTypedArrayClass(clasp))
-        return (Scalar::Type) (clasp - &SharedTypedArrayObject::classes[0]);
+    }
     return Scalar::MaxTypedArrayViewType;
 }
 
 bool
 TemporaryTypeSet::isDOMClass(CompilerConstraintList* constraints)
 {
     if (unknownObject())
         return false;
--- a/js/src/vm/TypeInference.h
+++ b/js/src/vm/TypeInference.h
@@ -713,21 +713,29 @@ class TemporaryTypeSet : public TypeSet
                                bool (*func)(const Class* clasp));
 
     /*
      * Returns true if all objects in this set have the same prototype, and
      * assigns this object to *proto. The proto can be nullptr.
      */
     bool getCommonPrototype(CompilerConstraintList* constraints, JSObject** proto);
 
-    /* Get the typed array type of all objects in this set, or Scalar::MaxTypedArrayViewType. */
-    Scalar::Type getTypedArrayType(CompilerConstraintList* constraints);
+    /* Whether the buffer mapped by a TypedArray is shared memory or not */
+    enum TypedArraySharedness {
+        UnknownSharedness=1,    // We can't determine sharedness
+        KnownShared,            // We know for sure the buffer is shared
+        KnownUnshared           // We know for sure the buffer is unshared
+    };
 
-    /* Get the shared typed array type of all objects in this set, or Scalar::MaxTypedArrayViewType. */
-    Scalar::Type getSharedTypedArrayType(CompilerConstraintList* constraints);
+    /* Get the typed array type of all objects in this set, or Scalar::MaxTypedArrayViewType.
+     * If there is such a common type and sharedness is not nullptr then
+     * *sharedness is set to what we know about the sharedness of the memory.
+     */
+    Scalar::Type getTypedArrayType(CompilerConstraintList* constraints,
+                                   TypedArraySharedness* sharedness = nullptr);
 
     /* Whether all objects have JSCLASS_IS_DOMJSCLASS set. */
     bool isDOMClass(CompilerConstraintList* constraints);
 
     /* Whether clasp->isCallable() is true for one or more objects in this set. */
     bool maybeCallable(CompilerConstraintList* constraints);
 
     /* Whether clasp->emulatesUndefined() is true for one or more objects in this set. */
@@ -759,16 +767,20 @@ class TemporaryTypeSet : public TypeSet
         AmbiguousDoubleConversion
     };
 
     /*
      * Whether known double optimizations are possible for element accesses on
      * objects in this type set.
      */
     DoubleConversion convertDoubleElements(CompilerConstraintList* constraints);
+
+  private:
+    void getTypedArraySharedness(CompilerConstraintList* constraints,
+                                 TypedArraySharedness* sharedness);
 };
 
 bool
 AddClearDefiniteGetterSetterForPrototypeChain(JSContext* cx, ObjectGroup* group, HandleId id);
 
 bool
 AddClearDefiniteFunctionUsesInScript(JSContext* cx, ObjectGroup* group,
                                      JSScript* script, JSScript* calleeScript);
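
The sharedness out-parameter on getTypedArrayType defaults to nullptr, so existing callers that only need the element type are unchanged. A minimal sketch of the new calling pattern, mirroring atomicsMeetsPreconditions; the helper name and signature are placeholders, not part of the patch:

static bool
GetAtomicViewType(TemporaryTypeSet* types, CompilerConstraintList* constraints,
                  Scalar::Type* arrayType, bool* requiresGuard)
{
    // Fetch the common element type and, when one exists, what is known
    // about the sharedness of the underlying buffer.
    TemporaryTypeSet::TypedArraySharedness sharedness;
    *arrayType = types->getTypedArrayType(constraints, &sharedness);
    if (*arrayType == Scalar::MaxTypedArrayViewType)
        return false;

    // A dynamic MGuardSharedTypedArray is needed unless the buffer is known
    // to be shared.
    *requiresGuard = sharedness != TemporaryTypeSet::KnownShared;
    return true;
}
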
--- a/js/src/vm/UnboxedObject.cpp
+++ b/js/src/vm/UnboxedObject.cpp
@@ -975,17 +975,17 @@ UnboxedArrayObject::convertToNativeWithG
     MOZ_ASSERT(result.value == DenseElementResult::Success);
 
     obj->setGroup(group);
 
     ArrayObject* aobj = &obj->as<ArrayObject>();
     aobj->setLastPropertyMakeNative(cx, shape);
 
     // Make sure there is at least one element, so that this array does not
-    // use emptyObjectElements.
+    // use emptyObjectElements / emptyObjectElementsShared.
     if (!aobj->ensureElements(cx, Max<size_t>(initlen, 1)))
         return false;
 
     MOZ_ASSERT(!aobj->getDenseInitializedLength());
     aobj->setDenseInitializedLength(initlen);
     aobj->initDenseElements(0, values.begin(), initlen);
     aobj->setLengthInt32(length);