Bug 1434230 part 1 - Some Spectre mitigations for loadStringChars. r=luke,nbp
☠☠ backed out by a20dc29e502d ☠☠
author Jan de Mooij <jdemooij@mozilla.com>
Thu, 08 Feb 2018 22:01:52 +0100
changeset 752947 9c9ba4938b080344149ec4cd6f8cd564f7af3be3
parent 752847 25d85c1648e5d3083c378754d64a4806e1c6835d
child 752948 6598194588d7cbdc2553955f92ea357f15320468
push id 98429
push user mak77@bonardo.net
push date Fri, 09 Feb 2018 10:14:12 +0000
reviewers luke, nbp
bugs 1434230
milestone 60.0a1
dom/ipc/ContentPrefs.cpp
js/src/jit/CodeGenerator.cpp
js/src/jit/JitOptions.cpp
js/src/jit/JitOptions.h
js/src/jit/MacroAssembler.cpp
js/src/jit/MacroAssembler.h
js/src/jit/arm/MacroAssembler-arm-inl.h
js/src/jit/arm64/MacroAssembler-arm64-inl.h
js/src/jit/x64/MacroAssembler-x64-inl.h
js/src/jit/x86/MacroAssembler-x86-inl.h
js/src/jsapi.cpp
js/src/jsapi.h
js/src/shell/js.cpp
js/xpconnect/src/XPCJSContext.cpp
modules/libpref/init/all.js
--- a/dom/ipc/ContentPrefs.cpp
+++ b/dom/ipc/ContentPrefs.cpp
@@ -121,16 +121,17 @@ const char* mozilla::dom::ContentPrefs::
   "javascript.options.ion.offthread_compilation",
   "javascript.options.ion.threshold",
   "javascript.options.ion.unsafe_eager_compilation",
   "javascript.options.jit.full_debug_checks",
   "javascript.options.native_regexp",
   "javascript.options.parallel_parsing",
   "javascript.options.shared_memory",
   "javascript.options.spectre.index_masking",
+  "javascript.options.spectre.string_mitigations",
   "javascript.options.streams",
   "javascript.options.strict",
   "javascript.options.strict.debug",
   "javascript.options.throw_on_asmjs_validation_failure",
   "javascript.options.throw_on_debuggee_would_run",
   "javascript.options.wasm",
   "javascript.options.wasm_baselinejit",
   "javascript.options.wasm_ionjit",
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -1578,22 +1578,22 @@ CreateDependentString::generate(MacroAss
         MOZ_ASSERT(startIndexAddress.base == masm.getStackPointer());
         BaseIndex newStartIndexAddress = startIndexAddress;
         newStartIndexAddress.offset += 2 * sizeof(void*);
 
         // Load chars pointer for the new string.
         masm.addPtr(ImmWord(JSInlineString::offsetOfInlineStorage()), string);
 
         // Load the source characters pointer.
-        masm.loadStringChars(base, base);
-        masm.load32(newStartIndexAddress, temp2);
+        masm.loadStringChars(base, temp2);
+        masm.load32(newStartIndexAddress, base);
         if (latin1)
             masm.addPtr(temp2, base);
         else
-            masm.computeEffectiveAddress(BaseIndex(base, temp2, TimesTwo), base);
+            masm.computeEffectiveAddress(BaseIndex(temp2, base, TimesTwo), base);
 
         CopyStringChars(masm, string, base, temp1, temp2, latin1 ? 1 : 2, latin1 ? 1 : 2);
 
         // Null-terminate.
         if (latin1)
             masm.store8(Imm32(0), Address(string, 0));
         else
             masm.store16(Imm32(0), Address(string, 0));
@@ -2446,18 +2446,16 @@ CodeGenerator::visitOutOfLineRegExpInsta
 
     masm.jump(ool->rejoin());
 }
 
 static void
 FindFirstDollarIndex(MacroAssembler& masm, Register str, Register len, Register chars,
                      Register temp, Register output, bool isLatin1)
 {
-    masm.loadStringChars(str, chars);
-
     masm.move32(Imm32(0), output);
 
     Label start, done;
     masm.bind(&start);
     if (isLatin1)
         masm.load8ZeroExtend(BaseIndex(chars, output, TimesOne), temp);
     else
         masm.load16ZeroExtend(BaseIndex(chars, output, TimesTwo), temp);
@@ -2486,16 +2484,18 @@ CodeGenerator::visitGetFirstDollarIndex(
     Register len = ToRegister(ins->temp2());
 
     OutOfLineCode* ool = oolCallVM(GetFirstDollarIndexRawInfo, ins, ArgList(str),
                                    StoreRegisterTo(output));
 
     masm.branchIfRope(str, ool->entry());
     masm.loadStringLength(str, len);
 
+    masm.loadStringChars(str, temp0);
+
     Label isLatin1, done;
     masm.branchLatin1String(str, &isLatin1);
     {
         FindFirstDollarIndex(masm, str, len, temp0, temp1, output, /* isLatin1 = */ false);
     }
     masm.jump(&done);
     {
         masm.bind(&isLatin1);
@@ -7767,25 +7767,26 @@ static void
 CopyStringCharsMaybeInflate(MacroAssembler& masm, Register input, Register destChars,
                             Register temp1, Register temp2)
 {
     // destChars is TwoByte and input is a Latin1 or TwoByte string, so we may
     // have to inflate.
 
     Label isLatin1, done;
     masm.loadStringLength(input, temp1);
+    masm.loadStringChars(input, temp2);
     masm.branchLatin1String(input, &isLatin1);
     {
-        masm.loadStringChars(input, input);
+        masm.movePtr(temp2, input);
         CopyStringChars(masm, destChars, input, temp1, temp2, sizeof(char16_t), sizeof(char16_t));
         masm.jump(&done);
     }
     masm.bind(&isLatin1);
     {
-        masm.loadStringChars(input, input);
+        masm.movePtr(temp2, input);
         CopyStringChars(masm, destChars, input, temp1, temp2, sizeof(char), sizeof(char16_t));
     }
     masm.bind(&done);
 }
 
 static void
 ConcatInlineString(MacroAssembler& masm, Register lhs, Register rhs, Register output,
                    Register temp1, Register temp2, Register temp3,
@@ -7832,26 +7833,28 @@ ConcatInlineString(MacroAssembler& masm,
 
     {
         // Copy lhs chars. Note that this advances temp2 to point to the next
         // char. This also clobbers the lhs register.
         if (isTwoByte) {
             CopyStringCharsMaybeInflate(masm, lhs, temp2, temp1, temp3);
         } else {
             masm.loadStringLength(lhs, temp3);
-            masm.loadStringChars(lhs, lhs);
+            masm.loadStringChars(lhs, temp1);
+            masm.movePtr(temp1, lhs);
             CopyStringChars(masm, temp2, lhs, temp3, temp1, sizeof(char), sizeof(char));
         }
 
         // Copy rhs chars. Clobbers the rhs register.
         if (isTwoByte) {
             CopyStringCharsMaybeInflate(masm, rhs, temp2, temp1, temp3);
         } else {
             masm.loadStringLength(rhs, temp3);
-            masm.loadStringChars(rhs, rhs);
+            masm.loadStringChars(rhs, temp1);
+            masm.movePtr(temp1, rhs);
             CopyStringChars(masm, temp2, rhs, temp3, temp1, sizeof(char), sizeof(char));
         }
 
         // Null-terminate.
         if (isTwoByte)
             masm.store16(Imm32(0), Address(temp2, 0));
         else
             masm.store8(Imm32(0), Address(temp2, 0));
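
The CodeGenerator changes above all follow from the new MOZ_ASSERT(str != dest) in loadStringChars: the helper can no longer be called with the same register as source and destination, so callers load the chars into a temp first and copy back where the old register is still needed. A minimal illustration of the caller-side pattern (register names purely illustrative):

    // Old pattern, no longer allowed once str != dest is asserted:
    //     masm.loadStringChars(lhs, lhs);
    //
    // New pattern used throughout this file:
    //     masm.loadStringChars(lhs, temp1);   // chars into a distinct temp
    //     masm.movePtr(temp1, lhs);           // then overwrite lhs if desired
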
--- a/js/src/jit/JitOptions.cpp
+++ b/js/src/jit/JitOptions.cpp
@@ -228,16 +228,17 @@ DefaultJitOptions::DefaultJitOptions()
     const char* forcedRegisterAllocatorEnv = "JIT_OPTION_forcedRegisterAllocator";
     if (const char* env = getenv(forcedRegisterAllocatorEnv)) {
         forcedRegisterAllocator = LookupRegisterAllocator(env);
         if (!forcedRegisterAllocator.isSome())
             Warn(forcedRegisterAllocatorEnv, env);
     }
 
     SET_DEFAULT(spectreIndexMasking, true);
+    SET_DEFAULT(spectreStringMitigations, false);
 
     // Toggles whether unboxed plain objects can be created by the VM.
     SET_DEFAULT(disableUnboxedObjects, false);
 
     // Test whether Atomics are allowed in asm.js code.
     SET_DEFAULT(asmJSAtomicsEnable, false);
 
     // Toggles the optimization whereby offsets are folded into loads and not
--- a/js/src/jit/JitOptions.h
+++ b/js/src/jit/JitOptions.h
@@ -90,16 +90,17 @@ struct DefaultJitOptions
     uint32_t branchPruningThreshold;
     uint32_t wasmBatchIonThreshold;
     uint32_t wasmBatchBaselineThreshold;
     mozilla::Maybe<uint32_t> forcedDefaultIonWarmUpThreshold;
     mozilla::Maybe<uint32_t> forcedDefaultIonSmallFunctionWarmUpThreshold;
     mozilla::Maybe<IonRegisterAllocator> forcedRegisterAllocator;
 
     bool spectreIndexMasking;
+    bool spectreStringMitigations;
 
     // The options below affect the rest of the VM, and not just the JIT.
     bool disableUnboxedObjects;
 
     DefaultJitOptions();
     bool isSmallFunction(JSScript* script) const;
     void setEagerCompilation();
     void setCompilerWarmUpThreshold(uint32_t warmUpThreshold);
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -1364,27 +1364,51 @@ MacroAssembler::compareStrings(JSOp op, 
     move32(Imm32(op == JSOP_NE || op == JSOP_STRICTNE), result);
 
     bind(&done);
 }
 
 void
 MacroAssembler::loadStringChars(Register str, Register dest)
 {
-    Label isInline, done;
-    branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
-                 Imm32(JSString::INLINE_CHARS_BIT), &isInline);
-
-    loadPtr(Address(str, JSString::offsetOfNonInlineChars()), dest);
-    jump(&done);
-
-    bind(&isInline);
+    MOZ_ASSERT(str != dest);
+
+    // First, if the string is a rope, zero the |str| register. The code below
+    // depends on str->flags so this should block speculative execution.
+    if (JitOptions.spectreStringMitigations) {
+        movePtr(ImmWord(0), dest);
+        test32MovePtr(Assembler::Zero,
+                      Address(str, JSString::offsetOfFlags()), Imm32(JSString::LINEAR_BIT),
+                      dest, str);
+    }
+
+    // Load the inline chars.
     computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()), dest);
 
-    bind(&done);
+    // If it's not an inline string, load the non-inline chars. Use a
+    // conditional move to prevent speculative execution.
+    test32LoadPtr(Assembler::Zero,
+                  Address(str, JSString::offsetOfFlags()), Imm32(JSString::INLINE_CHARS_BIT),
+                  Address(str, JSString::offsetOfNonInlineChars()), dest);
+}
+
+void
+MacroAssembler::loadRopeLeftChild(Register str, Register dest)
+{
+    MOZ_ASSERT(str != dest);
+
+    if (JitOptions.spectreStringMitigations) {
+        // Zero the output register if the input was not a rope.
+        movePtr(ImmWord(0), dest);
+        test32LoadPtr(Assembler::Zero,
+                      Address(str, JSString::offsetOfFlags()), Imm32(JSString::LINEAR_BIT),
+                      Address(str, JSRope::offsetOfLeft()), dest);
+    } else {
+        loadPtr(Address(str, JSRope::offsetOfLeft()), dest);
+    }
 }
 
 void
 MacroAssembler::loadStringChar(Register str, Register index, Register output, Register scratch,
                                Label* fail)
 {
     MOZ_ASSERT(str != output);
     MOZ_ASSERT(str != index);
@@ -1392,40 +1416,38 @@ MacroAssembler::loadStringChar(Register 
     MOZ_ASSERT(output != scratch);
 
     movePtr(str, output);
 
     // This follows JSString::getChar.
     Label notRope;
     branchIfNotRope(str, &notRope);
 
-    // Load leftChild.
-    loadPtr(Address(str, JSRope::offsetOfLeft()), output);
+    loadRopeLeftChild(str, output);
 
     // Check if the index is contained in the leftChild.
     // Todo: Handle index in the rightChild.
     boundsCheck32ForLoad(index, Address(output, JSString::offsetOfLength()), scratch, fail);
 
     // If the left side is another rope, give up.
     branchIfRope(output, fail);
 
     bind(&notRope);
 
+    loadStringChars(output, scratch);
+
     Label isLatin1, done;
     // We have to check the left/right side for ropes,
     // because a TwoByte rope might have a Latin1 child.
     branchLatin1String(output, &isLatin1);
-
-    loadStringChars(output, output);
-    load16ZeroExtend(BaseIndex(output, index, TimesTwo), output);
+    load16ZeroExtend(BaseIndex(scratch, index, TimesTwo), output);
     jump(&done);
 
     bind(&isLatin1);
-    loadStringChars(output, output);
-    load8ZeroExtend(BaseIndex(output, index, TimesOne), output);
+    load8ZeroExtend(BaseIndex(scratch, index, TimesOne), output);
 
     bind(&done);
 }
 
 void
 MacroAssembler::loadStringIndexValue(Register str, Register dest, Label* fail)
 {
     MOZ_ASSERT(str != dest);
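
For readers who want the mitigation spelled out outside MacroAssembler terms, here is a minimal sketch of the address computation the new loadStringChars emits when spectreStringMitigations is on. This is not SpiderMonkey code: the flag bits, offsets, and the selectIf helper are illustrative stand-ins; the real code uses test32MovePtr/test32LoadPtr, which lower to cmov/CSel so there is no branch for the CPU to mispredict.

    #include <cstdint>

    // Illustrative stand-ins for JSString layout details (assumed values).
    constexpr uint32_t  LINEAR_BIT            = 1u << 0;
    constexpr uint32_t  INLINE_CHARS_BIT      = 1u << 1;
    constexpr uintptr_t INLINE_STORAGE_OFFSET = 16;

    // Models cmovCCq/CSel: a branch-free select (the ternary is only a model).
    static inline uintptr_t selectIf(bool take, uintptr_t ifTrue, uintptr_t ifFalse)
    {
        return take ? ifTrue : ifFalse;
    }

    // Returns the chars pointer of |str|. |flags| and |nonInlineChars| stand for
    // the two header fields the real code loads from the string.
    uintptr_t loadStringCharsSketch(uintptr_t str, uint32_t flags, uintptr_t nonInlineChars)
    {
        // Step 1 (test32MovePtr): if the string is a rope (LINEAR_BIT clear),
        // replace the string pointer with null, so a mispredicted "not a rope"
        // path cannot compute an attacker-influenced chars address.
        str = selectIf((flags & LINEAR_BIT) == 0, 0, str);

        // Step 2 (computeEffectiveAddress): unconditionally form the
        // inline-storage address.
        uintptr_t chars = str + INLINE_STORAGE_OFFSET;

        // Step 3 (test32LoadPtr): if the string is not inline, overwrite the
        // result with the heap chars pointer taken from the string header.
        chars = selectIf((flags & INLINE_CHARS_BIT) == 0, nonInlineChars, chars);
        return chars;
    }
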
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1363,16 +1363,24 @@ class MacroAssembler : public MacroAssem
     inline void cmp32Move32(Condition cond, Register lhs, Register rhs, Register src,
                             Register dest)
         DEFINED_ON(arm, arm64, x86_shared);
 
     inline void cmp32Move32(Condition cond, Register lhs, const Address& rhs, Register src,
                             Register dest)
         DEFINED_ON(arm, arm64, x86_shared);
 
+    inline void test32LoadPtr(Condition cond, const Address& addr, Imm32 mask, const Address& src,
+                              Register dest)
+        DEFINED_ON(arm, arm64, x86, x64);
+
+    inline void test32MovePtr(Condition cond, const Address& addr, Imm32 mask, Register src,
+                              Register dest)
+        DEFINED_ON(arm, arm64, x86, x64);
+
     // Performs a bounds check and zeroes the index register if out-of-bounds
     // (to mitigate Spectre).
     inline void boundsCheck32ForLoad(Register index, Register length, Register scratch,
                                      Label* failure)
         DEFINED_ON(arm, arm64, x86_shared);
     inline void boundsCheck32ForLoad(Register index, const Address& length, Register scratch,
                                      Label* failure)
         DEFINED_ON(arm, arm64, x86_shared);
@@ -1952,16 +1960,17 @@ class MacroAssembler : public MacroAssem
 
     void loadStringLength(Register str, Register dest) {
         load32(Address(str, JSString::offsetOfLength()), dest);
     }
 
     void loadStringChars(Register str, Register dest);
     void loadStringChar(Register str, Register index, Register output, Register scratch,
                         Label* fail);
+    void loadRopeLeftChild(Register str, Register dest);
 
     void loadStringIndexValue(Register str, Register dest, Label* fail);
 
     void loadJSContext(Register dest);
     void loadJitActivation(Register dest) {
         loadJSContext(dest);
         loadPtr(Address(dest, offsetof(JSContext, activation_)), dest);
     }
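
A behavioural model of the two new primitives declared above, written as ordinary C++ for clarity. This is an assumption-level sketch of the result they compute; the per-architecture implementations below emit a flags test followed by a conditional move (cmovCC on x86/x64, a predicated mov/ldr on ARM, CSel on ARM64), so the selection itself is branch-free.

    #include <cstdint>

    enum class Cond { Zero, NonZero };

    static inline bool matches(Cond cond, uint32_t value, uint32_t mask)
    {
        bool zero = (value & mask) == 0;
        return cond == Cond::Zero ? zero : !zero;
    }

    // test32MovePtr(cond, addr, mask, src, dest):
    // dest = src if the test matches, otherwise dest is left unchanged.
    void test32MovePtrModel(Cond cond, const uint32_t* addr, uint32_t mask,
                            uintptr_t src, uintptr_t* dest)
    {
        *dest = matches(cond, *addr, mask) ? src : *dest;
    }

    // test32LoadPtr(cond, addr, mask, src, dest):
    // dest = *src if the test matches, otherwise dest is left unchanged.
    void test32LoadPtrModel(Cond cond, const uint32_t* addr, uint32_t mask,
                            const uintptr_t* src, uintptr_t* dest)
    {
        *dest = matches(cond, *addr, mask) ? *src : *dest;
    }
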
--- a/js/src/jit/arm/MacroAssembler-arm-inl.h
+++ b/js/src/jit/arm/MacroAssembler-arm-inl.h
@@ -2150,16 +2150,35 @@ MacroAssembler::cmp32Move32(Condition co
 {
     ScratchRegisterScope scratch(*this);
     SecondScratchRegisterScope scratch2(*this);
     ma_ldr(rhs, scratch, scratch2);
     cmp32Move32(cond, lhs, scratch, src, dest);
 }
 
 void
+MacroAssembler::test32LoadPtr(Condition cond, const Address& addr, Imm32 mask, const Address& src,
+                              Register dest)
+{
+    MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
+    test32(addr, mask);
+    ScratchRegisterScope scratch(*this);
+    ma_ldr(src, dest, scratch, Offset, cond);
+}
+
+void
+MacroAssembler::test32MovePtr(Condition cond, const Address& addr, Imm32 mask, Register src,
+                              Register dest)
+{
+    MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
+    test32(addr, mask);
+    ma_mov(src, dest, LeaveCC, cond);
+}
+
+void
 MacroAssembler::boundsCheck32ForLoad(Register index, Register length, Register scratch,
                                      Label* failure)
 {
     MOZ_ASSERT(index != length);
     MOZ_ASSERT(length != scratch);
     MOZ_ASSERT(index != scratch);
 
     if (JitOptions.spectreIndexMasking)
--- a/js/src/jit/arm64/MacroAssembler-arm64-inl.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64-inl.h
@@ -1722,16 +1722,42 @@ void
 MacroAssembler::cmp32Move32(Condition cond, Register lhs, const Address& rhs, Register src,
                             Register dest)
 {
     cmp32(lhs, rhs);
     Csel(ARMRegister(dest, 32), ARMRegister(src, 32), ARMRegister(dest, 32), cond);
 }
 
 void
+MacroAssembler::test32LoadPtr(Condition cond, const Address& addr, Imm32 mask, const Address& src,
+                              Register dest)
+{
+    MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
+
+    // ARM64 does not support conditional loads, so we use a branch with a CSel
+    // (to prevent Spectre attacks).
+    vixl::UseScratchRegisterScope temps(this);
+    const ARMRegister scratch64 = temps.AcquireX();
+    Label done;
+    branchTest32(Assembler::InvertCondition(cond), addr, mask, &done);
+    loadPtr(src, scratch64.asUnsized());
+    Csel(ARMRegister(dest, 64), scratch64, ARMRegister(dest, 64), cond);
+    bind(&done);
+}
+
+void
+MacroAssembler::test32MovePtr(Condition cond, const Address& addr, Imm32 mask, Register src,
+                              Register dest)
+{
+    MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
+    test32(addr, mask);
+    Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64), cond);
+}
+
+void
 MacroAssembler::boundsCheck32ForLoad(Register index, Register length, Register scratch,
                                      Label* failure)
 {
     MOZ_ASSERT(index != length);
     MOZ_ASSERT(length != scratch);
     MOZ_ASSERT(index != scratch);
 
     branch32(Assembler::BelowOrEqual, length, index, failure);
--- a/js/src/jit/x64/MacroAssembler-x64-inl.h
+++ b/js/src/jit/x64/MacroAssembler-x64-inl.h
@@ -815,16 +815,34 @@ MacroAssembler::branchTestMagic(Conditio
 }
 
 void
 MacroAssembler::branchToComputedAddress(const BaseIndex& address)
 {
     jmp(Operand(address));
 }
 
+void
+MacroAssembler::test32LoadPtr(Condition cond, const Address& addr, Imm32 mask, const Address& src,
+                              Register dest)
+{
+    MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
+    test32(addr, mask);
+    cmovCCq(cond, Operand(src), dest);
+}
+
+void
+MacroAssembler::test32MovePtr(Condition cond, const Address& addr, Imm32 mask, Register src,
+                              Register dest)
+{
+    MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
+    test32(addr, mask);
+    cmovCCq(cond, Operand(src), dest);
+}
+
 // ========================================================================
 // Truncate floating point.
 
 void
 MacroAssembler::truncateFloat32ToUInt64(Address src, Address dest, Register temp,
                                         FloatRegister floatTemp)
 {
     Label done;
--- a/js/src/jit/x86/MacroAssembler-x86-inl.h
+++ b/js/src/jit/x86/MacroAssembler-x86-inl.h
@@ -1004,16 +1004,34 @@ MacroAssembler::branchTestMagic(Conditio
 }
 
 void
 MacroAssembler::branchToComputedAddress(const BaseIndex& addr)
 {
     jmp(Operand(addr));
 }
 
+void
+MacroAssembler::test32LoadPtr(Condition cond, const Address& addr, Imm32 mask, const Address& src,
+                              Register dest)
+{
+    MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
+    test32(addr, mask);
+    cmovCCl(cond, Operand(src), dest);
+}
+
+void
+MacroAssembler::test32MovePtr(Condition cond, const Address& addr, Imm32 mask, Register src,
+                              Register dest)
+{
+    MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
+    test32(addr, mask);
+    cmovCCl(cond, Operand(src), dest);
+}
+
 // ========================================================================
 // Truncate floating point.
 
 void
 MacroAssembler::truncateFloat32ToUInt64(Address src, Address dest, Register temp,
                                         FloatRegister floatTemp)
 {
     Label done;
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -7251,16 +7251,19 @@ JS_SetGlobalJitCompilerOption(JSContext*
         jit::JitOptions.jumpThreshold = value;
         break;
       case JSJITCOMPILER_SIMULATOR_ALWAYS_INTERRUPT:
         jit::JitOptions.simulatorAlwaysInterrupt = !!value;
         break;
       case JSJITCOMPILER_SPECTRE_INDEX_MASKING:
         jit::JitOptions.spectreIndexMasking = !!value;
         break;
+      case JSJITCOMPILER_SPECTRE_STRING_MITIGATIONS:
+        jit::JitOptions.spectreStringMitigations = !!value;
+        break;
       case JSJITCOMPILER_ASMJS_ATOMICS_ENABLE:
         jit::JitOptions.asmJSAtomicsEnable = !!value;
         break;
       case JSJITCOMPILER_WASM_FOLD_OFFSETS:
         jit::JitOptions.wasmFoldOffsets = !!value;
         break;
       case JSJITCOMPILER_ION_INTERRUPT_WITHOUT_SIGNAL:
         jit::JitOptions.ionInterruptWithoutSignals = !!value;
--- a/js/src/jsapi.h
+++ b/js/src/jsapi.h
@@ -5907,17 +5907,18 @@ JS_SetOffthreadIonCompilationEnabled(JSC
     Register(ION_INTERRUPT_WITHOUT_SIGNAL, "ion.interrupt-without-signals") \
     Register(ION_CHECK_RANGE_ANALYSIS, "ion.check-range-analysis")          \
     Register(BASELINE_ENABLE, "baseline.enable")                            \
     Register(OFFTHREAD_COMPILATION_ENABLE, "offthread-compilation.enable")  \
     Register(FULL_DEBUG_CHECKS, "jit.full-debug-checks")                    \
     Register(JUMP_THRESHOLD, "jump-threshold")                              \
     Register(SIMULATOR_ALWAYS_INTERRUPT, "simulator.always-interrupt")      \
     Register(SPECTRE_INDEX_MASKING, "spectre.index-masking")                \
-    Register(ASMJS_ATOMICS_ENABLE, "asmjs.atomics.enable")                  \
+    Register(SPECTRE_STRING_MITIGATIONS, "spectre.string-mitigations")      \
+    Register(ASMJS_ATOMICS_ENABLE, "asmjs.atomics.enable")                  \
     Register(WASM_FOLD_OFFSETS, "wasm.fold-offsets")
 
 typedef enum JSJitCompilerOption {
 #define JIT_COMPILER_DECLARE(key, str) \
     JSJITCOMPILER_ ## key,
 
     JIT_COMPILER_OPTIONS(JIT_COMPILER_DECLARE)
 #undef JIT_COMPILER_DECLARE
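
Embedders can flip the new knob through the same public API used for the other JIT compiler options; a hedged usage sketch (cx is assumed to be an already-initialized JSContext):

    #include "jsapi.h"

    // Enable or disable the string Spectre mitigations for an embedding,
    // using the JSJITCOMPILER_SPECTRE_STRING_MITIGATIONS option added above.
    void setSpectreStringMitigations(JSContext* cx, bool enabled)
    {
        JS_SetGlobalJitCompilerOption(cx, JSJITCOMPILER_SPECTRE_STRING_MITIGATIONS,
                                      enabled ? 1 : 0);
    }
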
--- a/js/src/shell/js.cpp
+++ b/js/src/shell/js.cpp
@@ -8518,22 +8518,25 @@ SetContextOptions(JSContext* cx, const O
             jit::JitOptions.disableCacheIR = false;
         else if (strcmp(str, "off") == 0)
             jit::JitOptions.disableCacheIR = true;
         else
             return OptionFailure("cache-ir-stubs", str);
     }
 
     if (const char* str = op.getStringOption("spectre-mitigations")) {
-        if (strcmp(str, "on") == 0)
+        if (strcmp(str, "on") == 0) {
             jit::JitOptions.spectreIndexMasking = true;
-        else if (strcmp(str, "off") == 0)
+            jit::JitOptions.spectreStringMitigations = true;
+        } else if (strcmp(str, "off") == 0) {
             jit::JitOptions.spectreIndexMasking = false;
-        else
+            jit::JitOptions.spectreStringMitigations = false;
+        } else {
             return OptionFailure("spectre-mitigations", str);
+        }
     }
 
     if (const char* str = op.getStringOption("ion-scalar-replacement")) {
         if (strcmp(str, "on") == 0)
             jit::JitOptions.disableScalarReplacement = false;
         else if (strcmp(str, "off") == 0)
             jit::JitOptions.disableScalarReplacement = true;
         else
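
Shell usage note: the pre-existing spectre-mitigations string option now drives both flags, so (assuming the usual shell flag spelling) something like `js --spectre-mitigations=off script.js` disables index masking and the new string mitigations together, while `=on` enables both.
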
--- a/js/xpconnect/src/XPCJSContext.cpp
+++ b/js/xpconnect/src/XPCJSContext.cpp
@@ -804,16 +804,18 @@ ReloadPrefsCallback(const char* pref, vo
 
     bool werror = Preferences::GetBool(JS_OPTIONS_DOT_STR "werror");
 
     bool extraWarnings = Preferences::GetBool(JS_OPTIONS_DOT_STR "strict");
 
     bool streams = Preferences::GetBool(JS_OPTIONS_DOT_STR "streams");
 
     bool spectreIndexMasking = Preferences::GetBool(JS_OPTIONS_DOT_STR "spectre.index_masking");
+    bool spectreStringMitigations =
+        Preferences::GetBool(JS_OPTIONS_DOT_STR "spectre.string_mitigations");
 
     sSharedMemoryEnabled = Preferences::GetBool(JS_OPTIONS_DOT_STR "shared_memory");
 
 #ifdef DEBUG
     sExtraWarningsForSystemJS = Preferences::GetBool(JS_OPTIONS_DOT_STR "strict.debug");
 #endif
 
 #ifdef JS_GC_ZEAL
@@ -866,16 +868,18 @@ ReloadPrefsCallback(const char* pref, vo
                                   useBaselineEager ? 0 : baselineThreshold);
     JS_SetGlobalJitCompilerOption(cx, JSJITCOMPILER_ION_WARMUP_TRIGGER,
                                   useIonEager ? 0 : ionThreshold);
 #ifdef DEBUG
     JS_SetGlobalJitCompilerOption(cx, JSJITCOMPILER_FULL_DEBUG_CHECKS, fullJitDebugChecks);
 #endif
 
     JS_SetGlobalJitCompilerOption(cx, JSJITCOMPILER_SPECTRE_INDEX_MASKING, spectreIndexMasking);
+    JS_SetGlobalJitCompilerOption(cx, JSJITCOMPILER_SPECTRE_STRING_MITIGATIONS,
+                                  spectreStringMitigations);
 }
 
 XPCJSContext::~XPCJSContext()
 {
     MOZ_COUNT_DTOR_INHERITED(XPCJSContext, CycleCollectedJSContext);
     // Elsewhere we abort immediately if XPCJSContext initialization fails.
     // Therefore the context must be non-null.
     MOZ_ASSERT(MaybeContext());
--- a/modules/libpref/init/all.js
+++ b/modules/libpref/init/all.js
@@ -1547,16 +1547,17 @@ pref("javascript.options.showInConsole",
 
 pref("javascript.options.shared_memory", false);
 
 pref("javascript.options.throw_on_debuggee_would_run", false);
 pref("javascript.options.dump_stack_on_debuggee_would_run", false);
 
 // Spectre security vulnerability mitigations.
 pref("javascript.options.spectre.index_masking", true);
+pref("javascript.options.spectre.string_mitigations", false);
 
 // Streams API
 pref("javascript.options.streams", false);
 
 // advanced prefs
 pref("advanced.mailftp",                    false);
 pref("image.animation_mode",                "normal");