Bug 1137573 - OdinMonkey: Alignment Mask Analysis r=luke
author Dan Gohman <sunfish@mozilla.com>
Thu, 26 Feb 2015 22:17:51 -0800
changeset 231188 5def1d193a0c6f6a8b0ae175ef2be25b46ee248a
parent 231187 b94bcbc389e828344479f700d3970537c4abfff2
child 231189 4c9ebb3591c45ab0d70d7709e7baf960585e844d
push id 56193
push user dgohman@mozilla.com
push date Fri, 27 Feb 2015 19:05:04 +0000
treeherder mozilla-inbound@4c9ebb3591c4
reviewers luke
bugs 1137573
milestone 39.0a1
Bug 1137573 - OdinMonkey: Alignment Mask Analysis r=luke
js/src/jit/AlignmentMaskAnalysis.cpp
js/src/jit/AlignmentMaskAnalysis.h
js/src/jit/EffectiveAddressAnalysis.cpp
js/src/jit/Ion.cpp
js/src/jit/IonOptimizationLevels.cpp
js/src/jit/IonOptimizationLevels.h
js/src/jit/JitOptions.cpp
js/src/jit/JitOptions.h
js/src/moz.build
js/src/vm/TraceLogging.cpp
js/src/vm/TraceLoggingTypes.h
copy from js/src/jit/EffectiveAddressAnalysis.cpp
copy to js/src/jit/AlignmentMaskAnalysis.cpp
--- a/js/src/jit/EffectiveAddressAnalysis.cpp
+++ b/js/src/jit/AlignmentMaskAnalysis.cpp
@@ -1,185 +1,89 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#include "jit/EffectiveAddressAnalysis.h"
+#include "jit/AlignmentMaskAnalysis.h"
 #include "jit/MIR.h"
 #include "jit/MIRGraph.h"
 
 using namespace js;
 using namespace jit;
 
-static void
-AnalyzeLsh(TempAllocator &alloc, MLsh *lsh)
-{
-    if (lsh->specialization() != MIRType_Int32)
-        return;
-
-    MDefinition *index = lsh->lhs();
-    MOZ_ASSERT(index->type() == MIRType_Int32);
-
-    MDefinition *shift = lsh->rhs();
-    if (!shift->isConstantValue())
-        return;
-
-    Value shiftValue = shift->constantValue();
-    if (!shiftValue.isInt32() || !IsShiftInScaleRange(shiftValue.toInt32()))
-        return;
-
-    Scale scale = ShiftToScale(shiftValue.toInt32());
-
-    int32_t displacement = 0;
-    MInstruction *last = lsh;
-    MDefinition *base = nullptr;
-    while (true) {
-        if (!last->hasOneUse())
-            break;
-
-        MUseIterator use = last->usesBegin();
-        if (!use->consumer()->isDefinition() || !use->consumer()->toDefinition()->isAdd())
-            break;
-
-        MAdd *add = use->consumer()->toDefinition()->toAdd();
-        if (add->specialization() != MIRType_Int32 || !add->isTruncated())
-            break;
-
-        MDefinition *other = add->getOperand(1 - add->indexOf(*use));
-
-        if (other->isConstantValue()) {
-            displacement += other->constantValue().toInt32();
-        } else {
-            if (base)
-                break;
-            base = other;
-        }
-
-        last = add;
-    }
-
-    if (!base) {
-        uint32_t elemSize = 1 << ScaleToShift(scale);
-        if (displacement % elemSize != 0)
-            return;
-
-        if (!last->hasOneUse())
-            return;
-
-        MUseIterator use = last->usesBegin();
-        if (!use->consumer()->isDefinition() || !use->consumer()->toDefinition()->isBitAnd())
-            return;
-
-        MBitAnd *bitAnd = use->consumer()->toDefinition()->toBitAnd();
-        MDefinition *other = bitAnd->getOperand(1 - bitAnd->indexOf(*use));
-        if (!other->isConstantValue() || !other->constantValue().isInt32())
-            return;
-
-        uint32_t bitsClearedByShift = elemSize - 1;
-        uint32_t bitsClearedByMask = ~uint32_t(other->constantValue().toInt32());
-        if ((bitsClearedByShift & bitsClearedByMask) != bitsClearedByMask)
-            return;
-
-        bitAnd->replaceAllUsesWith(last);
-        return;
-    }
-
-    MEffectiveAddress *eaddr = MEffectiveAddress::New(alloc, base, index, scale, displacement);
-    last->replaceAllUsesWith(eaddr);
-    last->block()->insertAfter(last, eaddr);
-}
-
 static bool
 IsAlignmentMask(uint32_t m)
 {
     // Test whether m is just leading ones and trailing zeros.
     return (-m & ~m) == 0;
 }
 
-template<typename MAsmJSHeapAccessType>
 static void
-AnalyzeAsmHeapAccess(MAsmJSHeapAccessType *ins, MIRGraph &graph)
+AnalyzeAsmHeapAddress(MDefinition *ptr, MIRGraph &graph)
 {
-    MDefinition *ptr = ins->ptr();
+    // Fold (a+i)&m to (a&m)+i, since the users of the BitAnd include heap
+    // accesses. This will expose the redundancy for GVN when expressions
+    // like this:
+    //   a&m
+    //   (a+1)&m,
+    //   (a+2)&m,
+    // are transformed into this:
+    //   a&m
+    //   (a&m)+1
+    //   (a&m)+2
+    // and it will allow the constants to be folded by the
+    // EffectiveAddressAnalysis pass.
+
+    if (!ptr->isBitAnd())
+        return;
 
-    if (ptr->isConstantValue()) {
-        // Look for heap[i] where i is a constant offset, and fold the offset.
-        // By doing the folding now, we simplify the task of codegen; the offset
-        // is always the address mode immediate. This also allows it to avoid
-        // a situation where the sum of a constant pointer value and a non-zero
-        // offset doesn't actually fit into the address mode immediate.
-        int32_t imm = ptr->constantValue().toInt32();
-        if (imm != 0 && ins->tryAddDisplacement(imm)) {
-            MInstruction *zero = MConstant::New(graph.alloc(), Int32Value(0));
-            ins->block()->insertBefore(ins, zero);
-            ins->replacePtr(zero);
-        }
-    } else if (ptr->isAdd()) {
-        // Look for heap[a+i] where i is a constant offset, and fold the offset.
-        MDefinition *op0 = ptr->toAdd()->getOperand(0);
-        MDefinition *op1 = ptr->toAdd()->getOperand(1);
-        if (op0->isConstantValue())
-            mozilla::Swap(op0, op1);
-        if (op1->isConstantValue()) {
-            int32_t imm = op1->constantValue().toInt32();
-            if (ins->tryAddDisplacement(imm))
-                ins->replacePtr(op0);
-        }
-    } else if (ptr->isBitAnd() && ptr->hasOneUse()) {
-        // Transform heap[(a+i)&m] to heap[(a&m)+i] so that we can fold i into
-        // the access. Since we currently just mutate the BitAnd in place, this
-        // requires that we are its only user.
-        MDefinition *lhs = ptr->toBitAnd()->getOperand(0);
-        MDefinition *rhs = ptr->toBitAnd()->getOperand(1);
-        int lhsIndex = 0;
-        if (lhs->isConstantValue()) {
-            mozilla::Swap(lhs, rhs);
-            lhsIndex = 1;
-        }
-        if (lhs->isAdd() && rhs->isConstantValue()) {
-            MDefinition *op0 = lhs->toAdd()->getOperand(0);
-            MDefinition *op1 = lhs->toAdd()->getOperand(1);
-            if (op0->isConstantValue())
-                mozilla::Swap(op0, op1);
-            if (op1->isConstantValue()) {
-                uint32_t i = op1->constantValue().toInt32();
-                uint32_t m = rhs->constantValue().toInt32();
-                if (IsAlignmentMask(m) && ((i & m) == i) && ins->tryAddDisplacement(i))
-                    ptr->toBitAnd()->replaceOperand(lhsIndex, op0);
-            }
-        }
+    MDefinition *lhs = ptr->toBitAnd()->getOperand(0);
+    MDefinition *rhs = ptr->toBitAnd()->getOperand(1);
+    int lhsIndex = 0;
+    if (lhs->isConstantValue()) {
+        mozilla::Swap(lhs, rhs);
+        lhsIndex = 1;
     }
+    if (!lhs->isAdd() || !lhs->hasOneUse() || !rhs->isConstantValue())
+        return;
+
+    MDefinition *op0 = lhs->toAdd()->getOperand(0);
+    MDefinition *op1 = lhs->toAdd()->getOperand(1);
+    int op0Index = 0;
+    if (op0->isConstantValue()) {
+        mozilla::Swap(op0, op1);
+        op0Index = 1;
+    }
+    if (!op1->isConstantValue())
+        return;
+
+    uint32_t i = op1->constantValue().toInt32();
+    uint32_t m = rhs->constantValue().toInt32();
+    if (!IsAlignmentMask(m) || ((i & m) != i))
+        return;
+
+    ptr->replaceAllUsesWith(lhs);
+    ptr->toBitAnd()->replaceOperand(lhsIndex, op0);
+    lhs->toAdd()->replaceOperand(op0Index, ptr);
+
+    MInstructionIterator iter = ptr->block()->begin(ptr->toBitAnd());
+    ++iter;
+    lhs->block()->moveBefore(*iter, lhs->toAdd());
 }
 
-// This analysis converts patterns of the form:
-//   truncate(x + (y << {0,1,2,3}))
-//   truncate(x + (y << {0,1,2,3}) + imm32)
-// into a single lea instruction, and patterns of the form:
-//   asmload(x + imm32)
-//   asmload(x << {0,1,2,3})
-//   asmload((x << {0,1,2,3}) + imm32)
-//   asmload((x << {0,1,2,3}) & mask)            (where mask is redundant with shift)
-//   asmload(((x << {0,1,2,3}) + imm32) & mask)  (where mask is redundant with shift + imm32)
-// into a single asmload instruction (and for asmstore too).
-//
-// Additionally, we should consider the general forms:
-//   truncate(x + y + imm32)
-//   truncate((y << {0,1,2,3}) + imm32)
 bool
-EffectiveAddressAnalysis::analyze()
+AlignmentMaskAnalysis::analyze()
 {
     for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
         for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
             // Note that we don't check for MAsmJSCompareExchangeHeap
             // or MAsmJSAtomicBinopHeap, because the backend and the OOB
             // mechanism don't support non-zero offsets for them yet.
-            if (i->isLsh())
-                AnalyzeLsh(graph_.alloc(), i->toLsh());
-            else if (i->isAsmJSLoadHeap())
-                AnalyzeAsmHeapAccess(i->toAsmJSLoadHeap(), graph_);
+            if (i->isAsmJSLoadHeap())
+                AnalyzeAsmHeapAddress(i->toAsmJSLoadHeap()->ptr(), graph_);
             else if (i->isAsmJSStoreHeap())
-                AnalyzeAsmHeapAccess(i->toAsmJSStoreHeap(), graph_);
+                AnalyzeAsmHeapAddress(i->toAsmJSStoreHeap()->ptr(), graph_);
         }
     }
     return true;
 }
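
A minimal standalone sketch (not part of the patch) of the two facts AnalyzeAsmHeapAddress relies on: the test (-m & ~m) == 0 accepts exactly the masks made of leading ones and trailing zeros, and the rewrite (a+i)&m to (a&m)+i preserves the value whenever m is such a mask and (i & m) == i, that is, whenever i is a multiple of the alignment:

    #include <cassert>
    #include <cstdint>

    // Same test as in the pass: m must be leading ones, trailing zeros.
    static bool IsAlignmentMask(uint32_t m) {
        return (-m & ~m) == 0;
    }

    int main() {
        assert(IsAlignmentMask(0xFFFFFFF8));   // ~7: an 8-byte alignment mask
        assert(IsAlignmentMask(0xFFFFFFFF));   // all ones: a no-op mask
        assert(!IsAlignmentMask(0x0000FFF0));  // leading zeros above the ones
        assert(!IsAlignmentMask(0x00000005));  // ones are not contiguous

        // When m is an alignment mask and i has no bits outside m (so i is
        // a multiple of the alignment), adding i cannot carry into the bits
        // that the mask clears, so the reassociation preserves the value.
        const uint32_t m = 0xFFFFFFF8;
        for (uint32_t a = 0; a < 64; a++) {
            for (uint32_t i = 0; i <= 32; i += 8)
                assert(((a + i) & m) == ((a & m) + i));
        }
        return 0;
    }

Note that m == 0 also passes the mask test, but the (i & m) == i guard then forces i == 0, so the transformation degenerates harmlessly in that case.
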
copy from js/src/jit/EffectiveAddressAnalysis.h
copy to js/src/jit/AlignmentMaskAnalysis.h
--- a/js/src/jit/EffectiveAddressAnalysis.h
+++ b/js/src/jit/AlignmentMaskAnalysis.h
@@ -1,30 +1,30 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#ifndef jit_EffectiveAddressAnalysis_h
-#define jit_EffectiveAddressAnalysis_h
+#ifndef jit_AlignmentMaskAnalysis_h
+#define jit_AlignmentMaskAnalysis_h
 
 namespace js {
 namespace jit {
 
 class MIRGraph;
 
-class EffectiveAddressAnalysis
+class AlignmentMaskAnalysis
 {
     MIRGraph &graph_;
 
   public:
-    explicit EffectiveAddressAnalysis(MIRGraph &graph)
+    explicit AlignmentMaskAnalysis(MIRGraph &graph)
       : graph_(graph)
     {}
 
     bool analyze();
 };
 
 } /* namespace jit */
 } /* namespace js */
 
-#endif /* jit_EffectiveAddressAnalysis_h */
+#endif /* jit_AlignmentMaskAnalysis_h */
--- a/js/src/jit/EffectiveAddressAnalysis.cpp
+++ b/js/src/jit/EffectiveAddressAnalysis.cpp
@@ -84,23 +84,16 @@ AnalyzeLsh(TempAllocator &alloc, MLsh *l
         return;
     }
 
     MEffectiveAddress *eaddr = MEffectiveAddress::New(alloc, base, index, scale, displacement);
     last->replaceAllUsesWith(eaddr);
     last->block()->insertAfter(last, eaddr);
 }
 
-static bool
-IsAlignmentMask(uint32_t m)
-{
-    // Test whether m is just leading ones and trailing zeros.
-    return (-m & ~m) == 0;
-}
-
 template<typename MAsmJSHeapAccessType>
 static void
 AnalyzeAsmHeapAccess(MAsmJSHeapAccessType *ins, MIRGraph &graph)
 {
     MDefinition *ptr = ins->ptr();
 
     if (ptr->isConstantValue()) {
         // Look for heap[i] where i is a constant offset, and fold the offset.
@@ -111,48 +104,27 @@ AnalyzeAsmHeapAccess(MAsmJSHeapAccessTyp
         int32_t imm = ptr->constantValue().toInt32();
         if (imm != 0 && ins->tryAddDisplacement(imm)) {
             MInstruction *zero = MConstant::New(graph.alloc(), Int32Value(0));
             ins->block()->insertBefore(ins, zero);
             ins->replacePtr(zero);
         }
     } else if (ptr->isAdd()) {
         // Look for heap[a+i] where i is a constant offset, and fold the offset.
+        // Alignment masks have already been moved out of the way by the
+        // Alignment Mask Analysis pass.
         MDefinition *op0 = ptr->toAdd()->getOperand(0);
         MDefinition *op1 = ptr->toAdd()->getOperand(1);
         if (op0->isConstantValue())
             mozilla::Swap(op0, op1);
         if (op1->isConstantValue()) {
             int32_t imm = op1->constantValue().toInt32();
             if (ins->tryAddDisplacement(imm))
                 ins->replacePtr(op0);
         }
-    } else if (ptr->isBitAnd() && ptr->hasOneUse()) {
-        // Transform heap[(a+i)&m] to heap[(a&m)+i] so that we can fold i into
-        // the access. Since we currently just mutate the BitAnd in place, this
-        // requires that we are its only user.
-        MDefinition *lhs = ptr->toBitAnd()->getOperand(0);
-        MDefinition *rhs = ptr->toBitAnd()->getOperand(1);
-        int lhsIndex = 0;
-        if (lhs->isConstantValue()) {
-            mozilla::Swap(lhs, rhs);
-            lhsIndex = 1;
-        }
-        if (lhs->isAdd() && rhs->isConstantValue()) {
-            MDefinition *op0 = lhs->toAdd()->getOperand(0);
-            MDefinition *op1 = lhs->toAdd()->getOperand(1);
-            if (op0->isConstantValue())
-                mozilla::Swap(op0, op1);
-            if (op1->isConstantValue()) {
-                uint32_t i = op1->constantValue().toInt32();
-                uint32_t m = rhs->constantValue().toInt32();
-                if (IsAlignmentMask(m) && ((i & m) == i) && ins->tryAddDisplacement(i))
-                    ptr->toBitAnd()->replaceOperand(lhsIndex, op0);
-            }
-        }
     }
 }
 
 // This analysis converts patterns of the form:
 //   truncate(x + (y << {0,1,2,3}))
 //   truncate(x + (y << {0,1,2,3}) + imm32)
 // into a single lea instruction, and patterns of the form:
 //   asmload(x + imm32)
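
To connect the two passes: once AlignmentMaskAnalysis has rewritten heap[(a+i)&m] into heap[(a&m)+i], the heap[a+i] case kept above can absorb the constant addend into the access's displacement. A hedged standalone model of that folding (the Access struct and FoldConstantOffset are hypothetical stand-ins, not the MIR API):

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-in for an asm.js heap access: a remaining pointer
    // expression plus a constant displacement folded into the address mode.
    struct Access {
        uint32_t ptr;          // value of the remaining pointer expression
        int32_t displacement;  // constant folded into the access itself
    };

    // Models the heap[a+i] case: move the constant addend i out of the
    // pointer computation and into the displacement.
    static Access FoldConstantOffset(uint32_t base, int32_t imm) {
        return Access{base, imm};
    }

    int main() {
        const uint32_t m = 0xFFFFFFFC;  // 4-byte alignment mask
        uint32_t a = 1003;

        // Before AMA: heap[(a+8)&m], where the BitAnd hides the constant.
        uint32_t before = (a + 8) & m;

        // After AMA: heap[(a&m)+8], so 8 is now a foldable displacement.
        Access acc = FoldConstantOffset(a & m, 8);
        uint32_t after = acc.ptr + uint32_t(acc.displacement);

        assert(before == after);  // holds because (8 & m) == 8
        return 0;
    }
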
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -9,16 +9,17 @@
 #include "mozilla/MemoryReporting.h"
 #include "mozilla/ThreadLocal.h"
 
 #include "jscompartment.h"
 #include "jsprf.h"
 
 #include "gc/Marking.h"
 #include "jit/AliasAnalysis.h"
+#include "jit/AlignmentMaskAnalysis.h"
 #include "jit/BacktrackingAllocator.h"
 #include "jit/BaselineFrame.h"
 #include "jit/BaselineInspector.h"
 #include "jit/BaselineJIT.h"
 #include "jit/CodeGenerator.h"
 #include "jit/EdgeCaseAnalysis.h"
 #include "jit/EffectiveAddressAnalysis.h"
 #include "jit/IonAnalysis.h"
@@ -1243,16 +1244,28 @@ OptimizeMIR(MIRGenerator *mir)
             return false;
         IonSpewPass("Apply types");
         AssertExtendedGraphCoherency(graph);
 
         if (mir->shouldCancel("Apply types"))
             return false;
     }
 
+    if (mir->optimizationInfo().amaEnabled()) {
+        AutoTraceLog log(logger, TraceLogger_AlignmentMaskAnalysis);
+        AlignmentMaskAnalysis ama(graph);
+        if (!ama.analyze())
+            return false;
+        IonSpewPass("Alignment Mask Analysis");
+        AssertExtendedGraphCoherency(graph);
+
+        if (mir->shouldCancel("Alignment Mask Analysis"))
+            return false;
+    }
+
     ValueNumberer gvn(mir, graph);
     if (!gvn.init())
         return false;
 
     // Alias analysis is required for LICM and GVN so that we don't move
     // loads across stores.
     if (mir->optimizationInfo().licmEnabled() ||
         mir->optimizationInfo().gvnEnabled())
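
The hunk above follows the standard OptimizeMIR shape: gate on the optimization-level flag, run the pass, spew and assert graph coherency, then poll for off-thread cancellation. A hedged standalone sketch of just that control flow (all names hypothetical; no SpiderMonkey types):

    #include <cstdio>

    static bool analyzePass() { return true; }     // stands in for ama.analyze()
    static bool shouldCancel(const char *where) {  // stands in for mir->shouldCancel
        printf("checkpoint: %s\n", where);
        return false;
    }

    // Gate, run, and checkpoint one pass, mirroring the wiring above.
    static bool RunGatedPass(bool enabled) {
        if (!enabled)
            return true;   // a disabled pass is skipped, not a failure
        if (!analyzePass())
            return false;  // analysis failure aborts the compilation
        return !shouldCancel("Alignment Mask Analysis");
    }

    int main() {
        return RunGatedPass(true) ? 0 : 1;
    }
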
--- a/js/src/jit/IonOptimizationLevels.cpp
+++ b/js/src/jit/IonOptimizationLevels.cpp
@@ -50,16 +50,17 @@ void
 OptimizationInfo::initAsmjsOptimizationInfo()
 {
     // The AsmJS optimization level disables some passes that don't
     // work well with asm.js.
 
     // Take normal option values for not specified values.
     initNormalOptimizationInfo();
 
+    ama_ = true;
     level_ = Optimization_AsmJS;
     edgeCaseAnalysis_ = false;
     eliminateRedundantChecks_ = false;
     autoTruncate_ = false;
     sink_ = false;
     registerAllocator_ = RegisterAllocator_Backtracking;
     scalarReplacement_ = false;        // AsmJS has no objects.
 }
--- a/js/src/jit/IonOptimizationLevels.h
+++ b/js/src/jit/IonOptimizationLevels.h
@@ -44,16 +44,19 @@ OptimizationLevelString(OptimizationLeve
 class OptimizationInfo
 {
   public:
     OptimizationLevel level_;
 
     // Toggles whether Effective Address Analysis is performed.
     bool eaa_;
 
+    // Toggles whether Alignment Mask Analysis is performed.
+    bool ama_;
+
     // Toggles whether Edge Case Analysis is used.
     bool edgeCaseAnalysis_;
 
     // Toggles whether redundant checks get removed.
     bool eliminateRedundantChecks_;
 
     // Toggles whether interpreted scripts get inlined.
     bool inlineInterpreted_;
@@ -162,16 +165,20 @@ class OptimizationInfo
     bool sinkEnabled() const {
         return sink_ && !js_JitOptions.disableSink;
     }
 
     bool eaaEnabled() const {
         return eaa_ && !js_JitOptions.disableEaa;
     }
 
+    bool amaEnabled() const {
+        return ama_ && !js_JitOptions.disableAma;
+    }
+
     bool edgeCaseAnalysisEnabled() const {
         return edgeCaseAnalysis_ && !js_JitOptions.disableEdgeCaseAnalysis;
     }
 
     bool eliminateRedundantChecksEnabled() const {
         return eliminateRedundantChecks_;
     }
 
--- a/js/src/jit/JitOptions.cpp
+++ b/js/src/jit/JitOptions.cpp
@@ -100,16 +100,19 @@ JitOptions::JitOptions()
     SET_DEFAULT(disableSink, true);
 
     // Toggles whether Loop Unrolling is globally disabled.
     SET_DEFAULT(disableLoopUnrolling, true);
 
     // Toggles whether Effective Address Analysis is globally disabled.
     SET_DEFAULT(disableEaa, false);
 
+    // Toggles whether Alignment Mask Analysis is globally disabled.
+    SET_DEFAULT(disableAma, false);
+
     // Whether functions are compiled immediately.
     SET_DEFAULT(eagerCompilation, false);
 
     // Force how many invocation or loop iterations are needed before compiling
     // a function with the highest ionmonkey optimization level.
     // (i.e. OptimizationLevel_Normal)
     const char *forcedDefaultIonWarmUpThresholdEnv = "JIT_OPTION_forcedDefaultIonWarmUpThreshold";
     if (const char *env = getenv(forcedDefaultIonWarmUpThresholdEnv)) {
--- a/js/src/jit/JitOptions.h
+++ b/js/src/jit/JitOptions.h
@@ -50,16 +50,17 @@ struct JitOptions
     bool disableGvn;
     bool disableLicm;
     bool disableInlining;
     bool disableEdgeCaseAnalysis;
     bool disableRangeAnalysis;
     bool disableSink;
     bool disableLoopUnrolling;
     bool disableEaa;
+    bool disableAma;
     bool eagerCompilation;
     mozilla::Maybe<uint32_t> forcedDefaultIonWarmUpThreshold;
     mozilla::Maybe<IonRegisterAllocator> forcedRegisterAllocator;
     bool limitScriptSize;
     bool osr;
     uint32_t baselineWarmUpThreshold;
     uint32_t exceptionBailoutThreshold;
     uint32_t frequentBailoutThreshold;
--- a/js/src/moz.build
+++ b/js/src/moz.build
@@ -135,16 +135,17 @@ UNIFIED_SOURCES += [
     'irregexp/NativeRegExpMacroAssembler.cpp',
     'irregexp/RegExpAST.cpp',
     'irregexp/RegExpEngine.cpp',
     'irregexp/RegExpInterpreter.cpp',
     'irregexp/RegExpMacroAssembler.cpp',
     'irregexp/RegExpParser.cpp',
     'irregexp/RegExpStack.cpp',
     'jit/AliasAnalysis.cpp',
+    'jit/AlignmentMaskAnalysis.cpp',
     'jit/BacktrackingAllocator.cpp',
     'jit/Bailouts.cpp',
     'jit/BaselineBailouts.cpp',
     'jit/BaselineCompiler.cpp',
     'jit/BaselineDebugModeOSR.cpp',
     'jit/BaselineFrame.cpp',
     'jit/BaselineFrameInfo.cpp',
     'jit/BaselineIC.cpp',
--- a/js/src/vm/TraceLogging.cpp
+++ b/js/src/vm/TraceLogging.cpp
@@ -678,16 +678,17 @@ TraceLoggerThreadState::init()
         enabledTextIds[TraceLogger_PhiAnalysis] = true;
         enabledTextIds[TraceLogger_ApplyTypes] = true;
         enabledTextIds[TraceLogger_AliasAnalysis] = true;
         enabledTextIds[TraceLogger_GVN] = true;
         enabledTextIds[TraceLogger_LICM] = true;
         enabledTextIds[TraceLogger_RangeAnalysis] = true;
         enabledTextIds[TraceLogger_LoopUnrolling] = true;
         enabledTextIds[TraceLogger_EffectiveAddressAnalysis] = true;
+        enabledTextIds[TraceLogger_AlignmentMaskAnalysis] = true;
         enabledTextIds[TraceLogger_EliminateDeadCode] = true;
         enabledTextIds[TraceLogger_EdgeCaseAnalysis] = true;
         enabledTextIds[TraceLogger_EliminateRedundantChecks] = true;
         enabledTextIds[TraceLogger_GenerateLIR] = true;
         enabledTextIds[TraceLogger_RegisterAllocation] = true;
         enabledTextIds[TraceLogger_GenerateCode] = true;
         enabledTextIds[TraceLogger_Scripts] = true;
     }
--- a/js/src/vm/TraceLoggingTypes.h
+++ b/js/src/vm/TraceLoggingTypes.h
@@ -45,16 +45,17 @@
     _(MakeLoopsContiguous)                            \
     _(ApplyTypes)                                     \
     _(AliasAnalysis)                                  \
     _(GVN)                                            \
     _(LICM)                                           \
     _(RangeAnalysis)                                  \
     _(LoopUnrolling)                                  \
     _(EffectiveAddressAnalysis)                       \
+    _(AlignmentMaskAnalysis)                          \
     _(EliminateDeadCode)                              \
     _(EdgeCaseAnalysis)                               \
     _(EliminateRedundantChecks)                       \
     _(GenerateLIR)                                    \
     _(RegisterAllocation)                             \
     _(GenerateCode)
 
 #define TRACELOGGER_LOG_ITEMS(_)                      \