Bug 1148232 - OdinMonkey: Always put asm.js heap alignment masks just before their accesses r=luke
authorDan Gohman <sunfish@mozilla.com>
Fri, 27 Mar 2015 10:15:21 -0700
changeset 236261 d2abf02e8df8e4bd1c444b8b61337bd97f186dbf
parent 236260 da24bccfcaf00f48abcccd0c3ce0fd196dfcc056
child 236262 982ae8f096bdaaad83381750b88c9438c8e55f07
push id: 28494
push user: philringnalda@gmail.com
push date: Sat, 28 Mar 2015 18:46:06 +0000
treeherder: mozilla-central@e16dc56f90d5 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: luke
bugs: 1148232
milestone: 39.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1148232 - OdinMonkey: Always put asm.js heap alignment masks just before their accesses r=luke
js/src/asmjs/AsmJSValidate.cpp
--- a/js/src/asmjs/AsmJSValidate.cpp
+++ b/js/src/asmjs/AsmJSValidate.cpp
@@ -4453,19 +4453,22 @@ FoldMaskedArrayIndex(FunctionCompiler &f
         *mask &= mask2;
         *indexExpr = indexNode;
         return true;
     }
 
     return false;
 }
 
+static const int32_t NoMask = -1;
+
 static bool
 CheckArrayAccess(FunctionCompiler &f, ParseNode *viewName, ParseNode *indexExpr,
-                 Scalar::Type *viewType, MDefinition **def, NeedsBoundsCheck *needsBoundsCheck)
+                 Scalar::Type *viewType, MDefinition **def, NeedsBoundsCheck *needsBoundsCheck,
+                 int32_t *mask)
 {
     *needsBoundsCheck = NEEDS_BOUNDS_CHECK;
 
     if (!viewName->isKind(PNK_NAME))
         return f.fail(viewName, "base of array access must be a typed array view name");
 
     const ModuleCompiler::Global *global = f.lookupGlobal(viewName->name());
     if (!global || !global->isAnyArrayView())
@@ -4481,65 +4484,66 @@ CheckArrayAccess(FunctionCompiler &f, Pa
 
         unsigned elementSize = TypedArrayElemSize(*viewType);
         if (!f.m().tryRequireHeapLengthToBeAtLeast(byteOffset + elementSize)) {
             return f.failf(indexExpr, "constant index outside heap size range declared by the "
                                       "change-heap function (0x%x - 0x%x)",
                                       f.m().minHeapLength(), f.m().module().maxHeapLength());
         }
 
+        *mask = NoMask;
         *needsBoundsCheck = NO_BOUNDS_CHECK;
         *def = f.constant(Int32Value(byteOffset), Type::Int);
         return true;
     }
 
     // Mask off the low bits to account for the clearing effect of a right shift
     // followed by the left shift implicit in the array access. E.g., H32[i>>2]
     // loses the low two bits.
-    int32_t mask = ~(TypedArrayElemSize(*viewType) - 1);
+    *mask = ~(TypedArrayElemSize(*viewType) - 1);
 
     MDefinition *pointerDef;
     if (indexExpr->isKind(PNK_RSH)) {
         ParseNode *shiftAmountNode = BitwiseRight(indexExpr);
 
         uint32_t shift;
         if (!IsLiteralInt(f.m(), shiftAmountNode, &shift))
             return f.failf(shiftAmountNode, "shift amount must be constant");
 
         unsigned requiredShift = TypedArrayShift(*viewType);
         if (shift != requiredShift)
             return f.failf(shiftAmountNode, "shift amount must be %u", requiredShift);
 
         ParseNode *pointerNode = BitwiseLeft(indexExpr);
 
         if (pointerNode->isKind(PNK_BITAND))
-            FoldMaskedArrayIndex(f, &pointerNode, &mask, needsBoundsCheck);
+            FoldMaskedArrayIndex(f, &pointerNode, mask, needsBoundsCheck);
 
         f.enterHeapExpression();
 
         Type pointerType;
         if (!CheckExpr(f, pointerNode, &pointerDef, &pointerType))
             return false;
 
         f.leaveHeapExpression();
 
         if (!pointerType.isIntish())
             return f.failf(pointerNode, "%s is not a subtype of int", pointerType.toChars());
     } else {
         // For legacy compatibility, accept Int8/Uint8 accesses with no shift.
         if (TypedArrayShift(*viewType) != 0)
             return f.fail(indexExpr, "index expression isn't shifted; must be an Int8/Uint8 access");
 
-        MOZ_ASSERT(mask == -1);
+        MOZ_ASSERT(*mask == NoMask);
         bool folded = false;
 
         ParseNode *pointerNode = indexExpr;
 
         if (pointerNode->isKind(PNK_BITAND))
-            folded = FoldMaskedArrayIndex(f, &pointerNode, &mask, needsBoundsCheck);
+            folded = FoldMaskedArrayIndex(f, &pointerNode, mask, needsBoundsCheck);
 
         f.enterHeapExpression();
 
         Type pointerType;
         if (!CheckExpr(f, pointerNode, &pointerDef, &pointerType))
             return false;
 
         f.leaveHeapExpression();
@@ -4548,34 +4552,41 @@ CheckArrayAccess(FunctionCompiler &f, Pa
             if (!pointerType.isIntish())
                 return f.failf(pointerNode, "%s is not a subtype of intish", pointerType.toChars());
         } else {
             if (!pointerType.isInt())
                 return f.failf(pointerNode, "%s is not a subtype of int", pointerType.toChars());
         }
     }
 
+    *def = pointerDef;
+    return true;
+}
+
+static void
+PrepareArrayIndex(FunctionCompiler &f, MDefinition **def, NeedsBoundsCheck needsBoundsCheck,
+                  int32_t mask)
+{
     // Don't generate the mask op if there is no need for it which could happen for
-    // a shift of zero.
-    if (mask == -1)
-        *def = pointerDef;
-    else
-        *def = f.bitwise<MBitAnd>(pointerDef, f.constant(Int32Value(mask), Type::Int));
-
-    return true;
+    // a shift of zero or a SIMD access.
+    if (mask != NoMask)
+        *def = f.bitwise<MBitAnd>(*def, f.constant(Int32Value(mask), Type::Int));
 }
 
 static bool
 CheckLoadArray(FunctionCompiler &f, ParseNode *elem, MDefinition **def, Type *type)
 {
     Scalar::Type viewType;
     MDefinition *pointerDef;
     NeedsBoundsCheck needsBoundsCheck;
-    if (!CheckArrayAccess(f, ElemBase(elem), ElemIndex(elem), &viewType, &pointerDef, &needsBoundsCheck))
-        return false;
+    int32_t mask;
+    if (!CheckArrayAccess(f, ElemBase(elem), ElemIndex(elem), &viewType, &pointerDef, &needsBoundsCheck, &mask))
+        return false;
+
+    PrepareArrayIndex(f, &pointerDef, needsBoundsCheck, mask);
 
     *def = f.loadHeap(viewType, pointerDef, needsBoundsCheck);
     *type = TypedArrayLoadType(viewType);
     return true;
 }
 
 static bool
 CheckDotAccess(FunctionCompiler &f, ParseNode *elem, MDefinition **def, Type *type)
@@ -4623,17 +4634,18 @@ CheckDotAccess(FunctionCompiler &f, Pars
 }
 
 static bool
 CheckStoreArray(FunctionCompiler &f, ParseNode *lhs, ParseNode *rhs, MDefinition **def, Type *type)
 {
     Scalar::Type viewType;
     MDefinition *pointerDef;
     NeedsBoundsCheck needsBoundsCheck;
-    if (!CheckArrayAccess(f, ElemBase(lhs), ElemIndex(lhs), &viewType, &pointerDef, &needsBoundsCheck))
+    int32_t mask;
+    if (!CheckArrayAccess(f, ElemBase(lhs), ElemIndex(lhs), &viewType, &pointerDef, &needsBoundsCheck, &mask))
         return false;
 
     f.enterHeapExpression();
 
     MDefinition *rhsDef;
     Type rhsType;
     if (!CheckExpr(f, rhs, &rhsDef, &rhsType))
         return false;
@@ -4661,16 +4673,18 @@ CheckStoreArray(FunctionCompiler &f, Par
             rhsDef = f.unary<MToDouble>(rhsDef);
         else if (!rhsType.isMaybeDouble())
             return f.failf(lhs, "%s is not a subtype of float? or double?", rhsType.toChars());
         break;
       default:
         MOZ_CRASH("Unexpected view type");
     }
 
+    PrepareArrayIndex(f, &pointerDef, needsBoundsCheck, mask);
+
     f.storeHeap(viewType, pointerDef, rhsDef, needsBoundsCheck);
 
     *def = rhsDef;
     *type = rhsType;
     return true;
 }
 
 static bool
@@ -4877,19 +4891,19 @@ CheckMathMinMax(FunctionCompiler &f, Par
 
     *def = lastDef;
     return true;
 }
 
 static bool
 CheckSharedArrayAtomicAccess(FunctionCompiler &f, ParseNode *viewName, ParseNode *indexExpr,
                              Scalar::Type *viewType, MDefinition** pointerDef,
-                             NeedsBoundsCheck *needsBoundsCheck)
-{
-    if (!CheckArrayAccess(f, viewName, indexExpr, viewType, pointerDef, needsBoundsCheck))
+                             NeedsBoundsCheck *needsBoundsCheck, int32_t *mask)
+{
+    if (!CheckArrayAccess(f, viewName, indexExpr, viewType, pointerDef, needsBoundsCheck, mask))
         return false;
 
     // Atomic accesses may be made on shared integer arrays only.
 
     // The global will be sane, CheckArrayAccess checks it.
     const ModuleCompiler::Global *global = f.lookupGlobal(viewName->name());
     if (global->which() != ModuleCompiler::Global::ArrayView || !f.m().module().isSharedView())
         return f.fail(viewName, "base of array access must be a shared typed array view name");
@@ -4927,18 +4941,21 @@ CheckAtomicsLoad(FunctionCompiler &f, Pa
         return f.fail(call, "Atomics.load must be passed 2 arguments");
 
     ParseNode *arrayArg = CallArgList(call);
     ParseNode *indexArg = NextNode(arrayArg);
 
     Scalar::Type viewType;
     MDefinition *pointerDef;
     NeedsBoundsCheck needsBoundsCheck;
-    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck))
-        return false;
+    int32_t mask;
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck, &mask))
+        return false;
+
+    PrepareArrayIndex(f, &pointerDef, needsBoundsCheck, mask);
 
     *def = f.atomicLoadHeap(viewType, pointerDef, needsBoundsCheck);
     *type = Type::Signed;
     return true;
 }
 
 static bool
 CheckAtomicsStore(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
@@ -4948,27 +4965,30 @@ CheckAtomicsStore(FunctionCompiler &f, P
 
     ParseNode *arrayArg = CallArgList(call);
     ParseNode *indexArg = NextNode(arrayArg);
     ParseNode *valueArg = NextNode(indexArg);
 
     Scalar::Type viewType;
     MDefinition *pointerDef;
     NeedsBoundsCheck needsBoundsCheck;
-    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck))
+    int32_t mask;
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck, &mask))
         return false;
 
     MDefinition *rhsDef;
     Type rhsType;
     if (!CheckExpr(f, valueArg, &rhsDef, &rhsType))
         return false;
 
     if (!rhsType.isIntish())
         return f.failf(arrayArg, "%s is not a subtype of intish", rhsType.toChars());
 
+    PrepareArrayIndex(f, &pointerDef, needsBoundsCheck, mask);
+
     f.atomicStoreHeap(viewType, pointerDef, rhsDef, needsBoundsCheck);
 
     *def = rhsDef;
     *type = Type::Signed;
     return true;
 }
 
 static bool
@@ -4979,27 +4999,30 @@ CheckAtomicsBinop(FunctionCompiler &f, P
 
     ParseNode *arrayArg = CallArgList(call);
     ParseNode *indexArg = NextNode(arrayArg);
     ParseNode *valueArg = NextNode(indexArg);
 
     Scalar::Type viewType;
     MDefinition *pointerDef;
     NeedsBoundsCheck needsBoundsCheck;
-    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck))
+    int32_t mask;
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck, &mask))
         return false;
 
     MDefinition *valueArgDef;
     Type valueArgType;
     if (!CheckExpr(f, valueArg, &valueArgDef, &valueArgType))
         return false;
 
     if (!valueArgType.isIntish())
         return f.failf(valueArg, "%s is not a subtype of intish", valueArgType.toChars());
 
+    PrepareArrayIndex(f, &pointerDef, needsBoundsCheck, mask);
+
     *def = f.atomicBinopHeap(op, viewType, pointerDef, valueArgDef, needsBoundsCheck);
     *type = Type::Signed;
     return true;
 }
 
 static bool
 CheckAtomicsCompareExchange(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
 {
@@ -5009,17 +5032,18 @@ CheckAtomicsCompareExchange(FunctionComp
     ParseNode *arrayArg = CallArgList(call);
     ParseNode *indexArg = NextNode(arrayArg);
     ParseNode *oldValueArg = NextNode(indexArg);
     ParseNode *newValueArg = NextNode(oldValueArg);
 
     Scalar::Type viewType;
     MDefinition *pointerDef;
     NeedsBoundsCheck needsBoundsCheck;
-    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck))
+    int32_t mask;
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck, &mask))
         return false;
 
     MDefinition *oldValueArgDef;
     Type oldValueArgType;
     if (!CheckExpr(f, oldValueArg, &oldValueArgDef, &oldValueArgType))
         return false;
 
     MDefinition *newValueArgDef;
@@ -5028,16 +5052,18 @@ CheckAtomicsCompareExchange(FunctionComp
         return false;
 
     if (!oldValueArgType.isIntish())
         return f.failf(oldValueArg, "%s is not a subtype of intish", oldValueArgType.toChars());
 
     if (!newValueArgType.isIntish())
         return f.failf(newValueArg, "%s is not a subtype of intish", newValueArgType.toChars());
 
+    PrepareArrayIndex(f, &pointerDef, needsBoundsCheck, mask);
+
     *def = f.atomicCompareExchangeHeap(viewType, pointerDef, oldValueArgDef, newValueArgDef,
                                        needsBoundsCheck);
     *type = Type::Signed;
     return true;
 }
 
 static bool
 CheckAtomicsBuiltinCall(FunctionCompiler &f, ParseNode *callNode, AsmJSAtomicsBuiltinFunction func,
@@ -5805,16 +5831,18 @@ CheckSimdLoad(FunctionCompiler &f, Parse
         return f.failf(call, "expected 2 arguments to SIMD load, got %u", numArgs);
 
     Scalar::Type viewType;
     MDefinition *index;
     NeedsBoundsCheck needsBoundsCheck;
     if (!CheckSimdLoadStoreArgs(f, call, opType, numElems, &viewType, &index, &needsBoundsCheck))
         return false;
 
+    PrepareArrayIndex(f, &index, needsBoundsCheck, NoMask);
+
     *def = f.loadSimdHeap(viewType, index, needsBoundsCheck, numElems);
     *type = opType;
     return true;
 }
 
 static bool
 CheckSimdStore(FunctionCompiler &f, ParseNode *call, AsmJSSimdType opType,
                unsigned numElems, MDefinition **def, Type *type)
@@ -5833,16 +5861,18 @@ CheckSimdStore(FunctionCompiler &f, Pars
     ParseNode *vecExpr = NextNode(NextNode(CallArgList(call)));
     MDefinition *vec;
     Type vecType;
     if (!CheckExpr(f, vecExpr, &vec, &vecType))
         return false;
     if (!(vecType <= retType))
         return f.failf(vecExpr, "%s is not a subtype of %s", vecType.toChars(), retType.toChars());
 
+    PrepareArrayIndex(f, &index, needsBoundsCheck, NoMask);
+
     f.storeSimdHeap(viewType, index, vec, needsBoundsCheck, numElems);
     *def = vec;
     *type = vecType;
     return true;
 }
 
 static bool
 CheckSimdSelect(FunctionCompiler &f, ParseNode *call, AsmJSSimdType opType, bool isElementWise,