Bug 1404251 - Fix various UBSan issues in SpiderMonkey. r=bbouvier
author: André Bargull <andre.bargull@gmail.com>
Thu, 16 Nov 2017 11:25:48 -0800
changeset 436741 4d6b80ba61d01832127e2ffca8eed0941159fdda
parent 436740 33f70e1ee06026ab5dde1f3334d1b469538ff868
child 436742 f8ea0fdcd06651f20f81deeb575534f34dcdab65
push id: 117
push user: fmarier@mozilla.com
push date: Tue, 28 Nov 2017 20:17:16 +0000
reviewers: bbouvier
bugs: 1404251
milestone: 59.0a1
Bug 1404251 - Fix various UBSan issues in SpiderMonkey. r=bbouvier
js/src/builtin/SIMD.cpp
js/src/jit/IonAnalysis.cpp
js/src/jit/JitcodeMap.cpp
js/src/jit/MCallOptimize.cpp
js/src/jit/MoveResolver.cpp
js/src/jit/RegisterSets.h
js/src/jit/shared/CodeGenerator-shared-inl.h
js/src/jit/x86-shared/Disassembler-x86-shared.cpp
js/src/vm/Interpreter.cpp
js/src/vm/UbiNodeCensus.cpp
js/src/wasm/WasmBaselineCompile.cpp
js/src/wasm/WasmIonCompile.cpp
js/src/wasm/WasmModule.cpp
js/src/wasm/WasmSerialize.h
js/src/wasm/WasmStubs.cpp
js/src/wasm/WasmValidate.h
--- a/js/src/builtin/SIMD.cpp
+++ b/js/src/builtin/SIMD.cpp
@@ -11,16 +11,17 @@
  * The objects float32x4 and int32x4 are installed on the SIMD pseudo-module.
  */
 
 #include "builtin/SIMD.h"
 
 #include "mozilla/FloatingPoint.h"
 #include "mozilla/IntegerTypeTraits.h"
 #include "mozilla/Sprintf.h"
+#include "mozilla/TypeTraits.h"
 
 #include "jsapi.h"
 #include "jsfriendapi.h"
 #include "jsnum.h"
 #include "jsprf.h"
 
 #include "builtin/TypedObject.h"
 #include "jit/AtomicOperations.h"
@@ -32,16 +33,21 @@
 
 using namespace js;
 
 using mozilla::ArrayLength;
 using mozilla::IsFinite;
 using mozilla::IsNaN;
 using mozilla::FloorLog2;
 using mozilla::NumberIsInt32;
+using mozilla::EnableIf;
+using mozilla::IsIntegral;
+using mozilla::IsFloatingPoint;
+using mozilla::IsSigned;
+using mozilla::MakeUnsigned;
 
 ///////////////////////////////////////////////////////////////////////////
 // SIMD
 
 static_assert(unsigned(SimdType::Count) == 12, "sync with TypedObjectConstants.h");
 
 static bool ArgumentToLaneIndex(JSContext* cx, JS::HandleValue v, unsigned limit, unsigned* lane);
 
@@ -718,66 +724,89 @@ js::CreateSimd(JSContext* cx, const type
 
 FOR_EACH_SIMD(InstantiateCreateSimd_)
 
 #undef InstantiateCreateSimd_
 
 #undef FOR_EACH_SIMD
 
 namespace js {
+
+namespace detail {
+
+template<typename T, typename Enable = void>
+struct MaybeMakeUnsigned {
+    using Type = T;
+};
+
+template<typename T>
+struct MaybeMakeUnsigned<T, typename EnableIf<IsIntegral<T>::value && IsSigned<T>::value>::Type> {
+    using Type = typename MakeUnsigned<T>::Type;
+};
+
+} // namespace detail
+
 // Unary SIMD operators
 template<typename T>
 struct Identity {
     static T apply(T x) { return x; }
 };
 template<typename T>
 struct Abs {
     static T apply(T x) { return mozilla::Abs(x); }
 };
 template<typename T>
 struct Neg {
-    static T apply(T x) { return -1 * x; }
+    using MaybeUnsignedT = typename detail::MaybeMakeUnsigned<T>::Type;
+    static T apply(T x) { return MaybeUnsignedT(-1) * MaybeUnsignedT(x); }
 };
 template<typename T>
 struct Not {
     static T apply(T x) { return ~x; }
 };
 template<typename T>
 struct LogicalNot {
     static T apply(T x) { return !x; }
 };
 template<typename T>
 struct RecApprox {
+    static_assert(IsFloatingPoint<T>::value, "RecApprox only supported for floating points");
     static T apply(T x) { return 1 / x; }
 };
 template<typename T>
 struct RecSqrtApprox {
+    static_assert(IsFloatingPoint<T>::value, "RecSqrtApprox only supported for floating points");
     static T apply(T x) { return 1 / sqrt(x); }
 };
 template<typename T>
 struct Sqrt {
+    static_assert(IsFloatingPoint<T>::value, "Sqrt only supported for floating points");
     static T apply(T x) { return sqrt(x); }
 };
 
 // Binary SIMD operators
 template<typename T>
 struct Add {
-    static T apply(T l, T r) { return l + r; }
+    using MaybeUnsignedT = typename detail::MaybeMakeUnsigned<T>::Type;
+    static T apply(T l, T r) { return MaybeUnsignedT(l) + MaybeUnsignedT(r); }
 };
 template<typename T>
 struct Sub {
-    static T apply(T l, T r) { return l - r; }
+    using MaybeUnsignedT = typename detail::MaybeMakeUnsigned<T>::Type;
+    static T apply(T l, T r) { return MaybeUnsignedT(l) - MaybeUnsignedT(r); }
 };
 template<typename T>
 struct Div {
+    static_assert(IsFloatingPoint<T>::value, "Div only supported for floating points");
     static T apply(T l, T r) { return l / r; }
 };
 template<typename T>
 struct Mul {
-    static T apply(T l, T r) { return l * r; }
+    using MaybeUnsignedT = typename detail::MaybeMakeUnsigned<T>::Type;
+    static T apply(T l, T r) { return MaybeUnsignedT(l) * MaybeUnsignedT(r); }
 };
 template<typename T>
 struct Minimum {
     static T apply(T l, T r) { return math_min_impl(l, r); }
 };
 template<typename T>
 struct MinNum {
     static T apply(T l, T r) { return IsNaN(l) ? r : (IsNaN(r) ? l : math_min_impl(l, r)); }
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -3200,28 +3200,28 @@ jit::ExtractLinearSum(MDefinition* ins, 
     // terms, then ignore extracted linear sums.
     if (lsum.term && rsum.term)
         return SimpleLinearSum(ins, 0);
 
     // Check if this is of the form <SUM> + n or n + <SUM>.
     if (ins->isAdd()) {
         int32_t constant;
         if (space == MathSpace::Modulo)
-            constant = lsum.constant + rsum.constant;
+            constant = uint32_t(lsum.constant) + uint32_t(rsum.constant);
         else if (!SafeAdd(lsum.constant, rsum.constant, &constant))
             return SimpleLinearSum(ins, 0);
         return SimpleLinearSum(lsum.term ? lsum.term : rsum.term, constant);
     }
 
     MOZ_ASSERT(ins->isSub());
     // Check if this is of the form <SUM> - n.
     if (lsum.term) {
         int32_t constant;
         if (space == MathSpace::Modulo)
-            constant = lsum.constant - rsum.constant;
+            constant = uint32_t(lsum.constant) - uint32_t(rsum.constant);
         else if (!SafeSub(lsum.constant, rsum.constant, &constant))
             return SimpleLinearSum(ins, 0);
         return SimpleLinearSum(lsum.term, constant);
     }
 
     // Ignore any of the form n - <SUM>.
     return SimpleLinearSum(ins, 0);
 }
--- a/js/src/jit/JitcodeMap.cpp
+++ b/js/src/jit/JitcodeMap.cpp
@@ -1104,30 +1104,30 @@ JitcodeRegionEntry::WriteDelta(CompactBu
         }
     }
 
     //  NNNN-NNNN NNNB-BBBB BBBB-B011
     if (pcDelta >= ENC3_PC_DELTA_MIN && pcDelta <= ENC3_PC_DELTA_MAX &&
         nativeDelta <= ENC3_NATIVE_DELTA_MAX)
     {
         uint32_t encVal = ENC3_MASK_VAL |
-                          ((pcDelta << ENC3_PC_DELTA_SHIFT) & ENC3_PC_DELTA_MASK) |
+                          ((uint32_t(pcDelta) << ENC3_PC_DELTA_SHIFT) & ENC3_PC_DELTA_MASK) |
                           (nativeDelta << ENC3_NATIVE_DELTA_SHIFT);
         writer.writeByte(encVal & 0xff);
         writer.writeByte((encVal >> 8) & 0xff);
         writer.writeByte((encVal >> 16) & 0xff);
         return;
     }
 
     //  NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
     if (pcDelta >= ENC4_PC_DELTA_MIN && pcDelta <= ENC4_PC_DELTA_MAX &&
         nativeDelta <= ENC4_NATIVE_DELTA_MAX)
     {
         uint32_t encVal = ENC4_MASK_VAL |
-                          ((pcDelta << ENC4_PC_DELTA_SHIFT) & ENC4_PC_DELTA_MASK) |
+                          ((uint32_t(pcDelta) << ENC4_PC_DELTA_SHIFT) & ENC4_PC_DELTA_MASK) |
                           (nativeDelta << ENC4_NATIVE_DELTA_SHIFT);
         writer.writeByte(encVal & 0xff);
         writer.writeByte((encVal >> 8) & 0xff);
         writer.writeByte((encVal >> 16) & 0xff);
         writer.writeByte((encVal >> 24) & 0xff);
         return;
     }
 
--- a/js/src/jit/MCallOptimize.cpp
+++ b/js/src/jit/MCallOptimize.cpp
@@ -3503,17 +3503,17 @@ IonBuilder::atomicsMeetsPreconditions(Ca
     // Then check both that the element type is something we can
     // optimize and that the return type is suitable for that element
     // type.
 
     TemporaryTypeSet* arg0Types = callInfo.getArg(0)->resultTypeSet();
     if (!arg0Types)
         return false;
 
-    TemporaryTypeSet::TypedArraySharedness sharedness;
+    TemporaryTypeSet::TypedArraySharedness sharedness = TemporaryTypeSet::UnknownSharedness;
     *arrayType = arg0Types->getTypedArrayType(constraints(), &sharedness);
     *requiresTagCheck = sharedness != TemporaryTypeSet::KnownShared;
     switch (*arrayType) {
       case Scalar::Int8:
       case Scalar::Uint8:
       case Scalar::Int16:
       case Scalar::Uint16:
       case Scalar::Int32:
--- a/js/src/jit/MoveResolver.cpp
+++ b/js/src/jit/MoveResolver.cpp
@@ -33,16 +33,18 @@ MoveOperand::MoveOperand(MacroAssembler&
         kind_ = FLOAT_REG;
         code_ = arg.fpu().code();
         break;
       case ABIArg::Stack:
         kind_ = MEMORY;
         code_ = masm.getStackPointer().code();
         disp_ = arg.offsetFromArgBase();
         break;
+      case ABIArg::Uninitialized:
+        MOZ_CRASH("Uninitialized ABIArg kind");
     }
 }
 
 MoveResolver::MoveResolver()
   : numCycles_(0), curCycles_(0)
 {
 }
 
--- a/js/src/jit/RegisterSets.h
+++ b/js/src/jit/RegisterSets.h
@@ -1300,47 +1300,51 @@ class ABIArg
 {
   public:
     enum Kind {
         GPR,
 #ifdef JS_CODEGEN_REGISTER_PAIR
         GPR_PAIR,
 #endif
         FPU,
-        Stack
+        Stack,
+        Uninitialized = -1
     };
 
   private:
     Kind kind_;
     union {
         Register::Code gpr_;
         FloatRegister::Code fpu_;
         uint32_t offset_;
     } u;
 
   public:
-    ABIArg() : kind_(Kind(-1)) { u.offset_ = -1; }
+    ABIArg() : kind_(Uninitialized) { u.offset_ = -1; }
     explicit ABIArg(Register gpr) : kind_(GPR) { u.gpr_ = gpr.code(); }
     explicit ABIArg(Register gprLow, Register gprHigh)
     {
 #if defined(JS_CODEGEN_REGISTER_PAIR)
         kind_ = GPR_PAIR;
 #else
         MOZ_CRASH("Unsupported type of ABI argument.");
 #endif
         u.gpr_ = gprLow.code();
         MOZ_ASSERT(u.gpr_ % 2 == 0);
         MOZ_ASSERT(u.gpr_ + 1 == gprHigh.code());
     }
     explicit ABIArg(FloatRegister fpu) : kind_(FPU) { u.fpu_ = fpu.code(); }
     explicit ABIArg(uint32_t offset) : kind_(Stack) { u.offset_ = offset; }
 
-    Kind kind() const { return kind_; }
+    Kind kind() const {
+        MOZ_ASSERT(kind_ != Uninitialized);
+        return kind_;
+    }
 #ifdef JS_CODEGEN_REGISTER_PAIR
-    bool isGeneralRegPair() const { return kind_ == GPR_PAIR; }
+    bool isGeneralRegPair() const { return kind() == GPR_PAIR; }
 #else
     bool isGeneralRegPair() const { return false; }
 #endif
 
     Register gpr() const {
         MOZ_ASSERT(kind() == GPR);
         return Register::FromCode(u.gpr_);
     }
@@ -1364,32 +1368,32 @@ class ABIArg
         return FloatRegister::FromCode(u.fpu_);
     }
     uint32_t offsetFromArgBase() const {
         MOZ_ASSERT(kind() == Stack);
         return u.offset_;
     }
 
     bool argInRegister() const { return kind() != Stack; }
-    AnyRegister reg() const { return kind_ == GPR ? AnyRegister(gpr()) : AnyRegister(fpu()); }
+    AnyRegister reg() const { return kind() == GPR ? AnyRegister(gpr()) : AnyRegister(fpu()); }
 
     bool operator==(const ABIArg& rhs) const {
         if (kind_ != rhs.kind_)
             return false;
 
-        switch((int8_t)kind_) {
+        switch(kind_) {
             case GPR:   return u.gpr_ == rhs.u.gpr_;
 #if defined(JS_CODEGEN_REGISTER_PAIR)
             case GPR_PAIR: return u.gpr_ == rhs.u.gpr_;
 #endif
             case FPU:   return u.fpu_ == rhs.u.fpu_;
             case Stack: return u.offset_ == rhs.u.offset_;
-            case -1:    return true;
-            default:    MOZ_CRASH("Invalid value for ABIArg kind");
+            case Uninitialized: return true;
         }
+        MOZ_CRASH("Invalid value for ABIArg kind");
     }
 
     bool operator!=(const ABIArg& rhs) const {
         return !(*this == rhs);
     }
 };
 
 // Get the set of registers which should be saved by a block of code which
--- a/js/src/jit/shared/CodeGenerator-shared-inl.h
+++ b/js/src/jit/shared/CodeGenerator-shared-inl.h
@@ -371,17 +371,17 @@ CodeGeneratorShared::verifyHeapAccessDis
             // x86 doesn't allow encoding an imm64 to memory move; the value
             // is wrapped anyways.
             int32_t i = ToInt32(&alloc);
 
             // Sign-extend the immediate value out to 32 bits. We do this even
             // for unsigned element types so that we match what the disassembly
             // code does, as it doesn't know about signedness of stores.
             unsigned shift = 32 - TypedArrayElemSize(type) * 8;
-            i = i << shift >> shift;
+            i = int32_t(uint32_t(i) << shift) >> shift;
             op = OtherOperand(i);
         }
         break;
       case Scalar::Int64:
         // Can't encode an imm64-to-memory move.
         op = OtherOperand(ToRegister(alloc).encoding());
         break;
       case Scalar::Float32:
--- a/js/src/jit/x86-shared/Disassembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Disassembler-x86-shared.cpp
@@ -348,17 +348,17 @@ js::jit::Disassembler::DisassembleHeapAc
         // 32-bit signed immediate
         memcpy(&imm, ptr, sizeof(int32_t));
         ptr += sizeof(int32_t);
         haveImm = true;
         break;
       case OP_GROUP11_EvIz:
         // opsize-sized signed immediate
         memcpy(&imm, ptr, opsize);
-        imm = (imm << (32 - opsize * 8)) >> (32 - opsize * 8);
+        imm = int32_t(uint32_t(imm) << (32 - opsize * 8)) >> (32 - opsize * 8);
         ptr += opsize;
         haveImm = true;
         break;
       default:
         break;
     }
 
     // Interpret the opcode.
--- a/js/src/vm/Interpreter.cpp
+++ b/js/src/vm/Interpreter.cpp
@@ -3380,17 +3380,17 @@ CASE(JSOP_TABLESWITCH)
             ADVANCE_AND_DISPATCH(len);
     }
 
     pc2 += JUMP_OFFSET_LEN;
     int32_t low = GET_JUMP_OFFSET(pc2);
     pc2 += JUMP_OFFSET_LEN;
     int32_t high = GET_JUMP_OFFSET(pc2);
 
-    i -= low;
+    i = uint32_t(i) - uint32_t(low);
     if ((uint32_t)i < (uint32_t)(high - low + 1)) {
         pc2 += JUMP_OFFSET_LEN + JUMP_OFFSET_LEN * i;
         int32_t off = (int32_t) GET_JUMP_OFFSET(pc2);
         if (off)
             len = off;
     }
     ADVANCE_AND_DISPATCH(len);
 }
--- a/js/src/vm/UbiNodeCensus.cpp
+++ b/js/src/vm/UbiNodeCensus.cpp
@@ -353,18 +353,20 @@ countMapToObject(JSContext* cx, Map& map
     if (!entries.reserve(map.count())) {
         ReportOutOfMemory(cx);
         return nullptr;
     }
 
     for (auto r = map.all(); !r.empty(); r.popFront())
         entries.infallibleAppend(&r.front());
 
-    qsort(entries.begin(), entries.length(), sizeof(*entries.begin()),
-          compareEntries<typename Map::Entry>);
+    if (entries.length()) {
+        qsort(entries.begin(), entries.length(), sizeof(*entries.begin()),
+              compareEntries<typename Map::Entry>);
+    }
 
     RootedPlainObject obj(cx, NewBuiltinClassInstance<PlainObject>(cx));
     if (!obj)
         return nullptr;
 
     for (auto& entry : entries) {
         CountBasePtr& thenCount = entry->value();
         RootedValue thenReport(cx);
@@ -570,17 +572,18 @@ ByUbinodeType::report(JSContext* cx, Cou
     // Build a vector of pointers to entries; sort by total; and then use
     // that to build the result object. This makes the ordering of entries
     // more interesting, and a little less non-deterministic.
     JS::ubi::Vector<Entry*> entries;
     if (!entries.reserve(count.table.count()))
         return false;
     for (Table::Range r = count.table.all(); !r.empty(); r.popFront())
         entries.infallibleAppend(&r.front());
-    qsort(entries.begin(), entries.length(), sizeof(*entries.begin()), compareEntries<Entry>);
+    if (entries.length())
+        qsort(entries.begin(), entries.length(), sizeof(*entries.begin()), compareEntries<Entry>);
 
     // Now build the result by iterating over the sorted vector.
     RootedPlainObject obj(cx, NewBuiltinClassInstance<PlainObject>(cx));
     if (!obj)
         return false;
     for (Entry** entryPtr = entries.begin(); entryPtr < entries.end(); entryPtr++) {
         Entry& entry = **entryPtr;
         CountBasePtr& typeCount = entry.value();
@@ -736,17 +739,18 @@ ByAllocationStack::report(JSContext* cx,
     // Build a vector of pointers to entries; sort by total; and then use
     // that to build the result object. This makes the ordering of entries
     // more interesting, and a little less non-deterministic.
     JS::ubi::Vector<Entry*> entries;
     if (!entries.reserve(count.table.count()))
         return false;
     for (Table::Range r = count.table.all(); !r.empty(); r.popFront())
         entries.infallibleAppend(&r.front());
-    qsort(entries.begin(), entries.length(), sizeof(*entries.begin()), compareEntries<Entry>);
+    if (entries.length())
+        qsort(entries.begin(), entries.length(), sizeof(*entries.begin()), compareEntries<Entry>);
 
     // Now build the result by iterating over the sorted vector.
     Rooted<MapObject*> map(cx, MapObject::create(cx));
     if (!map)
         return false;
     for (Entry** entryPtr = entries.begin(); entryPtr < entries.end(); entryPtr++) {
         Entry& entry = **entryPtr;
         MOZ_ASSERT(entry.key());
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -2555,16 +2555,18 @@ class BaseCompiler
 #endif
               case ABIArg::FPU: {
                 loadF64(argLoc.fpu(), arg);
                 break;
               }
               case ABIArg::GPR: {
                 MOZ_CRASH("Unexpected parameter passing discipline");
               }
+              case ABIArg::Uninitialized:
+                MOZ_CRASH("Uninitialized ABIArg kind");
             }
             break;
           }
           case ValType::F32: {
             ABIArg argLoc = call.abi.next(MIRType::Float32);
             switch (argLoc.kind()) {
               case ABIArg::Stack: {
                 ScratchF32 scratch(*this);
@@ -2582,16 +2584,18 @@ class BaseCompiler
                 loadF32(argLoc.fpu(), arg);
                 break;
               }
 #if defined(JS_CODEGEN_REGISTER_PAIR)
               case ABIArg::GPR_PAIR: {
                 MOZ_CRASH("Unexpected parameter passing discipline");
               }
 #endif
+              case ABIArg::Uninitialized:
+                MOZ_CRASH("Uninitialized ABIArg kind");
             }
             break;
           }
           default:
             MOZ_CRASH("Function argument type");
         }
     }
 
--- a/js/src/wasm/WasmIonCompile.cpp
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -988,19 +988,20 @@ class FunctionCompiler
           case ABIArg::GPR:
           case ABIArg::FPU:
             return call->regArgs_.append(MWasmCall::Arg(arg.reg(), argDef));
           case ABIArg::Stack: {
             auto* mir = MWasmStackArg::New(alloc(), arg.offsetFromArgBase(), argDef);
             curBlock_->add(mir);
             return call->stackArgs_.append(mir);
           }
-          default:
-            MOZ_CRASH("Unknown ABIArg kind.");
+          case ABIArg::Uninitialized:
+            MOZ_ASSERT_UNREACHABLE("Uninitialized ABIArg kind");
         }
+        MOZ_CRASH("Unknown ABIArg kind.");
     }
 
     void propagateMaxStackArgBytes(uint32_t stackBytes)
     {
         if (callStack_.empty()) {
             // Outermost call
             maxStackArgBytes_ = Max(maxStackArgBytes_, stackBytes);
             return;
--- a/js/src/wasm/WasmModule.cpp
+++ b/js/src/wasm/WasmModule.cpp
@@ -431,17 +431,18 @@ Module::assumptionsMatch(const Assumptio
 Module::deserialize(const uint8_t* bytecodeBegin, size_t bytecodeSize,
                     const uint8_t* compiledBegin, size_t compiledSize,
                     Metadata* maybeMetadata)
 {
     MutableBytes bytecode = js_new<ShareableBytes>();
     if (!bytecode || !bytecode->bytes.initLengthUninitialized(bytecodeSize))
         return nullptr;
 
-    memcpy(bytecode->bytes.begin(), bytecodeBegin, bytecodeSize);
+    if (bytecodeSize)
+        memcpy(bytecode->bytes.begin(), bytecodeBegin, bytecodeSize);
 
     Assumptions assumptions;
     const uint8_t* cursor = assumptions.deserialize(compiledBegin, compiledSize);
     if (!cursor)
         return nullptr;
 
     MutableMetadata metadata(maybeMetadata);
     if (!metadata) {
--- a/js/src/wasm/WasmSerialize.h
+++ b/js/src/wasm/WasmSerialize.h
@@ -25,24 +25,26 @@ namespace js {
 namespace wasm {
 
 // Factor out common serialization, cloning and about:memory size-computation
 // functions for reuse when serializing wasm and asm.js modules.
 
 static inline uint8_t*
 WriteBytes(uint8_t* dst, const void* src, size_t nbytes)
 {
-    memcpy(dst, src, nbytes);
+    if (nbytes)
+        memcpy(dst, src, nbytes);
     return dst + nbytes;
 }
 
 static inline const uint8_t*
 ReadBytes(const uint8_t* src, void* dst, size_t nbytes)
 {
-    memcpy(dst, src, nbytes);
+    if (nbytes)
+        memcpy(dst, src, nbytes);
     return src + nbytes;
 }
 
 static inline const uint8_t*
 ReadBytesChecked(const uint8_t* src, size_t* remain, void* dst, size_t nbytes)
 {
     if (*remain < nbytes)
         return nullptr;
--- a/js/src/wasm/WasmStubs.cpp
+++ b/js/src/wasm/WasmStubs.cpp
@@ -174,16 +174,18 @@ SetupABIArguments(MacroAssembler& masm, 
                 masm.loadUnalignedSimd128Float(src, ScratchSimd128Reg);
                 masm.storeAlignedSimd128Float(
                   ScratchSimd128Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                 break;
               default:
                 MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected stack arg type");
             }
             break;
+          case ABIArg::Uninitialized:
+            MOZ_CRASH("Uninitialized ABIArg kind");
         }
     }
 }
 
 static void
 StoreABIReturn(MacroAssembler& masm, const FuncExport& fe, Register argv)
 {
     // Store the return value in argv[0].
@@ -483,16 +485,18 @@ FillArgumentArray(MacroAssembler& masm, 
                     masm.canonicalizeDouble(ScratchDoubleReg);
                     masm.storeDouble(ScratchDoubleReg, dst);
                 }
             } else {
                 StackCopy(masm, type, scratch, src, dst);
             }
             break;
           }
+          case ABIArg::Uninitialized:
+            MOZ_CRASH("Uninitialized ABIArg kind");
         }
     }
 }
 
 // Generate a wrapper function with the standard intra-wasm call ABI which simply
 // calls an import. This wrapper function allows any import to be treated like a
 // normal wasm function for the purposes of exports and table calls. In
 // particular, the wrapper function provides:
--- a/js/src/wasm/WasmValidate.h
+++ b/js/src/wasm/WasmValidate.h
@@ -14,16 +14,18 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
 #ifndef wasm_validate_h
 #define wasm_validate_h
 
+#include "mozilla/TypeTraits.h"
+
 #include "wasm/WasmCode.h"
 #include "wasm/WasmTypes.h"
 
 namespace js {
 namespace wasm {
 
 // This struct captures the bytecode offset of a section's payload (so not
 // including the header) and the size of the payload.
@@ -381,40 +383,41 @@ class Decoder
             return false;
         *out = u | (UInt(byte) << numBitsInSevens);
         MOZ_ASSERT_IF(sizeof(UInt) == 4, unsigned(cur_ - before) <= MaxVarU32DecodedBytes);
         return true;
     }
 
     template <typename SInt>
     MOZ_MUST_USE bool readVarS(SInt* out) {
+        using UInt = typename mozilla::MakeUnsigned<SInt>::Type;
         const unsigned numBits = sizeof(SInt) * CHAR_BIT;
         const unsigned remainderBits = numBits % 7;
         const unsigned numBitsInSevens = numBits - remainderBits;
         SInt s = 0;
         uint8_t byte;
         unsigned shift = 0;
         do {
             if (!readFixedU8(&byte))
                 return false;
             s |= SInt(byte & 0x7f) << shift;
             shift += 7;
             if (!(byte & 0x80)) {
                 if (byte & 0x40)
-                    s |= SInt(-1) << shift;
+                    s |= UInt(-1) << shift;
                 *out = s;
                 return true;
             }
         } while (shift < numBitsInSevens);
         if (!remainderBits || !readFixedU8(&byte) || (byte & 0x80))
             return false;
         uint8_t mask = 0x7f & (uint8_t(-1) << remainderBits);
         if ((byte & mask) != ((byte & (1 << (remainderBits - 1))) ? mask : 0))
             return false;
-        *out = s | SInt(byte) << shift;
+        *out = s | UInt(byte) << shift;
         return true;
     }
 
   public:
     Decoder(const uint8_t* begin, const uint8_t* end, size_t offsetInModule, UniqueChars* error,
             bool resilientMode = false)
       : beg_(begin),
         end_(end),