Bug 1551339 - Update VIXL to recent Utils and Globals files. r=nbp
backed out by 7b3a1ee70cd7
author Sean Stangl <sean.stangl@gmail.com>
Tue, 14 May 2019 16:16:20 +0000
changeset 473834 bfa4ce8a0ef6b464e349f252b1f8e46cbf9bba11
parent 473833 e3640e306c198fecaeb1725ad9810d0d062c82e1
child 473835 ebef76ce78f70c8f4a3b26ce0dbf7d3e556eba8d
push id 36017
push user rgurzau@mozilla.com
push date Wed, 15 May 2019 09:25:56 +0000
treeherder mozilla-central@76bbedc1ec1a
reviewers nbp
bugs 1551339
milestone 68.0a1
Bug 1551339 - Update VIXL to recent Utils and Globals files. r=nbp

This is preliminary work toward allowing encoding of JSCVTFP, an instruction that exists on new AArch64 devices and greatly speeds up websites that use floating-point math.

Differential Revision: https://phabricator.services.mozilla.com/D30997
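Most of the diff below is a mechanical rename of VIXL's snake_case helpers (is_uint8, is_int9, truncate_to_int19, float_to_rawbits, ...) to the upstream CamelCase spellings (IsUint8, IsInt9, TruncateToUint19, FloatToRawbits, ...), plus a refresh of Globals-vixl.h. As a rough illustration of what the renamed bit-width predicates check, here is a hypothetical, self-contained sketch; the helper bodies are assumptions for illustration and are not taken from the VIXL sources.

#include <cassert>
#include <cstdint>

// Hypothetical simplified stand-ins for the renamed VIXL predicates.
// IsUintN(n, x): x fits in n bits as an unsigned value.
inline bool IsUintN(unsigned n, uint64_t value) {
  return (value >> n) == 0;
}

// IsIntN(n, x): x fits in n bits as a signed two's-complement value.
inline bool IsIntN(unsigned n, int64_t value) {
  const int64_t limit = INT64_C(1) << (n - 1);
  return (-limit <= value) && (value < limit);
}

inline bool IsUint8(uint64_t value) { return IsUintN(8, value); }
inline bool IsInt9(int64_t value) { return IsIntN(9, value); }

int main() {
  assert(IsUint8(255) && !IsUint8(256));  // movi-style byte immediates
  assert(IsInt9(-256) && !IsInt9(256));   // unscaled load/store offsets
  return 0;
}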
js/src/jit/arm64/vixl/Assembler-vixl.cpp
js/src/jit/arm64/vixl/Assembler-vixl.h
js/src/jit/arm64/vixl/Cpu-vixl.h
js/src/jit/arm64/vixl/Globals-vixl.h
js/src/jit/arm64/vixl/Instructions-vixl.cpp
js/src/jit/arm64/vixl/Instructions-vixl.h
js/src/jit/arm64/vixl/Logic-vixl.cpp
js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp
js/src/jit/arm64/vixl/MozAssembler-vixl.cpp
js/src/jit/arm64/vixl/MozInstructions-vixl.cpp
js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
js/src/jit/arm64/vixl/Simulator-vixl.cpp
js/src/jit/arm64/vixl/Simulator-vixl.h
js/src/jit/arm64/vixl/Utils-vixl.cpp
js/src/jit/arm64/vixl/Utils-vixl.h
--- a/js/src/jit/arm64/vixl/Assembler-vixl.cpp
+++ b/js/src/jit/arm64/vixl/Assembler-vixl.cpp
@@ -3078,21 +3078,21 @@ void Assembler::movi(const VRegister& vd
       if (byte == 0xff) {
         imm8 |= (1 << i);
       }
     }
     int q = vd.Is2D() ? NEON_Q : 0;
     Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI |
          ImmNEONabcdefgh(imm8) | NEONCmode(0xe) | Rd(vd));
   } else if (shift == LSL) {
-    VIXL_ASSERT(is_uint8(imm));
+    VIXL_ASSERT(IsUint8(imm));
     NEONModifiedImmShiftLsl(vd, static_cast<int>(imm), shift_amount,
                             NEONModifiedImmediate_MOVI);
   } else {
-    VIXL_ASSERT(is_uint8(imm));
+    VIXL_ASSERT(IsUint8(imm));
     NEONModifiedImmShiftMsl(vd, static_cast<int>(imm), shift_amount,
                             NEONModifiedImmediate_MOVI);
   }
 }
 
 
 void Assembler::mvn(const VRegister& vd,
                     const VRegister& vn) {
@@ -4193,17 +4193,17 @@ void Assembler::uqrshrn2(const VRegister
 
 // Note:
 // Below, a difference in case for the same letter indicates a
 // negated bit.
 // If b is 1, then B is 0.
 uint32_t Assembler::FP32ToImm8(float imm) {
   VIXL_ASSERT(IsImmFP32(imm));
   // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
-  uint32_t bits = float_to_rawbits(imm);
+  uint32_t bits = FloatToRawbits(imm);
   // bit7: a000.0000
   uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
   // bit6: 0b00.0000
   uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
   // bit5_to_0: 00cd.efgh
   uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
 
   return bit7 | bit6 | bit5_to_0;
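For reference, the extraction above can be checked in isolation. The following is a standalone sketch, not part of the patch: std::memcpy stands in for FloatToRawbits, and the input is assumed to already satisfy IsImmFP32.

#include <cassert>
#include <cstdint>
#include <cstring>

// Standalone sketch of the extraction in FP32ToImm8 (illustrative only).
// The float layout is aBbb.bbbc.defg.h000..., and the 8-bit immediate keeps
// only a (sign), one replicated exponent bit b, and cdefgh.
static uint32_t FP32ToImm8Sketch(float imm) {
  uint32_t bits;
  std::memcpy(&bits, &imm, sizeof(bits));  // stand-in for FloatToRawbits()
  uint32_t bit7 = ((bits >> 31) & 0x1) << 7;  // a
  uint32_t bit6 = ((bits >> 29) & 0x1) << 6;  // b
  uint32_t bit5_to_0 = (bits >> 19) & 0x3f;   // cdefgh
  return bit7 | bit6 | bit5_to_0;
}

int main() {
  // 2.0f is 0x40000000, so a = 0, b = 0 and cdefgh = 0: imm8 is 0x00.
  assert(FP32ToImm8Sketch(2.0f) == 0x00);
  // -2.0f only flips the sign bit: imm8 is 0x80.
  assert(FP32ToImm8Sketch(-2.0f) == 0x80);
  return 0;
}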
@@ -4214,17 +4214,17 @@ Instr Assembler::ImmFP32(float imm) {
   return FP32ToImm8(imm) << ImmFP_offset;
 }
 
 
 uint32_t Assembler::FP64ToImm8(double imm) {
   VIXL_ASSERT(IsImmFP64(imm));
   // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
   //       0000.0000.0000.0000.0000.0000.0000.0000
-  uint64_t bits = double_to_rawbits(imm);
+  uint64_t bits = DoubleToRawbits(imm);
   // bit7: a000.0000
   uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
   // bit6: 0b00.0000
   uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
   // bit5_to_0: 00cd.efgh
   uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
 
   return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
@@ -4271,17 +4271,17 @@ void Assembler::MoveWide(const Register&
       shift = 2;
     } else if ((imm & 0x0000ffffffffffff) == 0) {
       VIXL_ASSERT(rd.Is64Bits());
       imm >>= 48;
       shift = 3;
     }
   }
 
-  VIXL_ASSERT(is_uint16(imm));
+  VIXL_ASSERT(IsUint16(imm));
 
   Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
        Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
 }
 
 
 void Assembler::AddSub(const Register& rd,
                        const Register& rn,
@@ -4328,23 +4328,23 @@ void Assembler::AddSubWithCarry(const Re
   VIXL_ASSERT(rd.size() == rn.size());
   VIXL_ASSERT(rd.size() == operand.reg().size());
   VIXL_ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
   Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
 }
 
 
 void Assembler::hlt(int code) {
-  VIXL_ASSERT(is_uint16(code));
+  VIXL_ASSERT(IsUint16(code));
   Emit(HLT | ImmException(code));
 }
 
 
 void Assembler::brk(int code) {
-  VIXL_ASSERT(is_uint16(code));
+  VIXL_ASSERT(IsUint16(code));
   Emit(BRK | ImmException(code));
 }
 
 
 void Assembler::svc(int code) {
   Emit(SVC | ImmException(code));
 }
 
@@ -4398,17 +4398,17 @@ void Assembler::FPDataProcessing3Source(
 void Assembler::NEONModifiedImmShiftLsl(const VRegister& vd,
                                         const int imm8,
                                         const int left_shift,
                                         NEONModifiedImmediateOp op) {
   VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H() ||
               vd.Is2S() || vd.Is4S());
   VIXL_ASSERT((left_shift == 0) || (left_shift == 8) ||
               (left_shift == 16) || (left_shift == 24));
-  VIXL_ASSERT(is_uint8(imm8));
+  VIXL_ASSERT(IsUint8(imm8));
 
   int cmode_1, cmode_2, cmode_3;
   if (vd.Is8B() || vd.Is16B()) {
     VIXL_ASSERT(op == NEONModifiedImmediate_MOVI);
     cmode_1 = 1;
     cmode_2 = 1;
     cmode_3 = 1;
   } else {
@@ -4429,17 +4429,17 @@ void Assembler::NEONModifiedImmShiftLsl(
 
 
 void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd,
                                         const int imm8,
                                         const int shift_amount,
                                         NEONModifiedImmediateOp op) {
   VIXL_ASSERT(vd.Is2S() || vd.Is4S());
   VIXL_ASSERT((shift_amount == 8) || (shift_amount == 16));
-  VIXL_ASSERT(is_uint8(imm8));
+  VIXL_ASSERT(IsUint8(imm8));
 
   int cmode_0 = (shift_amount >> 4) & 1;
   int cmode = 0xc | cmode_0;
 
   int q = vd.IsQ() ? NEON_Q : 0;
 
   Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
 }
@@ -4596,30 +4596,30 @@ void Assembler::Prefetch(PrefetchOperati
   VIXL_ASSERT(addr.IsRegisterOffset() || addr.IsImmediateOffset());
 
   Instr prfop = ImmPrefetchOperation(op);
   Emit(PRFM | prfop | LoadStoreMemOperand(addr, kXRegSizeInBytesLog2, option));
 }
 
 
 bool Assembler::IsImmAddSub(int64_t immediate) {
-  return is_uint12(immediate) ||
-         (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
+  return IsUint12(immediate) ||
+         (IsUint12(immediate >> 12) && ((immediate & 0xfff) == 0));
 }
 
 
 bool Assembler::IsImmConditionalCompare(int64_t immediate) {
-  return is_uint5(immediate);
+  return IsUint5(immediate);
 }
 
 
 bool Assembler::IsImmFP32(float imm) {
   // Valid values will have the form:
   // aBbb.bbbc.defg.h000.0000.0000.0000.0000
-  uint32_t bits = float_to_rawbits(imm);
+  uint32_t bits = FloatToRawbits(imm);
   // bits[19..0] are cleared.
   if ((bits & 0x7ffff) != 0) {
     return false;
   }
 
   // bits[29..25] are all set or all cleared.
   uint32_t b_pattern = (bits >> 16) & 0x3e00;
   if (b_pattern != 0 && b_pattern != 0x3e00) {
@@ -4634,17 +4634,17 @@ bool Assembler::IsImmFP32(float imm) {
   return true;
 }
 
 
 bool Assembler::IsImmFP64(double imm) {
   // Valid values will have the form:
   // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
   // 0000.0000.0000.0000.0000.0000.0000.0000
-  uint64_t bits = double_to_rawbits(imm);
+  uint64_t bits = DoubleToRawbits(imm);
   // bits[47..0] are cleared.
   if ((bits & 0x0000ffffffffffff) != 0) {
     return false;
   }
 
   // bits[61..54] are all set or all cleared.
   uint32_t b_pattern = (bits >> 48) & 0x3fc0;
   if ((b_pattern != 0) && (b_pattern != 0x3fc0)) {
@@ -4659,30 +4659,30 @@ bool Assembler::IsImmFP64(double imm) {
   return true;
 }
 
 
 bool Assembler::IsImmLSPair(int64_t offset, unsigned access_size) {
   VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2);
   bool offset_is_size_multiple =
       (((offset >> access_size) << access_size) == offset);
-  return offset_is_size_multiple && is_int7(offset >> access_size);
+  return offset_is_size_multiple && IsInt7(offset >> access_size);
 }
 
 
 bool Assembler::IsImmLSScaled(int64_t offset, unsigned access_size) {
   VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2);
   bool offset_is_size_multiple =
       (((offset >> access_size) << access_size) == offset);
-  return offset_is_size_multiple && is_uint12(offset >> access_size);
+  return offset_is_size_multiple && IsUint12(offset >> access_size);
 }
 
 
 bool Assembler::IsImmLSUnscaled(int64_t offset) {
-  return is_int9(offset);
+  return IsInt9(offset);
 }
 
 
 // The movn instruction can generate immediates containing an arbitrary 16-bit
 // value, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff.
 bool Assembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
   return IsImmMovz(~imm, reg_size);
 }
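A brief worked illustration of the MOVN relationship described in the comment above (not part of the patch): MOVN writes the bitwise NOT of a 16-bit immediate shifted into place, which is why IsImmMovn(imm) reduces to IsImmMovz(~imm).

// Illustrative only:
//   movn w0, #0xedcb              // w0 = NOT(0x0000edcb)         = 0xffff1234
//   movn x0, #0xedcb, lsl #32     // x0 = NOT(0x0000edcb00000000) = 0xffff1234ffffffff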
--- a/js/src/jit/arm64/vixl/Assembler-vixl.h
+++ b/js/src/jit/arm64/vixl/Assembler-vixl.h
@@ -3623,240 +3623,240 @@ class Assembler : public MozBaseAssemble
   }
 
   static Instr Cond(Condition cond) {
     return cond << Condition_offset;
   }
 
   // PC-relative address encoding.
   static Instr ImmPCRelAddress(int imm21) {
-    VIXL_ASSERT(is_int21(imm21));
-    Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
+    VIXL_ASSERT(IsInt21(imm21));
+    Instr imm = static_cast<Instr>(TruncateToUint21(imm21));
     Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
     Instr immlo = imm << ImmPCRelLo_offset;
     return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
   }
 
   // Branch encoding.
   static Instr ImmUncondBranch(int imm26) {
-    VIXL_ASSERT(is_int26(imm26));
-    return truncate_to_int26(imm26) << ImmUncondBranch_offset;
+    VIXL_ASSERT(IsInt26(imm26));
+    return TruncateToUint26(imm26) << ImmUncondBranch_offset;
   }
 
   static Instr ImmCondBranch(int imm19) {
-    VIXL_ASSERT(is_int19(imm19));
-    return truncate_to_int19(imm19) << ImmCondBranch_offset;
+    VIXL_ASSERT(IsInt19(imm19));
+    return TruncateToUint19(imm19) << ImmCondBranch_offset;
   }
 
   static Instr ImmCmpBranch(int imm19) {
-    VIXL_ASSERT(is_int19(imm19));
-    return truncate_to_int19(imm19) << ImmCmpBranch_offset;
+    VIXL_ASSERT(IsInt19(imm19));
+    return TruncateToUint19(imm19) << ImmCmpBranch_offset;
   }
 
   static Instr ImmTestBranch(int imm14) {
-    VIXL_ASSERT(is_int14(imm14));
-    return truncate_to_int14(imm14) << ImmTestBranch_offset;
+    VIXL_ASSERT(IsInt14(imm14));
+    return TruncateToUint14(imm14) << ImmTestBranch_offset;
   }
 
   static Instr ImmTestBranchBit(unsigned bit_pos) {
-    VIXL_ASSERT(is_uint6(bit_pos));
+    VIXL_ASSERT(IsUint6(bit_pos));
     // Subtract five from the shift offset, as we need bit 5 from bit_pos.
     unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
     unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
     b5 &= ImmTestBranchBit5_mask;
     b40 &= ImmTestBranchBit40_mask;
     return b5 | b40;
   }
 
   // Data Processing encoding.
   static Instr SF(Register rd) {
       return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
   }
 
   static Instr ImmAddSub(int imm) {
     VIXL_ASSERT(IsImmAddSub(imm));
-    if (is_uint12(imm)) {  // No shift required.
+    if (IsUint12(imm)) {  // No shift required.
       imm <<= ImmAddSub_offset;
     } else {
       imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
     }
     return imm;
   }
 
   static Instr ImmS(unsigned imms, unsigned reg_size) {
-    VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
-           ((reg_size == kWRegSize) && is_uint5(imms)));
+    VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(imms)) ||
+           ((reg_size == kWRegSize) && IsUint5(imms)));
     USE(reg_size);
     return imms << ImmS_offset;
   }
 
   static Instr ImmR(unsigned immr, unsigned reg_size) {
-    VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
-           ((reg_size == kWRegSize) && is_uint5(immr)));
+    VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(immr)) ||
+           ((reg_size == kWRegSize) && IsUint5(immr)));
     USE(reg_size);
-    VIXL_ASSERT(is_uint6(immr));
+    VIXL_ASSERT(IsUint6(immr));
     return immr << ImmR_offset;
   }
 
   static Instr ImmSetBits(unsigned imms, unsigned reg_size) {
     VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
-    VIXL_ASSERT(is_uint6(imms));
-    VIXL_ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
+    VIXL_ASSERT(IsUint6(imms));
+    VIXL_ASSERT((reg_size == kXRegSize) || IsUint6(imms + 3));
     USE(reg_size);
     return imms << ImmSetBits_offset;
   }
 
   static Instr ImmRotate(unsigned immr, unsigned reg_size) {
     VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
-    VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
-           ((reg_size == kWRegSize) && is_uint5(immr)));
+    VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(immr)) ||
+           ((reg_size == kWRegSize) && IsUint5(immr)));
     USE(reg_size);
     return immr << ImmRotate_offset;
   }
 
   static Instr ImmLLiteral(int imm19) {
-    VIXL_ASSERT(is_int19(imm19));
-    return truncate_to_int19(imm19) << ImmLLiteral_offset;
+    VIXL_ASSERT(IsInt19(imm19));
+    return TruncateToUint19(imm19) << ImmLLiteral_offset;
   }
 
   static Instr BitN(unsigned bitn, unsigned reg_size) {
     VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
     VIXL_ASSERT((reg_size == kXRegSize) || (bitn == 0));
     USE(reg_size);
     return bitn << BitN_offset;
   }
 
   static Instr ShiftDP(Shift shift) {
     VIXL_ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
     return shift << ShiftDP_offset;
   }
 
   static Instr ImmDPShift(unsigned amount) {
-    VIXL_ASSERT(is_uint6(amount));
+    VIXL_ASSERT(IsUint6(amount));
     return amount << ImmDPShift_offset;
   }
 
   static Instr ExtendMode(Extend extend) {
     return extend << ExtendMode_offset;
   }
 
   static Instr ImmExtendShift(unsigned left_shift) {
     VIXL_ASSERT(left_shift <= 4);
     return left_shift << ImmExtendShift_offset;
   }
 
   static Instr ImmCondCmp(unsigned imm) {
-    VIXL_ASSERT(is_uint5(imm));
+    VIXL_ASSERT(IsUint5(imm));
     return imm << ImmCondCmp_offset;
   }
 
   static Instr Nzcv(StatusFlags nzcv) {
     return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
   }
 
   // MemOperand offset encoding.
   static Instr ImmLSUnsigned(int imm12) {
-    VIXL_ASSERT(is_uint12(imm12));
+    VIXL_ASSERT(IsUint12(imm12));
     return imm12 << ImmLSUnsigned_offset;
   }
 
   static Instr ImmLS(int imm9) {
-    VIXL_ASSERT(is_int9(imm9));
-    return truncate_to_int9(imm9) << ImmLS_offset;
+    VIXL_ASSERT(IsInt9(imm9));
+    return TruncateToUint9(imm9) << ImmLS_offset;
   }
 
   static Instr ImmLSPair(int imm7, unsigned access_size) {
     VIXL_ASSERT(((imm7 >> access_size) << access_size) == imm7);
     int scaled_imm7 = imm7 >> access_size;
-    VIXL_ASSERT(is_int7(scaled_imm7));
-    return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
+    VIXL_ASSERT(IsInt7(scaled_imm7));
+    return TruncateToUint7(scaled_imm7) << ImmLSPair_offset;
   }
 
   static Instr ImmShiftLS(unsigned shift_amount) {
-    VIXL_ASSERT(is_uint1(shift_amount));
+    VIXL_ASSERT(IsUint1(shift_amount));
     return shift_amount << ImmShiftLS_offset;
   }
 
   static Instr ImmPrefetchOperation(int imm5) {
-    VIXL_ASSERT(is_uint5(imm5));
+    VIXL_ASSERT(IsUint5(imm5));
     return imm5 << ImmPrefetchOperation_offset;
   }
 
   static Instr ImmException(int imm16) {
-    VIXL_ASSERT(is_uint16(imm16));
+    VIXL_ASSERT(IsUint16(imm16));
     return imm16 << ImmException_offset;
   }
 
   static Instr ImmSystemRegister(int imm15) {
-    VIXL_ASSERT(is_uint15(imm15));
+    VIXL_ASSERT(IsUint15(imm15));
     return imm15 << ImmSystemRegister_offset;
   }
 
   static Instr ImmHint(int imm7) {
-    VIXL_ASSERT(is_uint7(imm7));
+    VIXL_ASSERT(IsUint7(imm7));
     return imm7 << ImmHint_offset;
   }
 
   static Instr CRm(int imm4) {
-    VIXL_ASSERT(is_uint4(imm4));
+    VIXL_ASSERT(IsUint4(imm4));
     return imm4 << CRm_offset;
   }
 
   static Instr CRn(int imm4) {
-    VIXL_ASSERT(is_uint4(imm4));
+    VIXL_ASSERT(IsUint4(imm4));
     return imm4 << CRn_offset;
   }
 
   static Instr SysOp(int imm14) {
-    VIXL_ASSERT(is_uint14(imm14));
+    VIXL_ASSERT(IsUint14(imm14));
     return imm14 << SysOp_offset;
   }
 
   static Instr ImmSysOp1(int imm3) {
-    VIXL_ASSERT(is_uint3(imm3));
+    VIXL_ASSERT(IsUint3(imm3));
     return imm3 << SysOp1_offset;
   }
 
   static Instr ImmSysOp2(int imm3) {
-    VIXL_ASSERT(is_uint3(imm3));
+    VIXL_ASSERT(IsUint3(imm3));
     return imm3 << SysOp2_offset;
   }
 
   static Instr ImmBarrierDomain(int imm2) {
-    VIXL_ASSERT(is_uint2(imm2));
+    VIXL_ASSERT(IsUint2(imm2));
     return imm2 << ImmBarrierDomain_offset;
   }
 
   static Instr ImmBarrierType(int imm2) {
-    VIXL_ASSERT(is_uint2(imm2));
+    VIXL_ASSERT(IsUint2(imm2));
     return imm2 << ImmBarrierType_offset;
   }
 
   // Move immediates encoding.
   static Instr ImmMoveWide(uint64_t imm) {
-    VIXL_ASSERT(is_uint16(imm));
+    VIXL_ASSERT(IsUint16(imm));
     return static_cast<Instr>(imm << ImmMoveWide_offset);
   }
 
   static Instr ShiftMoveWide(int64_t shift) {
-    VIXL_ASSERT(is_uint2(shift));
+    VIXL_ASSERT(IsUint2(shift));
     return static_cast<Instr>(shift << ShiftMoveWide_offset);
   }
 
   // FP Immediates.
   static Instr ImmFP32(float imm);
   static Instr ImmFP64(double imm);
 
   // FP register type.
   static Instr FPType(FPRegister fd) {
     return fd.Is64Bits() ? FP64 : FP32;
   }
 
   static Instr FPScale(unsigned scale) {
-    VIXL_ASSERT(is_uint6(scale));
+    VIXL_ASSERT(IsUint6(scale));
     return scale << FPScale_offset;
   }
 
   // Immediate field checking helpers.
   static bool IsImmAddSub(int64_t immediate);
   static bool IsImmConditionalCompare(int64_t immediate);
   static bool IsImmFP32(float imm);
   static bool IsImmFP64(double imm);
@@ -3944,68 +3944,68 @@ class Assembler : public MozBaseAssemble
       case 8: return NEON_D;
       default: return 0xffffffff;
     }
   }
 
   static Instr ImmNEONHLM(int index, int num_bits) {
     int h, l, m;
     if (num_bits == 3) {
-      VIXL_ASSERT(is_uint3(index));
+      VIXL_ASSERT(IsUint3(index));
       h  = (index >> 2) & 1;
       l  = (index >> 1) & 1;
       m  = (index >> 0) & 1;
     } else if (num_bits == 2) {
-      VIXL_ASSERT(is_uint2(index));
+      VIXL_ASSERT(IsUint2(index));
       h  = (index >> 1) & 1;
       l  = (index >> 0) & 1;
       m  = 0;
     } else {
-      VIXL_ASSERT(is_uint1(index) && (num_bits == 1));
+      VIXL_ASSERT(IsUint1(index) && (num_bits == 1));
       h  = (index >> 0) & 1;
       l  = 0;
       m  = 0;
     }
     return (h << NEONH_offset) | (l << NEONL_offset) | (m << NEONM_offset);
   }
 
   static Instr ImmNEONExt(int imm4) {
-    VIXL_ASSERT(is_uint4(imm4));
+    VIXL_ASSERT(IsUint4(imm4));
     return imm4 << ImmNEONExt_offset;
   }
 
   static Instr ImmNEON5(Instr format, int index) {
-    VIXL_ASSERT(is_uint4(index));
+    VIXL_ASSERT(IsUint4(index));
     int s = LaneSizeInBytesLog2FromFormat(static_cast<VectorFormat>(format));
     int imm5 = (index << (s + 1)) | (1 << s);
     return imm5 << ImmNEON5_offset;
   }
 
   static Instr ImmNEON4(Instr format, int index) {
-    VIXL_ASSERT(is_uint4(index));
+    VIXL_ASSERT(IsUint4(index));
     int s = LaneSizeInBytesLog2FromFormat(static_cast<VectorFormat>(format));
     int imm4 = index << s;
     return imm4 << ImmNEON4_offset;
   }
 
   static Instr ImmNEONabcdefgh(int imm8) {
-    VIXL_ASSERT(is_uint8(imm8));
+    VIXL_ASSERT(IsUint8(imm8));
     Instr instr;
     instr  = ((imm8 >> 5) & 7) << ImmNEONabc_offset;
     instr |= (imm8 & 0x1f) << ImmNEONdefgh_offset;
     return instr;
   }
 
   static Instr NEONCmode(int cmode) {
-    VIXL_ASSERT(is_uint4(cmode));
+    VIXL_ASSERT(IsUint4(cmode));
     return cmode << NEONCmode_offset;
   }
 
   static Instr NEONModImmOp(int op) {
-    VIXL_ASSERT(is_uint1(op));
+    VIXL_ASSERT(IsUint1(op));
     return op << NEONModImmOp_offset;
   }
 
   size_t size() const {
     return SizeOfCodeGenerated();
   }
 
   size_t SizeOfCodeGenerated() const {
--- a/js/src/jit/arm64/vixl/Cpu-vixl.h
+++ b/js/src/jit/arm64/vixl/Cpu-vixl.h
@@ -41,17 +41,17 @@ class CPU {
   // the I and D caches. I and D caches are not automatically coherent on ARM
   // so this operation is required before any dynamically generated code can
   // safely run.
   static void EnsureIAndDCacheCoherency(void *address, size_t length);
 
   // Handle tagged pointers.
   template <typename T>
   static T SetPointerTag(T pointer, uint64_t tag) {
-    VIXL_ASSERT(is_uintn(kAddressTagWidth, tag));
+    VIXL_ASSERT(IsUintN(kAddressTagWidth, tag));
 
     // Use C-style casts to get static_cast behaviour for integral types (T),
     // and reinterpret_cast behaviour for other types.
 
     uint64_t raw = (uint64_t)pointer;
     VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
 
     raw = (raw & ~kAddressTagMask) | (tag << kAddressTagOffset);
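The Cpu-vixl.h hunk above also documents that ARM instruction and data caches are not automatically coherent, so EnsureIAndDCacheCoherency must run before freshly generated code is executed. A hypothetical usage sketch follows; the FinalizeGeneratedCode wrapper and the code buffer are illustrative, not part of the patch.

#include <cstddef>
#include <cstdint>

#include "jit/arm64/vixl/Cpu-vixl.h"

// Hypothetical wrapper (illustrative): synchronize the I- and D-caches before
// jumping into a buffer that instructions were just written to.
void FinalizeGeneratedCode(uint8_t* code, size_t length) {
  vixl::CPU::EnsureIAndDCacheCoherency(code, length);
  // Only now is it safe to execute the buffer.
  auto entry = reinterpret_cast<void (*)()>(code);
  entry();
}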
--- a/js/src/jit/arm64/vixl/Globals-vixl.h
+++ b/js/src/jit/arm64/vixl/Globals-vixl.h
@@ -1,9 +1,9 @@
-// Copyright 2015, ARM Limited
+// Copyright 2015, VIXL authors
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are met:
 //
 //   * Redistributions of source code must retain the above copyright notice,
 //     this list of conditions and the following disclaimer.
 //   * Redistributions in binary form must reproduce the above copyright notice,
@@ -37,85 +37,232 @@
 #endif
 
 #ifndef __STDC_FORMAT_MACROS
 #define __STDC_FORMAT_MACROS
 #endif
 
 #include "mozilla/Assertions.h"
 
+#include <cstdarg>
+#include <cstddef>
+#include <cstdio>
+#include <cstdlib>
+
+extern "C" {
 #include <inttypes.h>
-#include <stdarg.h>
-#include <stddef.h>
 #include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
+}
 
 #include "js-config.h"
 
 #include "jit/arm64/vixl/Platform-vixl.h"
 #include "js/Utility.h"
 
+#ifdef VIXL_NEGATIVE_TESTING
+#include <sstream>
+#include <stdexcept>
+#include <string>
+#endif
+
+namespace vixl {
 
 typedef uint8_t byte;
 
-// Type for half-precision (16 bit) floating point numbers.
-typedef uint16_t float16;
-
 const int KBytes = 1024;
 const int MBytes = 1024 * KBytes;
 
-#define VIXL_ABORT() \
-    do { printf("in %s, line %i", __FILE__, __LINE__); abort(); } while (false)
+const int kBitsPerByte = 8;
+
+template <int SizeInBits>
+struct Unsigned;
+
+template <>
+struct Unsigned<32> {
+  typedef uint32_t type;
+};
+
+template <>
+struct Unsigned<64> {
+  typedef uint64_t type;
+};
+
+}  // namespace vixl
+
+// Detect the host's pointer size.
+#if (UINTPTR_MAX == UINT32_MAX)
+#define VIXL_HOST_POINTER_32
+#elif (UINTPTR_MAX == UINT64_MAX)
+#define VIXL_HOST_POINTER_64
+#else
+#error "Unsupported host pointer size."
+#endif
+
+#ifdef VIXL_NEGATIVE_TESTING
+#define VIXL_ABORT()                                                         \
+  do {                                                                       \
+    std::ostringstream oss;                                                  \
+    oss << "Aborting in " << __FILE__ << ", line " << __LINE__ << std::endl; \
+    throw std::runtime_error(oss.str());                                     \
+  } while (false)
+#define VIXL_ABORT_WITH_MSG(msg)                                             \
+  do {                                                                       \
+    std::ostringstream oss;                                                  \
+    oss << (msg) << "in " << __FILE__ << ", line " << __LINE__ << std::endl; \
+    throw std::runtime_error(oss.str());                                     \
+  } while (false)
+#define VIXL_CHECK(condition)                                \
+  do {                                                       \
+    if (!(condition)) {                                      \
+      std::ostringstream oss;                                \
+      oss << "Assertion failed (" #condition ")\nin ";       \
+      oss << __FILE__ << ", line " << __LINE__ << std::endl; \
+      throw std::runtime_error(oss.str());                   \
+    }                                                        \
+  } while (false)
+#else
+#define VIXL_ABORT()                                         \
+  do {                                                       \
+    MOZ_CRASH();                                             \
+  } while (false)
+#define VIXL_ABORT_WITH_MSG(msg)                             \
+  do {                                                       \
+    MOZ_CRASH(msg);                                          \
+  } while (false)
+#define VIXL_CHECK(condition)                           \
+  do {                                                  \
+    if (!(condition)) {                                 \
+      MOZ_CRASH();                                      \
+    }                                                   \
+  } while (false)
+#endif
 #ifdef DEBUG
-  #define VIXL_ASSERT(condition) MOZ_ASSERT(condition)
-  #define VIXL_CHECK(condition) VIXL_ASSERT(condition)
-  #define VIXL_UNIMPLEMENTED() \
-    do { fprintf(stderr, "UNIMPLEMENTED\t"); VIXL_ABORT(); } while (false)
-  #define VIXL_UNREACHABLE() \
-    do { fprintf(stderr, "UNREACHABLE\t"); VIXL_ABORT(); } while (false)
+#define VIXL_ASSERT(condition) MOZ_ASSERT(condition)
+#define VIXL_UNIMPLEMENTED()               \
+  do {                                     \
+    VIXL_ABORT_WITH_MSG("UNIMPLEMENTED "); \
+  } while (false)
+#define VIXL_UNREACHABLE()               \
+  do {                                   \
+    VIXL_ABORT_WITH_MSG("UNREACHABLE "); \
+  } while (false)
 #else
-  #define VIXL_ASSERT(condition) ((void) 0)
-  #define VIXL_CHECK(condition) ((void) 0)
-  #define VIXL_UNIMPLEMENTED() ((void) 0)
-  #define VIXL_UNREACHABLE() MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("vixl unreachable")
+#define VIXL_ASSERT(condition) ((void)0)
+#define VIXL_UNIMPLEMENTED() ((void)0)
+#define VIXL_UNREACHABLE() ((void)0)
 #endif
 // This is not as powerful as template based assertions, but it is simple.
 // It assumes that the descriptions are unique. If this starts being a problem,
 // we can switch to a different implementation.
-#define VIXL_S(x) #x
-#define VIXL_STATIC_ASSERT_LINE(line, condition) \
-    static_assert(condition, "STATIC_ASSERT_LINE_" VIXL_S(line))
+#define VIXL_CONCAT(a, b) a##b
+#if __cplusplus >= 201103L
+#define VIXL_STATIC_ASSERT_LINE(line_unused, condition, message) \
+  static_assert(condition, message)
+#else
+#define VIXL_STATIC_ASSERT_LINE(line, condition, message_unused)            \
+  typedef char VIXL_CONCAT(STATIC_ASSERT_LINE_, line)[(condition) ? 1 : -1] \
+      __attribute__((unused))
+#endif
 #define VIXL_STATIC_ASSERT(condition) \
-    VIXL_STATIC_ASSERT_LINE(__LINE__, condition)
+  VIXL_STATIC_ASSERT_LINE(__LINE__, condition, "")
+#define VIXL_STATIC_ASSERT_MESSAGE(condition, message) \
+  VIXL_STATIC_ASSERT_LINE(__LINE__, condition, message)
+
+#define VIXL_WARNING(message)                                          \
+  do {                                                                 \
+    printf("WARNING in %s, line %i: %s", __FILE__, __LINE__, message); \
+  } while (false)
 
 template <typename T1>
-inline void USE(T1) {}
+inline void USE(const T1&) {}
 
 template <typename T1, typename T2>
-inline void USE(T1, T2) {}
+inline void USE(const T1&, const T2&) {}
 
 template <typename T1, typename T2, typename T3>
-inline void USE(T1, T2, T3) {}
+inline void USE(const T1&, const T2&, const T3&) {}
 
 template <typename T1, typename T2, typename T3, typename T4>
-inline void USE(T1, T2, T3, T4) {}
+inline void USE(const T1&, const T2&, const T3&, const T4&) {}
 
-#define VIXL_ALIGNMENT_EXCEPTION() \
-    do { fprintf(stderr, "ALIGNMENT EXCEPTION\t"); VIXL_ABORT(); } while (0)
+#define VIXL_ALIGNMENT_EXCEPTION()                \
+  do {                                            \
+    VIXL_ABORT_WITH_MSG("ALIGNMENT EXCEPTION\t"); \
+  } while (0)
 
 // The clang::fallthrough attribute is used along with the Wimplicit-fallthrough
 // argument to annotate intentional fall-through between switch labels.
 // For more information please refer to:
 // http://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
 #ifndef __has_warning
-  #define __has_warning(x)  0
+#define __has_warning(x) 0
+#endif
+
+// Fallthrough annotation for Clang and C++11(201103L).
+#if __has_warning("-Wimplicit-fallthrough") && __cplusplus >= 201103L
+#define VIXL_FALLTHROUGH() [[clang::fallthrough]]
+// Fallthrough annotation for GCC >= 7.
+#elif __GNUC__ >= 7
+#define VIXL_FALLTHROUGH() __attribute__((fallthrough))
+#else
+#define VIXL_FALLTHROUGH() \
+  do {                     \
+  } while (0)
+#endif
+
+#if __cplusplus >= 201103L
+#define VIXL_NO_RETURN [[noreturn]]
+#else
+#define VIXL_NO_RETURN __attribute__((noreturn))
+#endif
+#ifdef VIXL_DEBUG
+#define VIXL_NO_RETURN_IN_DEBUG_MODE VIXL_NO_RETURN
+#else
+#define VIXL_NO_RETURN_IN_DEBUG_MODE
+#endif
+
+#if __cplusplus >= 201103L
+#define VIXL_OVERRIDE override
+#else
+#define VIXL_OVERRIDE
 #endif
 
-// Note: This option is only available for Clang. And will only be enabled for
-// C++11(201103L).
-#if __has_warning("-Wimplicit-fallthrough") && __cplusplus >= 201103L
-  #define VIXL_FALLTHROUGH() [[clang::fallthrough]] //NOLINT
+#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
+#ifndef VIXL_AARCH64_GENERATE_SIMULATOR_CODE
+#define VIXL_AARCH64_GENERATE_SIMULATOR_CODE 1
+#endif
 #else
-  #define VIXL_FALLTHROUGH() do {} while (0)
+#ifndef VIXL_AARCH64_GENERATE_SIMULATOR_CODE
+#define VIXL_AARCH64_GENERATE_SIMULATOR_CODE 0
+#endif
+#if VIXL_AARCH64_GENERATE_SIMULATOR_CODE
+#warning "Generating Simulator instructions without Simulator support."
+#endif
 #endif
 
+// We do not have a simulator for AArch32, although we can pretend we do so that
+// tests that require running natively can be skipped.
+#ifndef __arm__
+#define VIXL_INCLUDE_SIMULATOR_AARCH32
+#ifndef VIXL_AARCH32_GENERATE_SIMULATOR_CODE
+#define VIXL_AARCH32_GENERATE_SIMULATOR_CODE 1
+#endif
+#else
+#ifndef VIXL_AARCH32_GENERATE_SIMULATOR_CODE
+#define VIXL_AARCH32_GENERATE_SIMULATOR_CODE 0
+#endif
+#endif
+
+// Target Architecture/ISA
+#ifdef VIXL_INCLUDE_TARGET_A64
+#define VIXL_INCLUDE_TARGET_AARCH64
+#endif
+
+#if defined(VIXL_INCLUDE_TARGET_A32) && defined(VIXL_INCLUDE_TARGET_T32)
+#define VIXL_INCLUDE_TARGET_AARCH32
+#elif defined(VIXL_INCLUDE_TARGET_A32)
+#define VIXL_INCLUDE_TARGET_A32_ONLY
+#else
+#define VIXL_INCLUDE_TARGET_T32_ONLY
+#endif
+
+
 #endif  // VIXL_GLOBALS_H
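The refreshed Globals-vixl.h above also introduces VIXL_FALLTHROUGH(), expanding to [[clang::fallthrough]], __attribute__((fallthrough)), or a no-op depending on the compiler. A minimal usage sketch follows; the function itself is illustrative and not from the patch.

#include "jit/arm64/vixl/Globals-vixl.h"

// Illustrative only: annotate intentional fall-through so that
// -Wimplicit-fallthrough stays quiet with any expansion of VIXL_FALLTHROUGH().
static int LanesPerAccess(int access_size_log2) {
  int lanes = 1;
  switch (access_size_log2) {
    case 3:
      lanes *= 2;
      VIXL_FALLTHROUGH();
    case 2:
      lanes *= 2;
      VIXL_FALLTHROUGH();
    default:
      break;
  }
  return lanes;
}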
--- a/js/src/jit/arm64/vixl/Instructions-vixl.cpp
+++ b/js/src/jit/arm64/vixl/Instructions-vixl.cpp
@@ -25,44 +25,16 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "jit/arm64/vixl/Instructions-vixl.h"
 
 #include "jit/arm64/vixl/Assembler-vixl.h"
 
 namespace vixl {
 
-
-// Floating-point infinity values.
-const float16 kFP16PositiveInfinity = 0x7c00;
-const float16 kFP16NegativeInfinity = 0xfc00;
-const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
-const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
-const double kFP64PositiveInfinity =
-    rawbits_to_double(UINT64_C(0x7ff0000000000000));
-const double kFP64NegativeInfinity =
-    rawbits_to_double(UINT64_C(0xfff0000000000000));
-
-
-// The default NaN values (for FPCR.DN=1).
-const double kFP64DefaultNaN = rawbits_to_double(UINT64_C(0x7ff8000000000000));
-const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000);
-const float16 kFP16DefaultNaN = 0x7e00;
-
-
-static uint64_t RotateRight(uint64_t value,
-                            unsigned int rotate,
-                            unsigned int width) {
-  VIXL_ASSERT(width <= 64);
-  rotate &= 63;
-  return ((value & ((UINT64_C(1) << rotate) - 1)) <<
-          (width - rotate)) | (value >> rotate);
-}
-
-
 static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                     uint64_t value,
                                     unsigned width) {
   VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
               (width == 32));
   VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
   uint64_t result = value & ((UINT64_C(1) << width) - 1);
   for (unsigned i = width; i < reg_size; i *= 2) {
@@ -191,17 +163,17 @@ float Instruction::Imm8ToFP32(uint32_t i
   // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
   // where B is b ^ 1
   uint32_t bits = imm8;
   uint32_t bit7 = (bits >> 7) & 0x1;
   uint32_t bit6 = (bits >> 6) & 0x1;
   uint32_t bit5_to_0 = bits & 0x3f;
   uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
 
-  return rawbits_to_float(result);
+  return RawbitsToFloat(result);
 }
 
 
 float Instruction::ImmFP32() const {
   return Imm8ToFP32(ImmFP());
 }
 
 
@@ -211,17 +183,17 @@ double Instruction::Imm8ToFP64(uint32_t 
   //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
   // where B is b ^ 1
   uint32_t bits = imm8;
   uint64_t bit7 = (bits >> 7) & 0x1;
   uint64_t bit6 = (bits >> 6) & 0x1;
   uint64_t bit5_to_0 = bits & 0x3f;
   uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
 
-  return rawbits_to_double(result);
+  return RawbitsToDouble(result);
 }
 
 
 double Instruction::ImmFP64() const {
   return Imm8ToFP64(ImmFP());
 }
 
 
@@ -284,17 +256,17 @@ int Instruction::ImmBranchRangeBitwidth(
 int32_t Instruction::ImmBranchForwardRange(ImmBranchType branch_type) {
   int32_t encoded_max = 1 << (ImmBranchRangeBitwidth(branch_type) - 1);
   return encoded_max * kInstructionSize;
 }
 
 
 bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
                                      int64_t offset) {
-  return is_intn(ImmBranchRangeBitwidth(branch_type), offset);
+  return IsIntN(ImmBranchRangeBitwidth(branch_type), offset);
 }
 
 ImmBranchRangeType Instruction::ImmBranchTypeToRange(ImmBranchType branch_type)
 {
   switch (branch_type) {
     case UncondBranchType:
       return UncondBranchRangeType;
     case CondBranchType:
--- a/js/src/jit/arm64/vixl/Instructions-vixl.h
+++ b/js/src/jit/arm64/vixl/Instructions-vixl.h
@@ -97,37 +97,16 @@ const unsigned kSPRegInternalCode = 63;
 const unsigned kRegCodeMask = 0x1f;
 
 const unsigned kAddressTagOffset = 56;
 const unsigned kAddressTagWidth = 8;
 const uint64_t kAddressTagMask =
     ((UINT64_C(1) << kAddressTagWidth) - 1) << kAddressTagOffset;
 VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000));
 
-// AArch64 floating-point specifics. These match IEEE-754.
-const unsigned kDoubleMantissaBits = 52;
-const unsigned kDoubleExponentBits = 11;
-const unsigned kFloatMantissaBits = 23;
-const unsigned kFloatExponentBits = 8;
-const unsigned kFloat16MantissaBits = 10;
-const unsigned kFloat16ExponentBits = 5;
-
-// Floating-point infinity values.
-extern const float16 kFP16PositiveInfinity;
-extern const float16 kFP16NegativeInfinity;
-extern const float kFP32PositiveInfinity;
-extern const float kFP32NegativeInfinity;
-extern const double kFP64PositiveInfinity;
-extern const double kFP64NegativeInfinity;
-
-// The default NaN values (for FPCR.DN=1).
-extern const float16 kFP16DefaultNaN;
-extern const float kFP32DefaultNaN;
-extern const double kFP64DefaultNaN;
-
 unsigned CalcLSDataSize(LoadStoreOp op);
 unsigned CalcLSPairDataSize(LoadStorePairOp op);
 
 enum ImmBranchType {
   UnknownBranchType = 0,
   CondBranchType    = 1,
   UncondBranchType  = 2,
   CompareBranchType = 3,
@@ -148,29 +127,16 @@ enum ImmBranchRangeType {
 };
 
 enum AddrMode {
   Offset,
   PreIndex,
   PostIndex
 };
 
-enum FPRounding {
-  // The first four values are encodable directly by FPCR<RMode>.
-  FPTieEven = 0x0,
-  FPPositiveInfinity = 0x1,
-  FPNegativeInfinity = 0x2,
-  FPZero = 0x3,
-
-  // The final rounding modes are only available when explicitly specified by
-  // the instruction (such as with fcvta). It cannot be set in FPCR.
-  FPTieAway,
-  FPRoundOdd
-};
-
 enum Reg31Mode {
   Reg31IsStackPointer,
   Reg31IsZeroRegister
 };
 
 // Instructions. ---------------------------------------------------------------
 
 class Instruction {
@@ -183,22 +149,22 @@ class Instruction {
     *(reinterpret_cast<Instr*>(this)) = new_instr;
   }
 
   int Bit(int pos) const {
     return (InstructionBits() >> pos) & 1;
   }
 
   uint32_t Bits(int msb, int lsb) const {
-    return unsigned_bitextract_32(msb, lsb, InstructionBits());
+    return ExtractUnsignedBitfield32(msb, lsb, InstructionBits());
   }
 
   int32_t SignedBits(int msb, int lsb) const {
     int32_t bits = *(reinterpret_cast<const int32_t*>(this));
-    return signed_bitextract_32(msb, lsb, bits);
+    return ExtractSignedBitfield32(msb, lsb, bits);
   }
 
   Instr Mask(uint32_t mask) const {
     return InstructionBits() & mask;
   }
 
   #define DEFINE_GETTER(Name, HighBit, LowBit, Func)             \
   int32_t Name() const { return Func(HighBit, LowBit); }
@@ -211,17 +177,17 @@ class Instruction {
   #undef DEFINE_SETTER
 
   // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
   // formed from ImmPCRelLo and ImmPCRelHi.
   int ImmPCRel() const {
     int offset =
         static_cast<int>((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
     int width = ImmPCRelLo_width + ImmPCRelHi_width;
-    return signed_bitextract_32(width - 1, 0, offset);
+    return ExtractSignedBitfield32(width - 1, 0, offset);
   }
 
   uint64_t ImmLogical() const;
   unsigned ImmNEONabcdefgh() const;
   float ImmFP32() const;
   double ImmFP64() const;
   float ImmNEONFP32() const;
   double ImmNEONFP64() const;
@@ -462,21 +428,21 @@ class Instruction {
     return literal;
   }
 
   void SetLiteral64(uint64_t literal) const {
     memcpy(LiteralAddress<void*>(), &literal, sizeof(literal));
   }
 
   float LiteralFP32() const {
-    return rawbits_to_float(Literal32());
+    return RawbitsToFloat(Literal32());
   }
 
   double LiteralFP64() const {
-    return rawbits_to_double(Literal64());
+    return RawbitsToDouble(Literal64());
   }
 
   const Instruction* NextInstruction() const {
     return this + kInstructionSize;
   }
 
   // Skip any constant pools with artificial guards at this point.
   // Return either |this| or the first instruction after the pool.
--- a/js/src/jit/arm64/vixl/Logic-vixl.cpp
+++ b/js/src/jit/arm64/vixl/Logic-vixl.cpp
@@ -36,49 +36,16 @@ template<> double Simulator::FPDefaultNa
   return kFP64DefaultNaN;
 }
 
 
 template<> float Simulator::FPDefaultNaN<float>() {
   return kFP32DefaultNaN;
 }
 
-// See FPRound for a description of this function.
-static inline double FPRoundToDouble(int64_t sign, int64_t exponent,
-                                     uint64_t mantissa, FPRounding round_mode) {
-  int64_t bits =
-      FPRound<int64_t, kDoubleExponentBits, kDoubleMantissaBits>(sign,
-                                                                 exponent,
-                                                                 mantissa,
-                                                                 round_mode);
-  return rawbits_to_double(bits);
-}
-
-
-// See FPRound for a description of this function.
-static inline float FPRoundToFloat(int64_t sign, int64_t exponent,
-                                   uint64_t mantissa, FPRounding round_mode) {
-  int32_t bits =
-      FPRound<int32_t, kFloatExponentBits, kFloatMantissaBits>(sign,
-                                                               exponent,
-                                                               mantissa,
-                                                               round_mode);
-  return rawbits_to_float(bits);
-}
-
-
-// See FPRound for a description of this function.
-static inline float16 FPRoundToFloat16(int64_t sign,
-                                       int64_t exponent,
-                                       uint64_t mantissa,
-                                       FPRounding round_mode) {
-  return FPRound<float16, kFloat16ExponentBits, kFloat16MantissaBits>(
-      sign, exponent, mantissa, round_mode);
-}
-
 
 double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) {
   if (src >= 0) {
     return UFixedToDouble(src, fbits, round);
   } else {
     // This works for all negative values, including INT64_MIN.
     return -UFixedToDouble(-src, fbits, round);
   }
@@ -122,277 +89,16 @@ float Simulator::UFixedToFloat(uint64_t 
   // 2^exponent.
   const int highest_significant_bit = 63 - CountLeadingZeros(src);
   const int32_t exponent = highest_significant_bit - fbits;
 
   return FPRoundToFloat(0, exponent, src, round);
 }
 
 
-double Simulator::FPToDouble(float value) {
-  switch (std::fpclassify(value)) {
-    case FP_NAN: {
-      if (IsSignallingNaN(value)) {
-        FPProcessException();
-      }
-      if (DN()) return kFP64DefaultNaN;
-
-      // Convert NaNs as the processor would:
-      //  - The sign is propagated.
-      //  - The payload (mantissa) is transferred entirely, except that the top
-      //    bit is forced to '1', making the result a quiet NaN. The unused
-      //    (low-order) payload bits are set to 0.
-      uint32_t raw = float_to_rawbits(value);
-
-      uint64_t sign = raw >> 31;
-      uint64_t exponent = (1 << 11) - 1;
-      uint64_t payload = unsigned_bitextract_64(21, 0, raw);
-      payload <<= (52 - 23);  // The unused low-order bits should be 0.
-      payload |= (UINT64_C(1) << 51);  // Force a quiet NaN.
-
-      return rawbits_to_double((sign << 63) | (exponent << 52) | payload);
-    }
-
-    case FP_ZERO:
-    case FP_NORMAL:
-    case FP_SUBNORMAL:
-    case FP_INFINITE: {
-      // All other inputs are preserved in a standard cast, because every value
-      // representable using an IEEE-754 float is also representable using an
-      // IEEE-754 double.
-      return static_cast<double>(value);
-    }
-  }
-
-  VIXL_UNREACHABLE();
-  return static_cast<double>(value);
-}
-
-
-float Simulator::FPToFloat(float16 value) {
-  uint32_t sign = value >> 15;
-  uint32_t exponent = unsigned_bitextract_32(
-      kFloat16MantissaBits + kFloat16ExponentBits - 1, kFloat16MantissaBits,
-      value);
-  uint32_t mantissa = unsigned_bitextract_32(
-      kFloat16MantissaBits - 1, 0, value);
-
-  switch (float16classify(value)) {
-    case FP_ZERO:
-      return (sign == 0) ? 0.0f : -0.0f;
-
-    case FP_INFINITE:
-      return (sign == 0) ? kFP32PositiveInfinity : kFP32NegativeInfinity;
-
-    case FP_SUBNORMAL: {
-      // Calculate shift required to put mantissa into the most-significant bits
-      // of the destination mantissa.
-      int shift = CountLeadingZeros(mantissa << (32 - 10));
-
-      // Shift mantissa and discard implicit '1'.
-      mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits) + shift + 1;
-      mantissa &= (1 << kFloatMantissaBits) - 1;
-
-      // Adjust the exponent for the shift applied, and rebias.
-      exponent = exponent - shift + (-15 + 127);
-      break;
-    }
-
-    case FP_NAN:
-      if (IsSignallingNaN(value)) {
-        FPProcessException();
-      }
-      if (DN()) return kFP32DefaultNaN;
-
-      // Convert NaNs as the processor would:
-      //  - The sign is propagated.
-      //  - The payload (mantissa) is transferred entirely, except that the top
-      //    bit is forced to '1', making the result a quiet NaN. The unused
-      //    (low-order) payload bits are set to 0.
-      exponent = (1 << kFloatExponentBits) - 1;
-
-      // Increase bits in mantissa, making low-order bits 0.
-      mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits);
-      mantissa |= 1 << 22;  // Force a quiet NaN.
-      break;
-
-    case FP_NORMAL:
-      // Increase bits in mantissa, making low-order bits 0.
-      mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits);
-
-      // Change exponent bias.
-      exponent += (-15 + 127);
-      break;
-
-    default: VIXL_UNREACHABLE();
-  }
-  return rawbits_to_float((sign << 31) |
-                          (exponent << kFloatMantissaBits) |
-                          mantissa);
-}
-
-
-float16 Simulator::FPToFloat16(float value, FPRounding round_mode) {
-  // Only the FPTieEven rounding mode is implemented.
-  VIXL_ASSERT(round_mode == FPTieEven);
-  USE(round_mode);
-
-  uint32_t raw = float_to_rawbits(value);
-  int32_t sign = raw >> 31;
-  int32_t exponent = unsigned_bitextract_32(30, 23, raw) - 127;
-  uint32_t mantissa = unsigned_bitextract_32(22, 0, raw);
-
-  switch (std::fpclassify(value)) {
-    case FP_NAN: {
-      if (IsSignallingNaN(value)) {
-        FPProcessException();
-      }
-      if (DN()) return kFP16DefaultNaN;
-
-      // Convert NaNs as the processor would:
-      //  - The sign is propagated.
-      //  - The payload (mantissa) is transferred as much as possible, except
-      //    that the top bit is forced to '1', making the result a quiet NaN.
-      float16 result = (sign == 0) ? kFP16PositiveInfinity
-                                   : kFP16NegativeInfinity;
-      result |= mantissa >> (kFloatMantissaBits - kFloat16MantissaBits);
-      result |= (1 << 9);  // Force a quiet NaN;
-      return result;
-    }
-
-    case FP_ZERO:
-      return (sign == 0) ? 0 : 0x8000;
-
-    case FP_INFINITE:
-      return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
-
-    case FP_NORMAL:
-    case FP_SUBNORMAL: {
-      // Convert float-to-half as the processor would, assuming that FPCR.FZ
-      // (flush-to-zero) is not set.
-
-      // Add the implicit '1' bit to the mantissa.
-      mantissa += (1 << 23);
-      return FPRoundToFloat16(sign, exponent, mantissa, round_mode);
-    }
-  }
-
-  VIXL_UNREACHABLE();
-  return 0;
-}
-
-
-float16 Simulator::FPToFloat16(double value, FPRounding round_mode) {
-  // Only the FPTieEven rounding mode is implemented.
-  VIXL_ASSERT(round_mode == FPTieEven);
-  USE(round_mode);
-
-  uint64_t raw = double_to_rawbits(value);
-  int32_t sign = raw >> 63;
-  int64_t exponent = unsigned_bitextract_64(62, 52, raw) - 1023;
-  uint64_t mantissa = unsigned_bitextract_64(51, 0, raw);
-
-  switch (std::fpclassify(value)) {
-    case FP_NAN: {
-      if (IsSignallingNaN(value)) {
-        FPProcessException();
-      }
-      if (DN()) return kFP16DefaultNaN;
-
-      // Convert NaNs as the processor would:
-      //  - The sign is propagated.
-      //  - The payload (mantissa) is transferred as much as possible, except
-      //    that the top bit is forced to '1', making the result a quiet NaN.
-      float16 result = (sign == 0) ? kFP16PositiveInfinity
-                                   : kFP16NegativeInfinity;
-      result |= mantissa >> (kDoubleMantissaBits - kFloat16MantissaBits);
-      result |= (1 << 9);  // Force a quiet NaN;
-      return result;
-    }
-
-    case FP_ZERO:
-      return (sign == 0) ? 0 : 0x8000;
-
-    case FP_INFINITE:
-      return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
-
-    case FP_NORMAL:
-    case FP_SUBNORMAL: {
-      // Convert double-to-half as the processor would, assuming that FPCR.FZ
-      // (flush-to-zero) is not set.
-
-      // Add the implicit '1' bit to the mantissa.
-      mantissa += (UINT64_C(1) << 52);
-      return FPRoundToFloat16(sign, exponent, mantissa, round_mode);
-    }
-  }
-
-  VIXL_UNREACHABLE();
-  return 0;
-}
-
-
-float Simulator::FPToFloat(double value, FPRounding round_mode) {
-  // Only the FPTieEven rounding mode is implemented.
-  VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd));
-  USE(round_mode);
-
-  switch (std::fpclassify(value)) {
-    case FP_NAN: {
-      if (IsSignallingNaN(value)) {
-        FPProcessException();
-      }
-      if (DN()) return kFP32DefaultNaN;
-
-      // Convert NaNs as the processor would:
-      //  - The sign is propagated.
-      //  - The payload (mantissa) is transferred as much as possible, except
-      //    that the top bit is forced to '1', making the result a quiet NaN.
-      uint64_t raw = double_to_rawbits(value);
-
-      uint32_t sign = raw >> 63;
-      uint32_t exponent = (1 << 8) - 1;
-      uint32_t payload =
-          static_cast<uint32_t>(unsigned_bitextract_64(50, 52 - 23, raw));
-      payload |= (1 << 22);   // Force a quiet NaN.
-
-      return rawbits_to_float((sign << 31) | (exponent << 23) | payload);
-    }
-
-    case FP_ZERO:
-    case FP_INFINITE: {
-      // In a C++ cast, any value representable in the target type will be
-      // unchanged. This is always the case for +/-0.0 and infinities.
-      return static_cast<float>(value);
-    }
-
-    case FP_NORMAL:
-    case FP_SUBNORMAL: {
-      // Convert double-to-float as the processor would, assuming that FPCR.FZ
-      // (flush-to-zero) is not set.
-      uint64_t raw = double_to_rawbits(value);
-      // Extract the IEEE-754 double components.
-      uint32_t sign = raw >> 63;
-      // Extract the exponent and remove the IEEE-754 encoding bias.
-      int32_t exponent =
-          static_cast<int32_t>(unsigned_bitextract_64(62, 52, raw)) - 1023;
-      // Extract the mantissa and add the implicit '1' bit.
-      uint64_t mantissa = unsigned_bitextract_64(51, 0, raw);
-      if (std::fpclassify(value) == FP_NORMAL) {
-        mantissa |= (UINT64_C(1) << 52);
-      }
-      return FPRoundToFloat(sign, exponent, mantissa, round_mode);
-    }
-  }
-
-  VIXL_UNREACHABLE();
-  return value;
-}
-
-
 void Simulator::ld1(VectorFormat vform,
                     LogicVRegister dst,
                     uint64_t addr) {
   dst.ClearForWrite(vform);
   for (int i = 0; i < LaneCountFromFormat(vform); i++) {
     dst.ReadUintFromMem(vform, i, addr);
     addr += LaneSizeInBytesFromFormat(vform);
   }
@@ -4015,22 +3721,22 @@ LogicVRegister Simulator::fcmp(VectorFor
 
 
 LogicVRegister Simulator::fcmp_zero(VectorFormat vform,
                                     LogicVRegister dst,
                                     const LogicVRegister& src,
                                     Condition cond) {
   SimVRegister temp;
   if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
-    LogicVRegister zero_reg = dup_immediate(vform, temp, float_to_rawbits(0.0));
+    LogicVRegister zero_reg = dup_immediate(vform, temp, FloatToRawbits(0.0));
     fcmp<float>(vform, dst, src, zero_reg, cond);
   } else {
     VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
     LogicVRegister zero_reg = dup_immediate(vform, temp,
-                                            double_to_rawbits(0.0));
+                                            DoubleToRawbits(0.0));
     fcmp<double>(vform, dst, src, zero_reg, cond);
   }
   return dst;
 }
 
 
 LogicVRegister Simulator::fabscmp(VectorFormat vform,
                                   LogicVRegister dst,
@@ -4428,100 +4134,113 @@ LogicVRegister Simulator::fcvtu(VectorFo
 }
 
 
 LogicVRegister Simulator::fcvtl(VectorFormat vform,
                                 LogicVRegister dst,
                                 const LogicVRegister& src) {
   if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
     for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) {
-      dst.SetFloat(i, FPToFloat(src.Float<float16>(i)));
+      // TODO: Full support for SimFloat16 in SimRegister(s).
+      dst.SetFloat(i,
+                   FPToFloat(RawbitsToFloat16(src.Float<uint16_t>(i)),
+                             ReadDN()));
     }
   } else {
     VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
     for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) {
-      dst.SetFloat(i, FPToDouble(src.Float<float>(i)));
+      dst.SetFloat(i, FPToDouble(src.Float<float>(i), ReadDN()));
     }
   }
   return dst;
 }
 
 
 LogicVRegister Simulator::fcvtl2(VectorFormat vform,
                                  LogicVRegister dst,
                                  const LogicVRegister& src) {
   int lane_count = LaneCountFromFormat(vform);
   if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
     for (int i = 0; i < lane_count; i++) {
-      dst.SetFloat(i, FPToFloat(src.Float<float16>(i + lane_count)));
+      // TODO: Full support for SimFloat16 in SimRegister(s).
+      dst.SetFloat(i,
+                   FPToFloat(RawbitsToFloat16(
+                                 src.Float<uint16_t>(i + lane_count)),
+                             ReadDN()));
     }
   } else {
     VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
     for (int i = 0; i < lane_count; i++) {
-      dst.SetFloat(i, FPToDouble(src.Float<float>(i + lane_count)));
+      dst.SetFloat(i, FPToDouble(src.Float<float>(i + lane_count), ReadDN()));
     }
   }
   return dst;
 }
 
 
 LogicVRegister Simulator::fcvtn(VectorFormat vform,
                                 LogicVRegister dst,
                                 const LogicVRegister& src) {
   if (LaneSizeInBitsFromFormat(vform) == kHRegSize) {
     for (int i = 0; i < LaneCountFromFormat(vform); i++) {
-      dst.SetFloat(i, FPToFloat16(src.Float<float>(i), FPTieEven));
+      dst.SetFloat(i,
+                   Float16ToRawbits(
+                       FPToFloat16(src.Float<float>(i), FPTieEven, ReadDN())));
     }
   } else {
     VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
     for (int i = 0; i < LaneCountFromFormat(vform); i++) {
-      dst.SetFloat(i, FPToFloat(src.Float<double>(i), FPTieEven));
+      dst.SetFloat(i, FPToFloat(src.Float<double>(i), FPTieEven, ReadDN()));
     }
   }
   return dst;
 }
 
 
 LogicVRegister Simulator::fcvtn2(VectorFormat vform,
                                  LogicVRegister dst,
                                  const LogicVRegister& src) {
   int lane_count = LaneCountFromFormat(vform) / 2;
   if (LaneSizeInBitsFromFormat(vform) == kHRegSize) {
     for (int i = lane_count - 1; i >= 0; i--) {
-      dst.SetFloat(i + lane_count, FPToFloat16(src.Float<float>(i), FPTieEven));
+      dst.SetFloat(i + lane_count,
+                   Float16ToRawbits(
+                       FPToFloat16(src.Float<float>(i), FPTieEven, ReadDN())));
     }
   } else {
     VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
     for (int i = lane_count - 1; i >= 0; i--) {
-      dst.SetFloat(i + lane_count, FPToFloat(src.Float<double>(i), FPTieEven));
+      dst.SetFloat(i + lane_count,
+                   FPToFloat(src.Float<double>(i), FPTieEven, ReadDN()));
     }
   }
   return dst;
 }
 
 
 LogicVRegister Simulator::fcvtxn(VectorFormat vform,
                                  LogicVRegister dst,
                                  const LogicVRegister& src) {
   dst.ClearForWrite(vform);
   VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
   for (int i = 0; i < LaneCountFromFormat(vform); i++) {
-    dst.SetFloat(i, FPToFloat(src.Float<double>(i), FPRoundOdd));
+    dst.SetFloat(i, FPToFloat(src.Float<double>(i), FPRoundOdd, ReadDN()));
   }
   return dst;
 }
 
 
 LogicVRegister Simulator::fcvtxn2(VectorFormat vform,
                                   LogicVRegister dst,
                                   const LogicVRegister& src) {
   VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
   int lane_count = LaneCountFromFormat(vform) / 2;
   for (int i = lane_count - 1; i >= 0; i--) {
-    dst.SetFloat(i + lane_count, FPToFloat(src.Float<double>(i), FPRoundOdd));
+    dst.SetFloat(i + lane_count,
+                 FPToFloat(src.Float<double>(i), FPRoundOdd, ReadDN()));
   }
   return dst;
 }
 
 
 // Based on reference C function recip_sqrt_estimate from ARM ARM.
 double Simulator::recip_sqrt_estimate(double a) {
   int q0, q1, s;
@@ -4534,17 +4253,17 @@ double Simulator::recip_sqrt_estimate(do
     r = 1.0 / sqrt((static_cast<double>(q1) + 0.5) / 256.0);
   }
   s = static_cast<int>(256.0 * r + 0.5);
   return static_cast<double>(s) / 256.0;
 }
 
 
 static inline uint64_t Bits(uint64_t val, int start_bit, int end_bit) {
-  return unsigned_bitextract_64(start_bit, end_bit, val);
+  return ExtractUnsignedBitfield64(start_bit, end_bit, val);
 }
 
 
 template <typename T>
 T Simulator::FPRecipSqrtEstimate(T op) {
   if (std::isnan(op)) {
     return FPProcessNaN(op);
   } else if (op == 0.0) {
@@ -4558,53 +4277,53 @@ T Simulator::FPRecipSqrtEstimate(T op) {
     return FPDefaultNaN<T>();
   } else if (std::isinf(op)) {
     return 0.0;
   } else {
     uint64_t fraction;
     int exp, result_exp;
 
     if (sizeof(T) == sizeof(float)) {  // NOLINT(runtime/sizeof)
-      exp = float_exp(op);
-      fraction = float_mantissa(op);
+      exp = FloatExp(op);
+      fraction = FloatMantissa(op);
       fraction <<= 29;
     } else {
-      exp = double_exp(op);
-      fraction = double_mantissa(op);
+      exp = DoubleExp(op);
+      fraction = DoubleMantissa(op);
     }
 
     if (exp == 0) {
       while (Bits(fraction, 51, 51) == 0) {
         fraction = Bits(fraction, 50, 0) << 1;
         exp -= 1;
       }
       fraction = Bits(fraction, 50, 0) << 1;
     }
 
     double scaled;
     if (Bits(exp, 0, 0) == 0) {
-      scaled = double_pack(0, 1022, Bits(fraction, 51, 44) << 44);
+      scaled = DoublePack(0, 1022, Bits(fraction, 51, 44) << 44);
     } else {
-      scaled = double_pack(0, 1021, Bits(fraction, 51, 44) << 44);
+      scaled = DoublePack(0, 1021, Bits(fraction, 51, 44) << 44);
     }
 
     if (sizeof(T) == sizeof(float)) {  // NOLINT(runtime/sizeof)
       result_exp = (380 - exp) / 2;
     } else {
       result_exp = (3068 - exp) / 2;
     }
 
-    uint64_t estimate = double_to_rawbits(recip_sqrt_estimate(scaled));
+    uint64_t estimate = DoubleToRawbits(recip_sqrt_estimate(scaled));
 
     if (sizeof(T) == sizeof(float)) {  // NOLINT(runtime/sizeof)
       uint32_t exp_bits = static_cast<uint32_t>(Bits(result_exp, 7, 0));
       uint32_t est_bits = static_cast<uint32_t>(Bits(estimate, 51, 29));
-      return float_pack(0, exp_bits, est_bits);
+      return FloatPack(0, exp_bits, est_bits);
     } else {
-      return double_pack(0, Bits(result_exp, 10, 0), Bits(estimate, 51, 0));
+      return DoublePack(0, Bits(result_exp, 10, 0), Bits(estimate, 51, 0));
     }
   }
 }
 
 
 LogicVRegister Simulator::frsqrte(VectorFormat vform,
                                   LogicVRegister dst,
                                   const LogicVRegister& src) {
@@ -4624,19 +4343,19 @@ LogicVRegister Simulator::frsqrte(Vector
   return dst;
 }
 
 template <typename T>
 T Simulator::FPRecipEstimate(T op, FPRounding rounding) {
   uint32_t sign;
 
   if (sizeof(T) == sizeof(float)) {  // NOLINT(runtime/sizeof)
-    sign = float_sign(op);
+    sign = FloatSign(op);
   } else {
-    sign = double_sign(op);
+    sign = DoubleSign(op);
   }
 
   if (std::isnan(op)) {
     return FPProcessNaN(op);
   } else if (std::isinf(op)) {
     return (sign == 1) ? -0.0 : 0.0;
   } else if (op == 0.0) {
     FPProcessException();  // FPExc_DivideByZero exception.
@@ -4654,69 +4373,69 @@ T Simulator::FPRecipEstimate(T op, FPRou
       default: break;
     }
     FPProcessException();  // FPExc_Overflow and FPExc_Inexact.
     if (overflow_to_inf) {
       return (sign == 1) ? kFP64NegativeInfinity : kFP64PositiveInfinity;
     } else {
       // Return FPMaxNormal(sign).
       if (sizeof(T) == sizeof(float)) {  // NOLINT(runtime/sizeof)
-        return float_pack(sign, 0xfe, 0x07fffff);
+        return FloatPack(sign, 0xfe, 0x07fffff);
       } else {
-        return double_pack(sign, 0x7fe, 0x0fffffffffffffl);
+        return DoublePack(sign, 0x7fe, 0x0fffffffffffffl);
       }
     }
   } else {
     uint64_t fraction;
     int exp, result_exp;
     uint32_t sign;
 
     if (sizeof(T) == sizeof(float)) {  // NOLINT(runtime/sizeof)
-      sign = float_sign(op);
-      exp = float_exp(op);
-      fraction = float_mantissa(op);
+      sign = FloatSign(op);
+      exp = FloatExp(op);
+      fraction = FloatMantissa(op);
       fraction <<= 29;
     } else {
-      sign = double_sign(op);
-      exp = double_exp(op);
-      fraction = double_mantissa(op);
+      sign = DoubleSign(op);
+      exp = DoubleExp(op);
+      fraction = DoubleMantissa(op);
     }
 
     if (exp == 0) {
       if (Bits(fraction, 51, 51) == 0) {
         exp -= 1;
         fraction = Bits(fraction, 49, 0) << 2;
       } else {
         fraction = Bits(fraction, 50, 0) << 1;
       }
     }
 
-    double scaled = double_pack(0, 1022, Bits(fraction, 51, 44) << 44);
+    double scaled = DoublePack(0, 1022, Bits(fraction, 51, 44) << 44);
 
     if (sizeof(T) == sizeof(float)) {  // NOLINT(runtime/sizeof)
       result_exp = (253 - exp);  // In range 253-254 = -1 to 253+1 = 254.
     } else {
       result_exp = (2045 - exp);  // In range 2045-2046 = -1 to 2045+1 = 2046.
     }
 
     double estimate = recip_estimate(scaled);
 
-    fraction = double_mantissa(estimate);
+    fraction = DoubleMantissa(estimate);
     if (result_exp == 0) {
       fraction = (UINT64_C(1) << 51) | Bits(fraction, 51, 1);
     } else if (result_exp == -1) {
       fraction = (UINT64_C(1) << 50) | Bits(fraction, 51, 2);
       result_exp = 0;
     }
     if (sizeof(T) == sizeof(float)) {  // NOLINT(runtime/sizeof)
       uint32_t exp_bits = static_cast<uint32_t>(Bits(result_exp, 7, 0));
       uint32_t frac_bits = static_cast<uint32_t>(Bits(fraction, 51, 29));
-      return float_pack(sign, exp_bits, frac_bits);
+      return FloatPack(sign, exp_bits, frac_bits);
     } else {
-      return double_pack(sign, Bits(result_exp, 10, 0), Bits(fraction, 51, 0));
+      return DoublePack(sign, Bits(result_exp, 10, 0), Bits(fraction, 51, 0));
     }
   }
 }
 
 
 LogicVRegister Simulator::frecpe(VectorFormat vform,
                                  LogicVRegister dst,
                                  const LogicVRegister& src,
@@ -4801,25 +4520,25 @@ LogicVRegister Simulator::frecpx(VectorF
     T op = src.Float<T>(i);
     T result;
     if (std::isnan(op)) {
        result = FPProcessNaN(op);
     } else {
       int exp;
       uint32_t sign;
       if (sizeof(T) == sizeof(float)) {  // NOLINT(runtime/sizeof)
-        sign = float_sign(op);
-        exp = float_exp(op);
+        sign = FloatSign(op);
+        exp = FloatExp(op);
         exp = (exp == 0) ? (0xFF - 1) : static_cast<int>(Bits(~exp, 7, 0));
-        result = float_pack(sign, exp, 0);
+        result = FloatPack(sign, exp, 0);
       } else {
-        sign = double_sign(op);
-        exp = double_exp(op);
+        sign = DoubleSign(op);
+        exp = DoubleExp(op);
         exp = (exp == 0) ? (0x7FF - 1) : static_cast<int>(Bits(~exp, 10, 0));
-        result = double_pack(sign, exp, 0);
+        result = DoublePack(sign, exp, 0);
       }
     }
     dst.SetFloat(i, result);
   }
   return dst;
 }
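For reference, the conversion helpers threaded through this file now receive the FPCR.DN setting via ReadDN(). A minimal sketch (not part of the patch) of what that argument selects, using only helpers introduced by this update:

    // Sketch: how the UseDefaultNaN argument changes NaN handling.
    double ConvertSignallingNaN(UseDefaultNaN dn) {
      float snan = RawbitsToFloat(0x7f800001);  // Signalling NaN: quiet bit (bit 22) clear.
      // kUseDefaultNaN    -> the result is kFP64DefaultNaN.
      // kIgnoreDefaultNaN -> the payload is propagated and the quiet bit is set.
      return FPToDouble(snan, dn);
    }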
 
 
--- a/js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp
+++ b/js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp
@@ -43,17 +43,17 @@ void MacroAssembler::FinalizeCode() {
   Assembler::FinalizeCode();
 }
 
 
 int MacroAssembler::MoveImmediateHelper(MacroAssembler* masm,
                                         const Register &rd,
                                         uint64_t imm) {
   bool emit_code = (masm != NULL);
-  VIXL_ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
+  VIXL_ASSERT(IsUint32(imm) || IsInt32(imm) || rd.Is64Bits());
   // The worst case for size is mov 64-bit immediate to sp:
   //  * up to 4 instructions to materialise the constant
   //  * 1 instruction to move to sp
   MacroEmissionCheckScope guard(masm);
 
   // Immediates on Aarch64 can be produced using an initial value, and zero to
   // three move keep operations.
   //
@@ -359,17 +359,17 @@ void MacroAssembler::LogicalMacro(const 
     // Ignore the top 32 bits of an immediate if we're moving to a W register.
     if (rd.Is32Bits()) {
       // Check that the top 32 bits are consistent.
       VIXL_ASSERT(((immediate >> kWRegSize) == 0) ||
                   ((immediate >> kWRegSize) == -1));
       immediate &= kWRegMask;
     }
 
-    VIXL_ASSERT(rd.Is64Bits() || is_uint32(immediate));
+    VIXL_ASSERT(rd.Is64Bits() || IsUint32(immediate));
 
     // Special cases for all set or all clear immediates.
     if (immediate == 0) {
       switch (op) {
         case AND:
           Mov(rd, 0);
           return;
         case ORR:
@@ -490,17 +490,17 @@ void MacroAssembler::Mov(const Register&
                                   (discard_mode == kDontDiscardForSameWReg))) {
       mov(rd, operand.reg());
     }
   }
 }
 
 
 void MacroAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
-  VIXL_ASSERT(is_uint16(imm));
+  VIXL_ASSERT(IsUint16(imm));
   int byte1 = (imm & 0xff);
   int byte2 = ((imm >> 8) & 0xff);
   if (byte1 == byte2) {
     movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1);
   } else if (byte1 == 0) {
     movi(vd, byte2, LSL, 8);
   } else if (byte2 == 0) {
     movi(vd, byte1);
@@ -513,17 +513,17 @@ void MacroAssembler::Movi16bitHelper(con
     Register temp = temps.AcquireW();
     movz(temp, imm);
     dup(vd, temp);
   }
 }
 
 
 void MacroAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
-  VIXL_ASSERT(is_uint32(imm));
+  VIXL_ASSERT(IsUint32(imm));
 
   uint8_t bytes[sizeof(imm)];
   memcpy(bytes, &imm, sizeof(imm));
 
   // All bytes are either 0x00 or 0xff.
   {
     bool all0orff = true;
     for (int i = 0; i < 4; ++i) {
@@ -636,17 +636,17 @@ void MacroAssembler::Movi(const VRegiste
                           uint64_t imm,
                           Shift shift,
                           int shift_amount) {
   MacroEmissionCheckScope guard(this);
   if (shift_amount != 0 || shift != LSL) {
     movi(vd, imm, shift, shift_amount);
   } else if (vd.Is8B() || vd.Is16B()) {
     // 8-bit immediate.
-    VIXL_ASSERT(is_uint8(imm));
+    VIXL_ASSERT(IsUint8(imm));
     movi(vd, imm);
   } else if (vd.Is4H() || vd.Is8H()) {
     // 16-bit immediate.
     Movi16bitHelper(vd, imm);
   } else if (vd.Is2S() || vd.Is4S()) {
     // 32-bit immediate.
     Movi32bitHelper(vd, imm);
   } else {
@@ -882,17 +882,17 @@ void MacroAssembler::Fmov(VRegister vd, 
     Fmov(vd, static_cast<float>(imm));
     return;
   }
 
   VIXL_ASSERT(vd.Is1D() || vd.Is2D());
   if (IsImmFP64(imm)) {
     fmov(vd, imm);
   } else {
-    uint64_t rawbits = double_to_rawbits(imm);
+    uint64_t rawbits = DoubleToRawbits(imm);
     if (vd.IsScalar()) {
       if (rawbits == 0) {
         fmov(vd, xzr);
       } else {
         Assembler::fImmPool64(vd, imm);
       }
     } else {
       // TODO: consider NEON support for load literal.
@@ -910,17 +910,17 @@ void MacroAssembler::Fmov(VRegister vd, 
     Fmov(vd, static_cast<double>(imm));
     return;
   }
 
   VIXL_ASSERT(vd.Is1S() || vd.Is2S() || vd.Is4S());
   if (IsImmFP32(imm)) {
     fmov(vd, imm);
   } else {
-    uint32_t rawbits = float_to_rawbits(imm);
+    uint32_t rawbits = FloatToRawbits(imm);
     if (vd.IsScalar()) {
       if (rawbits == 0) {
         fmov(vd, wzr);
       } else {
         Assembler::fImmPool32(vd, imm);
       }
     } else {
       // TODO: consider NEON support for load literal.
@@ -1126,17 +1126,17 @@ void MacroAssembler::AddSubWithCarryMacr
     Register temp = temps.AcquireSameSizeAs(rn);
     VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn) && !temp.Is(operand.maybeReg()));
     Mov(temp, operand);
     AddSubWithCarry(rd, rn, Operand(temp), S, op);
   } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
     // Add/sub with carry (shifted register).
     VIXL_ASSERT(operand.reg().size() == rd.size());
     VIXL_ASSERT(operand.shift() != ROR);
-    VIXL_ASSERT(is_uintn(rd.size() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2,
+    VIXL_ASSERT(IsUintN(rd.size() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2,
                     operand.shift_amount()));
     temps.Exclude(operand.reg());
     Register temp = temps.AcquireSameSizeAs(rn);
     VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn) && !temp.Is(operand.maybeReg()));
     EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
     AddSubWithCarry(rd, rn, Operand(temp), S, op);
   } else if (operand.IsExtendedRegister()) {
     // Add/sub with carry (extended register).
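As an aside, the renamed width predicates used throughout this file keep the semantics of the old is_intn/is_uintn helpers. A small sketch (not from the patch):

    // Sketch: IsUintN/IsIntN test whether a value fits in n unsigned/signed bits.
    void WidthPredicateExamples() {
      VIXL_ASSERT(IsUint8(0xff));                    // 255 fits in 8 unsigned bits.
      VIXL_ASSERT(!IsUint8(0x100));                  // 256 does not.
      VIXL_ASSERT(IsIntN(19, -(INT64_C(1) << 18)));  // Most negative 19-bit value.
      VIXL_ASSERT(!IsIntN(19, INT64_C(1) << 18));    // 2^18 is one too large.
    }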
--- a/js/src/jit/arm64/vixl/MozAssembler-vixl.cpp
+++ b/js/src/jit/arm64/vixl/MozAssembler-vixl.cpp
@@ -394,17 +394,17 @@ BufferOffset Assembler::hint(SystemHint 
 
 
 void Assembler::hint(Instruction* at, SystemHint code) {
   Emit(at, HINT | ImmHint(code));
 }
 
 
 void Assembler::svc(Instruction* at, int code) {
-  VIXL_ASSERT(is_uint16(code));
+  VIXL_ASSERT(IsUint16(code));
   Emit(at, SVC | ImmException(code));
 }
 
 
 void Assembler::nop(Instruction* at) {
   hint(at, NOP);
 }
 
@@ -419,17 +419,17 @@ BufferOffset Assembler::Logical(const Re
 {
   VIXL_ASSERT(rd.size() == rn.size());
   if (operand.IsImmediate()) {
     int64_t immediate = operand.immediate();
     unsigned reg_size = rd.size();
 
     VIXL_ASSERT(immediate != 0);
     VIXL_ASSERT(immediate != -1);
-    VIXL_ASSERT(rd.Is64Bits() || is_uint32(immediate));
+    VIXL_ASSERT(rd.Is64Bits() || IsUint32(immediate));
 
     // If the operation is NOT, invert the operation and immediate.
     if ((op & NOT) == NOT) {
       op = static_cast<LogicalOp>(op & ~NOT);
       immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
     }
 
     unsigned n, imm_s, imm_r;
@@ -458,17 +458,17 @@ BufferOffset Assembler::LogicalImmediate
                 ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg | Rn(rn));
 }
 
 
 BufferOffset Assembler::DataProcShiftedRegister(const Register& rd, const Register& rn,
                                                 const Operand& operand, FlagsUpdate S, Instr op)
 {
   VIXL_ASSERT(operand.IsShiftedRegister());
-  VIXL_ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
+  VIXL_ASSERT(rn.Is64Bits() || (rn.Is32Bits() && IsUint5(operand.shift_amount())));
   return Emit(SF(rd) | op | Flags(S) |
               ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
               Rm(operand.reg()) | Rn(rn) | Rd(rd));
 }
 
 
 void MozBaseAssembler::InsertIndexIntoTag(uint8_t* load, uint32_t index) {
   // Store the js::jit::PoolEntry index into the instruction.
@@ -680,17 +680,17 @@ void MozBaseAssembler::RetargetNearBranc
     return;
   }
 
   // Valid test branches are TBZ and TBNZ.
   if (i->IsTestBranch()) {
     VIXL_ASSERT(byteOffset % kInstructionSize == 0);
     // Opposite of ImmTestBranchBit(): MSB in bit 5, 0:5 at bit 40.
     unsigned bit_pos = (i->ImmTestBranchBit5() << 5) | (i->ImmTestBranchBit40());
-    VIXL_ASSERT(is_uint6(bit_pos));
+    VIXL_ASSERT(IsUint6(bit_pos));
 
     // Register size doesn't matter for the encoding.
     Register rt = Register::XRegFromCode(i->Rt());
 
     if (i->IsTBZ()) {
       Assembler::tbz(i, rt, bit_pos, instOffset);
     } else {
       VIXL_ASSERT(i->IsTBNZ());
--- a/js/src/jit/arm64/vixl/MozInstructions-vixl.cpp
+++ b/js/src/jit/arm64/vixl/MozInstructions-vixl.cpp
@@ -116,23 +116,23 @@ bool Instruction::IsBranchLinkImm() cons
 }
 
 
 bool Instruction::IsTargetReachable(const Instruction* target) const {
     VIXL_ASSERT(((target - this) & 3) == 0);
     int offset = (target - this) >> kInstructionSizeLog2;
     switch (BranchType()) {
       case CondBranchType:
-        return is_int19(offset);
+        return IsInt19(offset);
       case UncondBranchType:
-        return is_int26(offset);
+        return IsInt26(offset);
       case CompareBranchType:
-        return is_int19(offset);
+        return IsInt19(offset);
       case TestBranchType:
-        return is_int14(offset);
+        return IsInt14(offset);
       default:
         VIXL_UNREACHABLE();
     }
 }
 
 
 ptrdiff_t Instruction::ImmPCRawOffset() const {
   ptrdiff_t offset;
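For reference, a sketch (not part of the patch) of the branch ranges those predicates encode; offsets are counted in 4-byte instructions:

    // Sketch: IsInt19 -> +/-1 MiB (conditional/compare branches),
    //         IsInt26 -> +/-128 MiB (unconditional branches),
    //         IsInt14 -> +/-32 KiB (TBZ/TBNZ).
    void BranchReachExamples() {
      VIXL_ASSERT(IsInt19((INT64_C(1) << 18) - 1));  // Largest forward offset.
      VIXL_ASSERT(!IsInt19(INT64_C(1) << 18));       // One instruction too far.
      VIXL_ASSERT(IsInt26(-(INT64_C(1) << 25)));     // Largest backward offset.
    }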
--- a/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
+++ b/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
@@ -84,18 +84,18 @@ void Simulator::ResetState() {
   // Reset registers to 0.
   pc_ = nullptr;
   pc_modified_ = false;
   for (unsigned i = 0; i < kNumberOfRegisters; i++) {
     set_xreg(i, 0xbadbeef);
   }
   // Set FP registers to a value that is a NaN in both 32-bit and 64-bit FP.
   uint64_t nan_bits = UINT64_C(0x7ff0dead7f8beef1);
-  VIXL_ASSERT(IsSignallingNaN(rawbits_to_double(nan_bits & kDRegMask)));
-  VIXL_ASSERT(IsSignallingNaN(rawbits_to_float(nan_bits & kSRegMask)));
+  VIXL_ASSERT(IsSignallingNaN(RawbitsToDouble(nan_bits & kDRegMask)));
+  VIXL_ASSERT(IsSignallingNaN(RawbitsToFloat(nan_bits & kSRegMask)));
   for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
     set_dreg_bits(i, nan_bits);
   }
   // Returning to address 0 exits the Simulator.
   set_lr(kEndOfSimAddress);
 }
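The poison pattern above is deliberately a signalling NaN however the register is later read. A sketch (not part of the patch) of why both asserts hold:

    // Sketch: 0x7ff0dead7f8beef1 is a signalling NaN in both widths.
    void PoisonBitsExample() {
      uint64_t nan_bits = UINT64_C(0x7ff0dead7f8beef1);
      // Double view: exponent all ones, quiet bit (mantissa bit 51) clear, payload != 0.
      VIXL_ASSERT(IsSignallingNaN(RawbitsToDouble(nan_bits)));
      // Float view of the low word 0x7f8beef1: exponent all ones, bit 22 clear.
      VIXL_ASSERT(IsSignallingNaN(RawbitsToFloat(nan_bits & 0xffffffff)));
    }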
 
 
--- a/js/src/jit/arm64/vixl/Simulator-vixl.cpp
+++ b/js/src/jit/arm64/vixl/Simulator-vixl.cpp
@@ -36,17 +36,17 @@
 #include "jit/AtomicOperations.h"
 
 namespace vixl {
 
 const Instruction* Simulator::kEndOfSimAddress = NULL;
 
 void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
   int width = msb - lsb + 1;
-  VIXL_ASSERT(is_uintn(width, bits) || is_intn(width, bits));
+  VIXL_ASSERT(IsUintN(width, bits) || IsIntN(width, bits));
 
   bits <<= lsb;
   uint32_t mask = ((1 << width) - 1) << lsb;
   VIXL_ASSERT((mask & write_ignore_mask_) == 0);
 
   value_ = (value_ & ~mask) | (bits & mask);
 }
 
@@ -2147,22 +2147,34 @@ void Simulator::VisitFPDataProcessing1So
 
   switch (instr->Mask(FPDataProcessing1SourceMask)) {
     case FMOV_s: set_sreg(fd, sreg(fn)); return;
     case FMOV_d: set_dreg(fd, dreg(fn)); return;
     case FABS_s: fabs_(kFormatS, vreg(fd), vreg(fn)); return;
     case FABS_d: fabs_(kFormatD, vreg(fd), vreg(fn)); return;
     case FNEG_s: fneg(kFormatS, vreg(fd), vreg(fn)); return;
     case FNEG_d: fneg(kFormatD, vreg(fd), vreg(fn)); return;
-    case FCVT_ds: set_dreg(fd, FPToDouble(sreg(fn))); return;
-    case FCVT_sd: set_sreg(fd, FPToFloat(dreg(fn), FPTieEven)); return;
-    case FCVT_hs: set_hreg(fd, FPToFloat16(sreg(fn), FPTieEven)); return;
-    case FCVT_sh: set_sreg(fd, FPToFloat(hreg(fn))); return;
-    case FCVT_dh: set_dreg(fd, FPToDouble(FPToFloat(hreg(fn)))); return;
-    case FCVT_hd: set_hreg(fd, FPToFloat16(dreg(fn), FPTieEven)); return;
+    case FCVT_ds:
+      set_dreg(fd, FPToDouble(sreg(fn), ReadDN()));
+      return;
+    case FCVT_sd:
+      set_sreg(fd, FPToFloat(dreg(fn), FPTieEven, ReadDN()));
+      return;
+    case FCVT_hs:
+      set_hreg(fd, Float16ToRawbits(FPToFloat16(sreg(fn), FPTieEven, ReadDN())));
+      return;
+    case FCVT_sh:
+      set_sreg(fd, FPToFloat(RawbitsToFloat16(hreg(fn)), ReadDN()));
+      return;
+    case FCVT_dh:
+      set_dreg(fd, FPToDouble(RawbitsToFloat16(hreg(fn)), ReadDN()));
+      return;
+    case FCVT_hd:
+      set_hreg(fd, Float16ToRawbits(FPToFloat16(dreg(fn), FPTieEven, ReadDN())));
+      return;
     case FSQRT_s:
     case FSQRT_d: fsqrt(vform, rd, rn); return;
     case FRINTI_s:
     case FRINTI_d: break;  // Use FPCR rounding mode.
     case FRINTX_s:
     case FRINTX_d: inexact_exception = true; break;
     case FRINTA_s:
     case FRINTA_d: fpcr_rounding = FPTieAway; break;
@@ -3284,20 +3296,20 @@ void Simulator::VisitNEONModifiedImmedia
         for (int i = 0; i < 8; ++i) {
           if (imm8 & (1ULL << i)) {
             imm |= (UINT64_C(0xff) << (8 * i));
           }
         }
       } else {  // cmode_0 == 1, cmode == 0xf.
         if (op_bit == 0) {
           vform = q ? kFormat4S : kFormat2S;
-          imm = float_to_rawbits(instr->ImmNEONFP32());
+          imm = FloatToRawbits(instr->ImmNEONFP32());
         } else if (q == 1) {
           vform = kFormat2D;
-          imm = double_to_rawbits(instr->ImmNEONFP64());
+          imm = DoubleToRawbits(instr->ImmNEONFP64());
         } else {
           VIXL_ASSERT((q == 0) && (op_bit == 1) && (cmode == 0xf));
           VisitUnallocated(instr);
         }
       }
       break;
     default: VIXL_UNREACHABLE(); break;
   }
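The FCVT cases earlier in this file adopt the convention that H registers hold raw binary16 bits, hence the explicit Float16ToRawbits/RawbitsToFloat16 wrapping. A short sketch (not part of the patch):

    // Sketch: round-tripping a half-precision value through its raw-bit form.
    void HalfPrecisionExample() {
      Float16 h = FPToFloat16(1.0f, FPTieEven, kIgnoreDefaultNaN);
      uint16_t raw = Float16ToRawbits(h);  // 0x3c00 encodes 1.0 in binary16.
      float back = FPToFloat(RawbitsToFloat16(raw), kIgnoreDefaultNaN);
      VIXL_ASSERT(back == 1.0f);
    }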
--- a/js/src/jit/arm64/vixl/Simulator-vixl.h
+++ b/js/src/jit/arm64/vixl/Simulator-vixl.h
@@ -44,212 +44,16 @@
 #include "jit/IonTypes.h"
 #include "js/AllocPolicy.h"
 #include "vm/MutexIDs.h"
 #include "vm/PosixNSPR.h"
 #include "wasm/WasmSignalHandlers.h"
 
 namespace vixl {
 
-// Assemble the specified IEEE-754 components into the target type and apply
-// appropriate rounding.
-//  sign:     0 = positive, 1 = negative
-//  exponent: Unbiased IEEE-754 exponent.
-//  mantissa: The mantissa of the input. The top bit (which is not encoded for
-//            normal IEEE-754 values) must not be omitted. This bit has the
-//            value 'pow(2, exponent)'.
-//
-// The input value is assumed to be a normalized value. That is, the input may
-// not be infinity or NaN. If the source value is subnormal, it must be
-// normalized before calling this function such that the highest set bit in the
-// mantissa has the value 'pow(2, exponent)'.
-//
-// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
-// calling a templated FPRound.
-template <class T, int ebits, int mbits>
-T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
-                 FPRounding round_mode) {
-  VIXL_ASSERT((sign == 0) || (sign == 1));
-
-  // Only FPTieEven and FPRoundOdd rounding modes are implemented.
-  VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd));
-
-  // Rounding can promote subnormals to normals, and normals to infinities. For
-  // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be
-  // encodable as a float, but rounding based on the low-order mantissa bits
-  // could make it overflow. With ties-to-even rounding, this value would become
-  // an infinity.
-
-  // ---- Rounding Method ----
-  //
-  // The exponent is irrelevant in the rounding operation, so we treat the
-  // lowest-order bit that will fit into the result ('onebit') as having
-  // the value '1'. Similarly, the highest-order bit that won't fit into
-  // the result ('halfbit') has the value '0.5'. The 'point' sits between
-  // 'onebit' and 'halfbit':
-  //
-  //            These bits fit into the result.
-  //               |---------------------|
-  //  mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-  //                                     ||
-  //                                    / |
-  //                                   /  halfbit
-  //                               onebit
-  //
-  // For subnormal outputs, the range of representable bits is smaller and
-  // the position of onebit and halfbit depends on the exponent of the
-  // input, but the method is otherwise similar.
-  //
-  //   onebit(frac)
-  //     |
-  //     | halfbit(frac)          halfbit(adjusted)
-  //     | /                      /
-  //     | |                      |
-  //  0b00.0 (exact)      -> 0b00.0 (exact)                    -> 0b00
-  //  0b00.0...           -> 0b00.0...                         -> 0b00
-  //  0b00.1 (exact)      -> 0b00.0111..111                    -> 0b00
-  //  0b00.1...           -> 0b00.1...                         -> 0b01
-  //  0b01.0 (exact)      -> 0b01.0 (exact)                    -> 0b01
-  //  0b01.0...           -> 0b01.0...                         -> 0b01
-  //  0b01.1 (exact)      -> 0b01.1 (exact)                    -> 0b10
-  //  0b01.1...           -> 0b01.1...                         -> 0b10
-  //  0b10.0 (exact)      -> 0b10.0 (exact)                    -> 0b10
-  //  0b10.0...           -> 0b10.0...                         -> 0b10
-  //  0b10.1 (exact)      -> 0b10.0111..111                    -> 0b10
-  //  0b10.1...           -> 0b10.1...                         -> 0b11
-  //  0b11.0 (exact)      -> 0b11.0 (exact)                    -> 0b11
-  //  ...                   /             |                      /   |
-  //                       /              |                     /    |
-  //                                                           /     |
-  // adjusted = frac - (halfbit(mantissa) & ~onebit(frac));   /      |
-  //
-  //                   mantissa = (mantissa >> shift) + halfbit(adjusted);
-
-  static const int mantissa_offset = 0;
-  static const int exponent_offset = mantissa_offset + mbits;
-  static const int sign_offset = exponent_offset + ebits;
-  VIXL_ASSERT(sign_offset == (sizeof(T) * 8 - 1));
-
-  // Bail out early for zero inputs.
-  if (mantissa == 0) {
-    return static_cast<T>(sign << sign_offset);
-  }
-
-  // If all bits in the exponent are set, the value is infinite or NaN.
-  // This is true for all binary IEEE-754 formats.
-  static const int infinite_exponent = (1 << ebits) - 1;
-  static const int max_normal_exponent = infinite_exponent - 1;
-
-  // Apply the exponent bias to encode it for the result. Doing this early makes
-  // it easy to detect values that will be infinite or subnormal.
-  exponent += max_normal_exponent >> 1;
-
-  if (exponent > max_normal_exponent) {
-    // Overflow: the input is too large for the result type to represent.
-    if (round_mode == FPTieEven) {
-      // FPTieEven rounding mode handles overflows using infinities.
-      exponent = infinite_exponent;
-      mantissa = 0;
-    } else {
-      VIXL_ASSERT(round_mode == FPRoundOdd);
-      // FPRoundOdd rounding mode handles overflows using the largest magnitude
-      // normal number.
-      exponent = max_normal_exponent;
-      mantissa = (UINT64_C(1) << exponent_offset) - 1;
-    }
-    return static_cast<T>((sign << sign_offset) |
-                          (exponent << exponent_offset) |
-                          (mantissa << mantissa_offset));
-  }
-
-  // Calculate the shift required to move the top mantissa bit to the proper
-  // place in the destination type.
-  const int highest_significant_bit = 63 - CountLeadingZeros(mantissa);
-  int shift = highest_significant_bit - mbits;
-
-  if (exponent <= 0) {
-    // The output will be subnormal (before rounding).
-    // For subnormal outputs, the shift must be adjusted by the exponent. The +1
-    // is necessary because the exponent of a subnormal value (encoded as 0) is
-    // the same as the exponent of the smallest normal value (encoded as 1).
-    shift += -exponent + 1;
-
-    // Handle inputs that would produce a zero output.
-    //
-    // Shifts higher than highest_significant_bit+1 will always produce a zero
-    // result. A shift of exactly highest_significant_bit+1 might produce a
-    // non-zero result after rounding.
-    if (shift > (highest_significant_bit + 1)) {
-      if (round_mode == FPTieEven) {
-        // The result will always be +/-0.0.
-        return static_cast<T>(sign << sign_offset);
-      } else {
-        VIXL_ASSERT(round_mode == FPRoundOdd);
-        VIXL_ASSERT(mantissa != 0);
-        // For FPRoundOdd, if the mantissa is too small to represent and
-        // non-zero return the next "odd" value.
-        return static_cast<T>((sign << sign_offset) | 1);
-      }
-    }
-
-    // Properly encode the exponent for a subnormal output.
-    exponent = 0;
-  } else {
-    // Clear the topmost mantissa bit, since this is not encoded in IEEE-754
-    // normal values.
-    mantissa &= ~(UINT64_C(1) << highest_significant_bit);
-  }
-
-  if (shift > 0) {
-    if (round_mode == FPTieEven) {
-      // We have to shift the mantissa to the right. Some precision is lost, so
-      // we need to apply rounding.
-      uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
-      uint64_t halfbit_mantissa = (mantissa >> (shift-1)) & 1;
-      uint64_t adjustment = (halfbit_mantissa & ~onebit_mantissa);
-      uint64_t adjusted = mantissa - adjustment;
-      T halfbit_adjusted = (adjusted >> (shift-1)) & 1;
-
-      T result = static_cast<T>((sign << sign_offset) |
-                                (exponent << exponent_offset) |
-                                ((mantissa >> shift) << mantissa_offset));
-
-      // A very large mantissa can overflow during rounding. If this happens,
-      // the exponent should be incremented and the mantissa set to 1.0
-      // (encoded as 0). Applying halfbit_adjusted after assembling the float
-      // has the nice side-effect that this case is handled for free.
-      //
-      // This also handles cases where a very large finite value overflows to
-      // infinity, or where a very large subnormal value overflows to become
-      // normal.
-      return result + halfbit_adjusted;
-    } else {
-      VIXL_ASSERT(round_mode == FPRoundOdd);
-      // If any bits at position halfbit or below are set, onebit (ie. the
-      // bottom bit of the resulting mantissa) must be set.
-      uint64_t fractional_bits = mantissa & ((UINT64_C(1) << shift) - 1);
-      if (fractional_bits != 0) {
-        mantissa |= UINT64_C(1) << shift;
-      }
-
-      return static_cast<T>((sign << sign_offset) |
-                            (exponent << exponent_offset) |
-                            ((mantissa >> shift) << mantissa_offset));
-    }
-  } else {
-    // We have to shift the mantissa to the left (or not at all). The input
-    // mantissa is exactly representable in the output mantissa, so apply no
-    // rounding correction.
-    return static_cast<T>((sign << sign_offset) |
-                          (exponent << exponent_offset) |
-                          ((mantissa << -shift) << mantissa_offset));
-  }
-}
-
-
 // Representation of memory, with typed getters and setters for access.
 class Memory {
  public:
   template <typename T>
   static T AddressUntag(T address) {
     // Cast the address using a C-style cast. A reinterpret_cast would be
     // appropriate, but it can't cast one integral type to another.
     uint64_t bits = (uint64_t)address;
@@ -588,21 +392,21 @@ class SimSystemRegister {
     return value_;
   }
 
   void SetRawValue(uint32_t new_value) {
     value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_);
   }
 
   uint32_t Bits(int msb, int lsb) const {
-    return unsigned_bitextract_32(msb, lsb, value_);
+    return ExtractUnsignedBitfield32(msb, lsb, value_);
   }
 
   int32_t SignedBits(int msb, int lsb) const {
-    return signed_bitextract_32(msb, lsb, value_);
+    return ExtractSignedBitfield32(msb, lsb, value_);
   }
 
   void SetBits(int msb, int lsb, uint32_t bits);
 
   // Default system register values.
   static SimSystemRegister DefaultValueFor(SystemRegister id);
 
 #define DEFINE_GETTER(Name, HighBit, LowBit, Func)                            \
@@ -1048,16 +852,20 @@ class Simulator : public DecoderVisitor 
   SimSystemRegister& nzcv() { return nzcv_; }
 
   // TODO: Find a way to make the fpcr_ members return the proper types, so
   // these accessors are not necessary.
   FPRounding RMode() { return static_cast<FPRounding>(fpcr_.RMode()); }
   bool DN() { return fpcr_.DN() != 0; }
   SimSystemRegister& fpcr() { return fpcr_; }
 
+  UseDefaultNaN ReadDN() const {
+    return fpcr_.DN() != 0 ? kUseDefaultNaN : kIgnoreDefaultNaN;
+  }
+
   // Specify relevant register formats for Print(V)Register and related helpers.
   enum PrintRegisterFormat {
     // The lane size.
     kPrintRegLaneSizeB = 0 << 0,
     kPrintRegLaneSizeH = 1 << 0,
     kPrintRegLaneSizeS = 2 << 0,
     kPrintRegLaneSizeW = kPrintRegLaneSizeS,
     kPrintRegLaneSizeD = 3 << 0,
@@ -2466,21 +2274,16 @@ class Simulator : public DecoderVisitor 
   T FPRecipSqrtEstimate(T op);
   template <typename T>
   T FPRecipEstimate(T op, FPRounding rounding);
   template <typename T, typename R>
   R FPToFixed(T op, int fbits, bool is_signed, FPRounding rounding);
 
   void FPCompare(double val0, double val1, FPTrapFlags trap);
   double FPRoundInt(double value, FPRounding round_mode);
-  double FPToDouble(float value);
-  float FPToFloat(double value, FPRounding round_mode);
-  float FPToFloat(float16 value);
-  float16 FPToFloat16(float value, FPRounding round_mode);
-  float16 FPToFloat16(double value, FPRounding round_mode);
   double recip_sqrt_estimate(double a);
   double recip_estimate(double a);
   double FPRecipSqrtEstimate(double a);
   double FPRecipEstimate(double a);
   double FixedToDouble(int64_t src, int fbits, FPRounding round_mode);
   double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode);
   float FixedToFloat(int64_t src, int fbits, FPRounding round_mode);
   float UFixedToFloat(uint64_t src, int fbits, FPRounding round_mode);
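The FPRound template removed above (with its ties-to-even walkthrough) is now expected to come from the updated Utils header, whose conversion routines below call FPRoundToFloat and FPRoundToFloat16. A worked sketch (not part of the patch) of the FPTieEven behaviour it documents:

    // Sketch: FPTieEven picks the neighbour with an even mantissa when a value
    // lies exactly halfway between two representable floats.
    void TieToEvenExample() {
      double halfway = 1.0 + 1.0 / 16777216.0;  // 1 + 2^-24, midway between 1.0f
                                                // and the next float, 1.0f + 2^-23.
      float r = FPToFloat(halfway, FPTieEven, kIgnoreDefaultNaN);
      VIXL_ASSERT(r == 1.0f);                   // The even-mantissa neighbour wins.
    }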
--- a/js/src/jit/arm64/vixl/Utils-vixl.cpp
+++ b/js/src/jit/arm64/vixl/Utils-vixl.cpp
@@ -1,9 +1,9 @@
-// Copyright 2015, ARM Limited
+// Copyright 2015, VIXL authors
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are met:
 //
 //   * Redistributions of source code must retain the above copyright notice,
 //     this list of conditions and the following disclaimer.
 //   * Redistributions in binary form must reproduce the above copyright notice,
@@ -21,105 +21,163 @@
 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "jit/arm64/vixl/Utils-vixl.h"
 
-#include "mozilla/MathAlgorithms.h"
-
-#include <stdio.h>
+#include <cstdio>
 
 namespace vixl {
 
-uint32_t float_to_rawbits(float value) {
+// The default NaN values (for FPCR.DN=1).
+const double kFP64DefaultNaN = RawbitsToDouble(UINT64_C(0x7ff8000000000000));
+const float kFP32DefaultNaN = RawbitsToFloat(0x7fc00000);
+const Float16 kFP16DefaultNaN = RawbitsToFloat16(0x7e00);
+
+// Floating-point zero values.
+const Float16 kFP16PositiveZero = RawbitsToFloat16(0x0);
+const Float16 kFP16NegativeZero = RawbitsToFloat16(0x8000);
+
+// Floating-point infinity values.
+const Float16 kFP16PositiveInfinity = RawbitsToFloat16(0x7c00);
+const Float16 kFP16NegativeInfinity = RawbitsToFloat16(0xfc00);
+const float kFP32PositiveInfinity = RawbitsToFloat(0x7f800000);
+const float kFP32NegativeInfinity = RawbitsToFloat(0xff800000);
+const double kFP64PositiveInfinity =
+    RawbitsToDouble(UINT64_C(0x7ff0000000000000));
+const double kFP64NegativeInfinity =
+    RawbitsToDouble(UINT64_C(0xfff0000000000000));
+
+bool IsZero(Float16 value) {
+  uint16_t bits = Float16ToRawbits(value);
+  return (bits == Float16ToRawbits(kFP16PositiveZero) ||
+          bits == Float16ToRawbits(kFP16NegativeZero));
+}
+
+uint16_t Float16ToRawbits(Float16 value) { return value.rawbits_; }
+
+uint32_t FloatToRawbits(float value) {
   uint32_t bits = 0;
   memcpy(&bits, &value, 4);
   return bits;
 }
 
 
-uint64_t double_to_rawbits(double value) {
+uint64_t DoubleToRawbits(double value) {
   uint64_t bits = 0;
   memcpy(&bits, &value, 8);
   return bits;
 }
 
 
-float rawbits_to_float(uint32_t bits) {
+Float16 RawbitsToFloat16(uint16_t bits) {
+  Float16 f;
+  f.rawbits_ = bits;
+  return f;
+}
+
+
+float RawbitsToFloat(uint32_t bits) {
   float value = 0.0;
   memcpy(&value, &bits, 4);
   return value;
 }
 
 
-double rawbits_to_double(uint64_t bits) {
+double RawbitsToDouble(uint64_t bits) {
   double value = 0.0;
   memcpy(&value, &bits, 8);
   return value;
 }
 
 
-uint32_t float_sign(float val) {
-  uint32_t rawbits = float_to_rawbits(val);
-  return unsigned_bitextract_32(31, 31, rawbits);
+uint32_t Float16Sign(internal::SimFloat16 val) {
+  uint16_t rawbits = Float16ToRawbits(val);
+  return ExtractUnsignedBitfield32(15, 15, rawbits);
 }
 
 
-uint32_t float_exp(float val) {
-  uint32_t rawbits = float_to_rawbits(val);
-  return unsigned_bitextract_32(30, 23, rawbits);
+uint32_t Float16Exp(internal::SimFloat16 val) {
+  uint16_t rawbits = Float16ToRawbits(val);
+  return ExtractUnsignedBitfield32(14, 10, rawbits);
+}
+
+uint32_t Float16Mantissa(internal::SimFloat16 val) {
+  uint16_t rawbits = Float16ToRawbits(val);
+  return ExtractUnsignedBitfield32(9, 0, rawbits);
 }
 
 
-uint32_t float_mantissa(float val) {
-  uint32_t rawbits = float_to_rawbits(val);
-  return unsigned_bitextract_32(22, 0, rawbits);
+uint32_t FloatSign(float val) {
+  uint32_t rawbits = FloatToRawbits(val);
+  return ExtractUnsignedBitfield32(31, 31, rawbits);
 }
 
 
-uint32_t double_sign(double val) {
-  uint64_t rawbits = double_to_rawbits(val);
-  return static_cast<uint32_t>(unsigned_bitextract_64(63, 63, rawbits));
+uint32_t FloatExp(float val) {
+  uint32_t rawbits = FloatToRawbits(val);
+  return ExtractUnsignedBitfield32(30, 23, rawbits);
+}
+
+
+uint32_t FloatMantissa(float val) {
+  uint32_t rawbits = FloatToRawbits(val);
+  return ExtractUnsignedBitfield32(22, 0, rawbits);
 }
 
 
-uint32_t double_exp(double val) {
-  uint64_t rawbits = double_to_rawbits(val);
-  return static_cast<uint32_t>(unsigned_bitextract_64(62, 52, rawbits));
+uint32_t DoubleSign(double val) {
+  uint64_t rawbits = DoubleToRawbits(val);
+  return static_cast<uint32_t>(ExtractUnsignedBitfield64(63, 63, rawbits));
 }
 
 
-uint64_t double_mantissa(double val) {
-  uint64_t rawbits = double_to_rawbits(val);
-  return unsigned_bitextract_64(51, 0, rawbits);
+uint32_t DoubleExp(double val) {
+  uint64_t rawbits = DoubleToRawbits(val);
+  return static_cast<uint32_t>(ExtractUnsignedBitfield64(62, 52, rawbits));
+}
+
+
+uint64_t DoubleMantissa(double val) {
+  uint64_t rawbits = DoubleToRawbits(val);
+  return ExtractUnsignedBitfield64(51, 0, rawbits);
 }
 
 
-float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa) {
-  uint32_t bits = (sign << 31) | (exp << 23) | mantissa;
-  return rawbits_to_float(bits);
+internal::SimFloat16 Float16Pack(uint16_t sign,
+                                 uint16_t exp,
+                                 uint16_t mantissa) {
+  uint16_t bits = (sign << 15) | (exp << 10) | mantissa;
+  return RawbitsToFloat16(bits);
 }
 
 
-double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa) {
-  uint64_t bits = (sign << 63) | (exp << 52) | mantissa;
-  return rawbits_to_double(bits);
+float FloatPack(uint32_t sign, uint32_t exp, uint32_t mantissa) {
+  uint32_t bits = (sign << 31) | (exp << 23) | mantissa;
+  return RawbitsToFloat(bits);
 }
 
 
-int float16classify(float16 value) {
+double DoublePack(uint64_t sign, uint64_t exp, uint64_t mantissa) {
+  uint64_t bits = (sign << 63) | (exp << 52) | mantissa;
+  return RawbitsToDouble(bits);
+}
+
+
+int Float16Classify(Float16 value) {
+  uint16_t bits = Float16ToRawbits(value);
   uint16_t exponent_max = (1 << 5) - 1;
   uint16_t exponent_mask = exponent_max << 10;
   uint16_t mantissa_mask = (1 << 10) - 1;
 
-  uint16_t exponent = (value & exponent_mask) >> 10;
-  uint16_t mantissa = value & mantissa_mask;
+  uint16_t exponent = (bits & exponent_mask) >> 10;
+  uint16_t mantissa = bits & mantissa_mask;
   if (exponent == 0) {
     if (mantissa == 0) {
       return FP_ZERO;
     }
     return FP_SUBNORMAL;
   } else if (exponent == exponent_max) {
     if (mantissa == 0) {
       return FP_INFINITE;
@@ -137,9 +195,361 @@ unsigned CountClearHalfWords(uint64_t im
     if ((imm & 0xffff) == 0) {
       count++;
     }
     imm >>= 16;
   }
   return count;
 }
 
+
+int BitCount(uint64_t value) { return CountSetBits(value); }
+
+// Float16 definitions.
+
+Float16::Float16(double dvalue) {
+  rawbits_ =
+      Float16ToRawbits(FPToFloat16(dvalue, FPTieEven, kIgnoreDefaultNaN));
+}
+
+namespace internal {
+
+SimFloat16 SimFloat16::operator-() const {
+  return RawbitsToFloat16(rawbits_ ^ 0x8000);
+}
+
+// SimFloat16 definitions.
+SimFloat16 SimFloat16::operator+(SimFloat16 rhs) const {
+  return static_cast<double>(*this) + static_cast<double>(rhs);
+}
+
+SimFloat16 SimFloat16::operator-(SimFloat16 rhs) const {
+  return static_cast<double>(*this) - static_cast<double>(rhs);
+}
+
+SimFloat16 SimFloat16::operator*(SimFloat16 rhs) const {
+  return static_cast<double>(*this) * static_cast<double>(rhs);
+}
+
+SimFloat16 SimFloat16::operator/(SimFloat16 rhs) const {
+  return static_cast<double>(*this) / static_cast<double>(rhs);
+}
+
+bool SimFloat16::operator<(SimFloat16 rhs) const {
+  return static_cast<double>(*this) < static_cast<double>(rhs);
+}
+
+bool SimFloat16::operator>(SimFloat16 rhs) const {
+  return static_cast<double>(*this) > static_cast<double>(rhs);
+}
+
+bool SimFloat16::operator==(SimFloat16 rhs) const {
+  if (IsNaN(*this) || IsNaN(rhs)) {
+    return false;
+  } else if (IsZero(rhs) && IsZero(*this)) {
+    // +0 and -0 should be treated as equal.
+    return true;
+  }
+  return this->rawbits_ == rhs.rawbits_;
+}
+
+bool SimFloat16::operator!=(SimFloat16 rhs) const { return !(*this == rhs); }
+
+bool SimFloat16::operator==(double rhs) const {
+  return static_cast<double>(*this) == static_cast<double>(rhs);
+}
+
+SimFloat16::operator double() const {
+  return FPToDouble(*this, kIgnoreDefaultNaN);
+}
+
+Int64 BitCount(Uint32 value) { return CountSetBits(value.Get()); }
+
+}  // namespace internal
+
+float FPToFloat(Float16 value, UseDefaultNaN DN, bool* exception) {
+  uint16_t bits = Float16ToRawbits(value);
+  uint32_t sign = bits >> 15;
+  uint32_t exponent =
+      ExtractUnsignedBitfield32(kFloat16MantissaBits + kFloat16ExponentBits - 1,
+                                kFloat16MantissaBits,
+                                bits);
+  uint32_t mantissa =
+      ExtractUnsignedBitfield32(kFloat16MantissaBits - 1, 0, bits);
+
+  switch (Float16Classify(value)) {
+    case FP_ZERO:
+      return (sign == 0) ? 0.0f : -0.0f;
+
+    case FP_INFINITE:
+      return (sign == 0) ? kFP32PositiveInfinity : kFP32NegativeInfinity;
+
+    case FP_SUBNORMAL: {
+      // Calculate shift required to put mantissa into the most-significant bits
+      // of the destination mantissa.
+      int shift = CountLeadingZeros(mantissa << (32 - 10));
+
+      // Shift mantissa and discard implicit '1'.
+      mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits) + shift + 1;
+      mantissa &= (1 << kFloatMantissaBits) - 1;
+
+      // Adjust the exponent for the shift applied, and rebias.
+      exponent = exponent - shift + (-15 + 127);
+      break;
+    }
+
+    case FP_NAN:
+      if (IsSignallingNaN(value)) {
+        if (exception != NULL) {
+          *exception = true;
+        }
+      }
+      if (DN == kUseDefaultNaN) return kFP32DefaultNaN;
+
+      // Convert NaNs as the processor would:
+      //  - The sign is propagated.
+      //  - The payload (mantissa) is transferred entirely, except that the top
+      //    bit is forced to '1', making the result a quiet NaN. The unused
+      //    (low-order) payload bits are set to 0.
+      exponent = (1 << kFloatExponentBits) - 1;
+
+      // Increase bits in mantissa, making low-order bits 0.
+      mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits);
+      mantissa |= 1 << 22;  // Force a quiet NaN.
+      break;
+
+    case FP_NORMAL:
+      // Increase bits in mantissa, making low-order bits 0.
+      mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits);
+
+      // Change exponent bias.
+      exponent += (-15 + 127);
+      break;
+
+    default:
+      VIXL_UNREACHABLE();
+  }
+  return RawbitsToFloat((sign << 31) | (exponent << kFloatMantissaBits) |
+                        mantissa);
+}
+
+
+float FPToFloat(double value,
+                FPRounding round_mode,
+                UseDefaultNaN DN,
+                bool* exception) {
+  // Only the FPTieEven and FPRoundOdd rounding modes are implemented.
+  VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd));
+  USE(round_mode);
+
+  switch (std::fpclassify(value)) {
+    case FP_NAN: {
+      if (IsSignallingNaN(value)) {
+        if (exception != NULL) {
+          *exception = true;
+        }
+      }
+      if (DN == kUseDefaultNaN) return kFP32DefaultNaN;
+
+      // Convert NaNs as the processor would:
+      //  - The sign is propagated.
+      //  - The payload (mantissa) is transferred as much as possible, except
+      //    that the top bit is forced to '1', making the result a quiet NaN.
+      uint64_t raw = DoubleToRawbits(value);
+
+      uint32_t sign = raw >> 63;
+      uint32_t exponent = (1 << 8) - 1;
+      uint32_t payload =
+          static_cast<uint32_t>(ExtractUnsignedBitfield64(50, 52 - 23, raw));
+      payload |= (1 << 22);  // Force a quiet NaN.
+
+      return RawbitsToFloat((sign << 31) | (exponent << 23) | payload);
+    }
+
+    case FP_ZERO:
+    case FP_INFINITE: {
+      // In a C++ cast, any value representable in the target type will be
+      // unchanged. This is always the case for +/-0.0 and infinities.
+      return static_cast<float>(value);
+    }
+
+    case FP_NORMAL:
+    case FP_SUBNORMAL: {
+      // Convert double-to-float as the processor would, assuming that FPCR.FZ
+      // (flush-to-zero) is not set.
+      uint64_t raw = DoubleToRawbits(value);
+      // Extract the IEEE-754 double components.
+      uint32_t sign = raw >> 63;
+      // Extract the exponent and remove the IEEE-754 encoding bias.
+      int32_t exponent =
+          static_cast<int32_t>(ExtractUnsignedBitfield64(62, 52, raw)) - 1023;
+      // Extract the mantissa and add the implicit '1' bit.
+      uint64_t mantissa = ExtractUnsignedBitfield64(51, 0, raw);
+      if (std::fpclassify(value) == FP_NORMAL) {
+        mantissa |= (UINT64_C(1) << 52);
+      }
+      return FPRoundToFloat(sign, exponent, mantissa, round_mode);
+    }
+  }
+
+  VIXL_UNREACHABLE();
+  return value;
+}
+
+// TODO: We should consider implementing a full FPToDouble(Float16)
+// conversion function (for performance reasons).
+double FPToDouble(Float16 value, UseDefaultNaN DN, bool* exception) {
+  // We can rely on implicit float to double conversion here.
+  return FPToFloat(value, DN, exception);
+}
+
+
+double FPToDouble(float value, UseDefaultNaN DN, bool* exception) {
+  switch (std::fpclassify(value)) {
+    case FP_NAN: {
+      if (IsSignallingNaN(value)) {
+        if (exception != NULL) {
+          *exception = true;
+        }
+      }
+      if (DN == kUseDefaultNaN) return kFP64DefaultNaN;
+
+      // Convert NaNs as the processor would:
+      //  - The sign is propagated.
+      //  - The payload (mantissa) is transferred entirely, except that the top
+      //    bit is forced to '1', making the result a quiet NaN. The unused
+      //    (low-order) payload bits are set to 0.
+      uint32_t raw = FloatToRawbits(value);
+
+      uint64_t sign = raw >> 31;
+      uint64_t exponent = (1 << 11) - 1;
+      uint64_t payload = ExtractUnsignedBitfield64(21, 0, raw);
+      payload <<= (52 - 23);           // The unused low-order bits should be 0.
+      payload |= (UINT64_C(1) << 51);  // Force a quiet NaN.
+
+      return RawbitsToDouble((sign << 63) | (exponent << 52) | payload);
+    }
+
+    case FP_ZERO:
+    case FP_NORMAL:
+    case FP_SUBNORMAL:
+    case FP_INFINITE: {
+      // All other inputs are preserved in a standard cast, because every value
+      // representable using an IEEE-754 float is also representable using an
+      // IEEE-754 double.
+      return static_cast<double>(value);
+    }
+  }
+
+  VIXL_UNREACHABLE();
+  return static_cast<double>(value);
+}
+
+
+Float16 FPToFloat16(float value,
+                    FPRounding round_mode,
+                    UseDefaultNaN DN,
+                    bool* exception) {
+  // Only the FPTieEven rounding mode is implemented.
+  VIXL_ASSERT(round_mode == FPTieEven);
+  USE(round_mode);
+
+  uint32_t raw = FloatToRawbits(value);
+  int32_t sign = raw >> 31;
+  int32_t exponent = ExtractUnsignedBitfield32(30, 23, raw) - 127;
+  uint32_t mantissa = ExtractUnsignedBitfield32(22, 0, raw);
+
+  switch (std::fpclassify(value)) {
+    case FP_NAN: {
+      if (IsSignallingNaN(value)) {
+        if (exception != NULL) {
+          *exception = true;
+        }
+      }
+      if (DN == kUseDefaultNaN) return kFP16DefaultNaN;
+
+      // Convert NaNs as the processor would:
+      //  - The sign is propagated.
+      //  - The payload (mantissa) is transferred as much as possible, except
+      //    that the top bit is forced to '1', making the result a quiet NaN.
+      uint16_t result = (sign == 0) ? Float16ToRawbits(kFP16PositiveInfinity)
+                                    : Float16ToRawbits(kFP16NegativeInfinity);
+      result |= mantissa >> (kFloatMantissaBits - kFloat16MantissaBits);
+      result |= (1 << 9);  // Force a quiet NaN.
+      return RawbitsToFloat16(result);
+    }
+
+    case FP_ZERO:
+      return (sign == 0) ? kFP16PositiveZero : kFP16NegativeZero;
+
+    case FP_INFINITE:
+      return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
+
+    case FP_NORMAL:
+    case FP_SUBNORMAL: {
+      // Convert float-to-half as the processor would, assuming that FPCR.FZ
+      // (flush-to-zero) is not set.
+
+      // Add the implicit '1' bit to the mantissa.
+      mantissa += (1 << 23);
+      return FPRoundToFloat16(sign, exponent, mantissa, round_mode);
+    }
+  }
+
+  VIXL_UNREACHABLE();
+  return kFP16PositiveZero;
+}
+
+
+Float16 FPToFloat16(double value,
+                    FPRounding round_mode,
+                    UseDefaultNaN DN,
+                    bool* exception) {
+  // Only the FPTieEven rounding mode is implemented.
+  VIXL_ASSERT(round_mode == FPTieEven);
+  USE(round_mode);
+
+  uint64_t raw = DoubleToRawbits(value);
+  int32_t sign = raw >> 63;
+  int64_t exponent = ExtractUnsignedBitfield64(62, 52, raw) - 1023;
+  uint64_t mantissa = ExtractUnsignedBitfield64(51, 0, raw);
+
+  switch (std::fpclassify(value)) {
+    case FP_NAN: {
+      if (IsSignallingNaN(value)) {
+        if (exception != NULL) {
+          *exception = true;
+        }
+      }
+      if (DN == kUseDefaultNaN) return kFP16DefaultNaN;
+
+      // Convert NaNs as the processor would:
+      //  - The sign is propagated.
+      //  - The payload (mantissa) is transferred as much as possible, except
+      //    that the top bit is forced to '1', making the result a quiet NaN.
+      uint16_t result = (sign == 0) ? Float16ToRawbits(kFP16PositiveInfinity)
+                                    : Float16ToRawbits(kFP16NegativeInfinity);
+      result |= mantissa >> (kDoubleMantissaBits - kFloat16MantissaBits);
+      result |= (1 << 9);  // Force a quiet NaN.
+      return RawbitsToFloat16(result);
+    }
+
+    case FP_ZERO:
+      return (sign == 0) ? kFP16PositiveZero : kFP16NegativeZero;
+
+    case FP_INFINITE:
+      return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
+    case FP_NORMAL:
+    case FP_SUBNORMAL: {
+      // Convert double-to-half as the processor would, assuming that FPCR.FZ
+      // (flush-to-zero) is not set.
+
+      // Add the implicit '1' bit to the mantissa.
+      mantissa += (UINT64_C(1) << 52);
+      return FPRoundToFloat16(sign, exponent, mantissa, round_mode);
+    }
+  }
+
+  VIXL_UNREACHABLE();
+  return kFP16PositiveZero;
+}
+
 }  // namespace vixl
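To round out the Utils changes, a small sketch (not part of the patch) of how the renamed pack and extract helpers defined above fit together:

    // Sketch: DoublePack assembles sign/biased-exponent/mantissa fields and the
    // Double{Sign,Exp,Mantissa} helpers recover them.
    void PackUnpackExample() {
      double d = DoublePack(1, 1023, 0);  // sign=1, biased exponent=1023 -> -1.0.
      VIXL_ASSERT(d == -1.0);
      VIXL_ASSERT(DoubleSign(d) == 1);
      VIXL_ASSERT(DoubleExp(d) == 1023);
      VIXL_ASSERT(DoubleMantissa(d) == 0);
    }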
--- a/js/src/jit/arm64/vixl/Utils-vixl.h
+++ b/js/src/jit/arm64/vixl/Utils-vixl.h
@@ -1,9 +1,9 @@
-// Copyright 2015, ARM Limited
+// Copyright 2015, VIXL authors
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are met:
 //
 //   * Redistributions of source code must retain the above copyright notice,
 //     this list of conditions and the following disclaimer.
 //   * Redistributions in binary form must reproduce the above copyright notice,
@@ -24,263 +24,1260 @@
 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #ifndef VIXL_UTILS_H
 #define VIXL_UTILS_H
 
 #include "mozilla/FloatingPoint.h"
 
+#include <cmath>
+#include <cstring>
+#include <limits>
+#include <vector>
+
 #include "jit/arm64/vixl/CompilerIntrinsics-vixl.h"
 #include "jit/arm64/vixl/Globals-vixl.h"
 
 namespace vixl {
 
 // Macros for compile-time format checking.
-#if defined(__GNUC__)
+#if GCC_VERSION_OR_NEWER(4, 4, 0)
 #define PRINTF_CHECK(format_index, varargs_index) \
-  __attribute__((format(printf, format_index, varargs_index)))
+  __attribute__((format(gnu_printf, format_index, varargs_index)))
 #else
 #define PRINTF_CHECK(format_index, varargs_index)
 #endif
 
+#ifdef __GNUC__
+#define VIXL_HAS_DEPRECATED_WITH_MSG
+#elif defined(__clang__)
+#if __has_extension(attribute_deprecated_with_message)
+#define VIXL_HAS_DEPRECATED_WITH_MSG
+#endif
+#endif
+
+#ifdef VIXL_HAS_DEPRECATED_WITH_MSG
+#define VIXL_DEPRECATED(replaced_by, declarator) \
+  __attribute__((deprecated("Use \"" replaced_by "\" instead"))) declarator
+#else
+#define VIXL_DEPRECATED(replaced_by, declarator) declarator
+#endif
+
+#ifdef VIXL_DEBUG
+#define VIXL_UNREACHABLE_OR_FALLTHROUGH() VIXL_UNREACHABLE()
+#else
+#define VIXL_UNREACHABLE_OR_FALLTHROUGH() VIXL_FALLTHROUGH()
+#endif
+
+template <typename T, size_t n>
+size_t ArrayLength(const T (&)[n]) {
+  return n;
+}
+
 // Check number width.
-inline bool is_intn(unsigned n, int64_t x) {
+// TODO: Refactor these using templates.
+inline bool IsIntN(unsigned n, uint32_t x) {
+  VIXL_ASSERT((0 < n) && (n < 32));
+  uint32_t limit = UINT32_C(1) << (n - 1);
+  return x < limit;
+}
+inline bool IsIntN(unsigned n, int32_t x) {
+  VIXL_ASSERT((0 < n) && (n < 32));
+  int32_t limit = INT32_C(1) << (n - 1);
+  return (-limit <= x) && (x < limit);
+}
+inline bool IsIntN(unsigned n, uint64_t x) {
+  VIXL_ASSERT((0 < n) && (n < 64));
+  uint64_t limit = UINT64_C(1) << (n - 1);
+  return x < limit;
+}
+inline bool IsIntN(unsigned n, int64_t x) {
   VIXL_ASSERT((0 < n) && (n < 64));
   int64_t limit = INT64_C(1) << (n - 1);
   return (-limit <= x) && (x < limit);
 }
+VIXL_DEPRECATED("IsIntN", inline bool is_intn(unsigned n, int64_t x)) {
+  return IsIntN(n, x);
+}
 
-inline bool is_uintn(unsigned n, int64_t x) {
+inline bool IsUintN(unsigned n, uint32_t x) {
+  VIXL_ASSERT((0 < n) && (n < 32));
+  return !(x >> n);
+}
+inline bool IsUintN(unsigned n, int32_t x) {
+  VIXL_ASSERT((0 < n) && (n < 32));
+  // Convert to an unsigned integer to avoid implementation-defined behavior.
+  return !(static_cast<uint32_t>(x) >> n);
+}
+inline bool IsUintN(unsigned n, uint64_t x) {
   VIXL_ASSERT((0 < n) && (n < 64));
   return !(x >> n);
 }
-
-inline uint32_t truncate_to_intn(unsigned n, int64_t x) {
+inline bool IsUintN(unsigned n, int64_t x) {
   VIXL_ASSERT((0 < n) && (n < 64));
-  return static_cast<uint32_t>(x & ((INT64_C(1) << n) - 1));
+  // Convert to an unsigned integer to avoid implementation-defined behavior.
+  return !(static_cast<uint64_t>(x) >> n);
+}
+VIXL_DEPRECATED("IsUintN", inline bool is_uintn(unsigned n, int64_t x)) {
+  return IsUintN(n, x);
 }
 
-#define INT_1_TO_63_LIST(V)                                                    \
+inline uint64_t TruncateToUintN(unsigned n, uint64_t x) {
+  VIXL_ASSERT((0 < n) && (n < 64));
+  return static_cast<uint64_t>(x) & ((UINT64_C(1) << n) - 1);
+}
+VIXL_DEPRECATED("TruncateToUintN",
+                inline uint64_t truncate_to_intn(unsigned n, int64_t x)) {
+  return TruncateToUintN(n, x);
+}
+
+// clang-format off
+#define INT_1_TO_32_LIST(V)                                                    \
 V(1)  V(2)  V(3)  V(4)  V(5)  V(6)  V(7)  V(8)                                 \
 V(9)  V(10) V(11) V(12) V(13) V(14) V(15) V(16)                                \
 V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24)                                \
-V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32)                                \
+V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32)
+
+#define INT_33_TO_63_LIST(V)                                                   \
 V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40)                                \
 V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48)                                \
 V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56)                                \
 V(57) V(58) V(59) V(60) V(61) V(62) V(63)
 
-#define DECLARE_IS_INT_N(N)                                                    \
-inline bool is_int##N(int64_t x) { return is_intn(N, x); }
-#define DECLARE_IS_UINT_N(N)                                                   \
-inline bool is_uint##N(int64_t x) { return is_uintn(N, x); }
-#define DECLARE_TRUNCATE_TO_INT_N(N)                                           \
-inline uint32_t truncate_to_int##N(int x) { return truncate_to_intn(N, x); }
+#define INT_1_TO_63_LIST(V) INT_1_TO_32_LIST(V) INT_33_TO_63_LIST(V)
+
+// clang-format on
+
+#define DECLARE_IS_INT_N(N)                                       \
+  inline bool IsInt##N(int64_t x) { return IsIntN(N, x); }        \
+  VIXL_DEPRECATED("IsInt" #N, inline bool is_int##N(int64_t x)) { \
+    return IsIntN(N, x);                                          \
+  }
+
+#define DECLARE_IS_UINT_N(N)                                        \
+  inline bool IsUint##N(int64_t x) { return IsUintN(N, x); }        \
+  VIXL_DEPRECATED("IsUint" #N, inline bool is_uint##N(int64_t x)) { \
+    return IsUintN(N, x);                                           \
+  }
+
+#define DECLARE_TRUNCATE_TO_UINT_32(N)                             \
+  inline uint32_t TruncateToUint##N(uint64_t x) {                  \
+    return static_cast<uint32_t>(TruncateToUintN(N, x));           \
+  }                                                                \
+  VIXL_DEPRECATED("TruncateToUint" #N,                             \
+                  inline uint32_t truncate_to_int##N(int64_t x)) { \
+    return TruncateToUint##N(x);                                   \
+  }
+
 INT_1_TO_63_LIST(DECLARE_IS_INT_N)
 INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
-INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
+INT_1_TO_32_LIST(DECLARE_TRUNCATE_TO_UINT_32)
+
 #undef DECLARE_IS_INT_N
 #undef DECLARE_IS_UINT_N
-#undef DECLARE_TRUNCATE_TO_INT_N
+#undef DECLARE_TRUNCATE_TO_UINT_32
 
 // Bit field extraction.
-inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
-  return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
-}
-
-inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
+inline uint64_t ExtractUnsignedBitfield64(int msb, int lsb, uint64_t x) {
+  VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
+              (msb >= lsb));
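+  // Avoid the undefined behaviour of a 64-bit shift when extracting all bits.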
+  if ((msb == 63) && (lsb == 0)) return x;
   return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
 }
 
-inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
-  return (x << (31 - msb)) >> (lsb + 31 - msb);
+
+inline uint32_t ExtractUnsignedBitfield32(int msb, int lsb, uint32_t x) {
+  VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
+              (msb >= lsb));
+  return TruncateToUint32(ExtractUnsignedBitfield64(msb, lsb, x));
+}
+
+
+inline int64_t ExtractSignedBitfield64(int msb, int lsb, int64_t x) {
+  VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
+              (msb >= lsb));
+  uint64_t temp = ExtractUnsignedBitfield64(msb, lsb, x);
+  // If the highest extracted bit is set, sign extend.
+  if ((temp >> (msb - lsb)) == 1) {
+    temp |= ~UINT64_C(0) << (msb - lsb);
+  }
+  int64_t result;
+  memcpy(&result, &temp, sizeof(result));
+  return result;
 }
 
-inline int64_t signed_bitextract_64(int msb, int lsb, int64_t x) {
-  return (x << (63 - msb)) >> (lsb + 63 - msb);
+
+inline int32_t ExtractSignedBitfield32(int msb, int lsb, int32_t x) {
+  VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
+              (msb >= lsb));
+  uint32_t temp = TruncateToUint32(ExtractSignedBitfield64(msb, lsb, x));
+  int32_t result;
+  memcpy(&result, &temp, sizeof(result));
+  return result;
 }
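+
+// Examples (illustrative):
+//   ExtractUnsignedBitfield64(7, 4, UINT64_C(0xab))  // 0xa (bits [7:4]).
+//   ExtractSignedBitfield64(3, 0, INT64_C(0xf))      // -1 (bit 3 is the sign).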
 
+
+inline uint64_t RotateRight(uint64_t value,
+                            unsigned int rotate,
+                            unsigned int width) {
+  VIXL_ASSERT((width > 0) && (width <= 64));
+  uint64_t width_mask = ~UINT64_C(0) >> (64 - width);
+  rotate &= 63;
+  if (rotate > 0) {
+    value &= width_mask;
+    value = (value << (width - rotate)) | (value >> rotate);
+  }
+  return value & width_mask;
+}
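+
+// For example (illustrative), RotateRight(UINT64_C(1), 1, 4) rotates within a
+// 4-bit field and yields UINT64_C(8) (0b0001 -> 0b1000).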
+
+
+// Wrapper class for passing FP16 values through the assembler.
+// This is purely to aid with type checking/casting.
+class Float16 {
+ public:
+  explicit Float16(double dvalue);
+  Float16() : rawbits_(0x0) {}
+  friend uint16_t Float16ToRawbits(Float16 value);
+  friend Float16 RawbitsToFloat16(uint16_t bits);
+
+ protected:
+  uint16_t rawbits_;
+};
+
 // Floating point representation.
-uint32_t float_to_rawbits(float value);
-uint64_t double_to_rawbits(double value);
-float rawbits_to_float(uint32_t bits);
-double rawbits_to_double(uint64_t bits);
+uint16_t Float16ToRawbits(Float16 value);
+
+
+uint32_t FloatToRawbits(float value);
+VIXL_DEPRECATED("FloatToRawbits",
+                inline uint32_t float_to_rawbits(float value)) {
+  return FloatToRawbits(value);
+}
+
+uint64_t DoubleToRawbits(double value);
+VIXL_DEPRECATED("DoubleToRawbits",
+                inline uint64_t double_to_rawbits(double value)) {
+  return DoubleToRawbits(value);
+}
+
+Float16 RawbitsToFloat16(uint16_t bits);
+
+float RawbitsToFloat(uint32_t bits);
+VIXL_DEPRECATED("RawbitsToFloat",
+                inline float rawbits_to_float(uint32_t bits)) {
+  return RawbitsToFloat(bits);
+}
+
+double RawbitsToDouble(uint64_t bits);
+VIXL_DEPRECATED("RawbitsToDouble",
+                inline double rawbits_to_double(uint64_t bits)) {
+  return RawbitsToDouble(bits);
+}
+
+namespace internal {
 
-uint32_t float_sign(float val);
-uint32_t float_exp(float val);
-uint32_t float_mantissa(float val);
-uint32_t double_sign(double val);
-uint32_t double_exp(double val);
-uint64_t double_mantissa(double val);
+// Internal simulation class used solely by the simulator to
+// provide an abstraction layer for any half-precision arithmetic.
+class SimFloat16 : public Float16 {
+ public:
+  // TODO: We should investigate making this constructor explicit.
+  // This is currently difficult to do due to a number of templated
+  // functions in the simulator which rely on returning double values.
+  SimFloat16(double dvalue) : Float16(dvalue) {}  // NOLINT(runtime/explicit)
+  SimFloat16(Float16 f) {                         // NOLINT(runtime/explicit)
+    this->rawbits_ = Float16ToRawbits(f);
+  }
+  SimFloat16() : Float16() {}
+  SimFloat16 operator-() const;
+  SimFloat16 operator+(SimFloat16 rhs) const;
+  SimFloat16 operator-(SimFloat16 rhs) const;
+  SimFloat16 operator*(SimFloat16 rhs) const;
+  SimFloat16 operator/(SimFloat16 rhs) const;
+  bool operator<(SimFloat16 rhs) const;
+  bool operator>(SimFloat16 rhs) const;
+  bool operator==(SimFloat16 rhs) const;
+  bool operator!=(SimFloat16 rhs) const;
+  // This is necessary for conversions performed in (macro asm) Fmov.
+  bool operator==(double rhs) const;
+  operator double() const;
+};
+}  // namespace internal
+
+uint32_t Float16Sign(internal::SimFloat16 value);
+
+uint32_t Float16Exp(internal::SimFloat16 value);
+
+uint32_t Float16Mantissa(internal::SimFloat16 value);
+
+uint32_t FloatSign(float value);
+VIXL_DEPRECATED("FloatSign", inline uint32_t float_sign(float value)) {
+  return FloatSign(value);
+}
 
-float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa);
-double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa);
+uint32_t FloatExp(float value);
+VIXL_DEPRECATED("FloatExp", inline uint32_t float_exp(float value)) {
+  return FloatExp(value);
+}
+
+uint32_t FloatMantissa(float value);
+VIXL_DEPRECATED("FloatMantissa", inline uint32_t float_mantissa(float value)) {
+  return FloatMantissa(value);
+}
+
+uint32_t DoubleSign(double value);
+VIXL_DEPRECATED("DoubleSign", inline uint32_t double_sign(double value)) {
+  return DoubleSign(value);
+}
+
+uint32_t DoubleExp(double value);
+VIXL_DEPRECATED("DoubleExp", inline uint32_t double_exp(double value)) {
+  return DoubleExp(value);
+}
+
+uint64_t DoubleMantissa(double value);
+VIXL_DEPRECATED("DoubleMantissa",
+                inline uint64_t double_mantissa(double value)) {
+  return DoubleMantissa(value);
+}
+
+internal::SimFloat16 Float16Pack(uint16_t sign,
+                                 uint16_t exp,
+                                 uint16_t mantissa);
+
+float FloatPack(uint32_t sign, uint32_t exp, uint32_t mantissa);
+VIXL_DEPRECATED("FloatPack",
+                inline float float_pack(uint32_t sign,
+                                        uint32_t exp,
+                                        uint32_t mantissa)) {
+  return FloatPack(sign, exp, mantissa);
+}
+
+double DoublePack(uint64_t sign, uint64_t exp, uint64_t mantissa);
+VIXL_DEPRECATED("DoublePack",
+                inline double double_pack(uint32_t sign,
+                                          uint32_t exp,
+                                          uint64_t mantissa)) {
+  return DoublePack(sign, exp, mantissa);
+}
 
 // An fpclassify() function for 16-bit half-precision floats.
-int float16classify(float16 value);
+int Float16Classify(Float16 value);
+VIXL_DEPRECATED("Float16Classify", inline int float16classify(uint16_t value)) {
+  return Float16Classify(RawbitsToFloat16(value));
+}
+
+bool IsZero(Float16 value);
+
+inline bool IsNaN(float value) { return std::isnan(value); }
+
+inline bool IsNaN(double value) { return std::isnan(value); }
+
+inline bool IsNaN(Float16 value) { return Float16Classify(value) == FP_NAN; }
+
+inline bool IsInf(float value) { return std::isinf(value); }
+
+inline bool IsInf(double value) { return std::isinf(value); }
+
+inline bool IsInf(Float16 value) {
+  return Float16Classify(value) == FP_INFINITE;
+}
+
 
 // NaN tests.
 inline bool IsSignallingNaN(double num) {
   const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
-  uint64_t raw = double_to_rawbits(num);
-  if (mozilla::IsNaN(num) && ((raw & kFP64QuietNaNMask) == 0)) {
+  uint64_t raw = DoubleToRawbits(num);
+  if (IsNaN(num) && ((raw & kFP64QuietNaNMask) == 0)) {
     return true;
   }
   return false;
 }
 
 
 inline bool IsSignallingNaN(float num) {
   const uint32_t kFP32QuietNaNMask = 0x00400000;
-  uint32_t raw = float_to_rawbits(num);
-  if (mozilla::IsNaN(num) && ((raw & kFP32QuietNaNMask) == 0)) {
+  uint32_t raw = FloatToRawbits(num);
+  if (IsNaN(num) && ((raw & kFP32QuietNaNMask) == 0)) {
     return true;
   }
   return false;
 }
 
 
-inline bool IsSignallingNaN(float16 num) {
+inline bool IsSignallingNaN(Float16 num) {
   const uint16_t kFP16QuietNaNMask = 0x0200;
-  return (float16classify(num) == FP_NAN) &&
-         ((num & kFP16QuietNaNMask) == 0);
+  return IsNaN(num) && ((Float16ToRawbits(num) & kFP16QuietNaNMask) == 0);
 }
 
 
 template <typename T>
 inline bool IsQuietNaN(T num) {
-  return mozilla::IsNaN(num) && !IsSignallingNaN(num);
+  return IsNaN(num) && !IsSignallingNaN(num);
 }
 
 
 // Convert the NaN in 'num' to a quiet NaN.
 inline double ToQuietNaN(double num) {
   const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
-  VIXL_ASSERT(mozilla::IsNaN(num));
-  return rawbits_to_double(double_to_rawbits(num) | kFP64QuietNaNMask);
+  VIXL_ASSERT(IsNaN(num));
+  return RawbitsToDouble(DoubleToRawbits(num) | kFP64QuietNaNMask);
 }
 
 
 inline float ToQuietNaN(float num) {
   const uint32_t kFP32QuietNaNMask = 0x00400000;
-  VIXL_ASSERT(mozilla::IsNaN(num));
-  return rawbits_to_float(float_to_rawbits(num) | kFP32QuietNaNMask);
+  VIXL_ASSERT(IsNaN(num));
+  return RawbitsToFloat(FloatToRawbits(num) | kFP32QuietNaNMask);
+}
+
+
+inline internal::SimFloat16 ToQuietNaN(internal::SimFloat16 num) {
+  const uint16_t kFP16QuietNaNMask = 0x0200;
+  VIXL_ASSERT(IsNaN(num));
+  return internal::SimFloat16(
+      RawbitsToFloat16(Float16ToRawbits(num) | kFP16QuietNaNMask));
 }
 
 
 // Fused multiply-add.
 inline double FusedMultiplyAdd(double op1, double op2, double a) {
   return fma(op1, op2, a);
 }
 
 
 inline float FusedMultiplyAdd(float op1, float op2, float a) {
   return fmaf(op1, op2, a);
 }
 
 
-inline uint64_t LowestSetBit(uint64_t value) {
-  return value & (0 - value);
-}
+inline uint64_t LowestSetBit(uint64_t value) { return value & -value; }
 
 
-template<typename T>
+template <typename T>
 inline int HighestSetBitPosition(T value) {
   VIXL_ASSERT(value != 0);
   return (sizeof(value) * 8 - 1) - CountLeadingZeros(value);
 }
 
 
-template<typename V>
+template <typename V>
 inline int WhichPowerOf2(V value) {
   VIXL_ASSERT(IsPowerOf2(value));
   return CountTrailingZeros(value);
 }
 
 
 unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
 
 
+int BitCount(uint64_t value);
+
+
 template <typename T>
 T ReverseBits(T value) {
   VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
               (sizeof(value) == 4) || (sizeof(value) == 8));
   T result = 0;
   for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
     result = (result << 1) | (value & 1);
     value >>= 1;
   }
   return result;
 }
 
 
 template <typename T>
+inline T SignExtend(T val, int bitSize) {
+  VIXL_ASSERT(bitSize > 0);
+  T mask = (T(2) << (bitSize - 1)) - T(1);
+  val &= mask;
+  T sign_bits = -((val >> (bitSize - 1)) << bitSize);
+  val |= sign_bits;
+  return val;
+}
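+
+// For example (illustrative), SignExtend<int32_t>(0xff, 8) treats bit 7 as the
+// sign bit and yields -1, while SignExtend<int32_t>(0x7f, 8) yields 0x7f.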
+
+
+template <typename T>
 T ReverseBytes(T value, int block_bytes_log2) {
   VIXL_ASSERT((sizeof(value) == 4) || (sizeof(value) == 8));
-  VIXL_ASSERT((1ULL << block_bytes_log2) <= sizeof(value));
+  VIXL_ASSERT((1U << block_bytes_log2) <= sizeof(value));
   // Split the 64-bit value into an 8-bit array, where b[0] is the least
   // significant byte, and b[7] is the most significant.
   uint8_t bytes[8];
-  uint64_t mask = 0xff00000000000000;
+  uint64_t mask = UINT64_C(0xff00000000000000);
   for (int i = 7; i >= 0; i--) {
     bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8);
     mask >>= 8;
   }
 
   // Permutation tables for REV instructions.
   //  permute_table[0] is used by REV16_x, REV16_w
   //  permute_table[1] is used by REV32_x, REV_w
   //  permute_table[2] is used by REV_x
   VIXL_ASSERT((0 < block_bytes_log2) && (block_bytes_log2 < 4));
-  static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
-                                               {4, 5, 6, 7, 0, 1, 2, 3},
-                                               {0, 1, 2, 3, 4, 5, 6, 7} };
-  T result = 0;
+  static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1},
+                                              {4, 5, 6, 7, 0, 1, 2, 3},
+                                              {0, 1, 2, 3, 4, 5, 6, 7}};
+  uint64_t temp = 0;
   for (int i = 0; i < 8; i++) {
-    result <<= 8;
-    result |= bytes[permute_table[block_bytes_log2 - 1][i]];
+    temp <<= 8;
+    temp |= bytes[permute_table[block_bytes_log2 - 1][i]];
   }
+
+  T result;
+  VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(temp));
+  memcpy(&result, &temp, sizeof(result));
   return result;
 }
 
+template <unsigned MULTIPLE, typename T>
+inline bool IsMultiple(T value) {
+  VIXL_ASSERT(IsPowerOf2(MULTIPLE));
+  return (value & (MULTIPLE - 1)) == 0;
+}
+
+template <typename T>
+inline bool IsMultiple(T value, unsigned multiple) {
+  VIXL_ASSERT(IsPowerOf2(multiple));
+  return (value & (multiple - 1)) == 0;
+}
+
+template <typename T>
+inline bool IsAligned(T pointer, int alignment) {
+  VIXL_ASSERT(IsPowerOf2(alignment));
+  return (pointer & (alignment - 1)) == 0;
+}
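+
+// Examples (illustrative):
+//   IsMultiple<16>(48)    // true: 48 is a multiple of 16.
+//   IsMultiple(48, 32)    // false.
+//   IsAligned(0x1004, 8)  // false: 0x1004 is only 4-byte aligned.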
 
 // Pointer alignment
 // TODO: rename/refactor to make it specific to instructions.
-template<typename T>
+template <unsigned ALIGN, typename T>
+inline bool IsAligned(T pointer) {
+  VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t));  // NOLINT(runtime/sizeof)
+  // Use C-style casts to get static_cast behaviour for integral types (T), and
+  // reinterpret_cast behaviour for other types.
+  return IsAligned((intptr_t)(pointer), ALIGN);
+}
+
+template <typename T>
 bool IsWordAligned(T pointer) {
-  VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t));   // NOLINT(runtime/sizeof)
-  return ((intptr_t)(pointer) & 3) == 0;
+  return IsAligned<4>(pointer);
 }
 
-// Increment a pointer (up to 64 bits) until it has the specified alignment.
-template<class T>
-T AlignUp(T pointer, size_t alignment) {
+// Increment a pointer until it has the specified alignment. The alignment must
+// be a power of two.
+template <class T>
+T AlignUp(T pointer,
+          typename Unsigned<sizeof(T) * kBitsPerByte>::type alignment) {
+  VIXL_ASSERT(IsPowerOf2(alignment));
+  // Use C-style casts to get static_cast behaviour for integral types (T), and
+  // reinterpret_cast behaviour for other types.
+
+  typename Unsigned<sizeof(T)* kBitsPerByte>::type pointer_raw =
+      (typename Unsigned<sizeof(T) * kBitsPerByte>::type)pointer;
+  VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
+
+  size_t mask = alignment - 1;
+  T result = (T)((pointer_raw + mask) & ~mask);
+  VIXL_ASSERT(result >= pointer);
+
+  return result;
+}
+
+// Decrement a pointer until it has the specified alignment. The alignment must
+// be a power of two.
+template <class T>
+T AlignDown(T pointer,
+            typename Unsigned<sizeof(T) * kBitsPerByte>::type alignment) {
+  VIXL_ASSERT(IsPowerOf2(alignment));
   // Use C-style casts to get static_cast behaviour for integral types (T), and
   // reinterpret_cast behaviour for other types.
 
-  uint64_t pointer_raw = (uint64_t)pointer;
+  typename Unsigned<sizeof(T)* kBitsPerByte>::type pointer_raw =
+      (typename Unsigned<sizeof(T) * kBitsPerByte>::type)pointer;
   VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
 
-  size_t align_step = (alignment - pointer_raw) % alignment;
-  VIXL_ASSERT((pointer_raw + align_step) % alignment == 0);
+  size_t mask = alignment - 1;
+  return (T)(pointer_raw & ~mask);
+}
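+
+// Examples (illustrative), using integral addresses:
+//   AlignUp(UINT64_C(0x1003), 8)    // 0x1008
+//   AlignDown(UINT64_C(0x1003), 8)  // 0x1000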
+
+
+template <typename T>
+inline T ExtractBit(T value, unsigned bit) {
+  return (value >> bit) & T(1);
+}
+
+template <typename Ts, typename Td>
+inline Td ExtractBits(Ts value, int least_significant_bit, Td mask) {
+  return Td((value >> least_significant_bit) & Ts(mask));
+}
 
-  return (T)(pointer_raw + align_step);
+template <typename Ts, typename Td>
+inline void AssignBit(Td& dst,  // NOLINT(runtime/references)
+                      int bit,
+                      Ts value) {
+  VIXL_ASSERT((value == Ts(0)) || (value == Ts(1)));
+  VIXL_ASSERT(bit >= 0);
+  VIXL_ASSERT(bit < static_cast<int>(sizeof(Td) * 8));
+  Td mask(1);
+  dst &= ~(mask << bit);
+  dst |= Td(value) << bit;
+}
+
+template <typename Td, typename Ts>
+inline void AssignBits(Td& dst,  // NOLINT(runtime/references)
+                       int least_significant_bit,
+                       Ts mask,
+                       Ts value) {
+  VIXL_ASSERT(least_significant_bit >= 0);
+  VIXL_ASSERT(least_significant_bit < static_cast<int>(sizeof(Td) * 8));
+  VIXL_ASSERT(((Td(mask) << least_significant_bit) >> least_significant_bit) ==
+              Td(mask));
+  VIXL_ASSERT((value & mask) == value);
+  dst &= ~(Td(mask) << least_significant_bit);
+  dst |= Td(value) << least_significant_bit;
 }
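+
+// For example (illustrative), given uint32_t w = 0:
+//   AssignBits(w, 4, 0xfu, 0x5u);  // w is now 0x50.
+//   ExtractBits(w, 4, 0xfu)        // 0x5
+//   ExtractBit(w, 6)               // 1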
 
-// Decrement a pointer (up to 64 bits) until it has the specified alignment.
-template<class T>
-T AlignDown(T pointer, size_t alignment) {
-  // Use C-style casts to get static_cast behaviour for integral types (T), and
-  // reinterpret_cast behaviour for other types.
+class VFP {
+ public:
+  static uint32_t FP32ToImm8(float imm) {
+    // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
+    uint32_t bits = FloatToRawbits(imm);
+    // bit7: a000.0000
+    uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
+    // bit6: 0b00.0000
+    uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
+    // bit5_to_0: 00cd.efgh
+    uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
+    return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
+  }
+  static uint32_t FP64ToImm8(double imm) {
+    // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+    //       0000.0000.0000.0000.0000.0000.0000.0000
+    uint64_t bits = DoubleToRawbits(imm);
+    // bit7: a000.0000
+    uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
+    // bit6: 0b00.0000
+    uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
+    // bit5_to_0: 00cd.efgh
+    uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
+
+    return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
+  }
+  static float Imm8ToFP32(uint32_t imm8) {
+    //   Imm8: abcdefgh (8 bits)
+    // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
+    // where B is b ^ 1
+    uint32_t bits = imm8;
+    uint32_t bit7 = (bits >> 7) & 0x1;
+    uint32_t bit6 = (bits >> 6) & 0x1;
+    uint32_t bit5_to_0 = bits & 0x3f;
+    uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
+
+    return RawbitsToFloat(result);
+  }
+  static double Imm8ToFP64(uint32_t imm8) {
+    //   Imm8: abcdefgh (8 bits)
+    // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+    //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
+    // where B is b ^ 1
+    uint32_t bits = imm8;
+    uint64_t bit7 = (bits >> 7) & 0x1;
+    uint64_t bit6 = (bits >> 6) & 0x1;
+    uint64_t bit5_to_0 = bits & 0x3f;
+    uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
+    return RawbitsToDouble(result);
+  }
+  static bool IsImmFP32(float imm) {
+    // Valid values will have the form:
+    // aBbb.bbbc.defg.h000.0000.0000.0000.0000
+    uint32_t bits = FloatToRawbits(imm);
+    // bits[18..0] are cleared.
+    if ((bits & 0x7ffff) != 0) {
+      return false;
+    }
+
+    // bits[29..25] are all set or all cleared.
+    uint32_t b_pattern = (bits >> 16) & 0x3e00;
+    if (b_pattern != 0 && b_pattern != 0x3e00) {
+      return false;
+    }
+    // bit[30] and bit[29] are opposite.
+    if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
+      return false;
+    }
+    return true;
+  }
+  static bool IsImmFP64(double imm) {
+    // Valid values will have the form:
+    // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+    // 0000.0000.0000.0000.0000.0000.0000.0000
+    uint64_t bits = DoubleToRawbits(imm);
+    // bits[47..0] are cleared.
+    if ((bits & 0x0000ffffffffffff) != 0) {
+      return false;
+    }
+    // bits[61..54] are all set or all cleared.
+    uint32_t b_pattern = (bits >> 48) & 0x3fc0;
+    if ((b_pattern != 0) && (b_pattern != 0x3fc0)) {
+      return false;
+    }
+    // bit[62] and bit[61] are opposite.
+    if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) {
+      return false;
+    }
+    return true;
+  }
+};
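+
+// For example (illustrative), 1.0f is encodable as an 8-bit immediate:
+//   VFP::IsImmFP32(1.0f)                    // true
+//   VFP::Imm8ToFP32(VFP::FP32ToImm8(1.0f))  // 1.0f (the mapping round-trips)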
+
+class BitField {
+  // ForEachBitHelper is a functor that provides
+  //   bool ForEachBitHelper::execute(ElementType id) const
+  // and returns a boolean indicating whether the iteration should continue
+  // (true) or stop (false).
+  // check_set selects whether the helper is invoked for bits that are set
+  // (true) or clear (false).
+  template <typename ForEachBitHelper, bool check_set>
+  bool ForEachBit(const ForEachBitHelper& helper) {
+    for (int i = 0; static_cast<size_t>(i) < bitfield_.size(); i++) {
+      if (bitfield_[i] == check_set)
+        if (!helper.execute(i)) return false;
+    }
+    return true;
+  }
+
+ public:
+  explicit BitField(unsigned size) : bitfield_(size, 0) {}
+
+  void Set(int i) {
+    VIXL_ASSERT((i >= 0) && (static_cast<size_t>(i) < bitfield_.size()));
+    bitfield_[i] = true;
+  }
+
+  void Unset(int i) {
+    VIXL_ASSERT((i >= 0) && (static_cast<size_t>(i) < bitfield_.size()));
+    bitfield_[i] = false;
+  }
+
+  bool IsSet(int i) const { return bitfield_[i]; }
+
+  // For each bit not set in the bitfield, call the helper's execute
+  // method.
+  // ForEachBitNotSetHelper::execute returns true if the iteration through
+  // the bits can continue, otherwise it stops. For example:
+  // struct ForEachBitNotSetHelper {
+  //   bool execute(int /*id*/) { return false; }
+  // };
+  template <typename ForEachBitNotSetHelper>
+  bool ForEachBitNotSet(const ForEachBitNotSetHelper& helper) {
+    return ForEachBit<ForEachBitNotSetHelper, false>(helper);
+  }
+
+  // For each bit set in the bitfield, call the helper's execute method.
+  template <typename ForEachBitSetHelper>
+  bool ForEachBitSet(const ForEachBitSetHelper& helper) {
+    return ForEachBit<ForEachBitSetHelper, true>(helper);
+  }
+
+ private:
+  std::vector<bool> bitfield_;
+};
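+
+// A minimal usage sketch (illustrative; the helper type is hypothetical):
+//   struct PrintSetBit {
+//     bool execute(int id) const { printf("bit %d\n", id); return true; }
+//   };
+//   BitField bits(8);
+//   bits.Set(3);
+//   bits.ForEachBitSet(PrintSetBit());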
+
+namespace internal {
+
+typedef int64_t Int64;
+class Uint64;
+class Uint128;
+
+class Uint32 {
+  uint32_t data_;
+
+ public:
+  // Unlike uint32_t, Uint32 has a default constructor.
+  Uint32() { data_ = 0; }
+  explicit Uint32(uint32_t data) : data_(data) {}
+  inline explicit Uint32(Uint64 data);
+  uint32_t Get() const { return data_; }
+  template <int N>
+  int32_t GetSigned() const {
+    return ExtractSignedBitfield32(N - 1, 0, data_);
+  }
+  int32_t GetSigned() const { return data_; }
+  Uint32 operator~() const { return Uint32(~data_); }
+  Uint32 operator-() const { return Uint32(-data_); }
+  bool operator==(Uint32 value) const { return data_ == value.data_; }
+  bool operator!=(Uint32 value) const { return data_ != value.data_; }
+  bool operator>(Uint32 value) const { return data_ > value.data_; }
+  Uint32 operator+(Uint32 value) const { return Uint32(data_ + value.data_); }
+  Uint32 operator-(Uint32 value) const { return Uint32(data_ - value.data_); }
+  Uint32 operator&(Uint32 value) const { return Uint32(data_ & value.data_); }
+  Uint32 operator&=(Uint32 value) {
+    data_ &= value.data_;
+    return *this;
+  }
+  Uint32 operator^(Uint32 value) const { return Uint32(data_ ^ value.data_); }
+  Uint32 operator^=(Uint32 value) {
+    data_ ^= value.data_;
+    return *this;
+  }
+  Uint32 operator|(Uint32 value) const { return Uint32(data_ | value.data_); }
+  Uint32 operator|=(Uint32 value) {
+    data_ |= value.data_;
+    return *this;
+  }
+  // Unlike uint32_t, the shift functions accept a negative shift (which
+  // shifts the other way) and return 0 when the shift is too big.
+  Uint32 operator>>(int shift) const {
+    if (shift == 0) return *this;
+    if (shift < 0) {
+      int tmp = -shift;
+      if (tmp >= 32) return Uint32(0);
+      return Uint32(data_ << tmp);
+    }
+    int tmp = shift;
+    if (tmp >= 32) return Uint32(0);
+    return Uint32(data_ >> tmp);
+  }
+  Uint32 operator<<(int shift) const {
+    if (shift == 0) return *this;
+    if (shift < 0) {
+      int tmp = -shift;
+      if (tmp >= 32) return Uint32(0);
+      return Uint32(data_ >> tmp);
+    }
+    int tmp = shift;
+    if (tmp >= 32) return Uint32(0);
+    return Uint32(data_ << tmp);
+  }
+};
+
+class Uint64 {
+  uint64_t data_;
+
+ public:
+  // Unlike uint64_t, Uint64 has a default constructor.
+  Uint64() { data_ = 0; }
+  explicit Uint64(uint64_t data) : data_(data) {}
+  explicit Uint64(Uint32 data) : data_(data.Get()) {}
+  inline explicit Uint64(Uint128 data);
+  uint64_t Get() const { return data_; }
+  int64_t GetSigned(int N) const {
+    return ExtractSignedBitfield64(N - 1, 0, data_);
+  }
+  int64_t GetSigned() const { return data_; }
+  Uint32 ToUint32() const {
+    VIXL_ASSERT((data_ >> 32) == 0);
+    return Uint32(static_cast<uint32_t>(data_));
+  }
+  Uint32 GetHigh32() const { return Uint32(data_ >> 32); }
+  Uint32 GetLow32() const { return Uint32(data_ & 0xffffffff); }
+  Uint64 operator~() const { return Uint64(~data_); }
+  Uint64 operator-() const { return Uint64(-data_); }
+  bool operator==(Uint64 value) const { return data_ == value.data_; }
+  bool operator!=(Uint64 value) const { return data_ != value.data_; }
+  Uint64 operator+(Uint64 value) const { return Uint64(data_ + value.data_); }
+  Uint64 operator-(Uint64 value) const { return Uint64(data_ - value.data_); }
+  Uint64 operator&(Uint64 value) const { return Uint64(data_ & value.data_); }
+  Uint64 operator&=(Uint64 value) {
+    data_ &= value.data_;
+    return *this;
+  }
+  Uint64 operator^(Uint64 value) const { return Uint64(data_ ^ value.data_); }
+  Uint64 operator^=(Uint64 value) {
+    data_ ^= value.data_;
+    return *this;
+  }
+  Uint64 operator|(Uint64 value) const { return Uint64(data_ | value.data_); }
+  Uint64 operator|=(Uint64 value) {
+    data_ |= value.data_;
+    return *this;
+  }
+  // Unlike uint64_t, the shift functions accept a negative shift (which
+  // shifts the other way) and return 0 when the shift is too big.
+  Uint64 operator>>(int shift) const {
+    if (shift == 0) return *this;
+    if (shift < 0) {
+      int tmp = -shift;
+      if (tmp >= 64) return Uint64(0);
+      return Uint64(data_ << tmp);
+    }
+    int tmp = shift;
+    if (tmp >= 64) return Uint64(0);
+    return Uint64(data_ >> tmp);
+  }
+  Uint64 operator<<(int shift) const {
+    if (shift == 0) return *this;
+    if (shift < 0) {
+      int tmp = -shift;
+      if (tmp >= 64) return Uint64(0);
+      return Uint64(data_ >> tmp);
+    }
+    int tmp = shift;
+    if (tmp >= 64) return Uint64(0);
+    return Uint64(data_ << tmp);
+  }
+};
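+
+// For example (illustrative), unlike the raw integer types these wrappers
+// define out-of-range and negative shifts:
+//   Uint32(1) << 40  // Uint32(0): the shift is too big, so the result is 0.
+//   Uint32(1) >> -4  // Uint32(16): a negative right shift shifts left.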
+
+class Uint128 {
+  uint64_t data_high_;
+  uint64_t data_low_;
 
-  uint64_t pointer_raw = (uint64_t)pointer;
-  VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
+ public:
+  Uint128() : data_high_(0), data_low_(0) {}
+  explicit Uint128(uint64_t data_low) : data_high_(0), data_low_(data_low) {}
+  explicit Uint128(Uint64 data_low)
+      : data_high_(0), data_low_(data_low.Get()) {}
+  Uint128(uint64_t data_high, uint64_t data_low)
+      : data_high_(data_high), data_low_(data_low) {}
+  Uint64 ToUint64() const {
+    VIXL_ASSERT(data_high_ == 0);
+    return Uint64(data_low_);
+  }
+  Uint64 GetHigh64() const { return Uint64(data_high_); }
+  Uint64 GetLow64() const { return Uint64(data_low_); }
+  Uint128 operator~() const { return Uint128(~data_high_, ~data_low_); }
+  bool operator==(Uint128 value) const {
+    return (data_high_ == value.data_high_) && (data_low_ == value.data_low_);
+  }
+  Uint128 operator&(Uint128 value) const {
+    return Uint128(data_high_ & value.data_high_, data_low_ & value.data_low_);
+  }
+  Uint128 operator&=(Uint128 value) {
+    data_high_ &= value.data_high_;
+    data_low_ &= value.data_low_;
+    return *this;
+  }
+  Uint128 operator|=(Uint128 value) {
+    data_high_ |= value.data_high_;
+    data_low_ |= value.data_low_;
+    return *this;
+  }
+  Uint128 operator>>(int shift) const {
+    VIXL_ASSERT((shift >= 0) && (shift < 128));
+    if (shift == 0) return *this;
+    if (shift >= 64) {
+      return Uint128(0, data_high_ >> (shift - 64));
+    }
+    uint64_t tmp = (data_high_ << (64 - shift)) | (data_low_ >> shift);
+    return Uint128(data_high_ >> shift, tmp);
+  }
+  Uint128 operator<<(int shift) const {
+    VIXL_ASSERT((shift >= 0) && (shift < 128));
+    if (shift == 0) return *this;
+    if (shift >= 64) {
+      return Uint128(data_low_ << (shift - 64), 0);
+    }
+    uint64_t tmp = (data_high_ << shift) | (data_low_ >> (64 - shift));
+    return Uint128(tmp, data_low_ << shift);
+  }
+};
+
+Uint32::Uint32(Uint64 data) : data_(data.ToUint32().Get()) {}
+Uint64::Uint64(Uint128 data) : data_(data.ToUint64().Get()) {}
+
+Int64 BitCount(Uint32 value);
+
+}  // namespace internal
+
+// The default NaN values (for FPCR.DN=1).
+extern const double kFP64DefaultNaN;
+extern const float kFP32DefaultNaN;
+extern const Float16 kFP16DefaultNaN;
+
+// Floating-point infinity values.
+extern const Float16 kFP16PositiveInfinity;
+extern const Float16 kFP16NegativeInfinity;
+extern const float kFP32PositiveInfinity;
+extern const float kFP32NegativeInfinity;
+extern const double kFP64PositiveInfinity;
+extern const double kFP64NegativeInfinity;
+
+// Floating-point zero values.
+extern const Float16 kFP16PositiveZero;
+extern const Float16 kFP16NegativeZero;
+
+// AArch64 floating-point specifics. These match IEEE-754.
+const unsigned kDoubleMantissaBits = 52;
+const unsigned kDoubleExponentBits = 11;
+const unsigned kFloatMantissaBits = 23;
+const unsigned kFloatExponentBits = 8;
+const unsigned kFloat16MantissaBits = 10;
+const unsigned kFloat16ExponentBits = 5;
+
+enum FPRounding {
+  // The first four values are encodable directly by FPCR<RMode>.
+  FPTieEven = 0x0,
+  FPPositiveInfinity = 0x1,
+  FPNegativeInfinity = 0x2,
+  FPZero = 0x3,
+
+  // The final rounding modes are only available when explicitly specified by
+  // the instruction (such as with fcvta). They cannot be set in FPCR.
+  FPTieAway,
+  FPRoundOdd
+};
+
+enum UseDefaultNaN { kUseDefaultNaN, kIgnoreDefaultNaN };
+
+// Assemble the specified IEEE-754 components into the target type and apply
+// appropriate rounding.
+//  sign:     0 = positive, 1 = negative
+//  exponent: Unbiased IEEE-754 exponent.
+//  mantissa: The mantissa of the input. The top bit (which is not encoded for
+//            normal IEEE-754 values) must not be omitted. This bit has the
+//            value 'pow(2, exponent)'.
+//
+// The input value is assumed to be a normalized value. That is, the input may
+// not be infinity or NaN. If the source value is subnormal, it must be
+// normalized before calling this function such that the highest set bit in the
+// mantissa has the value 'pow(2, exponent)'.
+//
+// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
+// calling a templated FPRound.
+template <class T, int ebits, int mbits>
+T FPRound(int64_t sign,
+          int64_t exponent,
+          uint64_t mantissa,
+          FPRounding round_mode) {
+  VIXL_ASSERT((sign == 0) || (sign == 1));
+
+  // Only FPTieEven and FPRoundOdd rounding modes are implemented.
+  VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd));
+
+  // Rounding can promote subnormals to normals, and normals to infinities. For
+  // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be
+  // encodable as a float, but rounding based on the low-order mantissa bits
+  // could make it overflow. With ties-to-even rounding, this value would become
+  // an infinity.
 
-  size_t align_step = pointer_raw % alignment;
-  VIXL_ASSERT((pointer_raw - align_step) % alignment == 0);
+  // ---- Rounding Method ----
+  //
+  // The exponent is irrelevant in the rounding operation, so we treat the
+  // lowest-order bit that will fit into the result ('onebit') as having
+  // the value '1'. Similarly, the highest-order bit that won't fit into
+  // the result ('halfbit') has the value '0.5'. The 'point' sits between
+  // 'onebit' and 'halfbit':
+  //
+  //            These bits fit into the result.
+  //               |---------------------|
+  //  mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+  //                                     ||
+  //                                    / |
+  //                                   /  halfbit
+  //                               onebit
+  //
+  // For subnormal outputs, the range of representable bits is smaller and
+  // the position of onebit and halfbit depends on the exponent of the
+  // input, but the method is otherwise similar.
+  //
+  //   onebit(frac)
+  //     |
+  //     | halfbit(frac)          halfbit(adjusted)
+  //     | /                      /
+  //     | |                      |
+  //  0b00.0 (exact)      -> 0b00.0 (exact)                    -> 0b00
+  //  0b00.0...           -> 0b00.0...                         -> 0b00
+  //  0b00.1 (exact)      -> 0b00.0111..111                    -> 0b00
+  //  0b00.1...           -> 0b00.1...                         -> 0b01
+  //  0b01.0 (exact)      -> 0b01.0 (exact)                    -> 0b01
+  //  0b01.0...           -> 0b01.0...                         -> 0b01
+  //  0b01.1 (exact)      -> 0b01.1 (exact)                    -> 0b10
+  //  0b01.1...           -> 0b01.1...                         -> 0b10
+  //  0b10.0 (exact)      -> 0b10.0 (exact)                    -> 0b10
+  //  0b10.0...           -> 0b10.0...                         -> 0b10
+  //  0b10.1 (exact)      -> 0b10.0111..111                    -> 0b10
+  //  0b10.1...           -> 0b10.1...                         -> 0b11
+  //  0b11.0 (exact)      -> 0b11.0 (exact)                    -> 0b11
+  //  ...                   /             |                      /   |
+  //                       /              |                     /    |
+  //                                                           /     |
+  // adjusted = frac - (halfbit(mantissa) & ~onebit(frac));   /      |
+  //
+  //                   mantissa = (mantissa >> shift) + halfbit(adjusted);
+
+  static const int mantissa_offset = 0;
+  static const int exponent_offset = mantissa_offset + mbits;
+  static const int sign_offset = exponent_offset + ebits;
+  VIXL_ASSERT(sign_offset == (sizeof(T) * 8 - 1));
+
+  // Bail out early for zero inputs.
+  if (mantissa == 0) {
+    return static_cast<T>(sign << sign_offset);
+  }
+
+  // If all bits in the exponent are set, the value is infinite or NaN.
+  // This is true for all binary IEEE-754 formats.
+  static const int infinite_exponent = (1 << ebits) - 1;
+  static const int max_normal_exponent = infinite_exponent - 1;
+
+  // Apply the exponent bias to encode it for the result. Doing this early makes
+  // it easy to detect values that will be infinite or subnormal.
+  exponent += max_normal_exponent >> 1;
+
+  if (exponent > max_normal_exponent) {
+    // Overflow: the input is too large for the result type to represent.
+    if (round_mode == FPTieEven) {
+      // FPTieEven rounding mode handles overflows using infinities.
+      exponent = infinite_exponent;
+      mantissa = 0;
+    } else {
+      VIXL_ASSERT(round_mode == FPRoundOdd);
+      // FPRoundOdd rounding mode handles overflows using the largest magnitude
+      // normal number.
+      exponent = max_normal_exponent;
+      mantissa = (UINT64_C(1) << exponent_offset) - 1;
+    }
+    return static_cast<T>((sign << sign_offset) |
+                          (exponent << exponent_offset) |
+                          (mantissa << mantissa_offset));
+  }
 
-  return (T)(pointer_raw - align_step);
+  // Calculate the shift required to move the top mantissa bit to the proper
+  // place in the destination type.
+  const int highest_significant_bit = 63 - CountLeadingZeros(mantissa);
+  int shift = highest_significant_bit - mbits;
+
+  if (exponent <= 0) {
+    // The output will be subnormal (before rounding).
+    // For subnormal outputs, the shift must be adjusted by the exponent. The +1
+    // is necessary because the exponent of a subnormal value (encoded as 0) is
+    // the same as the exponent of the smallest normal value (encoded as 1).
+    shift += -exponent + 1;
+
+    // Handle inputs that would produce a zero output.
+    //
+    // Shifts higher than highest_significant_bit+1 will always produce a zero
+    // result. A shift of exactly highest_significant_bit+1 might produce a
+    // non-zero result after rounding.
+    if (shift > (highest_significant_bit + 1)) {
+      if (round_mode == FPTieEven) {
+        // The result will always be +/-0.0.
+        return static_cast<T>(sign << sign_offset);
+      } else {
+        VIXL_ASSERT(round_mode == FPRoundOdd);
+        VIXL_ASSERT(mantissa != 0);
+        // For FPRoundOdd, if the mantissa is too small to represent and
+        // non-zero, return the next "odd" value.
+        return static_cast<T>((sign << sign_offset) | 1);
+      }
+    }
+
+    // Properly encode the exponent for a subnormal output.
+    exponent = 0;
+  } else {
+    // Clear the topmost mantissa bit, since this is not encoded in IEEE-754
+    // normal values.
+    mantissa &= ~(UINT64_C(1) << highest_significant_bit);
+  }
+
+  // The casts below are only well-defined for unsigned integers.
+  VIXL_STATIC_ASSERT(std::numeric_limits<T>::is_integer);
+  VIXL_STATIC_ASSERT(!std::numeric_limits<T>::is_signed);
+
+  if (shift > 0) {
+    if (round_mode == FPTieEven) {
+      // We have to shift the mantissa to the right. Some precision is lost, so
+      // we need to apply rounding.
+      uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
+      uint64_t halfbit_mantissa = (mantissa >> (shift - 1)) & 1;
+      uint64_t adjustment = (halfbit_mantissa & ~onebit_mantissa);
+      uint64_t adjusted = mantissa - adjustment;
+      T halfbit_adjusted = (adjusted >> (shift - 1)) & 1;
+
+      T result =
+          static_cast<T>((sign << sign_offset) | (exponent << exponent_offset) |
+                         ((mantissa >> shift) << mantissa_offset));
+
+      // A very large mantissa can overflow during rounding. If this happens,
+      // the exponent should be incremented and the mantissa set to 1.0
+      // (encoded as 0). Applying halfbit_adjusted after assembling the float
+      // has the nice side-effect that this case is handled for free.
+      //
+      // This also handles cases where a very large finite value overflows to
+      // infinity, or where a very large subnormal value overflows to become
+      // normal.
+      return result + halfbit_adjusted;
+    } else {
+      VIXL_ASSERT(round_mode == FPRoundOdd);
+      // If any bits at position halfbit or below are set, onebit (i.e. the
+      // bottom bit of the resulting mantissa) must be set.
+      uint64_t fractional_bits = mantissa & ((UINT64_C(1) << shift) - 1);
+      if (fractional_bits != 0) {
+        mantissa |= UINT64_C(1) << shift;
+      }
+
+      return static_cast<T>((sign << sign_offset) |
+                            (exponent << exponent_offset) |
+                            ((mantissa >> shift) << mantissa_offset));
+    }
+  } else {
+    // We have to shift the mantissa to the left (or not at all). The input
+    // mantissa is exactly representable in the output mantissa, so apply no
+    // rounding correction.
+    return static_cast<T>((sign << sign_offset) |
+                          (exponent << exponent_offset) |
+                          ((mantissa << -shift) << mantissa_offset));
+  }
 }
 
+
+// See FPRound for a description of this function.
+inline double FPRoundToDouble(int64_t sign,
+                              int64_t exponent,
+                              uint64_t mantissa,
+                              FPRounding round_mode) {
+  uint64_t bits =
+      FPRound<uint64_t, kDoubleExponentBits, kDoubleMantissaBits>(sign,
+                                                                  exponent,
+                                                                  mantissa,
+                                                                  round_mode);
+  return RawbitsToDouble(bits);
+}
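+
+// For example (illustrative), 1.5 can be assembled from sign 0, unbiased
+// exponent 0 and mantissa 0b11 (the top mantissa bit has value 2^0):
+//   FPRoundToDouble(0, 0, UINT64_C(3), FPTieEven)  // 1.5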
+
+
+// See FPRound for a description of this function.
+inline Float16 FPRoundToFloat16(int64_t sign,
+                                int64_t exponent,
+                                uint64_t mantissa,
+                                FPRounding round_mode) {
+  return RawbitsToFloat16(
+      FPRound<uint16_t,
+              kFloat16ExponentBits,
+              kFloat16MantissaBits>(sign, exponent, mantissa, round_mode));
+}
+
+
+// See FPRound for a description of this function.
+static inline float FPRoundToFloat(int64_t sign,
+                                   int64_t exponent,
+                                   uint64_t mantissa,
+                                   FPRounding round_mode) {
+  uint32_t bits =
+      FPRound<uint32_t, kFloatExponentBits, kFloatMantissaBits>(sign,
+                                                                exponent,
+                                                                mantissa,
+                                                                round_mode);
+  return RawbitsToFloat(bits);
+}
+
+
+float FPToFloat(Float16 value, UseDefaultNaN DN, bool* exception = NULL);
+float FPToFloat(double value,
+                FPRounding round_mode,
+                UseDefaultNaN DN,
+                bool* exception = NULL);
+
+double FPToDouble(Float16 value, UseDefaultNaN DN, bool* exception = NULL);
+double FPToDouble(float value, UseDefaultNaN DN, bool* exception = NULL);
+
+Float16 FPToFloat16(float value,
+                    FPRounding round_mode,
+                    UseDefaultNaN DN,
+                    bool* exception = NULL);
+
+Float16 FPToFloat16(double value,
+                    FPRounding round_mode,
+                    UseDefaultNaN DN,
+                    bool* exception = NULL);
 }  // namespace vixl
 
 #endif  // VIXL_UTILS_H