Bug 1551128 - Limit result bit length in BigInt.asUintN on negative values r=jwalden
author Andy Wingo <wingo@igalia.com>
date Thu, 16 May 2019 07:39:14 +0000
changeset 474041 1e82c40506077c6d5864a658b3585b0b2f2d8631
parent 474040 4a49d2484923003962d44d7a09aa057bc3f578c7
child 474042 e55213bb470a88e0bd26a75b91497235fd3e9d97
push id 36022
push user ncsoregi@mozilla.com
push date Thu, 16 May 2019 21:55:16 +0000
treeherder mozilla-central@96802be91766
reviewers jwalden
bugs 1551128
milestone 68.0a1
Differential Revision: https://phabricator.services.mozilla.com/D30874
js/src/jit-test/tests/bigint/bug1551128.js
js/src/vm/BigIntType.cpp
js/src/vm/BigIntType.h
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/bigint/bug1551128.js
@@ -0,0 +1,6 @@
+load(libdir + "asserts.js");
+
+assertEq(BigInt.asUintN(32, -1n), 0xffffffffn);
+assertThrowsInstanceOf(() => BigInt.asUintN(2**32 - 1, -1n), RangeError);
+assertThrowsInstanceOf(() => BigInt.asUintN(2**32, -1n), RangeError);
+assertThrowsInstanceOf(() => BigInt.asUintN(2**53 - 1, -1n), RangeError);
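For context: BigInt.asUintN(bits, x) returns x modulo 2**bits, so for a negative input such as -1n the result occupies roughly `bits` bits, which is why the limit check added below matters specifically on the negative-value path. The following is a minimal JS sketch of those semantics, not the engine's code path; asUintNReference is a hypothetical name used only for illustration, and assertEq is the SpiderMonkey shell builtin also used by the test above.

// Reference semantics only, not the engine's implementation.  asUintN returns
// x modulo 2**bits; for negative x that is 2**bits - (|x| mod 2**bits), a value
// needing on the order of `bits` bits of storage.  A huge `bits` argument
// combined with a negative value therefore has to be rejected up front.
function asUintNReference(bits, x) {
  const mod = 2n ** BigInt(bits);
  return ((x % mod) + mod) % mod;
}

assertEq(asUintNReference(32, -1n), BigInt.asUintN(32, -1n));  // both 0xffffffffn
assertEq(asUintNReference(8, -5n), 251n);                      // 2**8 - 5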
--- a/js/src/vm/BigIntType.cpp
+++ b/js/src/vm/BigIntType.cpp
@@ -2239,16 +2239,22 @@ uint64_t BigInt::toUint64(BigInt* x) {
 // Compute `2**bits - (x & (2**bits - 1))`.  Used when treating BigInt values as
 // arbitrary-precision two's complement signed integers.
 BigInt* BigInt::truncateAndSubFromPowerOfTwo(JSContext* cx, HandleBigInt x,
                                              uint64_t bits,
                                              bool resultNegative) {
   MOZ_ASSERT(bits != 0);
   MOZ_ASSERT(!x->isZero());
 
+  if (bits > MaxBitLength) {
+    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+                              JSMSG_BIGINT_TOO_LARGE);
+    return nullptr;
+  }
+
   size_t resultLength = CeilDiv(bits, DigitBits);
   RootedBigInt result(cx,
                       createUninitialized(cx, resultLength, resultNegative));
   if (!result) {
     return nullptr;
   }
 
   // Process all digits except the MSD.
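The guard sits ahead of the CeilDiv/createUninitialized pair because `bits` is the ToIndex'd first argument of asUintN/asIntN and can be as large as 2**53 - 1, the value the new test exercises. Below is a rough, shell-runnable estimate of the digit count a negative value would otherwise request; the concrete numbers assume 64-bit digits, and the variable names merely mirror the C++ above.

// Back-of-the-envelope estimate, assuming 64-bit digits (DigitBits is 32 on
// 32-bit targets per BigIntType.h).  Without the MaxBitLength check, the
// negative-value path would compute this resultLength for asUintN(2**53 - 1, -1n).
const DigitBits = 64;
const bits = 2 ** 53 - 1;                          // largest value ToIndex accepts
const resultLength = Math.ceil(bits / DigitBits);  // ~1.4e14 digits
print(resultLength * 8 / 2 ** 40 + " TiB");        // about 1024 TiB of storage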
--- a/js/src/vm/BigIntType.h
+++ b/js/src/vm/BigIntType.h
@@ -199,20 +199,21 @@ class BigInt final : public js::gc::Tenu
  private:
   static constexpr size_t DigitBits = sizeof(Digit) * CHAR_BIT;
   static constexpr size_t HalfDigitBits = DigitBits / 2;
   static constexpr Digit HalfDigitMask = (1ull << HalfDigitBits) - 1;
 
   static_assert(DigitBits == 32 || DigitBits == 64,
                 "Unexpected BigInt Digit size");
 
-  // The maximum number of digits that the current implementation supports
-  // would be 0x7fffffff / DigitBits. However, we use a lower limit for now,
-  // because raising it later is easier than lowering it.  Support up to 1
-  // million bits.
+  // Limit the size of bigint values to 1 million bits, to prevent excessive
+  // memory usage.  This limit may be raised in the future if needed.  Note,
+  // however, that many parts of the implementation rely on being able to
+  // count and index bits using 32-bit signed ints, so until those sites are
+  // fixed, the practical limit is 0x7fffffff bits.
   static constexpr size_t MaxBitLength = 1024 * 1024;
   static constexpr size_t MaxDigitLength = MaxBitLength / DigitBits;
 
   // BigInts can be serialized to strings of radix between 2 and 36.  For a
   // given bigint, radix 2 will take the most characters (one per bit).
   // Ensure that the max bigint size is small enough so that we can fit the
   // corresponding character count into a size_t, with space for a possible
   // sign prefix.