Bug 784295 - Part 2: Compile self-hosted JS in extra warnings mode (in DEBUG builds). r=till
author: Chris Peterson <cpeterson@mozilla.com>
Sat, 30 Nov 2013 00:16:43 -0800
changeset 172815 763fbb0e1c459acb7ffe79a5d326046fb62f74bd
parent 172814 c399711f365887f6e2890e0ae6ef1846d3421c3c
child 172816 836ee14f6a37f465df20697e6601be07a0e894fd
push id: 3224
push user: lsblakk@mozilla.com
push date: Tue, 04 Feb 2014 01:06:49 +0000
treeherder: mozilla-beta@60c04d0987f1 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: till
bugs: 784295
milestone: 28.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 784295 - Part 2: Compile self-hosted JS in extra warnings mode (in DEBUG builds). r=till
js/src/builtin/Array.js
js/src/builtin/ParallelArray.js
js/src/builtin/TypedObject.js
js/src/vm/SelfHosting.cpp
--- a/js/src/builtin/Array.js
+++ b/js/src/builtin/Array.js
@@ -551,18 +551,16 @@ function ArrayValues() {
 function ArrayEntries() {
     return CreateArrayIterator(this, ITEM_KIND_KEY_AND_VALUE);
 }
 
 function ArrayKeys() {
     return CreateArrayIterator(this, ITEM_KIND_KEY);
 }
 
-#ifdef ENABLE_PARALLEL_JS
-
 /*
  * Strawman spec:
  *   http://wiki.ecmascript.org/doku.php?id=strawman:data_parallelism
  */
 
 /* The mode asserts options object. */
 #define TRY_PARALLEL(MODE) \
   ((!MODE || MODE.mode !== "seq"))
@@ -611,39 +609,53 @@ function ComputeNumChunks(length) {
 /**
  * Computes the bounds for slice |sliceIndex| of |numItems| items,
  * assuming |numSlices| total slices. If numItems is not evenly
  * divisible by numSlices, then the final thread may have a bit of
  * extra work.
  */
 function ComputeSliceBounds(numItems, sliceIndex, numSlices) {
   var sliceWidth = (numItems / numSlices) | 0;
-  var startIndex = sliceWidth * sliceIndex;
-  var endIndex = sliceIndex === numSlices - 1 ? numItems : sliceWidth * (sliceIndex + 1);
+  var extraChunks = (numItems % numSlices) | 0;
+
+  var startIndex = sliceWidth * sliceIndex + std_Math_min(extraChunks, sliceIndex);
+  var endIndex = startIndex + sliceWidth;
+  if (sliceIndex < extraChunks)
+    endIndex += 1;
   return [startIndex, endIndex];
 }
 
 /**
  * Divides |numItems| items amongst |numSlices| slices. The result
  * is an array containing multiple values per slice: the start
  * index, end index, current position, and some padding. The
  * current position is initially the same as the start index. To
  * access the values for a particular slice, use the macros
  * SLICE_START() and so forth.
  */
 function ComputeAllSliceBounds(numItems, numSlices) {
   // FIXME(bug 844890): Use typed arrays here.
+  var sliceWidth = (numItems / numSlices) | 0;
+  var extraChunks = (numItems % numSlices) | 0;
+  var counter = 0;
   var info = [];
-  for (var i = 0; i < numSlices; i++) {
-    var [start, end] = ComputeSliceBounds(numItems, i, numSlices);
-    ARRAY_PUSH(info, SLICE_INFO(start, end));
+  var i = 0;
+  for (; i < extraChunks; i++) {
+    ARRAY_PUSH(info, SLICE_INFO(counter, counter + sliceWidth + 1));
+    counter += sliceWidth + 1;
+  }
+  for (; i < numSlices; i++) {
+    ARRAY_PUSH(info, SLICE_INFO(counter, counter + sliceWidth));
+    counter += sliceWidth;
   }
   return info;
 }
 
+#ifdef ENABLE_PARALLEL_JS
+
 /**
  * Creates a new array by applying |func(e, i, self)| for each element |e|
  * with index |i|.
  */
 function ArrayMapPar(func, mode) {
   if (!IsCallable(func))
     ThrowError(JSMSG_NOT_FUNCTION, DecompileArg(0, func));
 
@@ -693,16 +705,18 @@ function ArrayMapPar(func, mode) {
       for (var i = indexStart; i < indexEnd; i++)
         UnsafePutElements(buffer, i, func(self[i], i, self));
 
       UnsafePutElements(info, SLICE_POS(sliceId), ++chunkPos);
     }
 
     return chunkEnd === info[SLICE_END(sliceId)];
   }
+
+  return undefined;
 }
 
 /**
  * Reduces the elements in an array in parallel. Order is not fixed and |func|
  * is assumed to be associative.
  */
 function ArrayReducePar(func, mode) {
   if (!IsCallable(func))
@@ -777,16 +791,18 @@ function ArrayReducePar(func, mode) {
   }
 
   function reduceChunk(accumulator, from, to) {
     to = std_Math_min(to, length);
     for (var i = from; i < to; i++)
       accumulator = func(accumulator, self[i]);
     return accumulator;
   }
+
+  return undefined;
 }
 
 /**
  * Returns an array [s_0, ..., s_N] where |s_i| is equal to the reduction (as
  * per |reduce()|) of elements |0..i|. This is the generalization of partial
  * sum.
  */
 function ArrayScanPar(func, mode) {
@@ -964,16 +980,18 @@ function ArrayScanPar(func, mode) {
     var intermediate = intermediates[sliceId - 1];
     for (; indexPos < indexEnd; indexPos++) {
       UnsafePutElements(buffer, indexPos, func(intermediate, buffer[indexPos]),
                         info, SLICE_POS(sliceId), indexPos + 1);
     }
 
     return indexEnd === info[SLICE_END(sliceId)];
   }
+
+  return undefined;
 }
 
 /**
  * |scatter()| redistributes the elements in the array into a new array.
  *
  * - targets: The index targets[i] indicates where the ith element
  *   should appear in the result.
  *
@@ -1116,16 +1134,18 @@ function ArrayScatterPar(targets, defaul
           continue;
         if (conflicts[t])
           x = collide(x, buffer[t]);
         UnsafePutElements(buffer, t, x, conflicts, t, true, checkpoints, sliceId, indexPos + 1);
       }
 
       return indexEnd === targetsLength;
     }
+
+    return undefined;
   }
 
   function parDivideScatterVector() {
     // Subtle: because we will be mutating the localBuffers and
     // conflict arrays in place, we can never replay an entry in the
     // target array for fear of inducing a conflict where none existed
     // before. Therefore, we must proceed not by chunks but rather by
     // individual indices.
@@ -1194,16 +1214,18 @@ function ArrayScatterPar(targets, defaul
             } else {
               buffer[j] = otherbuffer[j];
               conflicts[j] = true;
             }
           }
         }
       }
     }
+
+    return undefined;
   }
 
   function seq() {
     var buffer = NewDenseArray(length);
     var conflicts = NewDenseArray(length);
 
     for (var i = 0; i < length; i++) {
       UnsafePutElements(buffer, i, defaultValue);
@@ -1227,16 +1249,18 @@ function ArrayScatterPar(targets, defaul
       ThrowError(JSMSG_PAR_ARRAY_SCATTER_BAD_TARGET, i);
 
     if (t < 0 || t >= length)
       ThrowError(JSMSG_PAR_ARRAY_SCATTER_BOUNDS);
 
     // It's not enough to return t, as -0 | 0 === -0.
     return TO_INT32(t);
   }
+
+  return undefined;
 }
 
 /**
  * The filter operation applied in parallel.
  */
 function ArrayFilterPar(func, mode) {
   if (!IsCallable(func))
     ThrowError(JSMSG_NOT_FUNCTION, DecompileArg(0, func));
@@ -1362,16 +1386,18 @@ function ArrayFilterPar(func, mode) {
           if (count === total)
             break;
         }
       }
     }
 
     return true;
   }
+
+  return undefined;
 }
 
 /**
  * "Comprehension form": This is the function invoked for
  * |Array.{build,buildPar}(len, fn)| It creates a new array with length |len|
  * where index |i| is equal to |fn(i)|.
  *
  * The final |mode| argument is an internal argument used only during our
@@ -1433,16 +1459,18 @@ function ArrayStaticBuildPar(length, fun
 
     return chunkEnd === info[SLICE_END(sliceId)];
   }
 
   function fill(indexStart, indexEnd) {
     for (var i = indexStart; i < indexEnd; i++)
       UnsafePutElements(buffer, i, func(i));
   }
+
+  return undefined;
 }
 
 /*
  * Mark the main operations as clone-at-callsite for better precision.
  * This is slightly overkill, as all that we really need is to
  * specialize to the receiver and the elemental function, but in
  * practice this is likely not so different, since element functions
  * are often used in exactly one place.
--- a/js/src/builtin/ParallelArray.js
+++ b/js/src/builtin/ParallelArray.js
@@ -1,100 +1,14 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 // FIXME(bug 844882): Parallel array properties should not be exposed.
 
-// The mode asserts options object.
-#define TRY_PARALLEL(MODE) \
-  ((!MODE || MODE.mode !== "seq"))
-#define ASSERT_SEQUENTIAL_IS_OK(MODE) \
-  do { if (MODE) AssertSequentialIsOK(MODE) } while(false)
-
-// Slice array: see ComputeAllSliceBounds()
-#define SLICE_INFO(START, END) START, END, START, 0
-#define SLICE_START(ID) ((ID << 2) + 0)
-#define SLICE_END(ID)   ((ID << 2) + 1)
-#define SLICE_POS(ID)   ((ID << 2) + 2)
-
-// How many items at a time do we do recomp. for parallel execution.
-// Note that filter currently assumes that this is no greater than 32
-// in order to make use of a bitset.
-#define CHUNK_SHIFT 5
-#define CHUNK_SIZE 32
-
-// Safe versions of ARRAY.push(ELEMENT)
-#define ARRAY_PUSH(ARRAY, ELEMENT) \
-  callFunction(std_Array_push, ARRAY, ELEMENT);
-#define ARRAY_SLICE(ARRAY, ELEMENT) \
-  callFunction(std_Array_slice, ARRAY, ELEMENT);
-
-/**
- * The ParallelSpew intrinsic is only defined in debug mode, so define a dummy
- * if debug is not on.
- */
-#ifndef DEBUG
-#define ParallelSpew(args)
-#endif
-
-/**
- * Determine the number of chunks of size CHUNK_SIZE;
- * note that the final chunk may be smaller than CHUNK_SIZE.
- */
-function ComputeNumChunks(length) {
-  var chunks = length >>> CHUNK_SHIFT;
-  if (chunks << CHUNK_SHIFT === length)
-    return chunks;
-  return chunks + 1;
-}
-
-/**
- * Computes the bounds for slice |sliceIndex| of |numItems| items,
- * assuming |numSlices| total slices. If numItems is not evenly
- * divisible by numSlices, then the final thread may have a bit of
- * extra work.
- */
-function ComputeSliceBounds(numItems, sliceIndex, numSlices) {
-  var sliceWidth = (numItems / numSlices) | 0;
-  var extraChunks = (numItems % numSlices) | 0;
-
-  var startIndex = sliceWidth * sliceIndex + std_Math_min(extraChunks, sliceIndex);
-  var endIndex = startIndex + sliceWidth;
-  if (sliceIndex < extraChunks)
-    endIndex += 1;
-  return [startIndex, endIndex];
-}
-
-/**
- * Divides |numItems| items amongst |numSlices| slices. The result
- * is an array containing multiple values per slice: the start
- * index, end index, current position, and some padding. The
- * current position is initially the same as the start index. To
- * access the values for a particular slice, use the macros
- * SLICE_START() and so forth.
- */
-function ComputeAllSliceBounds(numItems, numSlices) {
-  // FIXME(bug 844890): Use typed arrays here.
-  var sliceWidth = (numItems / numSlices) | 0;
-  var extraChunks = (numItems % numSlices) | 0;
-  var counter = 0;
-  var info = [];
-  var i = 0;
-  for (; i < extraChunks; i++) {
-    ARRAY_PUSH(info, SLICE_INFO(counter, counter + sliceWidth + 1));
-    counter += sliceWidth + 1;
-  }
-  for (; i < numSlices; i++) {
-    ARRAY_PUSH(info, SLICE_INFO(counter, counter + sliceWidth));
-    counter += sliceWidth;
-  }
-  return info;
-}
-
 /**
  * Compute the partial products in reverse order.
  * e.g., if the shape is [A,B,C,D], then the
  * array |products| will be [1,D,CD,BCD].
  */
 function ComputeProducts(shape) {
   var product = 1;
   var products = [1];
@@ -156,18 +70,18 @@ function ParallelArrayConstructEmpty() {
   this.shape = [0];
   this.get = ParallelArrayGet1;
 }
 
 /**
  * This is the function invoked for |new ParallelArray(array)|.
  * It copies the data from its array-like argument |array|.
  */
-function ParallelArrayConstructFromArray(buffer) {
-  var buffer = ToObject(buffer);
+function ParallelArrayConstructFromArray(array) {
+  var buffer = ToObject(array);
   var length = buffer.length >>> 0;
   if (length !== buffer.length)
     ThrowError(JSMSG_PAR_ARRAY_BAD_ARG, "");
 
   var buffer1 = [];
   for (var i = 0; i < length; i++)
     ARRAY_PUSH(buffer1, buffer[i]);
 
@@ -436,16 +350,18 @@ function ParallelArrayMap(func, mode) {
       for (var i = indexStart; i < indexEnd; i++)
         UnsafePutElements(buffer, i, func(self.get(i), i, self));
 
       UnsafePutElements(info, SLICE_POS(sliceId), ++chunkPos);
     }
 
     return chunkEnd == info[SLICE_END(sliceId)];
   }
+
+  return undefined;
 }
 
 /**
  * Reduces the elements in a parallel array's outermost dimension
  * using the given reduction function.
  */
 function ParallelArrayReduce(func, mode) {
   // FIXME(bug 844887): Check |this instanceof ParallelArray|
@@ -520,16 +436,18 @@ function ParallelArrayReduce(func, mode)
   }
 
   function reduceChunk(accumulator, from, to) {
     to = std_Math_min(to, length);
     for (var i = from; i < to; i++)
       accumulator = func(accumulator, self.get(i));
     return accumulator;
   }
+
+  return undefined;
 }
 
 /**
  * |scan()| returns an array [s_0, ..., s_N] where
  * |s_i| is equal to the reduction (as per |reduce()|)
  * of elements |0..i|. This is the generalization
  * of partial sum.
  */
@@ -708,16 +626,18 @@ function ParallelArrayScan(func, mode) {
     var intermediate = intermediates[sliceId - 1];
     for (; indexPos < indexEnd; indexPos++) {
       UnsafePutElements(buffer, indexPos, func(intermediate, buffer[indexPos]),
                         info, SLICE_POS(sliceId), indexPos + 1);
     }
 
     return indexEnd == info[SLICE_END(sliceId)];
   }
+
+  return undefined;
 }
 
 /**
  * |scatter()| redistributes the elements in the parallel array
  * into a new parallel array.
  *
  * - targets: The index targets[i] indicates where the ith element
  *   should appear in the result.
@@ -865,16 +785,18 @@ function ParallelArrayScatter(targets, d
           continue;
         if (conflicts[t])
           x = collide(x, buffer[t]);
         UnsafePutElements(buffer, t, x, conflicts, t, true, checkpoints, sliceId, indexPos + 1);
       }
 
       return indexEnd == targetsLength;
     }
+
+    return undefined;
   }
 
   function parDivideScatterVector() {
     // Subtle: because we will be mutating the localBuffers and
     // conflict arrays in place, we can never replay an entry in the
     // target array for fear of inducing a conflict where none existed
     // before. Therefore, we must proceed not by chunks but rather by
     // individual indices.
@@ -943,16 +865,18 @@ function ParallelArrayScatter(targets, d
             } else {
               buffer[j] = otherbuffer[j];
               conflicts[j] = true;
             }
           }
         }
       }
     }
+
+    return undefined;
   }
 
   function seq() {
     var buffer = NewDenseArray(length);
     var conflicts = NewDenseArray(length);
 
     for (var i = 0; i < length; i++) {
       UnsafePutElements(buffer, i, defaultValue);
@@ -976,16 +900,18 @@ function ParallelArrayScatter(targets, d
       ThrowError(JSMSG_PAR_ARRAY_SCATTER_BAD_TARGET, i);
 
     if (t < 0 || t >= length)
       ThrowError(JSMSG_PAR_ARRAY_SCATTER_BOUNDS);
 
     // It's not enough to return t, as -0 | 0 === -0.
     return TO_INT32(t);
   }
+
+  return undefined;
 }
 
 /**
  * The familiar filter() operation applied across the outermost
  * dimension.
  */
 function ParallelArrayFilter(func, mode) {
   // FIXME(bug 844887): Check |this instanceof ParallelArray|
@@ -1112,16 +1038,18 @@ function ParallelArrayFilter(func, mode)
           if (count == total)
             break;
         }
       }
     }
 
     return true;
   }
+
+  return undefined;
 }
 
 /**
  * Divides the outermost dimension into two dimensions. Does not copy
  * or affect the underlying data, just how it is divided amongst
  * dimensions. So if we had a vector with shape [N, ...] and you
  * partition with amount=4, you get a [N/4, 4, ...] vector. Note that
  * N must be evenly divisible by 4 in that case.
@@ -1284,19 +1212,19 @@ function ForkJoinMode(mode) {
   } else if (mode.mode === "compile") {
     return 1;
   } else if (mode.mode === "par") {
     return 2;
   } else if (mode.mode === "recover") {
     return 3;
   } else if (mode.mode === "bailout") {
     return 4;
-  } else {
-    ThrowError(JSMSG_PAR_ARRAY_BAD_ARG, "");
   }
+  ThrowError(JSMSG_PAR_ARRAY_BAD_ARG, "");
+  return undefined;
 }
 
 /*
  * Mark the main operations as clone-at-callsite for better precision.
  * This is slightly overkill, as all that we really need is to
  * specialize to the receiver and the elemental function, but in
  * practice this is likely not so different, since element functions
  * are often used in exactly one place.
--- a/js/src/builtin/TypedObject.js
+++ b/js/src/builtin/TypedObject.js
@@ -122,16 +122,17 @@ TypedObjectPointer.prototype.moveTo = fu
 
   case JS_TYPEREPR_STRUCT_KIND:
     if (HAS_PROPERTY(this.typeObj.fieldTypes, propName))
       return this.moveToField(propName);
     break;
   }
 
   ThrowError(JSMSG_TYPEDOBJECT_NO_SUCH_PROP, propName);
+  return undefined;
 };
 
 // Adjust `this` in place to point at the element `index`.  `this`
 // must be a array type and `index` must be within bounds. Returns
 // `this`.
 TypedObjectPointer.prototype.moveToElem = function(index) {
   assert(this.kind() == JS_TYPEREPR_ARRAY_KIND,
          "moveToElem invoked on non-array");
@@ -200,16 +201,17 @@ TypedObjectPointer.prototype.get = funct
     return this.getX4();
 
   case JS_TYPEREPR_ARRAY_KIND:
   case JS_TYPEREPR_STRUCT_KIND:
     return NewDerivedTypedDatum(this.typeObj, this.datum, this.offset);
   }
 
   assert(false, "Unhandled kind: " + REPR_KIND(this.typeRepr));
+  return undefined;
 }
 
 TypedObjectPointer.prototype.getScalar = function() {
   var type = REPR_TYPE(this.typeRepr);
   switch (type) {
   case JS_SCALARTYPEREPR_INT8:
     return Load_int8(this.datum, this.offset);
 
@@ -232,32 +234,34 @@ TypedObjectPointer.prototype.getScalar =
   case JS_SCALARTYPEREPR_FLOAT32:
     return Load_float32(this.datum, this.offset);
 
   case JS_SCALARTYPEREPR_FLOAT64:
     return Load_float64(this.datum, this.offset);
   }
 
   assert(false, "Unhandled scalar type: " + type);
+  return undefined;
 }
 
 TypedObjectPointer.prototype.getReference = function() {
   var type = REPR_TYPE(this.typeRepr);
   switch (type) {
   case JS_REFERENCETYPEREPR_ANY:
     return Load_Any(this.datum, this.offset);
 
   case JS_REFERENCETYPEREPR_OBJECT:
     return Load_Object(this.datum, this.offset);
 
   case JS_REFERENCETYPEREPR_STRING:
     return Load_string(this.datum, this.offset);
   }
 
   assert(false, "Unhandled scalar type: " + type);
+  return undefined;
 }
 
 TypedObjectPointer.prototype.getX4 = function() {
   var type = REPR_TYPE(this.typeRepr);
   var T = StandardTypeObjectDescriptors();
   switch (type) {
   case JS_X4TYPEREPR_FLOAT32:
     var x = Load_float32(this.datum, this.offset + 0);
@@ -268,17 +272,19 @@ TypedObjectPointer.prototype.getX4 = fun
 
   case JS_X4TYPEREPR_INT32:
     var x = Load_int32(this.datum, this.offset + 0);
     var y = Load_int32(this.datum, this.offset + 4);
     var z = Load_int32(this.datum, this.offset + 8);
     var w = Load_int32(this.datum, this.offset + 12);
     return T.int32x4(x, y, z, w);
   }
+
   assert(false, "Unhandled x4 type: " + type);
+  return undefined;
 }
 
 ///////////////////////////////////////////////////////////////////////////
 // Setting values
 //
 // The methods in this section modify the data pointed at by `this`.
 
 // Assigns `fromValue` to the memory pointed at by `this`, adapting it
@@ -347,17 +353,17 @@ TypedObjectPointer.prototype.set = funct
       var fieldName = fieldNames[i];
       tempPtr.reset(this).moveToField(fieldName).set(fromValue[fieldName]);
     }
     return;
   }
 
   ThrowError(JSMSG_CANT_CONVERT_TO,
              typeof(fromValue),
-             this.typeRepr.toSource())
+             this.typeRepr.toSource());
 }
 
 // Sets `fromValue` to `this` assuming that `this` is a scalar type.
 TypedObjectPointer.prototype.setScalar = function(fromValue) {
   assert(REPR_KIND(this.typeRepr) == JS_TYPEREPR_SCALAR_KIND,
          "setScalar called with non-scalar");
 
   var type = REPR_TYPE(this.typeRepr);
@@ -393,16 +399,17 @@ TypedObjectPointer.prototype.setScalar =
   case JS_SCALARTYPEREPR_FLOAT32:
     return Store_float32(this.datum, this.offset, +fromValue);
 
   case JS_SCALARTYPEREPR_FLOAT64:
     return Store_float64(this.datum, this.offset, +fromValue);
   }
 
   assert(false, "Unhandled scalar type: " + type);
+  return undefined;
 }
 
 TypedObjectPointer.prototype.setReference = function(fromValue) {
   var type = REPR_TYPE(this.typeRepr);
   switch (type) {
   case JS_REFERENCETYPEREPR_ANY:
     return Store_Any(this.datum, this.offset, fromValue);
 
@@ -410,27 +417,28 @@ TypedObjectPointer.prototype.setReferenc
     var value = (fromValue === null ? fromValue : ToObject(fromValue));
     return Store_Object(this.datum, this.offset, value);
 
   case JS_REFERENCETYPEREPR_STRING:
     return Store_string(this.datum, this.offset, ToString(fromValue));
   }
 
   assert(false, "Unhandled scalar type: " + type);
+  return undefined;
 }
 
 // Sets `fromValue` to `this` assuming that `this` is a scalar type.
 TypedObjectPointer.prototype.setX4 = function(fromValue) {
   // It is only permitted to set a float32x4/int32x4 value from another
   // float32x4/int32x4; in that case, the "fast path" that uses memcopy will
   // have already matched. So if we get to this point, we're supposed
   // to "adapt" fromValue, but there are no legal adaptions.
   ThrowError(JSMSG_CANT_CONVERT_TO,
              typeof(fromValue),
-             this.typeRepr.toSource())
+             this.typeRepr.toSource());
 }
 
 ///////////////////////////////////////////////////////////////////////////
 // C++ Wrappers
 //
 // These helpers are invoked by C++ code or used as method bodies.
 
 // Wrapper for use from C++ code.
@@ -656,17 +664,19 @@ function HandleTest(obj) {
 
 function X4ProtoString(type) {
   switch (type) {
   case JS_X4TYPEREPR_INT32:
     return "int32x4";
   case JS_X4TYPEREPR_FLOAT32:
     return "float32x4";
   }
+
   assert(false, "Unhandled type constant");
+  return undefined;
 }
 
 var X4LaneStrings = ["x", "y", "z", "w"];
 
 // Generalized handler for the various properties for accessing a
 // single lane of an X4 vector value. Note that this is the slow path;
 // the fast path will be inlined into ion code.
 function X4GetLane(datum, type, lane) {
@@ -680,17 +690,19 @@ function X4GetLane(datum, type, lane) {
                X4LaneStrings[lane], typeof this);
 
   switch (type) {
   case JS_X4TYPEREPR_INT32:
     return Load_int32(datum, lane * 4);
   case JS_X4TYPEREPR_FLOAT32:
     return Load_float32(datum, lane * 4);
   }
+
   assert(false, "Unhandled type constant");
+  return undefined;
 }
 
 function Float32x4Lane0() { return X4GetLane(this, JS_X4TYPEREPR_FLOAT32, 0); }
 function Float32x4Lane1() { return X4GetLane(this, JS_X4TYPEREPR_FLOAT32, 1); }
 function Float32x4Lane2() { return X4GetLane(this, JS_X4TYPEREPR_FLOAT32, 2); }
 function Float32x4Lane3() { return X4GetLane(this, JS_X4TYPEREPR_FLOAT32, 3); }
 
 function Int32x4Lane0() { return X4GetLane(this, JS_X4TYPEREPR_INT32, 0); }
--- a/js/src/vm/SelfHosting.cpp
+++ b/js/src/vm/SelfHosting.cpp
@@ -778,16 +778,17 @@ JSRuntime::initSelfHosting(JSContext *cx
     options.setSelfHostingMode(true);
     options.setCanLazilyParse(false);
     options.setSourcePolicy(CompileOptions::NO_SOURCE);
     options.setVersion(JSVERSION_LATEST);
     options.werrorOption = true;
 
 #ifdef DEBUG
     options.strictOption = true;
+    options.extraWarningsOption = true;
 #endif
 
     /*
      * Set a temporary error reporter printing to stderr because it is too
      * early in the startup process for any other reporter to be registered
      * and we don't want errors in self-hosted code to be silently swallowed.
      */
     JSErrorReporter oldReporter = JS_SetErrorReporter(cx, selfHosting_ErrorReporter);