Bug 1508255 - Minor formatting changes in js/src r=jandem
author Ted Campbell <tcampbell@mozilla.com>
Tue, 20 Nov 2018 10:15:12 +0000
changeset 450572 5ca3611ae290a9e1d5a4d3281c9cceec0a2c571e
parent 450571 517302dcd9e106827ef66dcd66a10dc82f7bc447
child 450573 9c97df04bbf8f43432680b514649350deb50ad2d
push id 272
push user eakhgari@mozilla.com
push date Thu, 13 Dec 2018 22:29:46 +0000
treeherder mozilla-esr60@ff97c7a84632
reviewers jandem
bugs 1508255
milestone 60.4.1
Bug 1508255 - Minor formatting changes in js/src r=jandem

These cause clang-format to generate better results when reflowing comments.

Depends on D12386

Differential Revision: https://phabricator.services.mozilla.com/D12387
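To illustrate the kind of change this patch makes (the example is drawn from the js/src/builtin/intl/IntlObject.h hunk below): an over-long comment line is pre-split at a natural phrase boundary so that clang-format preserves the intended wrapping instead of reflowing it mechanically.

    Before:
     *     of days required in the first week of the year, e.g. 1 for en-US, 4 for de

    After:
     *     of days required in the first week of the year, e.g. 1 for en-US,
     *     4 for de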
js/src/builtin/intl/IntlObject.h
js/src/ctypes/CTypes.cpp
js/src/frontend/BytecodeEmitter.cpp
js/src/gc/Barrier.h
js/src/gc/FindSCCs.h
js/src/gc/Scheduling.h
js/src/jit/BaselineFrameInfo.h
js/src/jit/BaselineIC.cpp
js/src/jit/BaselineJIT.h
js/src/jit/EffectiveAddressAnalysis.cpp
js/src/jit/IonAnalysis.cpp
js/src/jit/IonBuilder.cpp
js/src/jit/IonTypes.h
js/src/jit/JitFrames.h
js/src/jit/JitcodeMap.cpp
js/src/jit/JitcodeMap.h
js/src/jit/MIR.cpp
js/src/jit/arm/Assembler-arm.cpp
js/src/jit/arm/Simulator-arm.h
js/src/jit/arm/Trampoline-arm.cpp
js/src/jit/arm64/Trampoline-arm64.cpp
js/src/jit/x86/Trampoline-x86.cpp
js/src/jsapi.cpp
js/src/vm/ArrayBufferObject.cpp
js/src/vm/BytecodeUtil.cpp
js/src/vm/Debugger.cpp
js/src/vm/Interpreter.cpp
js/src/vm/SelfHosting.cpp
js/src/vm/StringType.h
js/src/vm/TraceLogging.h
js/src/vm/TraceLoggingGraph.h
js/src/wasm/WasmBuiltins.cpp
js/src/wasm/WasmIonCompile.cpp
js/src/wasm/WasmSignalHandlers.cpp
--- a/js/src/builtin/intl/IntlObject.h
+++ b/js/src/builtin/intl/IntlObject.h
@@ -27,17 +27,18 @@ InitIntlClass(JSContext* cx, JS::Handle<
  * properties:
  *
  *   firstDayOfWeek
  *     an integer in the range 1=Sunday to 7=Saturday indicating the day
  *     considered the first day of the week in calendars, e.g. 1 for en-US,
  *     2 for en-GB, 1 for bn-IN
  *   minDays
  *     an integer in the range of 1 to 7 indicating the minimum number
- *     of days required in the first week of the year, e.g. 1 for en-US, 4 for de
+ *     of days required in the first week of the year, e.g. 1 for en-US,
+ *     4 for de
  *   weekendStart
  *     an integer in the range 1=Sunday to 7=Saturday indicating the day
  *     considered the beginning of a weekend, e.g. 7 for en-US, 7 for en-GB,
  *     1 for bn-IN
  *   weekendEnd
  *     an integer in the range 1=Sunday to 7=Saturday indicating the day
  *     considered the end of a weekend, e.g. 1 for en-US, 1 for en-GB,
  *     1 for bn-IN (note that "weekend" is *not* necessarily two days)
--- a/js/src/ctypes/CTypes.cpp
+++ b/js/src/ctypes/CTypes.cpp
@@ -2069,17 +2069,17 @@ InitCDataClass(JSContext* cx, HandleObje
                          JSPROP_ENUMERATE | JSPROP_READONLY | JSPROP_PERMANENT))
     return nullptr;
 
   // Define properties and functions common to all CDatas.
   if (!JS_DefineProperties(cx, prototype, sCDataProps) ||
       !JS_DefineFunctions(cx, prototype, sCDataFunctions))
     return nullptr;
 
-  if (//!JS_FreezeObject(cx, prototype) || // XXX fixme - see bug 541212!
+  if (// !JS_FreezeObject(cx, prototype) || // XXX fixme - see bug 541212!
       !JS_FreezeObject(cx, ctor))
     return nullptr;
 
   return prototype;
 }
 
 static bool
 DefineABIConstant(JSContext* cx,
@@ -2164,17 +2164,17 @@ InitTypeConstructor(JSContext* cx,
 
   if (instanceProps && !JS_DefineProperties(cx, dataProto, instanceProps))
     return false;
 
   // Link the type prototype to the data prototype.
   JS_SetReservedSlot(typeProto, SLOT_OURDATAPROTO, ObjectValue(*dataProto));
 
   if (!JS_FreezeObject(cx, obj) ||
-      //!JS_FreezeObject(cx, dataProto) || // XXX fixme - see bug 541212!
+      // !JS_FreezeObject(cx, dataProto) || // XXX fixme - see bug 541212!
       !JS_FreezeObject(cx, typeProto))
     return false;
 
   return true;
 }
 
 static JSObject*
 InitInt64Class(JSContext* cx,
@@ -2314,17 +2314,18 @@ InitTypeClasses(JSContext* cx, HandleObj
     return false;
 
   protos[SLOT_CDATAPROTO].set(CDataProto);
 
   // Create and attach the ctypes.{Int64,UInt64} constructors.
   // Each of these has, respectively:
   //   * [[Class]] "Function"
   //   * __proto__ === Function.prototype
-  //   * A constructor that creates a ctypes.{Int64,UInt64} object, respectively.
+  //   * A constructor that creates a ctypes.{Int64,UInt64} object,
+  //     respectively.
   //   * 'prototype' property:
   //     * [[Class]] {"Int64Proto","UInt64Proto"}
   //     * 'constructor' property === ctypes.{Int64,UInt64}
   protos[SLOT_INT64PROTO].set(InitInt64Class(cx, ctypesObj, &sInt64ProtoClass,
     Int64::Construct, sInt64Functions, sInt64StaticFunctions));
   if (!protos[SLOT_INT64PROTO])
     return false;
   protos[SLOT_UINT64PROTO].set(InitInt64Class(cx, ctypesObj, &sUInt64ProtoClass,
--- a/js/src/frontend/BytecodeEmitter.cpp
+++ b/js/src/frontend/BytecodeEmitter.cpp
@@ -10474,23 +10474,25 @@ BytecodeEmitter::emitClass(ParseNode* pn
     //   } else {
     //     objProto = %ObjectPrototype%;
     //   }
     //
     //   let homeObject = ObjectCreate(objProto);
     //
     //   if defined <constructor> {
     //     if defined <BaseExpression> {
-    //       cons = DefineMethod(<constructor>, proto=homeObject, funProto=funProto);
+    //       cons = DefineMethod(<constructor>, proto=homeObject,
+    //                           funProto=funProto);
     //     } else {
     //       cons = DefineMethod(<constructor>, proto=homeObject);
     //     }
     //   } else {
     //     if defined <BaseExpression> {
-    //       cons = DefaultDerivedConstructor(proto=homeObject, funProto=funProto);
+    //       cons = DefaultDerivedConstructor(proto=homeObject,
+    //                                        funProto=funProto);
     //     } else {
     //       cons = DefaultConstructor(proto=homeObject);
     //     }
     //   }
     //
     //   cons.prototype = homeObject;
     //   homeObject.constructor = cons;
     //
--- a/js/src/gc/Barrier.h
+++ b/js/src/gc/Barrier.h
@@ -41,17 +41,17 @@
  *                               PRE-BARRIER
  *
  * To understand the pre-barrier, let's consider how incremental GC works. The
  * GC itself is divided into "slices". Between each slice, JS code is allowed to
  * run. Each slice should be short so that the user doesn't notice the
  * interruptions. In our GC, the structure of the slices is as follows:
  *
  * 1. ... JS work, which leads to a request to do GC ...
- * 2. [first GC slice, which performs all root marking and possibly more marking]
+ * 2. [first GC slice, which performs all root marking and (maybe) more marking]
  * 3. ... more JS work is allowed to run ...
  * 4. [GC mark slice, which runs entirely in drainMarkStack]
  * 5. ... more JS work ...
  * 6. [GC mark slice, which runs entirely in drainMarkStack]
  * 7. ... more JS work ...
  * 8. [GC marking finishes; sweeping done non-incrementally; GC is done]
  * 9. ... JS continues uninterrupted now that GC is finishes ...
  *
--- a/js/src/gc/FindSCCs.h
+++ b/js/src/gc/FindSCCs.h
@@ -56,17 +56,18 @@ struct GraphNodeBase
  *     void findOutgoingEdges(MyComponentFinder& finder)
  *     {
  *         for edge in my_outgoing_edges:
  *             if is_relevant(edge):
  *                 finder.addEdgeTo(edge.destination)
  *     }
  * }
  *
- * struct MyComponentFinder : public ComponentFinder<MyGraphNode, MyComponentFinder>
+ * struct MyComponentFinder : public ComponentFinder<MyGraphNode,
+ *                                                   MyComponentFinder>
  * {
  *     ...
  * };
  *
  * MyComponentFinder finder;
  * finder.addNode(v);
  */
 
--- a/js/src/gc/Scheduling.h
+++ b/js/src/gc/Scheduling.h
@@ -102,18 +102,18 @@
  *   2) Do some known amount of GC work now because the embedding knows now is
  *      a good time to do a long, unblockable operation of a known duration.
  *      These are INTER_SLICE_GC and REFRESH_FRAME.
  *
  *  Correctness reasons:
  *
  *   3) Do a GC now because correctness depends on some GC property. For
  *      example, CC_WAITING is where the embedding requires the mark bits
- *      to be set correct. Also, EVICT_NURSERY where we need to work on the tenured
- *      heap.
+ *      to be set correct. Also, EVICT_NURSERY where we need to work on the
+ *      tenured heap.
  *
  *   4) Do a GC because we are shutting down: e.g. SHUTDOWN_CC or DESTROY_*.
  *
  *   5) Do a GC because a compartment was accessed between GC slices when we
  *      would have otherwise discarded it. We have to do a second GC to clean
  *      it up: e.g. COMPARTMENT_REVIVED.
  *
  *  Emergency Reasons:
--- a/js/src/jit/BaselineFrameInfo.h
+++ b/js/src/jit/BaselineFrameInfo.h
@@ -32,19 +32,19 @@ struct BytecodeInfo;
 //    var y = x + 9;
 //
 // Here two values are pushed: StackValue(LocalSlot(0)) and StackValue(Int32Value(9)).
 // Only when we reach the ADD op, code is generated to load the operands directly
 // into the right operand registers and sync all other stack values.
 //
 // For stack values, the following invariants hold (and are checked between ops):
 //
-// (1) If a value is synced (kind == Stack), all values below it must also be synced.
-//     In other words, values with kind other than Stack can only appear on top of the
-//     abstract stack.
+// (1) If a value is synced (kind == Stack), all values below it must also be
+//     synced. In other words, values with kind other than Stack can only appear
+//     on top of the abstract stack.
 //
 // (2) When we call a stub or IC, all values still on the stack must be synced.
 
 // Represents a value pushed on the stack. Note that StackValue is not used for
 // locals or arguments since these are always fully synced.
 class StackValue
 {
   public:
--- a/js/src/jit/BaselineIC.cpp
+++ b/js/src/jit/BaselineIC.cpp
@@ -2840,18 +2840,18 @@ ICCall_Fallback::Compiler::generateStubC
     EmitReturnFromIC(masm);
 
     // This is the resume point used when bailout rewrites call stack to undo
     // Ion inlined frames. The return address pushed onto reconstructed stack
     // will point here.
     assumeStubFrame();
     bailoutReturnOffset_.bind(masm.currentOffset());
 
-    // Load passed-in ThisV into R1 just in case it's needed.  Need to do this before
-    // we leave the stub frame since that info will be lost.
+    // Load passed-in ThisV into R1 just in case it's needed.  Need to do this
+    // before we leave the stub frame since that info will be lost.
     // Current stack:  [...., ThisV, ActualArgc, CalleeToken, Descriptor ]
     masm.loadValue(Address(masm.getStackPointer(), 3 * sizeof(size_t)), R1);
 
     leaveStubFrame(masm, true);
 
     // If this is a |constructing| call, if the callee returns a non-object, we replace it with
     // the |this| object passed in.
     if (isConstructing_) {
@@ -3009,17 +3009,19 @@ ICCallScriptedCompiler::generateStubCode
         regs = availableGeneralRegs(0);
         regs.take(R0);
         argcReg = regs.takeAny();
 
         // Restore saved argc so we can use it to calculate the address to save
         // the resulting this object to.
         masm.pop(argcReg);
 
-        // Save "this" value back into pushed arguments on stack.  R0 can be clobbered after that.
+        // Save "this" value back into pushed arguments on stack. R0 can be
+        // clobbered after that.
+        //
         // Stack now looks like:
         //      [..., Callee, ThisV, Arg0V, ..., ArgNV, [NewTarget], StubFrameHeader ]
         if (isSpread_) {
             masm.storeValue(R0, Address(masm.getStackPointer(),
                                         (1 + isConstructing_) * sizeof(Value) + STUB_FRAME_SIZE));
         } else {
             BaseValueIndex thisSlot(masm.getStackPointer(), argcReg,
                                     STUB_FRAME_SIZE + isConstructing_ * sizeof(Value));
--- a/js/src/jit/BaselineJIT.h
+++ b/js/src/jit/BaselineJIT.h
@@ -27,18 +27,20 @@ class ControlFlowGraph;
 
 class PCMappingSlotInfo
 {
     uint8_t slotInfo_;
 
   public:
     // SlotInfo encoding:
     //  Bits 0 & 1: number of slots at top of stack which are unsynced.
-    //  Bits 2 & 3: SlotLocation of top slot value (only relevant if numUnsynced > 0).
-    //  Bits 3 & 4: SlotLocation of next slot value (only relevant if numUnsynced > 1).
+    //  Bits 2 & 3: SlotLocation of top slot value (only relevant if
+    //              numUnsynced > 0).
+    //  Bits 3 & 4: SlotLocation of next slot value (only relevant if
+    //              numUnsynced > 1).
     enum SlotLocation { SlotInR0 = 0, SlotInR1 = 1, SlotIgnore = 3 };
 
     PCMappingSlotInfo()
       : slotInfo_(0)
     { }
 
     explicit PCMappingSlotInfo(uint8_t slotInfo)
       : slotInfo_(slotInfo)
--- a/js/src/jit/EffectiveAddressAnalysis.cpp
+++ b/js/src/jit/EffectiveAddressAnalysis.cpp
@@ -242,18 +242,20 @@ EffectiveAddressAnalysis::analyzeAsmJSHe
 
 // This analysis converts patterns of the form:
 //   truncate(x + (y << {0,1,2,3}))
 //   truncate(x + (y << {0,1,2,3}) + imm32)
 // into a single lea instruction, and patterns of the form:
 //   asmload(x + imm32)
 //   asmload(x << {0,1,2,3})
 //   asmload((x << {0,1,2,3}) + imm32)
-//   asmload((x << {0,1,2,3}) & mask)            (where mask is redundant with shift)
-//   asmload(((x << {0,1,2,3}) + imm32) & mask)  (where mask is redundant with shift + imm32)
+//   asmload((x << {0,1,2,3}) & mask)            (where mask is redundant
+//                                                with shift)
+//   asmload(((x << {0,1,2,3}) + imm32) & mask)  (where mask is redundant
+//                                                with shift + imm32)
 // into a single asmload instruction (and for asmstore too).
 //
 // Additionally, we should consider the general forms:
 //   truncate(x + y + imm32)
 //   truncate((y << {0,1,2,3}) + imm32)
 bool
 EffectiveAddressAnalysis::analyze()
 {
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -3183,17 +3183,18 @@ ExtractMathSpace(MDefinition* ins)
         return MathSpace::Infinite;
       case MDefinition::IndirectTruncate:
       case MDefinition::Truncate:
         return MathSpace::Modulo;
     }
     MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unknown TruncateKind");
 }
 
-// Extract a linear sum from ins, if possible (otherwise giving the sum 'ins + 0').
+// Extract a linear sum from ins, if possible (otherwise giving the
+// sum 'ins + 0').
 SimpleLinearSum
 jit::ExtractLinearSum(MDefinition* ins, MathSpace space)
 {
     if (ins->isBeta())
         ins = ins->getOperand(0);
 
     if (ins->type() != MIRType::Int32)
         return SimpleLinearSum(ins, 0);
--- a/js/src/jit/IonBuilder.cpp
+++ b/js/src/jit/IonBuilder.cpp
@@ -4865,20 +4865,21 @@ IonBuilder::createCallObject(MDefinition
 
 MDefinition*
 IonBuilder::createThisScripted(MDefinition* callee, MDefinition* newTarget)
 {
     // Get callee.prototype.
     //
     // This instruction MUST be idempotent: since it does not correspond to an
     // explicit operation in the bytecode, we cannot use resumeAfter().
-    // Getters may not override |prototype| fetching, so this operation is indeed idempotent.
+    // Getters may not override |prototype| fetching, so this operation is
+    // indeed idempotent.
     // - First try an idempotent property cache.
-    // - Upon failing idempotent property cache, we can't use a non-idempotent cache,
-    //   therefore we fallback to CallGetProperty
+    // - Upon failing idempotent property cache, we can't use a non-idempotent
+    //   cache, therefore we fallback to CallGetProperty
     //
     // Note: both CallGetProperty and GetPropertyCache can trigger a GC,
     //       and thus invalidation.
     MInstruction* getProto;
     if (!invalidatedIdempotentCache()) {
         MConstant* id = constant(StringValue(names().prototype));
         MGetPropertyCache* getPropCache = MGetPropertyCache::New(alloc(), newTarget, id,
                                                                  /* monitored = */ false);
--- a/js/src/jit/IonTypes.h
+++ b/js/src/jit/IonTypes.h
@@ -86,19 +86,20 @@ enum BailoutKind
     // We hit a hole in an array.
     Bailout_Hole,
 
     // Array access with negative index
     Bailout_NegativeIndex,
 
     // Pretty specific case:
     //  - need a type barrier on a property write
-    //  - all but one of the observed types have property types that reflect the value
+    //  - all but one of the observed types have property types that reflect
+    //    the value
     //  - we need to guard that we're not given an object of that one other type
-    // also used for the unused GuardClass instruction
+    //    also used for the unused GuardClass instruction
     Bailout_ObjectIdentityOrTypeGuard,
 
     // Unbox expects a given type, bails out if it doesn't get it.
     Bailout_NonInt32Input,
     Bailout_NonNumericInput, // unboxing a double works with int32 too
     Bailout_NonBooleanInput,
     Bailout_NonObjectInput,
     Bailout_NonStringInput,
--- a/js/src/jit/JitFrames.h
+++ b/js/src/jit/JitFrames.h
@@ -198,17 +198,18 @@ static const uintptr_t FRAMESIZE_SHIFT =
                                          1 /* cached saved frame bit */;
 static const uintptr_t FRAMESIZE_BITS = 32 - FRAMESIZE_SHIFT;
 static const uintptr_t FRAMESIZE_MASK = (1 << FRAMESIZE_BITS) - 1;
 
 // Ion frames have a few important numbers associated with them:
 //      Local depth:    The number of bytes required to spill local variables.
 //      Argument depth: The number of bytes required to push arguments and make
 //                      a function call.
-//      Slack:          A frame may temporarily use extra stack to resolve cycles.
+//      Slack:          A frame may temporarily use extra stack to resolve
+//                      cycles.
 //
 // The (local + argument) depth determines the "fixed frame size". The fixed
 // frame size is the distance between the stack pointer and the frame header.
 // Thus, fixed >= (local + argument).
 //
 // In order to compress guards, we create shared jump tables that recover the
 // script from the stack and recover a snapshot pointer based on which jump was
 // taken. Thus, we create a jump table for each fixed frame size.
@@ -659,20 +660,22 @@ class IonOOLNativeExitFrameLayout
     inline Value* thisp() {
         return reinterpret_cast<Value*>(&loThis_);
     }
     inline uintptr_t argc() const {
         return argc_;
     }
 };
 
-// ProxyGetProperty(JSContext* cx, HandleObject proxy, HandleId id, MutableHandleValue vp)
-// ProxyCallProperty(JSContext* cx, HandleObject proxy, HandleId id, MutableHandleValue vp)
-// ProxySetProperty(JSContext* cx, HandleObject proxy, HandleId id, MutableHandleValue vp,
-//                  bool strict)
+// ProxyGetProperty(JSContext* cx, HandleObject proxy, HandleId id,
+//                  MutableHandleValue vp)
+// ProxyCallProperty(JSContext* cx, HandleObject proxy, HandleId id,
+//                   MutableHandleValue vp)
+// ProxySetProperty(JSContext* cx, HandleObject proxy, HandleId id,
+//                  MutableHandleValue vp, bool strict)
 class IonOOLProxyExitFrameLayout
 {
   protected: // only to silence a clang warning about unused private fields
     ExitFooterFrame footer_;
     ExitFrameLayout exit_;
 
     // The proxy object.
     JSObject* proxy_;
--- a/js/src/jit/JitcodeMap.cpp
+++ b/js/src/jit/JitcodeMap.cpp
@@ -1489,19 +1489,19 @@ JitcodeIonTable::findRegionEntry(uint32_
 
     // For small region lists, just search linearly.
     if (regions <= LINEAR_SEARCH_THRESHOLD) {
         JitcodeRegionEntry previousEntry = regionEntry(0);
         for (uint32_t i = 1; i < regions; i++) {
             JitcodeRegionEntry nextEntry = regionEntry(i);
             MOZ_ASSERT(nextEntry.nativeOffset() >= previousEntry.nativeOffset());
 
-            // See note in binary-search code below about why we use '<=' here instead of
-            // '<'.  Short explanation: regions are closed at their ending addresses,
-            // and open at their starting addresses.
+            // See note in binary-search code below about why we use '<=' here
+            // instead of '<'.  Short explanation: regions are closed at their
+            // ending addresses, and open at their starting addresses.
             if (nativeOffset <= nextEntry.nativeOffset())
                 return i-1;
 
             previousEntry = nextEntry;
         }
         // If nothing found, assume it falls within last region.
         return regions - 1;
     }
--- a/js/src/jit/JitcodeMap.h
+++ b/js/src/jit/JitcodeMap.h
@@ -1314,17 +1314,18 @@ class JitcodeRegionEntry
 
     static const uint32_t ENC3_PC_DELTA_MASK = 0x001ff8;
     static const int32_t ENC3_PC_DELTA_MAX = 0x1ff;
     static const int32_t ENC3_PC_DELTA_MIN = -ENC3_PC_DELTA_MAX - 1;
     static const unsigned ENC3_PC_DELTA_SHIFT = 3;
 
     //  byte 3    byte 2    byte 1    byte 0
     //  NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
-    //      Three-byte format.  nativeDelta in [0, 65535], pcDelta in [-4096, 4095]
+    //      Three-byte format.  nativeDelta in [0, 65535],
+    //                          pcDelta in [-4096, 4095]
     static const uint32_t ENC4_MASK = 0x7;
     static const uint32_t ENC4_MASK_VAL = 0x7;
 
     static const uint32_t ENC4_NATIVE_DELTA_MAX = 0xffff;
     static const unsigned ENC4_NATIVE_DELTA_SHIFT = 16;
 
     static const uint32_t ENC4_PC_DELTA_MASK = 0x0000fff8;
     static const int32_t ENC4_PC_DELTA_MAX = 0xfff;
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -3809,17 +3809,18 @@ SimpleArithOperand(MDefinition* op)
         && !op->mightBeType(MIRType::MagicOptimizedArguments)
         && !op->mightBeType(MIRType::MagicHole)
         && !op->mightBeType(MIRType::MagicIsConstructing);
 }
 
 static bool
 SafelyCoercesToDouble(MDefinition* op)
 {
-    // Strings and symbols are unhandled -- visitToDouble() doesn't support them yet.
+    // Strings and symbols are unhandled -- visitToDouble() doesn't support
+    // them yet.
     // Null is unhandled -- ToDouble(null) == 0, but (0 == null) is false.
     return SimpleArithOperand(op) && !op->mightBeType(MIRType::Null);
 }
 
 MIRType
 MCompare::inputType()
 {
     switch(compareType_) {
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -3097,38 +3097,44 @@ BufferInstructionIterator::maybeSkipAuto
     }
     if (InstIsBNop<BufferInstructionIterator>(*this))
         return next();
     return cur();
 }
 
 // Cases to be handled:
 // 1) no pools or branches in sight => return this+1
-// 2) branch to next instruction => return this+2, because a nop needed to be inserted into the stream.
-// 3) this+1 is an artificial guard for a pool => return first instruction after the pool
+// 2) branch to next instruction => return this+2, because a nop needed to be
+//    inserted into the stream.
+// 3) this+1 is an artificial guard for a pool => return first instruction
+//    after the pool
 // 4) this+1 is a natural guard => return the branch
-// 5) this is a branch, right before a pool => return first instruction after the pool
+// 5) this is a branch, right before a pool => return first instruction after
+//    the pool
 // in assembly form:
 // 1) add r0, r0, r0 <= this
 //    add r1, r1, r1 <= returned value
 //    add r2, r2, r2
 //
 // 2) add r0, r0, r0 <= this
 //    b foo
 //    foo:
 //    add r2, r2, r2 <= returned value
 //
 // 3) add r0, r0, r0 <= this
 //    b after_pool;
-//    .word 0xffff0002  # bit 15 being 0 indicates that the branch was not requested by the assembler
-//    0xdeadbeef        # the 2 indicates that there is 1 pool entry, and the pool header
+//    .word 0xffff0002  # bit 15 being 0 indicates that the branch was not
+//                      # requested by the assembler
+//    0xdeadbeef        # the 2 indicates that there is 1 pool entry, and the
+//                      # pool header
 //    add r4, r4, r4 <= returned value
 // 4) add r0, r0, r0 <= this
 //    b after_pool  <= returned value
-//    .word 0xffff8002  # bit 15 being 1 indicates that the branch was requested by the assembler
+//    .word 0xffff8002  # bit 15 being 1 indicates that the branch was
+//                      # requested by the assembler
 //    0xdeadbeef
 //    add r4, r4, r4
 // 5) b after_pool  <= this
 //    .word 0xffff8002  # bit 15 has no bearing on the returned value
 //    0xdeadbeef
 //    add r4, r4, r4  <= returned value
 
 Instruction*
--- a/js/src/jit/arm/Simulator-arm.h
+++ b/js/src/jit/arm/Simulator-arm.h
@@ -137,17 +137,18 @@ class Simulator
     // Disassemble one instruction.
     // "call disasm(instr)"
     void disasm(SimInstruction* instr);
 
     // Disassemble n instructions starting at instr.
     // "call disasm(instr, 3)"
     void disasm(SimInstruction* instr, size_t n);
 
-    // Skip backwards m instructions before starting, then disassemble n instructions.
+    // Skip backwards m instructions before starting, then disassemble n
+    // instructions.
     // "call disasm(instr, 3, 7)"
     void disasm(SimInstruction* instr, size_t m, size_t n);
 
     uintptr_t* addressOfStackLimit();
 
     // Accessors for register state. Reading the pc value adheres to the ARM
     // architecture specification and is off by a 8 from the currently executing
     // instruction.
--- a/js/src/jit/arm/Trampoline-arm.cpp
+++ b/js/src/jit/arm/Trampoline-arm.cpp
@@ -95,18 +95,18 @@ struct EnterJITStack
     JSObject* scopeChain;
     size_t numStackValues;
     Value* vp;
 };
 
 /*
  * This method generates a trampoline for a c++ function with the following
  * signature:
- *   void enter(void* code, int argc, Value* argv, InterpreterFrame* fp, CalleeToken
- *              calleeToken, JSObject* scopeChain, Value* vp)
+ *   void enter(void* code, int argc, Value* argv, InterpreterFrame* fp,
+ *              CalleeToken calleeToken, JSObject* scopeChain, Value* vp)
  *   ...using standard EABI calling convention
  */
 void
 JitRuntime::generateEnterJIT(JSContext* cx, MacroAssembler& masm)
 {
     enterJITOffset_ = startTrampolineCode(masm);
 
     const Address slot_token(sp, offsetof(EnterJITStack, token));
--- a/js/src/jit/arm64/Trampoline-arm64.cpp
+++ b/js/src/jit/arm64/Trampoline-arm64.cpp
@@ -23,17 +23,18 @@ using namespace js::jit;
 // All registers to save and restore. This includes the stack pointer, since we
 // use the ability to reference register values on the stack by index.
 static const LiveRegisterSet AllRegs =
     LiveRegisterSet(GeneralRegisterSet(Registers::AllMask & ~(1 << 31 | 1 << 30 | 1 << 29| 1 << 28)),
                 FloatRegisterSet(FloatRegisters::AllMask));
 
 /* This method generates a trampoline on ARM64 for a c++ function with
  * the following signature:
- *   bool blah(void* code, int argc, Value* argv, JSObject* scopeChain, Value* vp)
+ *   bool blah(void* code, int argc, Value* argv,
+ *             JSObject* scopeChain, Value* vp)
  *   ...using standard AArch64 calling convention
  */
 void
 JitRuntime::generateEnterJIT(JSContext* cx, MacroAssembler& masm)
 {
     enterJITOffset_ = startTrampolineCode(masm);
 
     const Register reg_code      = IntArgReg0; // EnterJitData::jitcode.
--- a/js/src/jit/x86/Trampoline-x86.cpp
+++ b/js/src/jit/x86/Trampoline-x86.cpp
@@ -414,20 +414,21 @@ JitRuntime::generateArgumentsRectifier(M
     masm.andl(Imm32(~(JitStackValueAlignment - 1)), ecx);
     masm.subl(esi, ecx);
 
     // Copy the number of actual arguments into edx.
     masm.mov(esi, edx);
 
     masm.moveValue(UndefinedValue(), ValueOperand(ebx, edi));
 
-    // NOTE: The fact that x86 ArgumentsRectifier saves the FramePointer is relied upon
-    // by the baseline bailout code.  If this changes, fix that code!  See
-    // BaselineJIT.cpp/BaselineStackBuilder::calculatePrevFramePtr, and
-    // BaselineJIT.cpp/InitFromBailout.  Check for the |#if defined(JS_CODEGEN_X86)| portions.
+    // NOTE: The fact that x86 ArgumentsRectifier saves the FramePointer is
+    // relied upon by the baseline bailout code.  If this changes, fix that
+    // code!  See BaselineJIT.cpp/BaselineStackBuilder::calculatePrevFramePtr,
+    // and BaselineJIT.cpp/InitFromBailout. Check for the
+    // |#if defined(JS_CODEGEN_X86)| portions.
     masm.push(FramePointer);
     masm.movl(esp, FramePointer); // Save %esp.
     masm.push(FramePointer /* padding */);
 
     // Caller:
     // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]]
     // '-- #esi ---'
     //
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -876,19 +876,19 @@ ReleaseAssertObjectHasNoWrappers(JSConte
  *     changed into wrappers for `target`, extending the illusion to those
  *     compartments as well.
  *
  * During navigation, we use the above technique to transplant the WindowProxy
  * into the new Window's compartment.
  *
  * A few rules:
  *
- * -   `origobj` and `target` must be two distinct objects of the same `JSClass`.
- *     Some classes may not support transplantation; WindowProxy objects and DOM
- *     nodes are OK.
+ * -   `origobj` and `target` must be two distinct objects of the same
+ *     `JSClass`.  Some classes may not support transplantation; WindowProxy
+ *     objects and DOM nodes are OK.
  *
  * -   `target` should be created specifically to be passed to this function.
  *     There must be no existing cross-compartment wrappers for it; ideally
  *     there shouldn't be any pointers to it at all, except the one passed in.
  *
  * -   `target` shouldn't be used afterwards. Instead, `JS_TransplantObject`
  *     returns a pointer to the transplanted object, which might be `target`
  *     but might be some other object in the same compartment. Use that.
--- a/js/src/vm/ArrayBufferObject.cpp
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -609,18 +609,18 @@ ArrayBufferObject::changeContents(JSCont
  *                                           SLOP
  * \_____________________________________________________________________/
  *                         MAPPED
  *
  * Invariants:
  *  - length only increases
  *  - 0 <= length <= maxSize (if present) <= boundsCheckLimit <= mappedSize
  *  - on ARM boundsCheckLimit must be a valid ARM immediate.
- *  - if maxSize is not specified, boundsCheckLimit/mappedSize may grow. They are
- *    otherwise constant.
+ *  - if maxSize is not specified, boundsCheckLimit/mappedSize may grow. They
+ *    are otherwise constant.
  *
  * NOTE: For asm.js on non-x64 we guarantee that
  *
  * length == maxSize == boundsCheckLimit == mappedSize
  *
  * That is, signal handlers will not be invoked, since they cannot emulate
  * asm.js accesses on non-x64 architectures.
  *
--- a/js/src/vm/BytecodeUtil.cpp
+++ b/js/src/vm/BytecodeUtil.cpp
@@ -2448,17 +2448,18 @@ js::CallResultEscapes(jsbytecode* pc)
         pc += JSOP_NOT_LENGTH;
 
     return *pc != JSOP_IFEQ;
 }
 
 extern bool
 js::IsValidBytecodeOffset(JSContext* cx, JSScript* script, size_t offset)
 {
-    // This could be faster (by following jump instructions if the target is <= offset).
+    // This could be faster (by following jump instructions if the target
+    // is <= offset).
     for (BytecodeRange r(cx, script); !r.empty(); r.popFront()) {
         size_t here = r.frontOffset();
         if (here >= offset)
             return here == offset;
     }
     return false;
 }
 
--- a/js/src/vm/Debugger.cpp
+++ b/js/src/vm/Debugger.cpp
@@ -3049,18 +3049,18 @@ Debugger::traceIncomingCrossCompartmentE
             if (!zone->isCollecting() || state == gc::State::Compact)
                 dbg->traceCrossCompartmentEdges(trc);
         }
     }
 }
 
 /*
  * This method has two tasks:
- *   1. Mark Debugger objects that are unreachable except for debugger hooks that
- *      may yet be called.
+ *   1. Mark Debugger objects that are unreachable except for debugger hooks
+ *      that may yet be called.
  *   2. Mark breakpoint handlers.
  *
  * This happens during the iterative part of the GC mark phase. This method
  * returns true if it has to mark anything; GC calls it repeatedly until it
  * returns false.
  */
 /* static */ bool
 Debugger::markIteratively(GCMarker* marker)
--- a/js/src/vm/Interpreter.cpp
+++ b/js/src/vm/Interpreter.cpp
@@ -1595,18 +1595,19 @@ ModOperation(JSContext* cx, HandleValue 
     return true;
 }
 
 static MOZ_ALWAYS_INLINE bool
 SetObjectElementOperation(JSContext* cx, HandleObject obj, HandleId id, HandleValue value,
                           HandleValue receiver, bool strict,
                           JSScript* script = nullptr, jsbytecode* pc = nullptr)
 {
-    // receiver != obj happens only at super[expr], where we expect to find the property
-    // People probably aren't building hashtables with |super| anyway.
+    // receiver != obj happens only at super[expr], where we expect to find the
+    // property. People probably aren't building hashtables with |super|
+    // anyway.
     TypeScript::MonitorAssign(cx, obj, id);
 
     if (obj->isNative() && JSID_IS_INT(id)) {
         uint32_t length = obj->as<NativeObject>().getDenseInitializedLength();
         int32_t i = JSID_TO_INT(id);
         if ((uint32_t)i >= length) {
             // Annotate script if provided with information (e.g. baseline)
             if (script && script->hasBaselineScript() && IsSetElemPC(pc))
--- a/js/src/vm/SelfHosting.cpp
+++ b/js/src/vm/SelfHosting.cpp
@@ -588,17 +588,18 @@ intrinsic_DefineDataProperty(JSContext* 
 
     args.rval().setUndefined();
     return true;
 }
 
 static bool
 intrinsic_DefineProperty(JSContext* cx, unsigned argc, Value* vp)
 {
-    // _DefineProperty(object, propertyKey, attributes, valueOrGetter, setter, strict)
+    // _DefineProperty(object, propertyKey, attributes,
+    //                 valueOrGetter, setter, strict)
     CallArgs args = CallArgsFromVp(argc, vp);
     MOZ_ASSERT(args.length() == 6);
     MOZ_ASSERT(args[0].isObject());
     MOZ_ASSERT(args[1].isString() || args[1].isNumber() || args[1].isSymbol());
     MOZ_ASSERT(args[2].isInt32());
     MOZ_ASSERT(args[5].isBoolean());
 
     RootedObject obj(cx, &args[0].toObject());
--- a/js/src/vm/StringType.h
+++ b/js/src/vm/StringType.h
@@ -89,19 +89,19 @@ static const size_t UINT32_CHAR_BUFFER_L
  *  - To avoid comparing O(n) string equality comparison, strings can be
  *    canonicalized to "atoms" (JSAtom) such that there is a single atom with a
  *    given (length,chars).
  *
  *  - To avoid copying all strings created through the JSAPI, an "external"
  *    string (JSExternalString) can be created whose chars are managed by the
  *    JSAPI client.
  *
- *  - To avoid using two bytes per character for every string, string characters
- *    are stored as Latin1 instead of TwoByte if all characters are representable
- *    in Latin1.
+ *  - To avoid using two bytes per character for every string, string
+ *    characters are stored as Latin1 instead of TwoByte if all characters are
+ *    representable in Latin1.
  *
  *  - To avoid slow conversions from strings to integer indexes, we cache 16 bit
  *    unsigned indexes on strings representing such numbers.
  *
  * Although all strings share the same basic memory layout, we can conceptually
  * arrange them into a hierarchy of operations/invariants and represent this
  * hierarchy in C++ with classes:
  *
--- a/js/src/vm/TraceLogging.h
+++ b/js/src/vm/TraceLogging.h
@@ -37,19 +37,19 @@ namespace jit {
  * and/or the start and stop of an event. This is implemented with as low
  * overhead as possible to not interfere with running.
  *
  * Logging something is done in 3 stages.
  * 1) Get the tracelogger of the current thread. cx may be omitted, in which
  *    case it will be fetched from TLS.
  *     - TraceLoggerForCurrentThread(cx);
  *
- * 2) Optionally create a TraceLoggerEvent for the text that needs to get logged. This
- *    step takes some time, so try to do this beforehand, outside the hot
- *    path and don't do unnecessary repetitions, since it will cripple
+ * 2) Optionally create a TraceLoggerEvent for the text that needs to get
+ *    logged. This step takes some time, so try to do this beforehand, outside
+ *    the hot path and don't do unnecessary repetitions, since it will cripple
  *    performance.
  *     - TraceLoggerEvent event(logger, "foo");
  *
  *    There are also some predefined events. They are located in
  *    TraceLoggerTextId. They don't require to create an TraceLoggerEvent and
  *    can also be used as an argument to these functions.
  *
  * 3) Log the occurrence of a single event:
--- a/js/src/vm/TraceLoggingGraph.h
+++ b/js/src/vm/TraceLoggingGraph.h
@@ -29,17 +29,18 @@
  *  - treeFormat: The format used to encode the tree. By default "64,64,31,1,32".
  *                There are currently no other formats to save the tree.
  *     - 64,64,31,1,32 signifies how many bytes are used for the different
  *       parts of the tree.
  *       => 64 bits: Time Stamp Counter of start of event.
  *       => 64 bits: Time Stamp Counter of end of event.
  *       => 31 bits: Index to dict file containing the log text.
  *       =>  1 bit:  Boolean signifying if this entry has children.
- *                   When true, the child can be found just right after this entry.
+ *                   When true, the child can be found just right after this
+ *                   entry.
  *       => 32 bits: Containing the ID of the next event on the same depth
  *                   or 0 if there isn't an event on the same depth anymore.
  *
  *        /-> The position in the file. Id is this divided by size of entry.
  *        |   So in this case this would be 1 (192bits per entry).
  *        |                              /-> Indicates there are children. The
  *        |                              |   first child is located at current
  *        |                              |   ID + 1. So 1 + 1 in this case: 2.
--- a/js/src/wasm/WasmBuiltins.cpp
+++ b/js/src/wasm/WasmBuiltins.cpp
@@ -427,18 +427,18 @@ TruncateDoubleToInt64(double input)
     if (input >= double(INT64_MAX) || input < double(INT64_MIN) || IsNaN(input))
         return 0x8000000000000000;
     return int64_t(input);
 }
 
 static uint64_t
 TruncateDoubleToUint64(double input)
 {
-    // Note: UINT64_MAX is not representable in double. It is actually UINT64_MAX + 1.
-    // Therefore also sending the failure value.
+    // Note: UINT64_MAX is not representable in double. It is actually
+    // UINT64_MAX + 1.  Therefore also sending the failure value.
     if (input >= double(UINT64_MAX) || input <= -1.0 || IsNaN(input))
         return 0x8000000000000000;
     return uint64_t(input);
 }
 
 static int64_t
 SaturatingTruncateDoubleToInt64(double input)
 {
@@ -867,20 +867,20 @@ PopulateTypedNatives(TypedNativeToFuncPt
 #undef FOR_EACH_UNARY_NATIVE
 #undef FOR_EACH_BINARY_NATIVE
 
 // ============================================================================
 // Process-wide builtin thunk set
 //
 // Thunks are inserted between wasm calls and the C++ callee and achieve two
 // things:
-//  - bridging the few differences between the internal wasm ABI and the external
-//    native ABI (viz. float returns on x86 and soft-fp ARM)
-//  - executing an exit prologue/epilogue which in turn allows any asynchronous
-//    interrupt to see the full stack up to the wasm operation that called out
+//  - bridging the few differences between the internal wasm ABI and the
+//    external native ABI (viz. float returns on x86 and soft-fp ARM)
+//  - executing an exit prologue/epilogue which in turn allows any profiling
+//    iterator to see the full stack up to the wasm operation that called out
 //
 // Thunks are created for two kinds of C++ callees, enumerated above:
 //  - SymbolicAddress: for statically compiled calls in the wasm module
 //  - Imported JS builtins: optimized calls to imports
 //
 // All thunks are created up front, lazily, when the first wasm module is
 // compiled in the process. Thunks are kept alive until the JS engine shuts down
 // in the process. No thunks are created at runtime after initialization. This
--- a/js/src/wasm/WasmIonCompile.cpp
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -1160,17 +1160,18 @@ class FunctionCompiler
             return false;
 
         uint32_t stackBytes = call->abi_.stackBytesConsumedSoFar();
         if (call->childClobbers_) {
             call->spIncrement_ = AlignBytes(call->maxChildStackBytes_, WasmStackAlignment);
             for (MWasmStackArg* stackArg : call->stackArgs_)
                 stackArg->incrementOffset(call->spIncrement_);
 
-            // If instanceArg_ is not initialized then instanceArg_.kind() != ABIArg::Stack
+            // If instanceArg_ is not initialized then
+            // instanceArg_.kind() != ABIArg::Stack
             if (call->instanceArg_.kind() == ABIArg::Stack) {
                 call->instanceArg_ = ABIArg(call->instanceArg_.offsetFromArgBase() +
                                             call->spIncrement_);
             }
 
             stackBytes += call->spIncrement_;
         } else {
             call->spIncrement_ = 0;
--- a/js/src/wasm/WasmSignalHandlers.cpp
+++ b/js/src/wasm/WasmSignalHandlers.cpp
@@ -1265,18 +1265,19 @@ MachExceptionHandlerThread(JSContext* cx
         // the kernel. The kernel is waiting for us to reply with instructions.
         // Our default is the "not handled" reply (by setting the RetCode field
         // of the reply to KERN_FAILURE) which tells the kernel to continue
         // searching at the process and system level. If this is an asm.js
         // expected exception, we handle it and return KERN_SUCCESS.
         bool handled = HandleMachException(cx, request);
         kern_return_t replyCode = handled ? KERN_SUCCESS : KERN_FAILURE;
 
-        // This magic incantation to send a reply back to the kernel was derived
-        // from the exc_server generated by 'mig -v /usr/include/mach/mach_exc.defs'.
+        // This magic incantation to send a reply back to the kernel was
+        // derived from the exc_server generated by
+        // 'mig -v /usr/include/mach/mach_exc.defs'.
         __Reply__exception_raise_t reply;
         reply.Head.msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.body.Head.msgh_bits), 0);
         reply.Head.msgh_size = sizeof(reply);
         reply.Head.msgh_remote_port = request.body.Head.msgh_remote_port;
         reply.Head.msgh_local_port = MACH_PORT_NULL;
         reply.Head.msgh_id = request.body.Head.msgh_id + 100;
         reply.NDR = NDR_record;
         reply.RetCode = replyCode;