Bug 1026919 - IonMonkey: (ARM) Cleanup of the assembler and backend. r=mjrosenb, a=lmandel
author: Douglas Crosher <dtc-moz@scieneer.com>
Thu, 17 Jul 2014 02:40:51 +1000
changeset 208038 73891d7ac22ca27d76ca9b4a4f45330c0b1941ba
parent 208037 8502e15d3a77a32883c0c82bdac7eaac89669540
child 208039 78872d6478e32f67828b06e42b3a37900e6e0332
push id: 3741
push user: asasaki@mozilla.com
push date: Mon, 21 Jul 2014 20:25:18 +0000
treeherder: mozilla-beta@4d6f46f5af68 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: mjrosenb, lmandel
bugs: 1026919
milestone: 32.0a2
Bug 1026919 - IonMonkey: (ARM) Cleanup of the assembler and backend. r=mjrosenb, a=lmandel
js/src/irregexp/NativeRegExpMacroAssembler.cpp
js/src/jit/AsmJS.cpp
js/src/jit/AsmJSModule.cpp
js/src/jit/BaselineCompiler.cpp
js/src/jit/CodeGenerator.cpp
js/src/jit/Ion.cpp
js/src/jit/IonCaches.cpp
js/src/jit/IonCaches.h
js/src/jit/IonFrames.cpp
js/src/jit/IonMacroAssembler.h
js/src/jit/LIR.h
js/src/jit/Safepoints.cpp
js/src/jit/arm/Architecture-arm.cpp
js/src/jit/arm/Architecture-arm.h
js/src/jit/arm/Assembler-arm.cpp
js/src/jit/arm/Assembler-arm.h
js/src/jit/arm/Bailouts-arm.cpp
js/src/jit/arm/BaselineHelpers-arm.h
js/src/jit/arm/BaselineIC-arm.cpp
js/src/jit/arm/BaselineRegisters-arm.h
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm/CodeGenerator-arm.h
js/src/jit/arm/LIR-arm.h
js/src/jit/arm/Lowering-arm.cpp
js/src/jit/arm/MacroAssembler-arm.cpp
js/src/jit/arm/MacroAssembler-arm.h
js/src/jit/arm/MoveEmitter-arm.cpp
js/src/jit/arm/Simulator-arm.cpp
js/src/jit/arm/Simulator-arm.h
js/src/jit/arm/Trampoline-arm.cpp
js/src/jit/mips/Assembler-mips.cpp
js/src/jit/mips/Assembler-mips.h
js/src/jit/mips/CodeGenerator-mips.cpp
js/src/jit/mips/MacroAssembler-mips.cpp
js/src/jit/mips/MoveEmitter-mips.cpp
js/src/jit/shared/Assembler-x86-shared.h
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/shared/CodeGenerator-x86-shared.cpp
js/src/jit/shared/IonAssemblerBuffer.h
js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
js/src/vm/Runtime.cpp
--- a/js/src/irregexp/NativeRegExpMacroAssembler.cpp
+++ b/js/src/irregexp/NativeRegExpMacroAssembler.cpp
@@ -450,17 +450,17 @@ NativeRegExpMacroAssembler::GenerateCode
     writePerfSpewerJitCodeProfile(code, "RegExp");
 #endif
 
     for (size_t i = 0; i < labelPatches.length(); i++) {
         LabelPatch &v = labelPatches[i];
         JS_ASSERT(!v.label);
         v.patchOffset.fixup(&masm);
         uintptr_t offset = masm.actualOffset(v.labelOffset);
-        Assembler::patchDataWithValueCheck(CodeLocationLabel(code, v.patchOffset),
+        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, v.patchOffset),
                                            ImmPtr(code->raw() + offset),
                                            ImmPtr(0));
     }
 
     IonSpew(IonSpew_Codegen, "Created RegExp (raw %p length %d)",
             (void *) code->raw(), (int) masm.bytesNeeded());
 
     RegExpCode res;
--- a/js/src/jit/AsmJS.cpp
+++ b/js/src/jit/AsmJS.cpp
@@ -1571,17 +1571,17 @@ class MOZ_STACK_CLASS ModuleCompiler
             while (labelOffset != LabelBase::INVALID_OFFSET) {
                 size_t patchAtOffset = masm_.labelOffsetToPatchOffset(labelOffset);
                 AsmJSModule::RelativeLink link(AsmJSModule::RelativeLink::CodeLabel);
                 link.patchAtOffset = patchAtOffset;
                 link.targetOffset = targetOffset;
                 if (!module_->addRelativeLink(link))
                     return false;
 
-                labelOffset = Assembler::extractCodeLabelOffset(module_->codeBase() +
+                labelOffset = Assembler::ExtractCodeLabelOffset(module_->codeBase() +
                                                                 patchAtOffset);
             }
         }
 
         // Function-pointer-table entries
         for (unsigned tableIndex = 0; tableIndex < funcPtrTables_.length(); tableIndex++) {
             FuncPtrTable &table = funcPtrTables_[tableIndex];
             unsigned tableBaseOffset = module_->offsetOfGlobalData() + table.globalDataOffset();
@@ -6928,17 +6928,17 @@ GenerateInterruptExit(ModuleCompiler &m,
     masm.branchIfFalseBool(ReturnReg, throwLabel);
 
     // This will restore stack to the address before the call.
     masm.movePtr(s0, StackPointer);
     masm.PopRegsInMask(AllRegsExceptSP);
 
     // Pop resumePC into PC. Clobber HeapReg to make the jump and restore it
     // during jump delay slot.
-    JS_ASSERT(Imm16::isInSignedRange(m.module().heapOffset()));
+    JS_ASSERT(Imm16::IsInSignedRange(m.module().heapOffset()));
     masm.pop(HeapReg);
     masm.as_jr(HeapReg);
     masm.loadPtr(Address(GlobalReg, m.module().heapOffset()), HeapReg);
 #elif defined(JS_CODEGEN_ARM)
     masm.setFramePushed(0);         // set to zero so we can use masm.framePushed() below
     masm.PushRegsInMask(RegisterSet(GeneralRegisterSet(Registers::AllMask & ~(1<<Registers::sp)), FloatRegisterSet(uint32_t(0))));   // save all GP registers,excep sp
 
     // Save both the APSR and FPSCR in non-volatile registers.
--- a/js/src/jit/AsmJSModule.cpp
+++ b/js/src/jit/AsmJSModule.cpp
@@ -58,17 +58,17 @@ AsmJSModule::initHeap(Handle<ArrayBuffer
         void *addr = access.patchOffsetAt(code_);
         uint32_t disp = reinterpret_cast<uint32_t>(JSC::X86Assembler::getPointer(addr));
         JS_ASSERT(disp <= INT32_MAX);
         JSC::X86Assembler::setPointer(addr, (void *)(heapOffset + disp));
     }
 #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
     uint32_t heapLength = heap->byteLength();
     for (unsigned i = 0; i < heapAccesses_.length(); i++) {
-        jit::Assembler::updateBoundsCheck(heapLength,
+        jit::Assembler::UpdateBoundsCheck(heapLength,
                                           (jit::Instruction*)(heapAccesses_[i].offset() + code_));
     }
 #endif
 }
 
 static uint8_t *
 AllocateExecutableMemory(ExclusiveContext *cx, size_t totalBytes)
 {
@@ -270,21 +270,21 @@ AddressOf(AsmJSImmKind kind, ExclusiveCo
     MOZ_ASSUME_UNREACHABLE("Bad AsmJSImmKind");
     return nullptr;
 }
 
 void
 AsmJSModule::restoreToInitialState(ArrayBufferObject *maybePrevBuffer, ExclusiveContext *cx)
 {
 #ifdef DEBUG
-    // Put the absolute links back to -1 so patchDataWithValueCheck assertions
+    // Put the absolute links back to -1 so PatchDataWithValueCheck assertions
     // in staticallyLink are valid.
     for (size_t i = 0; i < staticLinkData_.absoluteLinks.length(); i++) {
         AbsoluteLink link = staticLinkData_.absoluteLinks[i];
-        Assembler::patchDataWithValueCheck(CodeLocationLabel(code_ + link.patchAt.offset()),
+        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code_ + link.patchAt.offset()),
                                            PatchedImmPtr((void*)-1),
                                            PatchedImmPtr(AddressOf(link.target, cx)));
     }
 #endif
 
     if (maybePrevBuffer) {
 #if defined(JS_CODEGEN_X86)
         // Subtract out the base-pointer added by AsmJSModule::initHeap.
@@ -315,22 +315,22 @@ AsmJSModule::staticallyLink(ExclusiveCon
 
     for (size_t i = 0; i < staticLinkData_.relativeLinks.length(); i++) {
         RelativeLink link = staticLinkData_.relativeLinks[i];
         uint8_t *patchAt = code_ + link.patchAtOffset;
         uint8_t *target = code_ + link.targetOffset;
         if (link.isRawPointerPatch())
             *(uint8_t **)(patchAt) = target;
         else
-            Assembler::patchInstructionImmediate(patchAt, PatchedImmPtr(target));
+            Assembler::PatchInstructionImmediate(patchAt, PatchedImmPtr(target));
     }
 
     for (size_t i = 0; i < staticLinkData_.absoluteLinks.length(); i++) {
         AbsoluteLink link = staticLinkData_.absoluteLinks[i];
-        Assembler::patchDataWithValueCheck(CodeLocationLabel(code_ + link.patchAt.offset()),
+        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code_ + link.patchAt.offset()),
                                            PatchedImmPtr(AddressOf(link.target, cx)),
                                            PatchedImmPtr((void*)-1));
     }
 
     // Initialize global data segment
 
     for (size_t i = 0; i < exits_.length(); i++) {
         exitIndexToGlobalDatum(i).exit = interpExitTrampoline(exits_[i]);
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -213,17 +213,17 @@ BaselineCompiler::compile()
     baselineScript->adoptFallbackStubs(&stubSpace_);
 
     // Patch IC loads using IC entries
     for (size_t i = 0; i < icLoadLabels_.length(); i++) {
         CodeOffsetLabel label = icLoadLabels_[i].label;
         label.fixup(&masm);
         size_t icEntry = icLoadLabels_[i].icEntry;
         ICEntry *entryAddr = &(baselineScript->icEntry(icEntry));
-        Assembler::patchDataWithValueCheck(CodeLocationLabel(code, label),
+        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label),
                                            ImmPtr(entryAddr),
                                            ImmPtr((void*)-1));
     }
 
     if (modifiesArguments_)
         baselineScript->setModifiesArguments();
 
     // All barriers are emitted off-by-default, toggle them on if needed.
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -6792,17 +6792,17 @@ CodeGenerator::link(JSContext *cx, types
 
     // In parallel execution mode, when we first compile a script, we
     // don't know that its potential callees are compiled, so set a
     // flag warning that the callees may not be fully compiled.
     if (!callTargets.empty())
         ionScript->setHasUncompiledCallTarget();
 
     invalidateEpilogueData_.fixup(&masm);
-    Assembler::patchDataWithValueCheck(CodeLocationLabel(code, invalidateEpilogueData_),
+    Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, invalidateEpilogueData_),
                                        ImmPtr(ionScript),
                                        ImmPtr((void*)-1));
 
     IonSpew(IonSpew_Codegen, "Created IonScript %p (raw %p)",
             (void *) ionScript, (void *) code->raw());
 
     ionScript->setInvalidationEpilogueDataOffset(invalidateEpilogueData_.offset());
     ionScript->setOsrPc(gen->info().osrPc());
@@ -6814,17 +6814,17 @@ CodeGenerator::link(JSContext *cx, types
 
 #if defined(JS_ION_PERF)
     if (PerfEnabled())
         perfSpewer_.writeProfile(script, code, masm);
 #endif
 
     for (size_t i = 0; i < ionScriptLabels_.length(); i++) {
         ionScriptLabels_[i].fixup(&masm);
-        Assembler::patchDataWithValueCheck(CodeLocationLabel(code, ionScriptLabels_[i]),
+        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, ionScriptLabels_[i]),
                                            ImmPtr(ionScript),
                                            ImmPtr((void*)-1));
     }
 
     // for generating inline caches during the execution.
     if (runtimeData_.length())
         ionScript->copyRuntimeData(&runtimeData_[0]);
     if (cacheList_.length())
@@ -6852,24 +6852,24 @@ CodeGenerator::link(JSContext *cx, types
         ionScript->copyCallTargetEntries(callTargets.begin());
     if (patchableBackedges_.length() > 0)
         ionScript->copyPatchableBackedges(cx, code, patchableBackedges_.begin());
 
 #ifdef JS_TRACE_LOGGING
     TraceLogger *logger = TraceLoggerForMainThread(cx->runtime());
     for (uint32_t i = 0; i < patchableTraceLoggers_.length(); i++) {
         patchableTraceLoggers_[i].fixup(&masm);
-        Assembler::patchDataWithValueCheck(CodeLocationLabel(code, patchableTraceLoggers_[i]),
+        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, patchableTraceLoggers_[i]),
                                            ImmPtr(logger),
                                            ImmPtr(nullptr));
     }
     uint32_t scriptId = TraceLogCreateTextId(logger, script);
     for (uint32_t i = 0; i < patchableTLScripts_.length(); i++) {
         patchableTLScripts_[i].fixup(&masm);
-        Assembler::patchDataWithValueCheck(CodeLocationLabel(code, patchableTLScripts_[i]),
+        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, patchableTLScripts_[i]),
                                            ImmPtr((void *) uintptr_t(scriptId)),
                                            ImmPtr((void *)0));
     }
 #endif
 
     switch (executionMode) {
       case SequentialExecution:
         // The correct state for prebarriers is unknown until the end of compilation,
@@ -8632,17 +8632,17 @@ CodeGenerator::visitHasClass(LHasClass *
 }
 
 bool
 CodeGenerator::visitAsmJSCall(LAsmJSCall *ins)
 {
     MAsmJSCall *mir = ins->mir();
 
 #if defined(JS_CODEGEN_ARM)
-    if (!useHardFpABI() && mir->callee().which() == MAsmJSCall::Callee::Builtin) {
+    if (!UseHardFpABI() && mir->callee().which() == MAsmJSCall::Callee::Builtin) {
         for (unsigned i = 0, e = ins->numOperands(); i < e; i++) {
             LAllocation *a = ins->getOperand(i);
             if (a->isFloatReg()) {
                 FloatRegister fr = ToFloatRegister(a);
                 int srcId = fr.code() * 2;
                 masm.ma_vxfer(fr, Register::FromCode(srcId), Register::FromCode(srcId+1));
             }
         }
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -2619,24 +2619,24 @@ InvalidateActivation(FreeOp *fop, uint8_
         // IonScript pointer embedded into the invalidation epilogue)
         // where the safepointed call instruction used to be. We rely on
         // the call sequence causing the safepoint being >= the size of
         // a uint32, which is checked during safepoint index
         // construction.
         CodeLocationLabel dataLabelToMunge(it.returnAddressToFp());
         ptrdiff_t delta = ionScript->invalidateEpilogueDataOffset() -
                           (it.returnAddressToFp() - ionCode->raw());
-        Assembler::patchWrite_Imm32(dataLabelToMunge, Imm32(delta));
+        Assembler::PatchWrite_Imm32(dataLabelToMunge, Imm32(delta));
 
         CodeLocationLabel osiPatchPoint = SafepointReader::InvalidationPatchPoint(ionScript, si);
         CodeLocationLabel invalidateEpilogue(ionCode, CodeOffsetLabel(ionScript->invalidateEpilogueOffset()));
 
         IonSpew(IonSpew_Invalidate, "   ! Invalidate ionScript %p (ref %u) -> patching osipoint %p",
                 ionScript, ionScript->refcount(), (void *) osiPatchPoint.raw());
-        Assembler::patchWrite_NearCall(osiPatchPoint, invalidateEpilogue);
+        Assembler::PatchWrite_NearCall(osiPatchPoint, invalidateEpilogue);
     }
 
     IonSpew(IonSpew_Invalidate, "END invalidating activation");
 }
 
 void
 jit::StopAllOffThreadCompilations(JSCompartment *comp)
 {
--- a/js/src/jit/IonCaches.cpp
+++ b/js/src/jit/IonCaches.cpp
@@ -241,17 +241,17 @@ class IonCache::StubAttacher
         rejoinOffset_.fixup(&masm);
         CodeLocationJump rejoinJump(code, rejoinOffset_);
         PatchJump(rejoinJump, rejoinLabel_);
     }
 
     void patchStubCodePointer(MacroAssembler &masm, JitCode *code) {
         if (hasStubCodePatchOffset_) {
             stubCodePatchOffset_.fixup(&masm);
-            Assembler::patchDataWithValueCheck(CodeLocationLabel(code, stubCodePatchOffset_),
+            Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, stubCodePatchOffset_),
                                                ImmPtr(code), STUB_ADDR);
         }
     }
 
     virtual void patchNextStubJump(MacroAssembler &masm, JitCode *code) = 0;
 };
 
 const ImmPtr IonCache::StubAttacher::STUB_ADDR = ImmPtr((void*)0xdeadc0de);
@@ -368,17 +368,17 @@ DispatchIonCache::bindInitialJump(MacroA
 void
 DispatchIonCache::updateBaseAddress(JitCode *code, MacroAssembler &masm)
 {
     // The address of firstStub_ should be pointer aligned.
     JS_ASSERT(uintptr_t(&firstStub_) % sizeof(uintptr_t) == 0);
 
     IonCache::updateBaseAddress(code, masm);
     dispatchLabel_.fixup(&masm);
-    Assembler::patchDataWithValueCheck(CodeLocationLabel(code, dispatchLabel_),
+    Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, dispatchLabel_),
                                        ImmPtr(&firstStub_),
                                        ImmPtr((void*)-1));
     firstStub_ = fallbackLabel_.raw();
     rejoinLabel_.repoint(code, &masm);
 }
 
 void
 IonCache::attachStub(MacroAssembler &masm, StubAttacher &attacher, Handle<JitCode *> code)
--- a/js/src/jit/IonCaches.h
+++ b/js/src/jit/IonCaches.h
@@ -370,17 +370,17 @@ class RepatchIonCache : public IonCache
     static const size_t REJOIN_LABEL_OFFSET = 0;
 #endif
 
     CodeLocationLabel rejoinLabel() const {
         uint8_t *ptr = initialJump_.raw();
 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
         uint32_t i = 0;
         while (i < REJOIN_LABEL_OFFSET)
-            ptr = Assembler::nextInstruction(ptr, &i);
+            ptr = Assembler::NextInstruction(ptr, &i);
 #endif
         return CodeLocationLabel(ptr);
     }
 
   public:
     RepatchIonCache()
       : initialJump_(),
         lastJump_()
--- a/js/src/jit/IonFrames.cpp
+++ b/js/src/jit/IonFrames.cpp
@@ -129,17 +129,17 @@ JitFrameIterator::checkInvalidation(IonS
         invalidated = !script->hasIonScript() ||
             !script->ionScript()->containsReturnAddress(returnAddr);
     }
     if (!invalidated)
         return false;
 
     int32_t invalidationDataOffset = ((int32_t *) returnAddr)[-1];
     uint8_t *ionScriptDataOffset = returnAddr + invalidationDataOffset;
-    IonScript *ionScript = (IonScript *) Assembler::getPointer(ionScriptDataOffset);
+    IonScript *ionScript = (IonScript *) Assembler::GetPointer(ionScriptDataOffset);
     JS_ASSERT(ionScript->containsReturnAddress(returnAddr));
     *ionScriptOut = ionScript;
     return true;
 }
 
 CalleeToken
 JitFrameIterator::calleeToken() const
 {
@@ -1340,17 +1340,17 @@ OsiIndex::fixUpOffset(MacroAssembler &ma
 }
 
 uint32_t
 OsiIndex::returnPointDisplacement() const
 {
     // In general, pointer arithmetic on code is bad, but in this case,
     // getting the return address from a call instruction, stepping over pools
     // would be wrong.
-    return callPointDisplacement_ + Assembler::patchWrite_NearCallSize();
+    return callPointDisplacement_ + Assembler::PatchWrite_NearCallSize();
 }
 
 SnapshotIterator::SnapshotIterator(IonScript *ionScript, SnapshotOffset snapshotOffset,
                                    IonJSFrameLayout *fp, const MachineState &machine)
   : snapshot_(ionScript->snapshots(),
               snapshotOffset,
               ionScript->snapshotsRVATableSize(),
               ionScript->snapshotsListSize()),
--- a/js/src/jit/IonMacroAssembler.h
+++ b/js/src/jit/IonMacroAssembler.h
@@ -890,17 +890,17 @@ class MacroAssembler : public MacroAssem
 
     void link(JitCode *code) {
         JS_ASSERT(!oom());
         // If this code can transition to C++ code and witness a GC, then we need to store
         // the JitCode onto the stack in order to GC it correctly.  exitCodePatch should
         // be unset if the code never needed to push its JitCode*.
         if (hasEnteredExitFrame()) {
             exitCodePatch_.fixup(this);
-            patchDataWithValueCheck(CodeLocationLabel(code, exitCodePatch_),
+            PatchDataWithValueCheck(CodeLocationLabel(code, exitCodePatch_),
                                     ImmPtr(code),
                                     ImmPtr((void*)-1));
         }
 
     }
 
     // Generates code used to complete a bailout.
     void generateBailoutTail(Register scratch, Register bailoutInfo);
--- a/js/src/jit/LIR.h
+++ b/js/src/jit/LIR.h
@@ -1381,17 +1381,17 @@ class LSafepoint : public TempObject
     }
     void setOffset(uint32_t offset) {
         safepointOffset_ = offset;
     }
     uint32_t osiReturnPointOffset() const {
         // In general, pointer arithmetic on code is bad, but in this case,
         // getting the return address from a call instruction, stepping over pools
         // would be wrong.
-        return osiCallPointOffset_ + Assembler::patchWrite_NearCallSize();
+        return osiCallPointOffset_ + Assembler::PatchWrite_NearCallSize();
     }
     uint32_t osiCallPointOffset() const {
         return osiCallPointOffset_;
     }
     void setOsiCallPointOffset(uint32_t osiCallPointOffset) {
         JS_ASSERT(!osiCallPointOffset_);
         osiCallPointOffset_ = osiCallPointOffset;
     }
--- a/js/src/jit/Safepoints.cpp
+++ b/js/src/jit/Safepoints.cpp
@@ -366,17 +366,17 @@ SafepointReader::SafepointReader(IonScri
     allFloatSpills_ = FloatRegisterSet(ReadRegisterMask(stream_));
 
     advanceFromGcRegs();
 }
 
 uint32_t
 SafepointReader::osiReturnPointOffset() const
 {
-    return osiCallPointOffset_ + Assembler::patchWrite_NearCallSize();
+    return osiCallPointOffset_ + Assembler::PatchWrite_NearCallSize();
 }
 
 CodeLocationLabel
 SafepointReader::InvalidationPatchPoint(IonScript *script, const SafepointIndex *si)
 {
     SafepointReader reader(script, si);
 
     return CodeLocationLabel(script->method(), CodeOffsetLabel(reader.osiCallPointOffset()));
--- a/js/src/jit/arm/Architecture-arm.cpp
+++ b/js/src/jit/arm/Architecture-arm.cpp
@@ -134,20 +134,19 @@ uint32_t GetARMFlags()
     if (fd > 0) {
         Elf32_auxv_t aux;
         while (read(fd, &aux, sizeof(Elf32_auxv_t))) {
             if (aux.a_type == AT_HWCAP) {
                 close(fd);
                 flags = aux.a_un.a_val;
                 isSet = true;
 #if defined(__ARM_ARCH_7__) || defined (__ARM_ARCH_7A__)
-                // this should really be detected at runtime, but
-                // /proc/*/auxv doesn't seem to carry the ISA
-                // I could look in /proc/cpuinfo as well, but
-                // the chances that it will be different from this
+                // This should really be detected at runtime, but /proc/*/auxv
+                // doesn't seem to carry the ISA. We could look in /proc/cpuinfo
+                // as well, but the chances that it will be different from this
                 // are low.
                 flags |= HWCAP_ARMv7;
 #endif
                 return flags;
             }
         }
         close(fd);
     }
@@ -196,67 +195,67 @@ uint32_t GetARMFlags()
         flags |= HWCAP_IDIVA;
 
     if (strstr(buf, " idivt "))
         flags |= HWCAP_IDIVT;
 
     if (strstr(buf, " neon "))
         flags |= HWCAP_NEON;
 
-    // not part of the HWCAP flag, but I need to know this, and we're not using
-    //  that bit, so... I'm using it
+    // Not part of the HWCAP flag, but we need to know this, and we're not using
+    // that bit, so... we are using it.
     if (strstr(buf, "ARMv7"))
         flags |= HWCAP_ARMv7;
 
 #ifdef DEBUG
     IonSpew(IonSpew_Codegen, "ARMHWCAP: '%s'\n   flags: 0x%x\n", buf, flags);
 #endif
 
     isSet = true;
     return flags;
 #endif
 
     return 0;
 #endif // JS_ARM_SIMULATOR
 }
 
-bool hasMOVWT()
+bool HasMOVWT()
 {
     return GetARMFlags() & HWCAP_ARMv7;
 }
-bool hasVFPv3()
+bool HasVFPv3()
 {
     return GetARMFlags() & HWCAP_VFPv3;
 }
-bool hasVFP()
+bool HasVFP()
 {
     return GetARMFlags() & HWCAP_VFP;
 }
 
-bool has32DP()
+bool Has32DP()
 {
     return !(GetARMFlags() & HWCAP_VFPv3D16 && !(GetARMFlags() & HWCAP_NEON));
 }
-bool useConvReg()
+bool UseConvReg()
 {
-    return has32DP();
+    return Has32DP();
 }
 
-bool hasIDIV()
+bool HasIDIV()
 {
 #if defined HWCAP_IDIVA
     return GetARMFlags() & HWCAP_IDIVA;
 #else
     return false;
 #endif
 }
 
 // This is defined in the header and inlined when not using the simulator.
 #if defined(JS_ARM_SIMULATOR)
-bool useHardFpABI()
+bool UseHardFpABI()
 {
     return GetARMFlags() & HWCAP_USE_HARDFP_ABI;
 }
 #endif
 
 Registers::Code
 Registers::FromName(const char *name)
 {
--- a/js/src/jit/arm/Architecture-arm.h
+++ b/js/src/jit/arm/Architecture-arm.h
@@ -7,17 +7,18 @@
 #ifndef jit_arm_Architecture_arm_h
 #define jit_arm_Architecture_arm_h
 
 #include <limits.h>
 #include <stdint.h>
 
 #include "js/Utility.h"
 
-// gcc appears to use __ARM_PCS_VFP to denote that the target is a hard-float target.
+// Gcc appears to use __ARM_PCS_VFP to denote that the target is a hard-float
+// target.
 #if defined(__ARM_PCS_VFP)
 #define JS_CODEGEN_ARM_HARDFP
 #endif
 
 namespace js {
 namespace jit {
 
 // In bytes: slots needed for potential memory->memory move spills.
@@ -31,21 +32,21 @@ static const uint32_t ION_FRAME_SLACK_SI
 static const int32_t NUNBOX32_TYPE_OFFSET    = 4;
 static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
 
 static const uint32_t ShadowStackSpace = 0;
 ////
 // These offsets are related to bailouts.
 ////
 
-// Size of each bailout table entry. On arm, this is presently
-// a single call (which is wrong!). the call clobbers lr.
-// For now, I've dealt with this by ensuring that we never allocate to lr.
-// it should probably be 8 bytes, a mov of an immediate into r12 (not
-// allocated presently, or ever) followed by a branch to the apropriate code.
+// Size of each bailout table entry. On arm, this is presently a single call
+// (which is wrong!). The call clobbers lr.
+// For now, I've dealt with this by ensuring that we never allocate to lr. It
+// should probably be 8 bytes, a mov of an immediate into r12 (not allocated
+// presently, or ever) followed by a branch to the apropriate code.
 static const uint32_t BAILOUT_TABLE_ENTRY_SIZE    = 4;
 
 class Registers
 {
   public:
     enum RegisterID {
         r0 = 0,
         r1,
@@ -132,17 +133,17 @@ class Registers
     // Registers returned from a JS -> JS call.
     static const uint32_t JSCallMask =
         (1 << Registers::r2) |
         (1 << Registers::r3);
 
     // Registers returned from a JS -> C call.
     static const uint32_t CallMask =
         (1 << Registers::r0) |
-        (1 << Registers::r1);  // used for double-size returns
+        (1 << Registers::r1);  // Used for double-size returns.
 
     static const uint32_t AllocatableMask = AllMask & ~NonAllocatableMask;
 };
 
 // Smallest integer type that can hold a register bitmask.
 typedef uint16_t PackedRegisterMask;
 
 class FloatRegisters
@@ -222,28 +223,29 @@ class FloatRegisters
 
     // Registers that can be allocated without being saved, generally.
     static const uint32_t TempMask = VolatileMask & ~NonAllocatableMask;
 
     static const uint32_t AllocatableMask = AllMask & ~NonAllocatableMask;
 };
 
 uint32_t GetARMFlags();
-bool hasMOVWT();
-bool hasVFPv3();
-bool hasVFP();
-bool has16DP();
-bool hasIDIV();
+bool HasMOVWT();
+bool HasVFPv3();
+bool HasVFP();
+bool Has16DP();
+bool HasIDIV();
 
-// If the simulator is used then the ABI choice is dynamic.  Otherwise the ABI is static
-// and useHardFpABI is inlined so that unused branches can be optimized away.
+// If the simulator is used then the ABI choice is dynamic. Otherwise the ABI is
+// static and useHardFpABI is inlined so that unused branches can be optimized
+// away.
 #if defined(JS_ARM_SIMULATOR)
-bool useHardFpABI();
+bool UseHardFpABI();
 #else
-static inline bool useHardFpABI()
+static inline bool UseHardFpABI()
 {
 #if defined(JS_CODEGEN_ARM_HARDFP)
     return true;
 #else
     return false;
 #endif
 }
 #endif
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -17,18 +17,18 @@
 #include "jit/arm/MacroAssembler-arm.h"
 #include "jit/JitCompartment.h"
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::CountLeadingZeroes32;
 
-// Note this is used for inter-AsmJS calls and may pass arguments and results
-// in floating point registers even if the system ABI does not.
+// Note this is used for inter-AsmJS calls and may pass arguments and results in
+// floating point registers even if the system ABI does not.
 ABIArgGenerator::ABIArgGenerator() :
     intRegIndex_(0),
     floatRegIndex_(0),
     stackOffset_(0),
     current_()
 {}
 
 ABIArg
@@ -61,18 +61,18 @@ ABIArgGenerator::next(MIRType type)
         MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
     }
 
     return current_;
 }
 const Register ABIArgGenerator::NonArgReturnVolatileReg0 = r4;
 const Register ABIArgGenerator::NonArgReturnVolatileReg1 = r5;
 
-// Encode a standard register when it is being used as src1, the dest, and
-// an extra register. These should never be called with an InvalidReg.
+// Encode a standard register when it is being used as src1, the dest, and an
+// extra register. These should never be called with an InvalidReg.
 uint32_t
 js::jit::RT(Register r)
 {
     JS_ASSERT((r.code() & ~0xf) == 0);
     return r.code() << 12;
 }
 
 uint32_t
@@ -91,18 +91,18 @@ js::jit::RD(Register r)
 
 uint32_t
 js::jit::RM(Register r)
 {
     JS_ASSERT((r.code() & ~0xf) == 0);
     return r.code() << 8;
 }
 
-// Encode a standard register when it is being used as src1, the dest, and
-// an extra register.  For these, an InvalidReg is used to indicate a optional
+// Encode a standard register when it is being used as src1, the dest, and an
+// extra register. For these, an InvalidReg is used to indicate a optional
 // register that has been omitted.
 uint32_t
 js::jit::maybeRT(Register r)
 {
     if (r == InvalidReg)
         return 0;
 
     JS_ASSERT((r.code() & ~0xf) == 0);
@@ -127,235 +127,235 @@ js::jit::maybeRD(Register r)
 
     JS_ASSERT((r.code() & ~0xf) == 0);
     return r.code() << 12;
 }
 
 Register
 js::jit::toRD(Instruction &i)
 {
-    return Register::FromCode((i.encode()>>12) & 0xf);
+    return Register::FromCode((i.encode() >> 12) & 0xf);
 }
 Register
 js::jit::toR(Instruction &i)
 {
     return Register::FromCode(i.encode() & 0xf);
 }
 
 Register
 js::jit::toRM(Instruction &i)
 {
-    return Register::FromCode((i.encode()>>8) & 0xf);
+    return Register::FromCode((i.encode() >> 8) & 0xf);
 }
 
 Register
 js::jit::toRN(Instruction &i)
 {
-    return Register::FromCode((i.encode()>>16) & 0xf);
+    return Register::FromCode((i.encode() >> 16) & 0xf);
 }
 
 uint32_t
 js::jit::VD(VFPRegister vr)
 {
     if (vr.isMissing())
         return 0;
 
-    //bits 15,14,13,12, 22
+    // Bits 15,14,13,12, 22.
     VFPRegister::VFPRegIndexSplit s = vr.encode();
     return s.bit << 22 | s.block << 12;
 }
 uint32_t
 js::jit::VN(VFPRegister vr)
 {
     if (vr.isMissing())
         return 0;
 
-    // bits 19,18,17,16, 7
+    // Bits 19,18,17,16, 7.
     VFPRegister::VFPRegIndexSplit s = vr.encode();
     return s.bit << 7 | s.block << 16;
 }
 uint32_t
 js::jit::VM(VFPRegister vr)
 {
     if (vr.isMissing())
         return 0;
 
-    // bits 5, 3,2,1,0
+    // Bits 5, 3,2,1,0.
     VFPRegister::VFPRegIndexSplit s = vr.encode();
     return s.bit << 5 | s.block;
 }
 
 VFPRegister::VFPRegIndexSplit
 jit::VFPRegister::encode()
 {
     JS_ASSERT(!_isInvalid);
 
     switch (kind) {
       case Double:
-        return VFPRegIndexSplit(_code &0xf , _code >> 4);
+        return VFPRegIndexSplit(code_ & 0xf, code_ >> 4);
       case Single:
-        return VFPRegIndexSplit(_code >> 1, _code & 1);
+        return VFPRegIndexSplit(code_ >> 1, code_ & 1);
       default:
-        // vfp register treated as an integer, NOT a gpr
-        return VFPRegIndexSplit(_code >> 1, _code & 1);
+        // VFP register treated as an integer, NOT a gpr.
+        return VFPRegIndexSplit(code_ >> 1, code_ & 1);
     }
 }
 
 VFPRegister js::jit::NoVFPRegister(true);
 
 bool
-InstDTR::isTHIS(const Instruction &i)
+InstDTR::IsTHIS(const Instruction &i)
 {
     return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
 }
 
 InstDTR *
-InstDTR::asTHIS(const Instruction &i)
+InstDTR::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
+    if (IsTHIS(i))
         return (InstDTR*)&i;
     return nullptr;
 }
 
 bool
-InstLDR::isTHIS(const Instruction &i)
+InstLDR::IsTHIS(const Instruction &i)
 {
     return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
 }
 
 InstLDR *
-InstLDR::asTHIS(const Instruction &i)
+InstLDR::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
+    if (IsTHIS(i))
         return (InstLDR*)&i;
     return nullptr;
 }
 
 InstNOP *
-InstNOP::asTHIS(Instruction &i)
+InstNOP::AsTHIS(Instruction &i)
 {
-    if (isTHIS(i))
-        return (InstNOP*) (&i);
+    if (IsTHIS(i))
+        return (InstNOP*)&i;
     return nullptr;
 }
 
 bool
-InstNOP::isTHIS(const Instruction &i)
+InstNOP::IsTHIS(const Instruction &i)
 {
     return (i.encode() & 0x0fffffff) == NopInst;
 }
 
 bool
-InstBranchReg::isTHIS(const Instruction &i)
+InstBranchReg::IsTHIS(const Instruction &i)
 {
-    return InstBXReg::isTHIS(i) || InstBLXReg::isTHIS(i);
+    return InstBXReg::IsTHIS(i) || InstBLXReg::IsTHIS(i);
 }
 
 InstBranchReg *
-InstBranchReg::asTHIS(const Instruction &i)
+InstBranchReg::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
+    if (IsTHIS(i))
         return (InstBranchReg*)&i;
     return nullptr;
 }
 void
 InstBranchReg::extractDest(Register *dest)
 {
     *dest = toR(*this);
 }
 bool
 InstBranchReg::checkDest(Register dest)
 {
     return dest == toR(*this);
 }
 
 bool
-InstBranchImm::isTHIS(const Instruction &i)
+InstBranchImm::IsTHIS(const Instruction &i)
 {
-    return InstBImm::isTHIS(i) || InstBLImm::isTHIS(i);
+    return InstBImm::IsTHIS(i) || InstBLImm::IsTHIS(i);
 }
 
 InstBranchImm *
-InstBranchImm::asTHIS(const Instruction &i)
+InstBranchImm::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
+    if (IsTHIS(i))
         return (InstBranchImm*)&i;
     return nullptr;
 }
 
 void
 InstBranchImm::extractImm(BOffImm *dest)
 {
     *dest = BOffImm(*this);
 }
 
 bool
-InstBXReg::isTHIS(const Instruction &i)
+InstBXReg::IsTHIS(const Instruction &i)
 {
     return (i.encode() & IsBRegMask) == IsBX;
 }
 
 InstBXReg *
-InstBXReg::asTHIS(const Instruction &i)
+InstBXReg::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
+    if (IsTHIS(i))
         return (InstBXReg*)&i;
     return nullptr;
 }
 
 bool
-InstBLXReg::isTHIS(const Instruction &i)
+InstBLXReg::IsTHIS(const Instruction &i)
 {
     return (i.encode() & IsBRegMask) == IsBLX;
 
 }
 InstBLXReg *
-InstBLXReg::asTHIS(const Instruction &i)
+InstBLXReg::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
+    if (IsTHIS(i))
         return (InstBLXReg*)&i;
     return nullptr;
 }
 
 bool
-InstBImm::isTHIS(const Instruction &i)
+InstBImm::IsTHIS(const Instruction &i)
 {
     return (i.encode () & IsBImmMask) == IsB;
 }
 InstBImm *
-InstBImm::asTHIS(const Instruction &i)
+InstBImm::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
+    if (IsTHIS(i))
         return (InstBImm*)&i;
     return nullptr;
 }
 
 bool
-InstBLImm::isTHIS(const Instruction &i)
+InstBLImm::IsTHIS(const Instruction &i)
 {
     return (i.encode () & IsBImmMask) == IsBL;
 
 }
 InstBLImm *
-InstBLImm::asTHIS(Instruction &i)
+InstBLImm::AsTHIS(Instruction &i)
 {
-    if (isTHIS(i))
+    if (IsTHIS(i))
         return (InstBLImm*)&i;
     return nullptr;
 }
 
 bool
-InstMovWT::isTHIS(Instruction &i)
+InstMovWT::IsTHIS(Instruction &i)
 {
-    return  InstMovW::isTHIS(i) || InstMovT::isTHIS(i);
+    return  InstMovW::IsTHIS(i) || InstMovT::IsTHIS(i);
 }
 InstMovWT *
-InstMovWT::asTHIS(Instruction &i)
+InstMovWT::AsTHIS(Instruction &i)
 {
-    if (isTHIS(i))
+    if (IsTHIS(i))
         return (InstMovWT*)&i;
     return nullptr;
 }
 
 void
 InstMovWT::extractImm(Imm16 *imm)
 {
     *imm = Imm16(*this);
@@ -373,51 +373,51 @@ InstMovWT::extractDest(Register *dest)
 }
 bool
 InstMovWT::checkDest(Register dest)
 {
     return dest == toRD(*this);
 }
 
 bool
-InstMovW::isTHIS(const Instruction &i)
+InstMovW::IsTHIS(const Instruction &i)
 {
     return (i.encode() & IsWTMask) == IsW;
 }
 
 InstMovW *
-InstMovW::asTHIS(const Instruction &i)
+InstMovW::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
-        return (InstMovW*) (&i);
+    if (IsTHIS(i))
+        return (InstMovW*)&i;
     return nullptr;
 }
 InstMovT *
-InstMovT::asTHIS(const Instruction &i)
+InstMovT::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
-        return (InstMovT*) (&i);
+    if (IsTHIS(i))
+        return (InstMovT*)&i;
     return nullptr;
 }
 
 bool
-InstMovT::isTHIS(const Instruction &i)
+InstMovT::IsTHIS(const Instruction &i)
 {
     return (i.encode() & IsWTMask) == IsT;
 }
 
 InstALU *
-InstALU::asTHIS(const Instruction &i)
+InstALU::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
-        return (InstALU*) (&i);
+    if (IsTHIS(i))
+        return (InstALU*)&i;
     return nullptr;
 }
 bool
-InstALU::isTHIS(const Instruction &i)
+InstALU::IsTHIS(const Instruction &i)
 {
     return (i.encode() & ALUMask) == 0;
 }
 void
 InstALU::extractOp(ALUOp *ret)
 {
     *ret = ALUOp(encode() & (0xf << 21));
 }
@@ -450,41 +450,41 @@ InstALU::checkOp1(Register rn)
 }
 Operand2
 InstALU::extractOp2()
 {
     return Operand2(encode());
 }
 
 InstCMP *
-InstCMP::asTHIS(const Instruction &i)
+InstCMP::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
-        return (InstCMP*) (&i);
+    if (IsTHIS(i))
+        return (InstCMP*)&i;
     return nullptr;
 }
 
 bool
-InstCMP::isTHIS(const Instruction &i)
+InstCMP::IsTHIS(const Instruction &i)
 {
-    return InstALU::isTHIS(i) && InstALU::asTHIS(i)->checkDest(r0) && InstALU::asTHIS(i)->checkOp(op_cmp);
+    return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkDest(r0) && InstALU::AsTHIS(i)->checkOp(OpCmp);
 }
 
 InstMOV *
-InstMOV::asTHIS(const Instruction &i)
+InstMOV::AsTHIS(const Instruction &i)
 {
-    if (isTHIS(i))
-        return (InstMOV*) (&i);
+    if (IsTHIS(i))
+        return (InstMOV*)&i;
     return nullptr;
 }
 
 bool
-InstMOV::isTHIS(const Instruction &i)
+InstMOV::IsTHIS(const Instruction &i)
 {
-    return InstALU::isTHIS(i) && InstALU::asTHIS(i)->checkOp1(r0) && InstALU::asTHIS(i)->checkOp(op_mov);
+    return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkOp1(r0) && InstALU::AsTHIS(i)->checkOp(OpMov);
 }
 
 Op2Reg
 Operand2::toOp2Reg() {
     return *(Op2Reg*)this;
 }
 O2RegImmShift
 Op2Reg::toO2RegImmShift() {
@@ -498,45 +498,47 @@ Op2Reg::toO2RegRegShift() {
 Imm16::Imm16(Instruction &inst)
   : lower(inst.encode() & 0xfff),
     upper(inst.encode() >> 16),
     invalid(0xfff)
 { }
 
 Imm16::Imm16(uint32_t imm)
   : lower(imm & 0xfff), pad(0),
-    upper((imm>>12) & 0xf),
+    upper((imm >> 12) & 0xf),
     invalid(0)
 {
     JS_ASSERT(decode() == imm);
 }
 
 Imm16::Imm16()
   : invalid(0xfff)
 { }
 
 void
 jit::PatchJump(CodeLocationJump &jump_, CodeLocationLabel label)
 {
-    // We need to determine if this jump can fit into the standard 24+2 bit address
-    // or if we need a larger branch (or just need to use our pool entry)
+    // We need to determine if this jump can fit into the standard 24+2 bit
+    // address or if we need a larger branch (or just need to use our pool
+    // entry).
     Instruction *jump = (Instruction*)jump_.raw();
     // jumpWithPatch() returns the offset of the jump and never a pool or nop.
     Assembler::Condition c;
     jump->extractCond(&c);
     JS_ASSERT(jump->is<InstBranchImm>() || jump->is<InstLDR>());
 
     int jumpOffset = label.raw() - jump_.raw();
-    if (BOffImm::isInRange(jumpOffset)) {
-        // This instruction started off as a branch, and will remain one
-        Assembler::retargetNearBranch(jump, jumpOffset, c);
+    if (BOffImm::IsInRange(jumpOffset)) {
+        // This instruction started off as a branch, and will remain one.
+        Assembler::RetargetNearBranch(jump, jumpOffset, c);
     } else {
-        // This instruction started off as a branch, but now needs to be demoted to an ldr.
+        // This instruction started off as a branch, but now needs to be demoted
+        // to an ldr.
         uint8_t **slot = reinterpret_cast<uint8_t**>(jump_.jumpTableEntry());
-        Assembler::retargetFarBranch(jump, slot, label.raw(), c);
+        Assembler::RetargetFarBranch(jump, slot, label.raw(), c);
     }
 }
 
 void
 Assembler::finish()
 {
     flush();
     JS_ASSERT(!isFinished);
@@ -598,17 +600,17 @@ BufferOffset
 Assembler::actualOffset(BufferOffset off_) const
 {
     return BufferOffset(off_.getOffset() + m_buffer.poolSizeBefore(off_.getOffset()));
 }
 
 class RelocationIterator
 {
     CompactBufferReader reader_;
-    // offset in bytes
+    // Offset in bytes.
     uint32_t offset_;
 
   public:
     RelocationIterator(CompactBufferReader &reader)
       : reader_(reader)
     { }
 
     bool read() {
@@ -620,43 +622,43 @@ class RelocationIterator
 
     uint32_t offset() const {
         return offset_;
     }
 };
 
 template<class Iter>
 const uint32_t *
-Assembler::getCF32Target(Iter *iter)
+Assembler::GetCF32Target(Iter *iter)
 {
     Instruction *inst1 = iter->cur();
     Instruction *inst2 = iter->next();
     Instruction *inst3 = iter->next();
     Instruction *inst4 = iter->next();
 
     if (inst1->is<InstBranchImm>()) {
-        // see if we have a simple case, b #offset
+        // See if we have a simple case, b #offset.
         BOffImm imm;
         InstBranchImm *jumpB = inst1->as<InstBranchImm>();
         jumpB->extractImm(&imm);
         return imm.getDest(inst1)->raw();
     }
 
     if (inst1->is<InstMovW>() && inst2->is<InstMovT>() &&
         (inst3->is<InstNOP>() || inst3->is<InstBranchReg>() || inst4->is<InstBranchReg>()))
     {
-        // see if we have the complex case,
-        // movw r_temp, #imm1
-        // movt r_temp, #imm2
-        // bx r_temp
+        // See if we have the complex case:
+        //  movw r_temp, #imm1
+        //  movt r_temp, #imm2
+        //  bx r_temp
         // OR
-        // movw r_temp, #imm1
-        // movt r_temp, #imm2
-        // str pc, [sp]
-        // bx r_temp
+        //  movw r_temp, #imm1
+        //  movt r_temp, #imm2
+        //  str pc, [sp]
+        //  bx r_temp
 
         Imm16 targ_bot;
         Imm16 targ_top;
         Register temp;
 
         // Extract both the temp register and the bottom immediate.
         InstMovW *bottom = inst1->as<InstMovW>();
         bottom->extractImm(&targ_bot);
@@ -666,66 +668,66 @@ Assembler::getCF32Target(Iter *iter)
         InstMovT *top = inst2->as<InstMovT>();
         top->extractImm(&targ_top);
 
         // Make sure they are being loaded into the same register.
         JS_ASSERT(top->checkDest(temp));
 
         // Make sure we're branching to the same register.
 #ifdef DEBUG
-        // A toggled call sometimes has a NOP instead of a branch for the third instruction.
-        // No way to assert that it's valid in that situation.
+        // A toggled call sometimes has a NOP instead of a branch for the third
+        // instruction. No way to assert that it's valid in that situation.
         if (!inst3->is<InstNOP>()) {
             InstBranchReg *realBranch = inst3->is<InstBranchReg>() ? inst3->as<InstBranchReg>()
                                                                    : inst4->as<InstBranchReg>();
             JS_ASSERT(realBranch->checkDest(temp));
         }
 #endif
 
         uint32_t *dest = (uint32_t*) (targ_bot.decode() | (targ_top.decode() << 16));
         return dest;
     }
 
     if (inst1->is<InstLDR>()) {
         InstLDR *load = inst1->as<InstLDR>();
         uint32_t inst = load->encode();
-        // get the address of the instruction as a raw pointer
+        // Get the address of the instruction as a raw pointer.
         char *dataInst = reinterpret_cast<char*>(load);
         IsUp_ iu = IsUp_(inst & IsUp);
         int32_t offset = inst & 0xfff;
         if (iu != IsUp) {
             offset = - offset;
         }
         uint32_t **ptr = (uint32_t **)&dataInst[offset + 8];
         return *ptr;
 
     }
 
     MOZ_ASSUME_UNREACHABLE("unsupported branch relocation");
 }
 
 uintptr_t
-Assembler::getPointer(uint8_t *instPtr)
+Assembler::GetPointer(uint8_t *instPtr)
 {
     InstructionIterator iter((Instruction*)instPtr);
-    uintptr_t ret = (uintptr_t)getPtr32Target(&iter, nullptr, nullptr);
+    uintptr_t ret = (uintptr_t)GetPtr32Target(&iter, nullptr, nullptr);
     return ret;
 }
 
 template<class Iter>
 const uint32_t *
-Assembler::getPtr32Target(Iter *start, Register *dest, RelocStyle *style)
+Assembler::GetPtr32Target(Iter *start, Register *dest, RelocStyle *style)
 {
     Instruction *load1 = start->cur();
     Instruction *load2 = start->next();
 
     if (load1->is<InstMovW>() && load2->is<InstMovT>()) {
-        // see if we have the complex case,
-        // movw r_temp, #imm1
-        // movt r_temp, #imm2
+        // See if we have the complex case:
+        //  movw r_temp, #imm1
+        //  movt r_temp, #imm2
 
         Imm16 targ_bot;
         Imm16 targ_top;
         Register temp;
 
         // Extract both the temp register and the bottom immediate.
         InstMovW *bottom = load1->as<InstMovW>();
         bottom->extractImm(&targ_bot);
@@ -744,36 +746,37 @@ Assembler::getPtr32Target(Iter *start, R
             *style = L_MOVWT;
 
         uint32_t *value = (uint32_t*) (targ_bot.decode() | (targ_top.decode() << 16));
         return value;
     }
     if (load1->is<InstLDR>()) {
         InstLDR *load = load1->as<InstLDR>();
         uint32_t inst = load->encode();
-        // get the address of the instruction as a raw pointer
+        // Get the address of the instruction as a raw pointer.
         char *dataInst = reinterpret_cast<char*>(load);
         IsUp_ iu = IsUp_(inst & IsUp);
         int32_t offset = inst & 0xfff;
         if (iu == IsDown)
             offset = - offset;
         if (dest)
             *dest = toRD(*load);
         if (style)
             *style = L_LDR;
         uint32_t **ptr = (uint32_t **)&dataInst[offset + 8];
         return *ptr;
     }
+
     MOZ_ASSUME_UNREACHABLE("unsupported relocation");
 }
 
 static JitCode *
 CodeFromJump(InstructionIterator *jump)
 {
-    uint8_t *target = (uint8_t *)Assembler::getCF32Target(jump);
+    uint8_t *target = (uint8_t *)Assembler::GetCF32Target(jump);
     return JitCode::FromExecutable(target);
 }
 
 void
 Assembler::TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader)
 {
     RelocationIterator iter(reader);
     while (iter.read()) {
@@ -783,31 +786,31 @@ Assembler::TraceJumpRelocations(JSTracer
     }
 }
 
 static void
 TraceDataRelocations(JSTracer *trc, uint8_t *buffer, CompactBufferReader &reader)
 {
     while (reader.more()) {
         size_t offset = reader.readUnsigned();
-        InstructionIterator iter((Instruction*)(buffer+offset));
-        void *ptr = const_cast<uint32_t *>(Assembler::getPtr32Target(&iter));
+        InstructionIterator iter((Instruction*)(buffer + offset));
+        void *ptr = const_cast<uint32_t *>(Assembler::GetPtr32Target(&iter));
         // No barrier needed since these are constants.
         gc::MarkGCThingUnbarriered(trc, reinterpret_cast<void **>(&ptr), "ion-masm-ptr");
     }
 
 }
 static void
 TraceDataRelocations(JSTracer *trc, ARMBuffer *buffer,
-                     js::Vector<BufferOffset, 0, SystemAllocPolicy> *locs)
+                     Vector<BufferOffset, 0, SystemAllocPolicy> *locs)
 {
     for (unsigned int idx = 0; idx < locs->length(); idx++) {
         BufferOffset bo = (*locs)[idx];
         ARMBuffer::AssemblerBufferInstIterator iter(bo, buffer);
-        void *ptr = const_cast<uint32_t *>(Assembler::getPtr32Target(&iter));
+        void *ptr = const_cast<uint32_t *>(Assembler::GetPtr32Target(&iter));
 
         // No barrier needed since these are constants.
         gc::MarkGCThingUnbarriered(trc, reinterpret_cast<void **>(&ptr), "ion-masm-ptr");
     }
 
 }
 void
 Assembler::TraceDataRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader)
@@ -861,18 +864,18 @@ Assembler::processCodeLabels(uint8_t *ra
     }
 }
 
 void
 Assembler::writeCodePointer(AbsoluteLabel *absoluteLabel) {
     JS_ASSERT(!absoluteLabel->bound());
     BufferOffset off = writeInst(LabelBase::INVALID_OFFSET);
 
-    // x86/x64 makes general use of AbsoluteLabel and weaves a linked list of
-    // uses of an AbsoluteLabel through the assembly. ARM only uses labels
+    // The x86/x64 makes general use of AbsoluteLabel and weaves a linked list
+    // of uses of an AbsoluteLabel through the assembly. ARM only uses labels
     // for the case statements of switch jump tables. Thus, for simplicity, we
     // simply treat the AbsoluteLabel as a label and bind it to the offset of
     // the jump table entry that needs to be patched.
     LabelBase *label = absoluteLabel;
     label->bind(off.getOffset());
 }
 
 void
@@ -886,214 +889,213 @@ Assembler::Bind(uint8_t *rawCode, Absolu
 Assembler::Condition
 Assembler::InvertCondition(Condition cond)
 {
     const uint32_t ConditionInversionBit = 0x10000000;
     return Condition(ConditionInversionBit ^ cond);
 }
 
 Imm8::TwoImm8mData
-Imm8::encodeTwoImms(uint32_t imm)
+Imm8::EncodeTwoImms(uint32_t imm)
 {
-    // In the ideal case, we are looking for a number that (in binary) looks like:
-    // 0b((00)*)n_1((00)*)n_2((00)*)
-    //    left  n1   mid  n2
-    // where both n_1 and n_2 fit into 8 bits.
-    // since this is being done with rotates, we also need to handle the case
+    // In the ideal case, we are looking for a number that (in binary) looks
+    // like:
+    //   0b((00)*)n_1((00)*)n_2((00)*)
+    //      left  n1   mid  n2
+    //   where both n_1 and n_2 fit into 8 bits.
+    // Since this is being done with rotates, we also need to handle the case
     // that one of these numbers is in fact split between the left and right
     // sides, in which case the constant will look like:
-    // 0bn_1a((00)*)n_2((00)*)n_1b
-    //   n1a  mid  n2   rgh    n1b
-    // also remember, values are rotated by multiples of two, and left,
-    // mid or right can have length zero
+    //   0bn_1a((00)*)n_2((00)*)n_1b
+    //     n1a  mid  n2   rgh    n1b
+    // Also remember, values are rotated by multiples of two, and left, mid or
+    // right can have length zero.
     uint32_t imm1, imm2;
     int left = CountLeadingZeroes32(imm) & 0x1E;
     uint32_t no_n1 = imm & ~(0xff << (24 - left));
 
-    // not technically needed: this case only happens if we can encode
-    // as a single imm8m.  There is a perfectly reasonable encoding in this
-    // case, but we shouldn't encourage people to do things like this.
+    // Not technically needed: this case only happens if we can encode as a
+    // single imm8m. There is a perfectly reasonable encoding in this case, but
+    // we shouldn't encourage people to do things like this.
     if (no_n1 == 0)
         return TwoImm8mData();
 
     int mid = CountLeadingZeroes32(no_n1) & 0x1E;
     uint32_t no_n2 = no_n1 & ~((0xff << ((24 - mid) & 0x1f)) | 0xff >> ((8 + mid) & 0x1f));
 
     if (no_n2 == 0) {
-        // we hit the easy case, no wraparound.
-        // note: a single constant *may* look like this.
+        // We hit the easy case, no wraparound.
+        // Note: a single constant *may* look like this.
         int imm1shift = left + 8;
         int imm2shift = mid + 8;
         imm1 = (imm >> (32 - imm1shift)) & 0xff;
         if (imm2shift >= 32) {
             imm2shift = 0;
-            // this assert does not always hold
-            //assert((imm & 0xff) == no_n1);
-            // in fact, this would lead to some incredibly subtle bugs.
+            // This assert does not always hold, in fact, this would lead to
+            // some incredibly subtle bugs.
+            // assert((imm & 0xff) == no_n1);
             imm2 = no_n1;
         } else {
             imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
             JS_ASSERT( ((no_n1 >> (32 - imm2shift)) | (no_n1 << imm2shift)) ==
                        imm2);
         }
         JS_ASSERT((imm1shift & 0x1) == 0);
         JS_ASSERT((imm2shift & 0x1) == 0);
         return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
                             datastore::Imm8mData(imm2, imm2shift >> 1));
     }
 
-    // either it wraps, or it does not fit.
-    // if we initially chopped off more than 8 bits, then it won't fit.
+    // Either it wraps, or it does not fit. If we initially chopped off more
+    // than 8 bits, then it won't fit.
     if (left >= 8)
         return TwoImm8mData();
 
     int right = 32 - (CountLeadingZeroes32(no_n2) & 30);
-    // all remaining set bits *must* fit into the lower 8 bits
-    // the right == 8 case should be handled by the previous case.
+    // All remaining set bits *must* fit into the lower 8 bits.
+    // The right == 8 case should be handled by the previous case.
     if (right > 8)
         return TwoImm8mData();
 
-    // make sure the initial bits that we removed for no_n1
-    // fit into the 8-(32-right) leftmost bits
-    if (((imm & (0xff << (24 - left))) << (8-right)) != 0) {
+    // Make sure the initial bits that we removed for no_n1 fit into the
+    // 8-(32-right) leftmost bits.
+    if (((imm & (0xff << (24 - left))) << (8 - right)) != 0) {
         // BUT we may have removed more bits than we needed to for no_n1
-        // 0x04104001 e.g. we can encode 0x104 with a single op, then
-        // 0x04000001 with a second, but we try to encode 0x0410000
-        // and find that we need a second op for 0x4000, and 0x1 cannot
-        // be included in the encoding of 0x04100000
-        no_n1 = imm & ~((0xff >> (8-right)) | (0xff << (24 + right)));
+        // 0x04104001 e.g. we can encode 0x104 with a single op, then 0x04000001
+        // with a second, but we try to encode 0x0410000 and find that we need a
+        // second op for 0x4000, and 0x1 cannot be included in the encoding of
+        // 0x04100000.
+        no_n1 = imm & ~((0xff >> (8 - right)) | (0xff << (24 + right)));
         mid = CountLeadingZeroes32(no_n1) & 30;
-        no_n2 =
-            no_n1  & ~((0xff << ((24 - mid)&31)) | 0xff >> ((8 + mid)&31));
+        no_n2 = no_n1  & ~((0xff << ((24 - mid)&31)) | 0xff >> ((8 + mid)&31));
         if (no_n2 != 0)
             return TwoImm8mData();
     }
 
-    // now assemble all of this information into a two coherent constants
-    // it is a rotate right from the lower 8 bits.
+    // Now assemble all of this information into two coherent constants. It is
+    // a rotate right from the lower 8 bits.
     int imm1shift = 8 - right;
     imm1 = 0xff & ((imm << imm1shift) | (imm >> (32 - imm1shift)));
-    JS_ASSERT ((imm1shift&~0x1e) == 0);
+    JS_ASSERT ((imm1shift & ~0x1e) == 0);
     // left + 8 + mid is the position of the leftmost bit of n_2.
-    // we needed to rotate 0x000000ab right by 8 in order to get
-    // 0xab000000, then shift again by the leftmost bit in order to
-    // get the constant that we care about.
+    // We needed to rotate 0x000000ab right by 8 in order to get 0xab000000,
+    // then shift again by the leftmost bit in order to get the constant that we
+    // care about.
     int imm2shift =  mid + 8;
     imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
     JS_ASSERT((imm1shift & 0x1) == 0);
     JS_ASSERT((imm2shift & 0x1) == 0);
     return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
                         datastore::Imm8mData(imm2, imm2shift >> 1));
 }
 
 ALUOp
 jit::ALUNeg(ALUOp op, Register dest, Imm32 *imm, Register *negDest)
 {
-    // find an alternate ALUOp to get the job done, and use a different imm.
+    // Find an alternate ALUOp to get the job done, and use a different imm.
     *negDest = dest;
     switch (op) {
-      case op_mov:
+      case OpMov:
         *imm = Imm32(~imm->value);
-        return op_mvn;
-      case op_mvn:
+        return OpMvn;
+      case OpMvn:
         *imm = Imm32(~imm->value);
-        return op_mov;
-      case op_and:
+        return OpMov;
+      case OpAnd:
         *imm = Imm32(~imm->value);
-        return op_bic;
-      case op_bic:
+        return OpBic;
+      case OpBic:
         *imm = Imm32(~imm->value);
-        return op_and;
-      case op_add:
+        return OpAnd;
+      case OpAdd:
         *imm = Imm32(-imm->value);
-        return op_sub;
-      case op_sub:
+        return OpSub;
+      case OpSub:
         *imm = Imm32(-imm->value);
-        return op_add;
-      case op_cmp:
+        return OpAdd;
+      case OpCmp:
         *imm = Imm32(-imm->value);
-        return op_cmn;
-      case op_cmn:
+        return OpCmn;
+      case OpCmn:
         *imm = Imm32(-imm->value);
-        return op_cmp;
-      case op_tst:
+        return OpCmp;
+      case OpTst:
         JS_ASSERT(dest == InvalidReg);
         *imm = Imm32(~imm->value);
         *negDest = ScratchRegister;
-        return op_bic;
+        return OpBic;
         // orr has orn on thumb2 only.
       default:
-        return op_invalid;
+        return OpInvalid;
     }
 }
 
 bool
 jit::can_dbl(ALUOp op)
 {
-    // some instructions can't be processed as two separate instructions
-    // such as and, and possibly add (when we're setting ccodes).
-    // there is also some hilarity with *reading* condition codes.
-    // for example, adc dest, src1, 0xfff; (add with carry) can be split up
-    // into adc dest, src1, 0xf00; add dest, dest, 0xff, since "reading" the
-    // condition code increments the result by one conditionally, that only needs
-    // to be done on one of the two instructions.
+    // Some instructions can't be processed as two separate instructions such as
+    // and, and possibly add (when we're setting ccodes). There is also some
+    // hilarity with *reading* condition codes. For example, adc dest, src1,
+    // 0xfff; (add with carry) can be split up into adc dest, src1, 0xf00; add
+    // dest, dest, 0xff, since "reading" the condition code increments the
+    // result by one conditionally, that only needs to be done on one of the two
+    // instructions.
     switch (op) {
-      case op_bic:
-      case op_add:
-      case op_sub:
-      case op_eor:
-      case op_orr:
+      case OpBic:
+      case OpAdd:
+      case OpSub:
+      case OpEor:
+      case OpOrr:
         return true;
       default:
         return false;
     }
 }
 
 bool
 jit::condsAreSafe(ALUOp op) {
-    // Even when we are setting condition codes, sometimes we can
-    // get away with splitting an operation into two.
-    // for example, if our immediate is 0x00ff00ff, and the operation is eors
-    // we can split this in half, since x ^ 0x00ff0000 ^ 0x000000ff should
-    // set all of its condition codes exactly the same as x ^ 0x00ff00ff.
-    // However, if the operation were adds,
-    // we cannot split this in half.  If the source on the add is
-    // 0xfff00ff0, the result sholud be 0xef10ef, but do we set the overflow bit
-    // or not?  Depending on which half is performed first (0x00ff0000
-    // or 0x000000ff) the V bit will be set differently, and *not* updating
-    // the V bit would be wrong.  Theoretically, the following should work
-    // adds r0, r1, 0x00ff0000;
-    // addsvs r0, r1, 0x000000ff;
-    // addvc r0, r1, 0x000000ff;
-    // but this is 3 instructions, and at that point, we might as well use
+    // Even when we are setting condition codes, sometimes we can get away with
+    // splitting an operation into two. For example, if our immediate is
+    // 0x00ff00ff, and the operation is eors we can split this in half, since x
+    // ^ 0x00ff0000 ^ 0x000000ff should set all of its condition codes exactly
+    // the same as x ^ 0x00ff00ff. However, if the operation were adds, we
+    // cannot split this in half. If the source on the add is 0xfff00ff0, the
+    // result should be 0xef10ef, but do we set the overflow bit or not?
+    // Depending on which half is performed first (0x00ff0000 or 0x000000ff) the
+    // V bit will be set differently, and *not* updating the V bit would be
+    // wrong. Theoretically, the following should work:
+    //  adds r0, r1, 0x00ff0000;
+    //  addsvs r0, r1, 0x000000ff;
+    //  addvc r0, r1, 0x000000ff;
+    // But this is 3 instructions, and at that point, we might as well use
     // something else.
     switch(op) {
-      case op_bic:
-      case op_orr:
-      case op_eor:
+      case OpBic:
+      case OpOrr:
+      case OpEor:
         return true;
       default:
         return false;
     }
 }
 
 ALUOp
 jit::getDestVariant(ALUOp op)
 {
-    // all of the compare operations are dest-less variants of a standard
-    // operation.  Given the dest-less variant, return the dest-ful variant.
+    // All of the compare operations are dest-less variants of a standard
+    // operation. Given the dest-less variant, return the dest-ful variant.
     switch (op) {
-      case op_cmp:
-        return op_sub;
-      case op_cmn:
-        return op_add;
-      case op_tst:
-        return op_and;
-      case op_teq:
-        return op_eor;
+      case OpCmp:
+        return OpSub;
+      case OpCmn:
+        return OpAdd;
+      case OpTst:
+        return OpAnd;
+      case OpTeq:
+        return OpEor;
       default:
         return op;
     }
 }
 
 O2RegImmShift
 jit::O2Reg(Register r) {
     return O2RegImmShift(r, LSL, 0);
@@ -1155,17 +1157,17 @@ jit::ror(Register r, Register amt)
 O2RegRegShift
 jit::asr (Register r, Register amt)
 {
     return O2RegRegShift(r, ASR, amt);
 }
 
 static js::jit::DoubleEncoder doubleEncoder;
 
-/* static */ const js::jit::VFPImm js::jit::VFPImm::one(0x3FF00000);
+/* static */ const js::jit::VFPImm js::jit::VFPImm::One(0x3FF00000);
 
 js::jit::VFPImm::VFPImm(uint32_t top)
 {
     data = -1;
     datastore::Imm8VFPImmData tmp;
     if (doubleEncoder.lookup(top, &tmp))
         data = tmp.encode();
 }
@@ -1173,82 +1175,82 @@ js::jit::VFPImm::VFPImm(uint32_t top)
 BOffImm::BOffImm(Instruction &inst)
   : data(inst.encode() & 0x00ffffff)
 {
 }
 
 Instruction *
 BOffImm::getDest(Instruction *src)
 {
-    // TODO: It is probably worthwhile to verify that src is actually a branch
+    // TODO: It is probably worthwhile to verify that src is actually a branch.
     // NOTE: This does not explicitly shift the offset of the destination left by 2,
     // since it is indexing into an array of instruction sized objects.
-    return &src[(((int32_t)data<<8)>>8) + 2];
+    return &src[(((int32_t)data << 8) >> 8) + 2];
 }
 
-//VFPRegister implementation
+// VFPRegister implementation
 VFPRegister
 VFPRegister::doubleOverlay() const
 {
     JS_ASSERT(!_isInvalid);
     if (kind != Double) {
-        JS_ASSERT(_code % 2 == 0);
-        return VFPRegister(_code >> 1, Double);
+        JS_ASSERT(code_ % 2 == 0);
+        return VFPRegister(code_ >> 1, Double);
     }
     return *this;
 }
 VFPRegister
 VFPRegister::singleOverlay() const
 {
     JS_ASSERT(!_isInvalid);
     if (kind == Double) {
-        // There are no corresponding float registers for d16-d31
-        JS_ASSERT(_code < 16);
-        return VFPRegister(_code << 1, Single);
+        // There are no corresponding float registers for d16-d31.
+        JS_ASSERT(code_ < 16);
+        return VFPRegister(code_ << 1, Single);
     }
 
-    JS_ASSERT(_code % 2 == 0);
-    return VFPRegister(_code, Single);
+    JS_ASSERT(code_ % 2 == 0);
+    return VFPRegister(code_, Single);
 }
 
 VFPRegister
 VFPRegister::sintOverlay() const
 {
     JS_ASSERT(!_isInvalid);
     if (kind == Double) {
-        // There are no corresponding float registers for d16-d31
-        JS_ASSERT(_code < 16);
-        return VFPRegister(_code << 1, Int);
+        // There are no corresponding float registers for d16-d31.
+        JS_ASSERT(code_ < 16);
+        return VFPRegister(code_ << 1, Int);
     }
 
-    JS_ASSERT(_code % 2 == 0);
-    return VFPRegister(_code, Int);
+    JS_ASSERT(code_ % 2 == 0);
+    return VFPRegister(code_, Int);
 }
 VFPRegister
 VFPRegister::uintOverlay() const
 {
     JS_ASSERT(!_isInvalid);
     if (kind == Double) {
-        // There are no corresponding float registers for d16-d31
-        JS_ASSERT(_code < 16);
-        return VFPRegister(_code << 1, UInt);
+        // There are no corresponding float registers for d16-d31.
+        JS_ASSERT(code_ < 16);
+        return VFPRegister(code_ << 1, UInt);
     }
 
-    JS_ASSERT(_code % 2 == 0);
-    return VFPRegister(_code, UInt);
+    JS_ASSERT(code_ % 2 == 0);
+    return VFPRegister(code_, UInt);
 }
 
 bool
-VFPRegister::isInvalid()
+VFPRegister::isInvalid() const
 {
     return _isInvalid;
 }
 
 bool
-VFPRegister::isMissing()
+VFPRegister::isMissing() const
 {
     JS_ASSERT(!_isInvalid);
     return _isMissing;
 }
 
 
 bool
 Assembler::oom() const
@@ -1261,19 +1263,19 @@ Assembler::oom() const
 }
 
 bool
 Assembler::addCodeLabel(CodeLabel label)
 {
     return codeLabels_.append(label);
 }
 
-// Size of the instruction stream, in bytes.  Including pools. This function expects
-// all pools that need to be placed have been placed.  If they haven't then we
-// need to go an flush the pools :(
+// Size of the instruction stream, in bytes. Including pools. This function
+// expects all pools that need to be placed have been placed. If they haven't
+// then we need to go and flush the pools :(
 size_t
 Assembler::size() const
 {
     return m_buffer.size();
 }
 // Size of the relocation table, in bytes.
 size_t
 Assembler::jumpRelocationTableBytes() const
@@ -1297,33 +1299,33 @@ size_t
 Assembler::bytesNeeded() const
 {
     return size() +
         jumpRelocationTableBytes() +
         dataRelocationTableBytes() +
         preBarrierTableBytes();
 }
 
-// write a blob of binary into the instruction stream
+// Write a blob of binary into the instruction stream.
 BufferOffset
 Assembler::writeInst(uint32_t x, uint32_t *dest)
 {
     if (dest == nullptr)
         return m_buffer.putInt(x);
 
-    writeInstStatic(x, dest);
+    WriteInstStatic(x, dest);
     return BufferOffset();
 }
 BufferOffset
 Assembler::writeBranchInst(uint32_t x)
 {
     return m_buffer.putInt(x, /* markAsBranch = */ true);
 }
 void
-Assembler::writeInstStatic(uint32_t x, uint32_t *dest)
+Assembler::WriteInstStatic(uint32_t x, uint32_t *dest)
 {
     JS_ASSERT(dest != nullptr);
     *dest = x;
 }
 
 BufferOffset
 Assembler::align(int alignment)
 {
@@ -1341,16 +1343,17 @@ Assembler::align(int alignment)
             BufferOffset tmp = as_nop();
             if (!ret.assigned())
                 ret = tmp;
         }
     }
     return ret;
 
 }
+
 BufferOffset
 Assembler::as_nop()
 {
     return writeInst(0xe320f000);
 }
 BufferOffset
 Assembler::as_alu(Register dest, Register src1, Operand2 op2,
                   ALUOp op, SetCond_ sc, Condition c, Instruction *instdest)
@@ -1358,307 +1361,306 @@ Assembler::as_alu(Register dest, Registe
     return writeInst((int)op | (int)sc | (int) c | op2.encode() |
                      ((dest == InvalidReg) ? 0 : RD(dest)) |
                      ((src1 == InvalidReg) ? 0 : RN(src1)), (uint32_t*)instdest);
 }
 
 BufferOffset
 Assembler::as_mov(Register dest, Operand2 op2, SetCond_ sc, Condition c, Instruction *instdest)
 {
-    return as_alu(dest, InvalidReg, op2, op_mov, sc, c, instdest);
+    return as_alu(dest, InvalidReg, op2, OpMov, sc, c, instdest);
 }
 
 BufferOffset
 Assembler::as_mvn(Register dest, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, InvalidReg, op2, op_mvn, sc, c);
+    return as_alu(dest, InvalidReg, op2, OpMvn, sc, c);
 }
 
 // Logical operations.
 BufferOffset
 Assembler::as_and(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_and, sc, c);
+    return as_alu(dest, src1, op2, OpAnd, sc, c);
 }
 BufferOffset
 Assembler::as_bic(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_bic, sc, c);
+    return as_alu(dest, src1, op2, OpBic, sc, c);
 }
 BufferOffset
 Assembler::as_eor(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_eor, sc, c);
+    return as_alu(dest, src1, op2, OpEor, sc, c);
 }
 BufferOffset
 Assembler::as_orr(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_orr, sc, c);
+    return as_alu(dest, src1, op2, OpOrr, sc, c);
 }
 
 // Mathematical operations.
 BufferOffset
 Assembler::as_adc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_adc, sc, c);
+    return as_alu(dest, src1, op2, OpAdc, sc, c);
 }
 BufferOffset
 Assembler::as_add(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_add, sc, c);
+    return as_alu(dest, src1, op2, OpAdd, sc, c);
 }
 BufferOffset
 Assembler::as_sbc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_sbc, sc, c);
+    return as_alu(dest, src1, op2, OpSbc, sc, c);
 }
 BufferOffset
 Assembler::as_sub(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_sub, sc, c);
+    return as_alu(dest, src1, op2, OpSub, sc, c);
 }
 BufferOffset
 Assembler::as_rsb(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_rsb, sc, c);
+    return as_alu(dest, src1, op2, OpRsb, sc, c);
 }
 BufferOffset
 Assembler::as_rsc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
 {
-    return as_alu(dest, src1, op2, op_rsc, sc, c);
+    return as_alu(dest, src1, op2, OpRsc, sc, c);
 }
 
 // Test operations.
 BufferOffset
 Assembler::as_cmn(Register src1, Operand2 op2, Condition c)
 {
-    return as_alu(InvalidReg, src1, op2, op_cmn, SetCond, c);
+    return as_alu(InvalidReg, src1, op2, OpCmn, SetCond, c);
 }
 BufferOffset
 Assembler::as_cmp(Register src1, Operand2 op2, Condition c)
 {
-    return as_alu(InvalidReg, src1, op2, op_cmp, SetCond, c);
+    return as_alu(InvalidReg, src1, op2, OpCmp, SetCond, c);
 }
 BufferOffset
 Assembler::as_teq(Register src1, Operand2 op2, Condition c)
 {
-    return as_alu(InvalidReg, src1, op2, op_teq, SetCond, c);
+    return as_alu(InvalidReg, src1, op2, OpTeq, SetCond, c);
 }
 BufferOffset
 Assembler::as_tst(Register src1, Operand2 op2, Condition c)
 {
-    return as_alu(InvalidReg, src1, op2, op_tst, SetCond, c);
+    return as_alu(InvalidReg, src1, op2, OpTst, SetCond, c);
 }
 
-// Not quite ALU worthy, but useful none the less:
-// These also have the isue of these being formatted
-// completly differently from the standard ALU operations.
+// Not quite ALU worthy, but these are useful none the less. These also have
+// the issue of these being formatted completely differently from the standard ALU
+// operations.
 BufferOffset
 Assembler::as_movw(Register dest, Imm16 imm, Condition c, Instruction *pos)
 {
-    JS_ASSERT(hasMOVWT());
+    JS_ASSERT(HasMOVWT());
     return writeInst(0x03000000 | c | imm.encode() | RD(dest), (uint32_t*)pos);
 }
 BufferOffset
 Assembler::as_movt(Register dest, Imm16 imm, Condition c, Instruction *pos)
 {
-    JS_ASSERT(hasMOVWT());
+    JS_ASSERT(HasMOVWT());
     return writeInst(0x03400000 | c | imm.encode() | RD(dest), (uint32_t*)pos);
 }
 
 static const int mull_tag = 0x90;
 
 BufferOffset
 Assembler::as_genmul(Register dhi, Register dlo, Register rm, Register rn,
                      MULOp op, SetCond_ sc, Condition c)
 {
 
     return writeInst(RN(dhi) | maybeRD(dlo) | RM(rm) | rn.code() | op | sc | c | mull_tag);
 }
 BufferOffset
 Assembler::as_mul(Register dest, Register src1, Register src2, SetCond_ sc, Condition c)
 {
-    return as_genmul(dest, InvalidReg, src1, src2, opm_mul, sc, c);
+    return as_genmul(dest, InvalidReg, src1, src2, OpmMul, sc, c);
 }
 BufferOffset
 Assembler::as_mla(Register dest, Register acc, Register src1, Register src2,
                   SetCond_ sc, Condition c)
 {
-    return as_genmul(dest, acc, src1, src2, opm_mla, sc, c);
+    return as_genmul(dest, acc, src1, src2, OpmMla, sc, c);
 }
 BufferOffset
 Assembler::as_umaal(Register destHI, Register destLO, Register src1, Register src2, Condition c)
 {
-    return as_genmul(destHI, destLO, src1, src2, opm_umaal, NoSetCond, c);
+    return as_genmul(destHI, destLO, src1, src2, OpmUmaal, NoSetCond, c);
 }
 BufferOffset
 Assembler::as_mls(Register dest, Register acc, Register src1, Register src2, Condition c)
 {
-    return as_genmul(dest, acc, src1, src2, opm_mls, NoSetCond, c);
+    return as_genmul(dest, acc, src1, src2, OpmMls, NoSetCond, c);
 }
 
 BufferOffset
 Assembler::as_umull(Register destHI, Register destLO, Register src1, Register src2,
                     SetCond_ sc, Condition c)
 {
-    return as_genmul(destHI, destLO, src1, src2, opm_umull, sc, c);
+    return as_genmul(destHI, destLO, src1, src2, OpmUmull, sc, c);
 }
 
 BufferOffset
 Assembler::as_umlal(Register destHI, Register destLO, Register src1, Register src2,
                     SetCond_ sc, Condition c)
 {
-    return as_genmul(destHI, destLO, src1, src2, opm_umlal, sc, c);
+    return as_genmul(destHI, destLO, src1, src2, OpmUmlal, sc, c);
 }
 
 BufferOffset
 Assembler::as_smull(Register destHI, Register destLO, Register src1, Register src2,
                     SetCond_ sc, Condition c)
 {
-    return as_genmul(destHI, destLO, src1, src2, opm_smull, sc, c);
+    return as_genmul(destHI, destLO, src1, src2, OpmSmull, sc, c);
 }
 
 BufferOffset
 Assembler::as_smlal(Register destHI, Register destLO, Register src1, Register src2,
                     SetCond_ sc, Condition c)
 {
-    return as_genmul(destHI, destLO, src1, src2, opm_smlal, sc, c);
+    return as_genmul(destHI, destLO, src1, src2, OpmSmlal, sc, c);
 }
 
 BufferOffset
 Assembler::as_sdiv(Register rd, Register rn, Register rm, Condition c)
 {
     return writeInst(0x0710f010 | c | RN(rd) | RM(rm) | rn.code());
 }
 
 BufferOffset
 Assembler::as_udiv(Register rd, Register rn, Register rm, Condition c)
 {
     return writeInst(0x0730f010 | c | RN(rd) | RM(rm) | rn.code());
 }
 
-// Data transfer instructions: ldr, str, ldrb, strb.
-// Using an int to differentiate between 8 bits and 32 bits is
-// overkill, but meh
+// Data transfer instructions: ldr, str, ldrb, strb. Using an int to
+// differentiate between 8 bits and 32 bits is overkill, but meh.
 BufferOffset
 Assembler::as_dtr(LoadStore ls, int size, Index mode,
                   Register rt, DTRAddr addr, Condition c, uint32_t *dest)
 {
     JS_ASSERT (mode == Offset ||  (rt != addr.getBase() && pc != addr.getBase()));
     JS_ASSERT(size == 32 || size == 8);
     return writeInst( 0x04000000 | ls | (size == 8 ? 0x00400000 : 0) | mode | c |
                       RT(rt) | addr.encode(), dest);
 
 }
 class PoolHintData {
   public:
     enum LoadType {
-        // set 0 to bogus, since that is the value most likely to be
+        // Set 0 to bogus, since that is the value most likely to be
         // accidentally left somewhere.
-        poolBOGUS  = 0,
-        poolDTR    = 1,
-        poolBranch = 2,
-        poolVDTR   = 3
+        PoolBOGUS  = 0,
+        PoolDTR    = 1,
+        PoolBranch = 2,
+        PoolVDTR   = 3
     };
 
   private:
-    uint32_t   index    : 16;
-    uint32_t   cond     : 4;
-    LoadType   loadType : 2;
-    uint32_t   destReg  : 5;
-    uint32_t   destType : 1;
+    uint32_t   index_    : 16;
+    uint32_t   cond_     : 4;
+    LoadType   loadType_ : 2;
+    uint32_t   destReg_  : 5;
+    uint32_t   destType_ : 1;
     uint32_t   ONES     : 4;
 
-    static const uint32_t expectedOnes = 0xfu;
+    static const uint32_t ExpectedOnes = 0xfu;
 
   public:
-    void init(uint32_t index_, Assembler::Condition cond_, LoadType lt, Register destReg_) {
-        index = index_;
-        JS_ASSERT(index == index_);
-        cond = cond_ >> 28;
-        JS_ASSERT(cond == cond_ >> 28);
-        loadType = lt;
-        ONES = expectedOnes;
-        destReg = destReg_.code();
-        destType = 0;
+    void init(uint32_t index, Assembler::Condition cond, LoadType lt, Register destReg) {
+        index_ = index;
+        JS_ASSERT(index_ == index);
+        cond_ = cond >> 28;
+        JS_ASSERT(cond_ == cond >> 28);
+        loadType_ = lt;
+        ONES = ExpectedOnes;
+        destReg_ = destReg.code();
+        destType_ = 0;
     }
-    void init(uint32_t index_, Assembler::Condition cond_, LoadType lt, const VFPRegister &destReg_) {
-        JS_ASSERT(destReg_.isFloat());
-        index = index_;
-        JS_ASSERT(index == index_);
-        cond = cond_ >> 28;
-        JS_ASSERT(cond == cond_ >> 28);
-        loadType = lt;
-        ONES = expectedOnes;
-        destReg = destReg_.isDouble() ? destReg_.code() : destReg_.doubleOverlay().code();
-        destType = destReg_.isDouble();
+    void init(uint32_t index, Assembler::Condition cond, LoadType lt, const VFPRegister &destReg) {
+        JS_ASSERT(destReg.isFloat());
+        index_ = index;
+        JS_ASSERT(index_ == index);
+        cond_ = cond >> 28;
+        JS_ASSERT(cond_ == cond >> 28);
+        loadType_ = lt;
+        ONES = ExpectedOnes;
+        destReg_ = destReg.isDouble() ? destReg.code() : destReg.doubleOverlay().code();
+        destType_ = destReg.isDouble();
     }
     Assembler::Condition getCond() {
-        return Assembler::Condition(cond << 28);
+        return Assembler::Condition(cond_ << 28);
     }
 
     Register getReg() {
-        return Register::FromCode(destReg);
+        return Register::FromCode(destReg_);
     }
     VFPRegister getVFPReg() {
-        VFPRegister r = VFPRegister(FloatRegister::FromCode(destReg));
-        return destType ? r : r.singleOverlay();
+        VFPRegister r = VFPRegister(FloatRegister::FromCode(destReg_));
+        return destType_ ? r : r.singleOverlay();
     }
 
     int32_t getIndex() {
-        return index;
+        return index_;
     }
-    void setIndex(uint32_t index_) {
-        JS_ASSERT(ONES == expectedOnes && loadType != poolBOGUS);
-        index = index_;
-        JS_ASSERT(index == index_);
+    void setIndex(uint32_t index) {
+        JS_ASSERT(ONES == ExpectedOnes && loadType_ != PoolBOGUS);
+        index_ = index;
+        JS_ASSERT(index_ == index);
     }
 
     LoadType getLoadType() {
-        // If this *was* a poolBranch, but the branch has already been bound
+        // If this *was* a PoolBranch, but the branch has already been bound
         // then this isn't going to look like a real poolhintdata, but we still
         // want to lie about it so everyone knows it *used* to be a branch.
-        if (ONES != expectedOnes)
-            return PoolHintData::poolBranch;
-        return loadType;
+        if (ONES != ExpectedOnes)
+            return PoolHintData::PoolBranch;
+        return loadType_;
     }
 
     bool isValidPoolHint() {
-        // Most instructions cannot have a condition that is 0xf. Notable exceptions are
-        // blx and the entire NEON instruction set. For the purposes of pool loads, and
-        // possibly patched branches, the possible instructions are ldr and b, neither of
-        // which can have a condition code of 0xf.
-        return ONES == expectedOnes;
+        // Most instructions cannot have a condition that is 0xf. Notable
+        // exceptions are blx and the entire NEON instruction set. For the
+        // purposes of pool loads, and possibly patched branches, the possible
+        // instructions are ldr and b, neither of which can have a condition
+        // code of 0xf.
+        return ONES == ExpectedOnes;
     }
 };
 
 union PoolHintPun {
     PoolHintData phd;
     uint32_t raw;
 };
 
-// Handles all of the other integral data transferring functions:
-// ldrsb, ldrsh, ldrd, etc.
-// size is given in bits.
+// Handles all of the other integral data transferring functions: ldrsb, ldrsh,
+// ldrd, etc. The size is given in bits.
 BufferOffset
 Assembler::as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
                      Register rt, EDtrAddr addr, Condition c, uint32_t *dest)
 {
     int extra_bits2 = 0;
     int extra_bits1 = 0;
     switch(size) {
       case 8:
         JS_ASSERT(IsSigned);
-        JS_ASSERT(ls!=IsStore);
+        JS_ASSERT(ls != IsStore);
         extra_bits1 = 0x1;
         extra_bits2 = 0x2;
         break;
       case 16:
-        //case 32:
-        // doesn't need to be handled-- it is handled by the default ldr/str
+        // 'case 32' doesn't need to be handled, it is handled by the default
+        // ldr/str.
         extra_bits2 = 0x01;
         extra_bits1 = (ls == IsStore) ? 0 : 1;
         if (IsSigned) {
             JS_ASSERT(ls != IsStore);
             extra_bits2 |= 0x2;
         }
         break;
       case 64:
@@ -1679,17 +1681,17 @@ Assembler::as_dtm(LoadStore ls, Register
     return writeInst(0x08000000 | RN(rn) | ls |
                      mode | mask | c | wb);
 }
 
 BufferOffset
 Assembler::as_Imm32Pool(Register dest, uint32_t value, Condition c)
 {
     PoolHintPun php;
-    php.phd.init(0, c, PoolHintData::poolDTR, dest);
+    php.phd.init(0, c, PoolHintData::PoolDTR, dest);
     return m_buffer.insertEntry(4, (uint8_t*)&php.raw, int32Pool, (uint8_t*)&value);
 }
 
 void
 Assembler::as_WritePoolEntry(Instruction *addr, Condition c, uint32_t data)
 {
     JS_ASSERT(addr->is<InstLDR>());
     int32_t offset = addr->encode() & 0xfff;
@@ -1702,146 +1704,135 @@ Assembler::as_WritePoolEntry(Instruction
     addr->extractCond(&orig_cond);
     JS_ASSERT(orig_cond == c);
 }
 
 BufferOffset
 Assembler::as_BranchPool(uint32_t value, RepatchLabel *label, ARMBuffer::PoolEntry *pe, Condition c)
 {
     PoolHintPun php;
-    php.phd.init(0, c, PoolHintData::poolBranch, pc);
+    php.phd.init(0, c, PoolHintData::PoolBranch, pc);
     BufferOffset ret = m_buffer.insertEntry(4, (uint8_t*)&php.raw, int32Pool, (uint8_t*)&value, pe,
                                             /* markAsBranch = */ true);
-    // If this label is already bound, then immediately replace the stub load with
-    // a correct branch.
+    // If this label is already bound, then immediately replace the stub load
+    // with a correct branch.
     if (label->bound()) {
         BufferOffset dest(label);
         as_b(dest.diffB<BOffImm>(ret), c, ret);
     } else {
         label->use(ret.getOffset());
     }
     return ret;
 }
 
 BufferOffset
 Assembler::as_FImm64Pool(VFPRegister dest, double value, Condition c)
 {
     JS_ASSERT(dest.isDouble());
     PoolHintPun php;
-    php.phd.init(0, c, PoolHintData::poolVDTR, dest);
+    php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
     return m_buffer.insertEntry(4, (uint8_t*)&php.raw, doublePool, (uint8_t*)&value);
 }
 
 struct PaddedFloat32
 {
     float value;
     uint32_t padding;
 };
 JS_STATIC_ASSERT(sizeof(PaddedFloat32) == sizeof(double));
 
 BufferOffset
 Assembler::as_FImm32Pool(VFPRegister dest, float value, Condition c)
 {
-    /*
-     * Insert floats into the double pool as they have the same limitations on
-     * immediate offset.  This wastes 4 bytes padding per float.  An alternative
-     * would be to have a separate pool for floats.
-     */
+    // Insert floats into the double pool as they have the same limitations on
+    // immediate offset. This wastes 4 bytes padding per float. An alternative
+    // would be to have a separate pool for floats.
     JS_ASSERT(dest.isSingle());
     PoolHintPun php;
-    php.phd.init(0, c, PoolHintData::poolVDTR, dest);
+    php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
     PaddedFloat32 pf = { value, 0 };
     return m_buffer.insertEntry(4, (uint8_t*)&php.raw, doublePool, (uint8_t*)&pf);
 }
 
 // Pool callbacks stuff:
 void
-Assembler::insertTokenIntoTag(uint32_t instSize, uint8_t *load_, int32_t token)
+Assembler::InsertTokenIntoTag(uint32_t instSize, uint8_t *load_, int32_t token)
 {
     uint32_t *load = (uint32_t*) load_;
     PoolHintPun php;
     php.raw = *load;
     php.phd.setIndex(token);
     *load = php.raw;
 }
-// patchConstantPoolLoad takes the address of the instruction that wants to be patched, and
-//the address of the start of the constant pool, and figures things out from there.
+
+// PatchConstantPoolLoad takes the address of the instruction that wants to be
+// patched, and the address of the start of the constant pool, and figures
+// things out from there.
 bool
-Assembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+Assembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
 {
     PoolHintData data = *(PoolHintData*)loadAddr;
     uint32_t *instAddr = (uint32_t*) loadAddr;
     int offset = (char *)constPoolAddr - (char *)loadAddr;
     switch(data.getLoadType()) {
-      case PoolHintData::poolBOGUS:
+      case PoolHintData::PoolBOGUS:
         MOZ_ASSUME_UNREACHABLE("bogus load type!");
-      case PoolHintData::poolDTR:
-        dummy->as_dtr(IsLoad, 32, Offset, data.getReg(),
+      case PoolHintData::PoolDTR:
+        Dummy->as_dtr(IsLoad, 32, Offset, data.getReg(),
                       DTRAddr(pc, DtrOffImm(offset+4*data.getIndex() - 8)), data.getCond(), instAddr);
         break;
-      case PoolHintData::poolBranch:
-        // Either this used to be a poolBranch, and the label was already bound, so it was
-        // replaced with a real branch, or this may happen in the future.
-        // If this is going to happen in the future, then the actual bits that are written here
-        // don't matter (except the condition code, since that is always preserved across
-        // patchings) but if it does not get bound later,
-        // then we want to make sure this is a load from the pool entry (and the pool entry
-        // should be nullptr so it will crash).
+      case PoolHintData::PoolBranch:
+        // Either this used to be a PoolBranch, and the label was already bound,
+        // so it was replaced with a real branch, or this may happen in the
+        // future. If this is going to happen in the future, then the actual
+        // bits that are written here don't matter (except the condition code,
+        // since that is always preserved across patchings) but if it does not
+        // get bound later, then we want to make sure this is a load from the
+        // pool entry (and the pool entry should be nullptr so it will crash).
         if (data.isValidPoolHint()) {
-            dummy->as_dtr(IsLoad, 32, Offset, pc,
+            Dummy->as_dtr(IsLoad, 32, Offset, pc,
                           DTRAddr(pc, DtrOffImm(offset+4*data.getIndex() - 8)),
                           data.getCond(), instAddr);
         }
         break;
-      case PoolHintData::poolVDTR: {
+      case PoolHintData::PoolVDTR: {
         VFPRegister dest = data.getVFPReg();
         int32_t imm = offset + (8 * data.getIndex()) - 8;
         if (imm < -1023 || imm  > 1023)
             return false;
-        dummy->as_vdtr(IsLoad, dest, VFPAddr(pc, VFPOffImm(imm)), data.getCond(), instAddr);
+        Dummy->as_vdtr(IsLoad, dest, VFPAddr(pc, VFPOffImm(imm)), data.getCond(), instAddr);
         break;
       }
     }
     return true;
 }
 
-uint32_t
-Assembler::placeConstantPoolBarrier(int offset)
-{
-    // BUG: 700526
-    // this is still an active path, however, we do not hit it in the test
-    // suite at all.
-    MOZ_ASSUME_UNREACHABLE("ARMAssembler holdover");
-}
-
 // Control flow stuff:
 
-// bx can *only* branch to a register
-// never to an immediate.
+// bx can *only* branch to a register, never to an immediate.
 BufferOffset
 Assembler::as_bx(Register r, Condition c, bool isPatchable)
 {
-    BufferOffset ret = writeInst(((int) c) | op_bx | r.code());
+    BufferOffset ret = writeInst(((int) c) | OpBx | r.code());
     if (c == Always && !isPatchable)
         m_buffer.markGuard();
     return ret;
 }
 void
-Assembler::writePoolGuard(BufferOffset branch, Instruction *dest, BufferOffset afterPool)
+Assembler::WritePoolGuard(BufferOffset branch, Instruction *dest, BufferOffset afterPool)
 {
     BOffImm off = afterPool.diffB<BOffImm>(branch);
     *dest = InstBImm(off, Always);
 }
 // Branch can branch to an immediate *or* to a register.
-// Branches to immediates are pc relative, branches to registers
-// are absolute
+// Branches to immediates are pc relative, branches to registers are absolute.
 BufferOffset
 Assembler::as_b(BOffImm off, Condition c, bool isPatchable)
 {
-    BufferOffset ret = writeBranchInst(((int)c) | op_b | off.encode());
+    BufferOffset ret = writeBranchInst(((int)c) | OpB | off.encode());
     if (c == Always && !isPatchable)
         m_buffer.markGuard();
     return ret;
 }
 
 BufferOffset
 Assembler::as_b(Label *l, Condition c, bool isPatchable)
 {
@@ -1858,17 +1849,17 @@ Assembler::as_b(Label *l, Condition c, b
     }
 
     int32_t old;
     BufferOffset ret;
     if (l->used()) {
         old = l->offset();
         // This will currently throw an assertion if we couldn't actually
         // encode the offset of the branch.
-        if (!BOffImm::isInRange(old)) {
+        if (!BOffImm::IsInRange(old)) {
             m_buffer.fail_bail();
             return ret;
         }
         ret = as_b(BOffImm(old), c, isPatchable);
     } else {
         old = LabelBase::INVALID_OFFSET;
         BOffImm inv;
         ret = as_b(inv, c, isPatchable);
@@ -1880,32 +1871,32 @@ Assembler::as_b(Label *l, Condition c, b
 BufferOffset
 Assembler::as_b(BOffImm off, Condition c, BufferOffset inst)
 {
     *editSrc(inst) = InstBImm(off, c);
     return inst;
 }
 
 // blx can go to either an immediate or a register.
-// When blx'ing to a register, we change processor state
-// depending on the low bit of the register
-// when blx'ing to an immediate, we *always* change processor state.
+// When blx'ing to a register, we change processor state depending on the low
+// bit of the register; when blx'ing to an immediate, we *always* change
+// processor state.
 
 BufferOffset
 Assembler::as_blx(Register r, Condition c)
 {
-    return writeInst(((int) c) | op_blx | r.code());
+    return writeInst(((int) c) | OpBlx | r.code());
 }
 
 // bl can only branch to an pc-relative immediate offset
 // It cannot change the processor state.
 BufferOffset
 Assembler::as_bl(BOffImm off, Condition c)
 {
-    return writeBranchInst(((int)c) | op_bl | off.encode());
+    return writeBranchInst(((int)c) | OpBl | off.encode());
 }
 
 BufferOffset
 Assembler::as_bl(Label *l, Condition c)
 {
     if (m_buffer.oom()) {
         BufferOffset ret;
         return ret;
@@ -1917,20 +1908,20 @@ Assembler::as_bl(Label *l, Condition c)
         as_bl(BufferOffset(l).diffB<BOffImm>(ret), c, ret);
         return ret;
     }
 
     int32_t old;
     BufferOffset ret;
     // See if the list was empty :(
     if (l->used()) {
-        // This will currently throw an assertion if we couldn't actually
-        // encode the offset of the branch.
+        // This will currently throw an assertion if we couldn't actually encode
+        // the offset of the branch.
         old = l->offset();
-        if (!BOffImm::isInRange(old)) {
+        if (!BOffImm::IsInRange(old)) {
             m_buffer.fail_bail();
             return ret;
         }
         ret = as_bl(BOffImm(old), c);
     } else {
         old = LabelBase::INVALID_OFFSET;
         BOffImm inv;
         ret = as_bl(inv, c);
@@ -1950,73 +1941,74 @@ BufferOffset
 Assembler::as_mrs(Register r, Condition c)
 {
     return writeInst(0x010f0000 | int(c) | RD(r));
 }
 
 BufferOffset
 Assembler::as_msr(Register r, Condition c)
 {
-    // hardcode the 'mask' field to 0b11 for now.  it is bits 18 and 19, which are the two high bits of the 'c' in this constant.
+    // Hardcode the 'mask' field to 0b11 for now. It is bits 18 and 19, which
+    // are the two high bits of the 'c' in this constant.
     JS_ASSERT((r.code() & ~0xf) == 0);
     return writeInst(0x012cf000 | int(c) | r.code());
 }
 
 // VFP instructions!
 enum vfp_tags {
-    vfp_tag   = 0x0C000A00,
-    vfp_arith = 0x02000000
+    VfpTag   = 0x0C000A00,
+    VfpArith = 0x02000000
 };
 BufferOffset
 Assembler::writeVFPInst(vfp_size sz, uint32_t blob, uint32_t *dest)
 {
     JS_ASSERT((sz & blob) == 0);
-    JS_ASSERT((vfp_tag & blob) == 0);
-    return writeInst(vfp_tag | sz | blob, dest);
+    JS_ASSERT((VfpTag & blob) == 0);
+    return writeInst(VfpTag | sz | blob, dest);
 }
 
 // Unityped variants: all registers hold the same (ieee754 single/double)
 // notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
 BufferOffset
 Assembler::as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                   VFPOp op, Condition c)
 {
-    // Make sure we believe that all of our operands are the same kind
+    // Make sure we believe that all of our operands are the same kind.
     JS_ASSERT_IF(!vn.isMissing(), vd.equiv(vn));
     JS_ASSERT_IF(!vm.isMissing(), vd.equiv(vm));
-    vfp_size sz = vd.isDouble() ? isDouble : isSingle;
-    return writeVFPInst(sz, VD(vd) | VN(vn) | VM(vm) | op | vfp_arith | c);
+    vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
+    return writeVFPInst(sz, VD(vd) | VN(vn) | VM(vm) | op | VfpArith | c);
 }
 
 BufferOffset
 Assembler::as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                  Condition c)
 {
-    return as_vfp_float(vd, vn, vm, opv_add, c);
+    return as_vfp_float(vd, vn, vm, OpvAdd, c);
 }
 
 BufferOffset
 Assembler::as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                  Condition c)
 {
-    return as_vfp_float(vd, vn, vm, opv_div, c);
+    return as_vfp_float(vd, vn, vm, OpvDiv, c);
 }
 
 BufferOffset
 Assembler::as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                  Condition c)
 {
-    return as_vfp_float(vd, vn, vm, opv_mul, c);
+    return as_vfp_float(vd, vn, vm, OpvMul, c);
 }
 
 BufferOffset
 Assembler::as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                   Condition c)
 {
-    return as_vfp_float(vd, vn, vm, opv_mul, c);
+    return as_vfp_float(vd, vn, vm, OpvMul, c);
     MOZ_ASSUME_UNREACHABLE("Feature NYI");
 }
 
 BufferOffset
 Assembler::as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                   Condition c)
 {
     MOZ_ASSUME_UNREACHABLE("Feature NYI");
@@ -2028,80 +2020,80 @@ Assembler::as_vnmls(VFPRegister vd, VFPR
 {
     MOZ_ASSUME_UNREACHABLE("Feature NYI");
     return BufferOffset();
 }
 
 BufferOffset
 Assembler::as_vneg(VFPRegister vd, VFPRegister vm, Condition c)
 {
-    return as_vfp_float(vd, NoVFPRegister, vm, opv_neg, c);
+    return as_vfp_float(vd, NoVFPRegister, vm, OpvNeg, c);
 }
 
 BufferOffset
 Assembler::as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c)
 {
-    return as_vfp_float(vd, NoVFPRegister, vm, opv_sqrt, c);
+    return as_vfp_float(vd, NoVFPRegister, vm, OpvSqrt, c);
 }
 
 BufferOffset
 Assembler::as_vabs(VFPRegister vd, VFPRegister vm, Condition c)
 {
-    return as_vfp_float(vd, NoVFPRegister, vm, opv_abs, c);
+    return as_vfp_float(vd, NoVFPRegister, vm, OpvAbs, c);
 }
 
 BufferOffset
 Assembler::as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                  Condition c)
 {
-    return as_vfp_float(vd, vn, vm, opv_sub, c);
+    return as_vfp_float(vd, vn, vm, OpvSub, c);
 }
 
 BufferOffset
 Assembler::as_vcmp(VFPRegister vd, VFPRegister vm,
                  Condition c)
 {
-    return as_vfp_float(vd, NoVFPRegister, vm, opv_cmp, c);
+    return as_vfp_float(vd, NoVFPRegister, vm, OpvCmp, c);
 }
 BufferOffset
 Assembler::as_vcmpz(VFPRegister vd, Condition c)
 {
-    return as_vfp_float(vd, NoVFPRegister, NoVFPRegister, opv_cmpz, c);
+    return as_vfp_float(vd, NoVFPRegister, NoVFPRegister, OpvCmpz, c);
 }
 
 // Specifically, a move between two same sized-registers.
 BufferOffset
 Assembler::as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c)
 {
-    return as_vfp_float(vd, NoVFPRegister, vsrc, opv_mov, c);
+    return as_vfp_float(vd, NoVFPRegister, vsrc, OpvMov, c);
 }
-//xfer between Core and VFP
+// Transfer between Core and VFP.
 
-// Unlike the next function, moving between the core registers and vfp
-// registers can't be *that* properly typed.  Namely, since I don't want to
-// munge the type VFPRegister to also include core registers.  Thus, the core
-// and vfp registers are passed in based on their type, and src/dest is
-// determined by the float2core.
+// Unlike the next function, moving between the core registers and vfp registers
+// can't be *that* properly typed. Namely, since I don't want to munge the type
+// VFPRegister to also include core registers. Thus, the core and vfp registers
+// are passed in based on their type, and src/dest is determined by the
+// float2core.
 
 BufferOffset
 Assembler::as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c,
                     Condition c, int idx)
 {
-    vfp_size sz = isSingle;
+    vfp_size sz = IsSingle;
     if (vm.isDouble()) {
         // Technically, this can be done with a vmov à la ARM ARM under vmov
-        // however, that requires at least an extra bit saying if the
-        // operation should be performed on the lower or upper half of the
-        // double.  Moving a single to/from 2N/2N+1 isn't equivalent,
-        // since there are 32 single registers, and 32 double registers
-        // so there is no way to encode the last 16 double registers.
-        sz = isDouble;
+        // however, that requires at least an extra bit saying if the operation
+        // should be performed on the lower or upper half of the double. Moving
+        // a single to/from 2N/2N+1 isn't equivalent, since there are 32 single
+        // registers, and 32 double registers so there is no way to encode the
+        // last 16 double registers.
+        sz = IsDouble;
         JS_ASSERT(idx == 0 || idx == 1);
-        // If we are transferring a single half of the double
-        // then it must be moving a VFP reg to a core reg.
+        // If we are transferring a single half of the double then it must be
+        // moving a VFP reg to a core reg.
         if (vt2 == InvalidReg)
             JS_ASSERT(f2c == FloatToCore);
         idx = idx << 21;
     } else {
         JS_ASSERT(idx == 0);
     }
     VFPXferSize xfersz = WordTransfer;
     uint32_t (*encodeVFP)(VFPRegister) = VN;
@@ -2110,113 +2102,111 @@ Assembler::as_vxfer(Register vt1, Regist
         xfersz = DoubleTransfer;
         encodeVFP = VM;
     }
 
     return writeVFPInst(sz, xfersz | f2c | c |
                         RT(vt1) | maybeRN(vt2) | encodeVFP(vm) | idx);
 }
 enum vcvt_destFloatness {
-    toInteger = 1 << 18,
-    toFloat  = 0 << 18
+    VcvtToInteger = 1 << 18,
+    VcvtToFloat  = 0 << 18
 };
 enum vcvt_toZero {
-    toZero = 1 << 7, // use the default rounding mode, which rounds truncates
-    toFPSCR = 0 << 7 // use whatever rounding mode the fpscr specifies
+    VcvtToZero = 1 << 7, // Use the default rounding mode, which truncates (rounds to zero).
+    VcvtToFPSCR = 0 << 7 // Use whatever rounding mode the fpscr specifies.
 };
 enum vcvt_Signedness {
-    toSigned   = 1 << 16,
-    toUnsigned = 0 << 16,
-    fromSigned   = 1 << 7,
-    fromUnsigned = 0 << 7
+    VcvtToSigned   = 1 << 16,
+    VcvtToUnsigned = 0 << 16,
+    VcvtFromSigned   = 1 << 7,
+    VcvtFromUnsigned = 0 << 7
 };
 
-// our encoding actually allows just the src and the dest (and their types)
-// to uniquely specify the encoding that we are going to use.
+// Our encoding actually allows just the src and the dest (and their types) to
+// uniquely specify the encoding that we are going to use.
 BufferOffset
 Assembler::as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR,
                    Condition c)
 {
-    // Unlike other cases, the source and dest types cannot be the same
+    // Unlike other cases, the source and dest types cannot be the same.
     JS_ASSERT(!vd.equiv(vm));
-    vfp_size sz = isDouble;
+    vfp_size sz = IsDouble;
     if (vd.isFloat() && vm.isFloat()) {
-        // Doing a float -> float conversion
+        // Doing a float -> float conversion.
         if (vm.isSingle())
-            sz = isSingle;
-        return writeVFPInst(sz, c | 0x02B700C0 |
-                            VM(vm) | VD(vd));
+            sz = IsSingle;
+        return writeVFPInst(sz, c | 0x02B700C0 | VM(vm) | VD(vd));
     }
 
     // At least one of the registers should be a float.
     vcvt_destFloatness destFloat;
     vcvt_Signedness opSign;
-    vcvt_toZero doToZero = toFPSCR;
+    vcvt_toZero doToZero = VcvtToFPSCR;
     JS_ASSERT(vd.isFloat() || vm.isFloat());
     if (vd.isSingle() || vm.isSingle()) {
-        sz = isSingle;
+        sz = IsSingle;
     }
     if (vd.isFloat()) {
-        destFloat = toFloat;
-        opSign = (vm.isSInt()) ? fromSigned : fromUnsigned;
+        destFloat = VcvtToFloat;
+        opSign = (vm.isSInt()) ? VcvtFromSigned : VcvtFromUnsigned;
     } else {
-        destFloat = toInteger;
-        opSign = (vd.isSInt()) ? toSigned : toUnsigned;
-        doToZero = useFPSCR ? toFPSCR : toZero;
+        destFloat = VcvtToInteger;
+        opSign = (vd.isSInt()) ? VcvtToSigned : VcvtToUnsigned;
+        doToZero = useFPSCR ? VcvtToFPSCR : VcvtToZero;
     }
     return writeVFPInst(sz, c | 0x02B80040 | VD(vd) | VM(vm) | destFloat | opSign | doToZero);
 }
 
 BufferOffset
 Assembler::as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint, bool toFixed, Condition c)
 {
     JS_ASSERT(vd.isFloat());
     uint32_t sx = 0x1;
-    vfp_size sf = vd.isDouble() ? isDouble : isSingle;
+    vfp_size sf = vd.isDouble() ? IsDouble : IsSingle;
     int32_t imm5 = fixedPoint;
     imm5 = (sx ? 32 : 16) - imm5;
     JS_ASSERT(imm5 >= 0);
     imm5 = imm5 >> 1 | (imm5 & 1) << 5;
     return writeVFPInst(sf, 0x02BA0040 | VD(vd) | toFixed << 18 | sx << 7 |
                         (!isSigned) << 16 | imm5 | c);
 }
 
-// xfer between VFP and memory
+// Transfer between VFP and memory.
 BufferOffset
 Assembler::as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
-                   Condition c /* vfp doesn't have a wb option*/,
+                   Condition c /* vfp doesn't have a wb option */,
                    uint32_t *dest)
 {
-    vfp_size sz = vd.isDouble() ? isDouble : isSingle;
+    vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
     return writeVFPInst(sz, ls | 0x01000000 | addr.encode() | VD(vd) | c, dest);
 }
 
-// VFP's ldm/stm work differently from the standard arm ones.
-// You can only transfer a range
+// VFP's ldm/stm work differently from the standard arm ones. You can only
+// transfer a range.
 
 BufferOffset
 Assembler::as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length,
-                 /*also has update conditions*/Condition c)
+                   /* also has update conditions */ Condition c)
 {
     JS_ASSERT(length <= 16 && length >= 0);
-    vfp_size sz = vd.isDouble() ? isDouble : isSingle;
+    vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
 
     if (vd.isDouble())
         length *= 2;
 
-    return writeVFPInst(sz, dtmLoadStore | RN(rn) | VD(vd) |
-                        length |
+    return writeVFPInst(sz, dtmLoadStore | RN(rn) | VD(vd) | length |
                         dtmMode | dtmUpdate | dtmCond);
 }
 
 BufferOffset
 Assembler::as_vimm(VFPRegister vd, VFPImm imm, Condition c)
 {
     JS_ASSERT(imm.isValid());
-    vfp_size sz = vd.isDouble() ? isDouble : isSingle;
+    vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
     return writeVFPInst(sz,  c | imm.encode() | VD(vd) | 0x02B00000);
 
 }
 BufferOffset
 Assembler::as_vmrs(Register r, Condition c)
 {
     return writeInst(c | 0x0ef10a10 | RT(r));
 }
@@ -2233,30 +2223,29 @@ Assembler::nextLink(BufferOffset b, Buff
     Instruction branch = *editSrc(b);
     JS_ASSERT(branch.is<InstBranchImm>());
 
     BOffImm destOff;
     branch.as<InstBranchImm>()->extractImm(&destOff);
     if (destOff.isInvalid())
         return false;
 
-    // Propagate the next link back to the caller, by
-    // constructing a new BufferOffset into the space they
-    // provided.
+    // Propagate the next link back to the caller, by constructing a new
+    // BufferOffset into the space they provided.
     new (next) BufferOffset(destOff.decode());
     return true;
 }
 
 void
 Assembler::bind(Label *label, BufferOffset boff)
 {
     if (label->used()) {
         bool more;
-        // If our caller didn't give us an explicit target to bind to
-        // then we want to bind to the location of the next instruction
+        // If our caller didn't give us an explicit target to bind to then we
+        // want to bind to the location of the next instruction.
         BufferOffset dest = boff.assigned() ? boff : nextOffset();
         BufferOffset b(label);
         do {
             BufferOffset next;
             more = nextLink(b, &next);
             Instruction branch = *editSrc(b);
             Condition c;
             branch.extractCond(&c);
@@ -2272,18 +2261,18 @@ Assembler::bind(Label *label, BufferOffs
     label->bind(nextOffset().getOffset());
 }
 
 void
 Assembler::bind(RepatchLabel *label)
 {
     BufferOffset dest = nextOffset();
     if (label->used()) {
-        // If the label has a use, then change this use to refer to
-        // the bound label;
+        // If the label has a use, then change this use to refer to the bound
+        // label.
         BufferOffset branchOff(label->offset());
         // Since this was created with a RepatchLabel, the value written in the
         // instruction stream is not branch shaped, it is PoolHintData shaped.
         Instruction *branch = editSrc(branchOff);
         PoolHintPun p;
         p.raw = branch->encode();
         Condition cond;
         if (p.phd.isValidPoolHint())
@@ -2306,75 +2295,69 @@ Assembler::retarget(Label *label, Label 
             // onto target's.
             BufferOffset labelBranchOffset(label);
             BufferOffset next;
 
             // Find the head of the use chain for label.
             while (nextLink(labelBranchOffset, &next))
                 labelBranchOffset = next;
 
-            // Then patch the head of label's use chain to the tail of
-            // target's use chain, prepending the entire use chain of target.
+            // Then patch the head of label's use chain to the tail of target's
+            // use chain, prepending the entire use chain of target.
             Instruction branch = *editSrc(labelBranchOffset);
             Condition c;
             branch.extractCond(&c);
             int32_t prev = target->use(label->offset());
             if (branch.is<InstBImm>())
                 as_b(BOffImm(prev), c, labelBranchOffset);
             else if (branch.is<InstBLImm>())
                 as_bl(BOffImm(prev), c, labelBranchOffset);
             else
                 MOZ_ASSUME_UNREACHABLE("crazy fixup!");
         } else {
-            // The target is unbound and unused.  We can just take the head of
+            // The target is unbound and unused. We can just take the head of
             // the list hanging off of label, and dump that into target.
             DebugOnly<uint32_t> prev = target->use(label->offset());
             JS_ASSERT((int32_t)prev == Label::INVALID_OFFSET);
         }
     }
     label->reset();
 
 }
 
 
 void dbg_break() {}
 static int stopBKPT = -1;
 void
 Assembler::as_bkpt()
 {
-    // This is a count of how many times a breakpoint instruction has been generated.
-    // It is embedded into the instruction for debugging purposes.  gdb will print "bkpt xxx"
-    // when you attempt to dissassemble a breakpoint with the number xxx embedded into it.
-    // If this breakpoint is being hit, then you can run (in gdb)
-    // >b dbg_break
-    // >b main
-    // >commands
-    // >set stopBKPT = xxx
-    // >c
-    // >end
-
-    // which will set a breakpoint on the function dbg_break above
-    // set a scripted breakpoint on main that will set the (otherwise unmodified)
-    // value to the number of the breakpoint, so dbg_break will actuall be called
-    // and finally, when you run the executable, execution will halt when that
-    // breakpoint is generated
+    // This is a count of how many times a breakpoint instruction has been
+    // generated. It is embedded into the instruction for debugging
+    // purposes. Gdb will print "bkpt xxx" when you attempt to disassemble a
+    // breakpoint with the number xxx embedded into it. If this breakpoint is
+    // being hit, then you can run (in gdb):
+    //  >b dbg_break
+    //  >b main
+    //  >commands
+    //  >set stopBKPT = xxx
+    //  >c
+    //  >end
+    // which will set a breakpoint on the function dbg_break above, set a
+    // scripted breakpoint on main that will set the (otherwise unmodified)
+    // value to the number of the breakpoint, so dbg_break will actually be
+    // called and finally, when you run the executable, execution will halt when
+    // that breakpoint is generated.
     static int hit = 0;
     if (stopBKPT == hit)
         dbg_break();
-    writeInst(0xe1200070 | (hit & 0xf) | ((hit & 0xfff0)<<4));
+    writeInst(0xe1200070 | (hit & 0xf) | ((hit & 0xfff0) << 4));
     hit++;
 }
 
 void
-Assembler::dumpPool()
-{
-    m_buffer.flushPool();
-}
-
-void
 Assembler::flushBuffer()
 {
     m_buffer.flushPool();
 }
 
 void
 Assembler::enterNoPool()
 {
@@ -2383,66 +2366,66 @@ Assembler::enterNoPool()
 
 void
 Assembler::leaveNoPool()
 {
     m_buffer.leaveNoPool();
 }
 
 ptrdiff_t
-Assembler::getBranchOffset(const Instruction *i_)
+Assembler::GetBranchOffset(const Instruction *i_)
 {
     if (!i_->is<InstBranchImm>())
         return 0;
 
     InstBranchImm *i = i_->as<InstBranchImm>();
     BOffImm dest;
     i->extractImm(&dest);
     return dest.decode();
 }
 void
-Assembler::retargetNearBranch(Instruction *i, int offset, bool final)
+Assembler::RetargetNearBranch(Instruction *i, int offset, bool final)
 {
     Assembler::Condition c;
     i->extractCond(&c);
-    retargetNearBranch(i, offset, c, final);
+    RetargetNearBranch(i, offset, c, final);
 }
 
 void
-Assembler::retargetNearBranch(Instruction *i, int offset, Condition cond, bool final)
+Assembler::RetargetNearBranch(Instruction *i, int offset, Condition cond, bool final)
 {
     // Retargeting calls is totally unsupported!
     JS_ASSERT_IF(i->is<InstBranchImm>(), i->is<InstBImm>() || i->is<InstBLImm>());
     if (i->is<InstBLImm>())
         new (i) InstBLImm(BOffImm(offset), cond);
     else
         new (i) InstBImm(BOffImm(offset), cond);
 
-    // Flush the cache, since an instruction was overwritten
+    // Flush the cache, since an instruction was overwritten.
     if (final)
         AutoFlushICache::flush(uintptr_t(i), 4);
 }
 
 void
-Assembler::retargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Condition cond)
+Assembler::RetargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Condition cond)
 {
     int32_t offset = reinterpret_cast<uint8_t*>(slot) - reinterpret_cast<uint8_t*>(i);
     if (!i->is<InstLDR>()) {
         new (i) InstLDR(Offset, pc, DTRAddr(pc, DtrOffImm(offset - 8)), cond);
         AutoFlushICache::flush(uintptr_t(i), 4);
     }
     *slot = dest;
 
 }
 
 struct PoolHeader : Instruction {
     struct Header
     {
-        // size should take into account the pool header.
-        // size is in units of Instruction (4bytes), not byte
+        // The size should take into account the pool header.
+        // The size is in units of Instruction (4 bytes), not bytes.
         uint32_t size : 15;
         bool isNatural : 1;
         uint32_t ONES : 16;
 
         Header(int size_, bool isNatural_)
           : size(size_),
             isNatural(isNatural_),
             ONES(0xffff)
@@ -2469,116 +2452,114 @@ struct PoolHeader : Instruction {
     uint32_t size() const {
         Header tmp(this);
         return tmp.size;
     }
     uint32_t isNatural() const {
         Header tmp(this);
         return tmp.isNatural;
     }
-    static bool isTHIS(const Instruction &i) {
+    static bool IsTHIS(const Instruction &i) {
         return (*i.raw() & 0xffff0000) == 0xffff0000;
     }
-    static const PoolHeader *asTHIS(const Instruction &i) {
-        if (!isTHIS(i))
+    static const PoolHeader *AsTHIS(const Instruction &i) {
+        if (!IsTHIS(i))
             return nullptr;
         return static_cast<const PoolHeader*>(&i);
     }
 };
 
 
 void
-Assembler::writePoolHeader(uint8_t *start, Pool *p, bool isNatural)
+Assembler::WritePoolHeader(uint8_t *start, Pool *p, bool isNatural)
 {
     STATIC_ASSERT(sizeof(PoolHeader) == 4);
     uint8_t *pool = start+4;
-    // go through the usual rigaramarole to get the size of the pool.
+    // Go through the usual rigmarole to get the size of the pool.
     pool = p[0].addPoolSize(pool);
     pool = p[1].addPoolSize(pool);
     pool = p[1].other->addPoolSize(pool);
     pool = p[0].other->addPoolSize(pool);
     uint32_t size = pool - start;
     JS_ASSERT((size & 3) == 0);
     size = size >> 2;
     JS_ASSERT(size < (1 << 15));
     PoolHeader header(size, isNatural);
     *(PoolHeader*)start = header;
 }
 
 
 void
-Assembler::writePoolFooter(uint8_t *start, Pool *p, bool isNatural)
+Assembler::WritePoolFooter(uint8_t *start, Pool *p, bool isNatural)
 {
     return;
 }
 
-// The size of an arbitrary 32-bit call in the instruction stream.
-// On ARM this sequence is |pc = ldr pc - 4; imm32| given that we
-// never reach the imm32.
+// The size of an arbitrary 32-bit call in the instruction stream. On ARM this
+// sequence is |pc = ldr pc - 4; imm32| given that we never reach the imm32.
 uint32_t
-Assembler::patchWrite_NearCallSize()
+Assembler::PatchWrite_NearCallSize()
 {
     return sizeof(uint32_t);
 }
 void
-Assembler::patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
+Assembler::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
 {
     Instruction *inst = (Instruction *) start.raw();
-    // Overwrite whatever instruction used to be here with a call.
-    // Since the destination is in the same function, it will be within range of the 24<<2 byte
-    // bl instruction.
+    // Overwrite whatever instruction used to be here with a call. Since the
+    // destination is in the same function, it will be within range of the
+    // 24 << 2 byte bl instruction.
     uint8_t *dest = toCall.raw();
     new (inst) InstBLImm(BOffImm(dest - (uint8_t*)inst) , Always);
     // Ensure everyone sees the code that was just written into memory.
-
     AutoFlushICache::flush(uintptr_t(inst), 4);
 
 }
 void
-Assembler::patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
+Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                    PatchedImmPtr expectedValue)
 {
     Instruction *ptr = (Instruction *) label.raw();
     InstructionIterator iter(ptr);
     Register dest;
     Assembler::RelocStyle rs;
-    DebugOnly<const uint32_t *> val = getPtr32Target(&iter, &dest, &rs);
+    DebugOnly<const uint32_t *> val = GetPtr32Target(&iter, &dest, &rs);
     JS_ASSERT((uint32_t)(const uint32_t *)val == uint32_t(expectedValue.value));
-    reinterpret_cast<MacroAssemblerARM*>(dummy)->ma_movPatchable(Imm32(int32_t(newValue.value)),
+    reinterpret_cast<MacroAssemblerARM*>(Dummy)->ma_movPatchable(Imm32(int32_t(newValue.value)),
                                                                  dest, Always, rs, ptr);
     // L_LDR won't cause any instructions to be updated.
     if (rs != L_LDR) {
         AutoFlushICache::flush(uintptr_t(ptr), 4);
         AutoFlushICache::flush(uintptr_t(ptr->next()), 4);
     }
 }
 
 void
-Assembler::patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue)
+Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue)
 {
-    patchDataWithValueCheck(label, PatchedImmPtr(newValue.value), PatchedImmPtr(expectedValue.value));
+    PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value), PatchedImmPtr(expectedValue.value));
 }
 
 // This just stomps over memory with 32 bits of raw data. Its purpose is to
 // overwrite the call of JITed code with 32 bits worth of an offset. This will
-// is only meant to function on code that has been invalidated, so it should
-// be totally safe. Since that instruction will never be executed again, a
-// ICache flush should not be necessary
+// is only meant to function on code that has been invalidated, so it should be
+// totally safe. Since that instruction will never be executed again, an ICache
+// flush should not be necessary.
 void
-Assembler::patchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
+Assembler::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
     // Raw is going to be the return address.
     uint32_t *raw = (uint32_t*)label.raw();
-    // Overwrite the 4 bytes before the return address, which will
-    // end up being the call instruction.
-    *(raw-1) = imm.value;
+    // Overwrite the 4 bytes before the return address, which will end up being
+    // the call instruction.
+    *(raw - 1) = imm.value;
 }
 
 
 uint8_t *
-Assembler::nextInstruction(uint8_t *inst_, uint32_t *count)
+Assembler::NextInstruction(uint8_t *inst_, uint32_t *count)
 {
     Instruction *inst = reinterpret_cast<Instruction*>(inst_);
     if (count != nullptr)
         *count += sizeof(Instruction);
     return reinterpret_cast<uint8_t*>(inst->next());
 }
 
 static bool
@@ -2586,26 +2567,27 @@ InstIsGuard(Instruction *inst, const Poo
 {
     Assembler::Condition c;
     inst->extractCond(&c);
     if (c != Assembler::Always)
         return false;
     if (!(inst->is<InstBXReg>() || inst->is<InstBImm>()))
         return false;
     // See if the next instruction is a pool header.
-    *ph = (inst+1)->as<const PoolHeader>();
+    *ph = (inst + 1)->as<const PoolHeader>();
     return *ph != nullptr;
 }
 
 static bool
 InstIsBNop(Instruction *inst) {
-    // In some special situations, it is necessary to insert a NOP
-    // into the instruction stream that nobody knows about, since nobody should know about
-    // it, make sure it gets skipped when Instruction::next() is called.
-    // this generates a very specific nop, namely a branch to the next instruction.
+    // In some special situations, it is necessary to insert a NOP into the
+    // instruction stream that nobody knows about. Since nobody should know
+    // about it, make sure it gets skipped when Instruction::next() is called.
+    // This generates a very specific nop, namely a branch to the next
+    // instruction.
     Assembler::Condition c;
     inst->extractCond(&c);
     if (c != Assembler::Always)
         return false;
     if (!inst->is<InstBImm>())
         return false;
     InstBImm *b = inst->as<InstBImm>();
     BOffImm offset;
@@ -2621,19 +2603,18 @@ InstIsArtificialGuard(Instruction *inst,
     return !(*ph)->isNatural();
 }
 
 // If the instruction points to a artificial pool guard then skip the pool.
 Instruction *
 Instruction::skipPool()
 {
     const PoolHeader *ph;
-    // If this is a guard, and the next instruction is a header,
-    // always work around the pool. If it isn't a guard, then start
-    // looking ahead.
+    // If this is a guard, and the next instruction is a header, always work
+    // around the pool. If it isn't a guard, then start looking ahead.
     if (InstIsGuard(this, &ph)) {
         // Don't skip a natural guard.
         if (ph->isNatural())
             return this;
         return (this + 1 + ph->size())->skipPool();
     }
     if (InstIsBNop(this))
         return (this + 1)->skipPool();
@@ -2671,18 +2652,18 @@ Instruction::skipPool()
 //    0xdeadbeef
 //    add r4, r4, r4  <= returned value
 
 Instruction *
 Instruction::next()
 {
     Instruction *ret = this+1;
     const PoolHeader *ph;
-    // If this is a guard, and the next instruction is a header, always work around the pool
-    // If it isn't a guard, then start looking ahead.
+    // If this is a guard, and the next instruction is a header, always work
+    // around the pool. If it isn't a guard, then start looking ahead.
     if (InstIsGuard(this, &ph))
         return (ret + ph->size())->skipPool();
     if (InstIsArtificialGuard(ret, &ph))
         return (ret + 1 + ph->size())->skipPool();
     return ret->skipPool();
 }
 
 void
@@ -2726,19 +2707,18 @@ void
 Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
 {
     Instruction *inst = (Instruction *)inst_.raw();
     // Skip a pool with an artificial guard.
     inst = inst->skipPool();
     JS_ASSERT(inst->is<InstMovW>() || inst->is<InstLDR>());
 
     if (inst->is<InstMovW>()) {
-        // If it looks like the start of a movw/movt sequence,
-        // then make sure we have all of it (and advance the iterator
-        // past the full sequence)
+        // If it looks like the start of a movw/movt sequence, then make sure we
+        // have all of it (and advance the iterator past the full sequence).
         inst = inst->next();
         JS_ASSERT(inst->is<InstMovT>());
     }
 
     inst = inst->next();
     JS_ASSERT(inst->is<InstNOP>() || inst->is<InstBLXReg>());
 
     if (enabled == inst->is<InstBLXReg>()) {
@@ -2758,19 +2738,18 @@ size_t
 Assembler::ToggledCallSize(uint8_t *code)
 {
     Instruction *inst = (Instruction *)code;
     // Skip a pool with an artificial guard.
     inst = inst->skipPool();
     JS_ASSERT(inst->is<InstMovW>() || inst->is<InstLDR>());
 
     if (inst->is<InstMovW>()) {
-        // If it looks like the start of a movw/movt sequence,
-        // then make sure we have all of it (and advance the iterator
-        // past the full sequence)
+        // If it looks like the start of a movw/movt sequence, then make sure we
+        // have all of it (and advance the iterator past the full sequence).
         inst = inst->next();
         JS_ASSERT(inst->is<InstMovT>());
     }
 
     inst = inst->next();
     JS_ASSERT(inst->is<InstNOP>() || inst->is<InstBLXReg>());
     return uintptr_t(inst) + 4 - uintptr_t(code);
 }
@@ -2780,41 +2759,42 @@ Assembler::BailoutTableStart(uint8_t *co
 {
     Instruction *inst = (Instruction *)code;
     // Skip a pool with an artificial guard or NOP fill.
     inst = inst->skipPool();
     JS_ASSERT(inst->is<InstBLImm>());
     return (uint8_t *) inst;
 }
 
-void Assembler::updateBoundsCheck(uint32_t heapSize, Instruction *inst)
+void Assembler::UpdateBoundsCheck(uint32_t heapSize, Instruction *inst)
 {
     JS_ASSERT(inst->is<InstCMP>());
     InstCMP *cmp = inst->as<InstCMP>();
 
     Register index;
     cmp->extractOp1(&index);
 
     Operand2 op = cmp->extractOp2();
     JS_ASSERT(op.isImm8());
 
     Imm8 imm8 = Imm8(heapSize);
     JS_ASSERT(!imm8.invalid);
 
-    *inst = InstALU(InvalidReg, index, imm8, op_cmp, SetCond, Always);
-    // NOTE: we don't update the Auto Flush Cache!  this function is currently only called from
-    // within AsmJSModule::patchHeapAccesses, which does that for us.  Don't call this!
+    *inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCond, Always);
+    // NOTE: we don't update the Auto Flush Cache! This function is currently
+    // only called from within AsmJSModule::patchHeapAccesses, which does that
+    // for us. Don't call this!
 }
 
 InstructionIterator::InstructionIterator(Instruction *i_) : i(i_)
 {
     // Work around pools with an artificial pool guard and around nop-fill.
     i = i->skipPool();
 }
-Assembler *Assembler::dummy = nullptr;
+Assembler *Assembler::Dummy = nullptr;
 
 uint32_t Assembler::NopFill = 0;
 
 uint32_t
 Assembler::GetNopFill()
 {
     static bool isSet = false;
     if (!isSet) {
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -16,22 +16,20 @@
 #include "jit/CompactBuffer.h"
 #include "jit/IonCode.h"
 #include "jit/shared/Assembler-shared.h"
 #include "jit/shared/IonAssemblerBufferWithConstantPools.h"
 
 namespace js {
 namespace jit {
 
-//NOTE: there are duplicates in this list!
-// sometimes we want to specifically refer to the
-// link register as a link register (bl lr is much
-// clearer than bl r14).  HOWEVER, this register can
-// easily be a gpr when it is not busy holding the return
-// address.
+// NOTE: there are duplicates in this list! Sometimes we want to specifically
+// refer to the link register as a link register (bl lr is much clearer than bl
+// r14). HOWEVER, this register can easily be a gpr when it is not busy holding
+// the return address.
 static MOZ_CONSTEXPR_VAR Register r0  = { Registers::r0 };
 static MOZ_CONSTEXPR_VAR Register r1  = { Registers::r1 };
 static MOZ_CONSTEXPR_VAR Register r2  = { Registers::r2 };
 static MOZ_CONSTEXPR_VAR Register r3  = { Registers::r3 };
 static MOZ_CONSTEXPR_VAR Register r4  = { Registers::r4 };
 static MOZ_CONSTEXPR_VAR Register r5  = { Registers::r5 };
 static MOZ_CONSTEXPR_VAR Register r6  = { Registers::r6 };
 static MOZ_CONSTEXPR_VAR Register r7  = { Registers::r7 };
@@ -125,21 +123,20 @@ static MOZ_CONSTEXPR_VAR FloatRegister d
 static MOZ_CONSTEXPR_VAR FloatRegister d9  = {FloatRegisters::d9};
 static MOZ_CONSTEXPR_VAR FloatRegister d10 = {FloatRegisters::d10};
 static MOZ_CONSTEXPR_VAR FloatRegister d11 = {FloatRegisters::d11};
 static MOZ_CONSTEXPR_VAR FloatRegister d12 = {FloatRegisters::d12};
 static MOZ_CONSTEXPR_VAR FloatRegister d13 = {FloatRegisters::d13};
 static MOZ_CONSTEXPR_VAR FloatRegister d14 = {FloatRegisters::d14};
 static MOZ_CONSTEXPR_VAR FloatRegister d15 = {FloatRegisters::d15};
 
-// For maximal awesomeness, 8 should be sufficent.
-// ldrd/strd (dual-register load/store) operate in a single cycle
-// when the address they are dealing with is 8 byte aligned.
-// Also, the ARM abi wants the stack to be 8 byte aligned at
-// function boundaries.  I'm trying to make sure this is always true.
+// For maximal awesomeness, 8 should be sufficient. ldrd/strd (dual-register
+// load/store) operate in a single cycle when the address they are dealing with
+// is 8 byte aligned. Also, the ARM abi wants the stack to be 8 byte aligned at
+// function boundaries. I'm trying to make sure this is always true.
 static const uint32_t StackAlignment = 8;
 static const uint32_t CodeAlignment = 8;
 static const bool StackKeptAligned = true;
 static const uint32_t NativeFrameSize = sizeof(void*);
 static const uint32_t AlignmentAtAsmJSPrologue = sizeof(void*);
 
 static const Scale ScalePointer = TimesFour;
 
@@ -175,61 +172,59 @@ class VFPRegister
         Double = 0x0,
         Single = 0x1,
         UInt   = 0x2,
         Int    = 0x3
     };
 
   protected:
     RegType kind : 2;
-    // ARM doesn't have more than 32 registers...
-    // don't take more bits than we'll need.
-    // Presently, I don't have plans to address the upper
-    // and lower halves of the double registers seprately, so
-    // 5 bits should suffice.  If I do decide to address them seprately
-    // (vmov, I'm looking at you), I will likely specify it as a separate
-    // field.
-    uint32_t _code : 5;
+    // ARM doesn't have more than 32 registers. Don't take more bits than we'll
+    // need. Presently, we don't have plans to address the upper and lower
+// halves of the double registers separately, so 5 bits should suffice. If we
+// do decide to address them separately (vmov, I'm looking at you), we will
+    // likely specify it as a separate field.
+    uint32_t code_ : 5;
     bool _isInvalid : 1;
     bool _isMissing : 1;
 
     VFPRegister(int  r, RegType k)
-      : kind(k), _code (r), _isInvalid(false), _isMissing(false)
+      : kind(k), code_(r), _isInvalid(false), _isMissing(false)
     { }
 
   public:
     VFPRegister()
       : _isInvalid(true), _isMissing(false)
     { }
 
     VFPRegister(bool b)
       : _isInvalid(false), _isMissing(b)
     { }
 
     VFPRegister(FloatRegister fr)
-      : kind(Double), _code(fr.code()), _isInvalid(false), _isMissing(false)
+      : kind(Double), code_(fr.code()), _isInvalid(false), _isMissing(false)
     {
-        JS_ASSERT(_code == (unsigned)fr.code());
+        JS_ASSERT(code_ == (unsigned)fr.code());
     }
 
     VFPRegister(FloatRegister fr, RegType k)
-      : kind(k), _code (fr.code()), _isInvalid(false), _isMissing(false)
+      : kind(k), code_(fr.code()), _isInvalid(false), _isMissing(false)
     {
-        JS_ASSERT(_code == (unsigned)fr.code());
+        JS_ASSERT(code_ == (unsigned)fr.code());
     }
     bool isDouble() const { return kind == Double; }
     bool isSingle() const { return kind == Single; }
     bool isFloat() const { return (kind == Double) || (kind == Single); }
     bool isInt() const { return (kind == UInt) || (kind == Int); }
     bool isSInt() const { return kind == Int; }
     bool isUInt() const { return kind == UInt; }
     bool equiv(VFPRegister other) const { return other.kind == kind; }
     size_t size() const { return (kind == Double) ? 8 : 4; }
-    bool isInvalid();
-    bool isMissing();
+    bool isInvalid() const;
+    bool isMissing() const;
 
     VFPRegister doubleOverlay() const;
     VFPRegister singleOverlay() const;
     VFPRegister sintOverlay() const;
     VFPRegister uintOverlay() const;
 
     struct VFPRegIndexSplit;
     VFPRegIndexSplit encode();
@@ -246,22 +241,22 @@ class VFPRegister
           : block(block_), bit(bit_)
         {
             JS_ASSERT (block == block_);
             JS_ASSERT(bit == bit_);
         }
     };
 
     uint32_t code() const {
-        return _code;
+        return code_;
     }
 };
 
-// For being passed into the generic vfp instruction generator when
-// there is an instruction that only takes two registers
+// For being passed into the generic vfp instruction generator when there is an
+// instruction that only takes two registers.
 extern VFPRegister NoVFPRegister;
 
 struct ImmTag : public Imm32
 {
     ImmTag(JSValueTag mask)
       : Imm32(int32_t(mask))
     { }
 };
@@ -270,59 +265,57 @@ struct ImmType : public ImmTag
 {
     ImmType(JSValueType type)
       : ImmTag(JSVAL_TYPE_TO_TAG(type))
     { }
 };
 
 enum Index {
     Offset = 0 << 21 | 1<<24,
-    PreIndex = 1<<21 | 1 << 24,
+    PreIndex = 1 << 21 | 1 << 24,
     PostIndex = 0 << 21 | 0 << 24
-    // The docs were rather unclear on this. it sounds like
-    // 1<<21 | 0 << 24 encodes dtrt
+    // The docs were rather unclear on this. It sounds like
+    // 1 << 21 | 0 << 24 encodes dtrt.
 };
 
 // Seriously, wtf arm
 enum IsImmOp2_ {
     IsImmOp2    = 1 << 25,
     IsNotImmOp2 = 0 << 25
 };
 enum IsImmDTR_ {
     IsImmDTR    = 0 << 25,
     IsNotImmDTR = 1 << 25
 };
-// For the extra memory operations, ldrd, ldrsb, ldrh
+// For the extra memory operations, ldrd, ldrsb, ldrh.
 enum IsImmEDTR_ {
     IsImmEDTR    = 1 << 22,
     IsNotImmEDTR = 0 << 22
 };
 
 
 enum ShiftType {
     LSL = 0, // << 5
     LSR = 1, // << 5
     ASR = 2, // << 5
     ROR = 3, // << 5
     RRX = ROR // RRX is encoded as ROR with a 0 offset.
 };
 
-// The actual codes that get set by instructions
-// and the codes that are checked by the conditions below.
+// The actual codes that get set by instructions and the codes that are checked
+// by the conditions below.
 struct ConditionCodes
 {
     bool Zero : 1;
     bool Overflow : 1;
     bool Carry : 1;
     bool Minus : 1;
 };
 
-// Modes for STM/LDM.
-// Names are the suffixes applied to
-// the instruction.
+// Modes for STM/LDM. Names are the suffixes applied to the instruction.
 enum DTMMode {
     A = 0 << 24, // empty / after
     B = 1 << 24, // full / before
     D = 0 << 23, // decrement
     I = 1 << 23, // increment
     DA = D | A,
     DB = D | B,
     IA = I | A,
@@ -337,145 +330,143 @@ enum DTMWriteBack {
 enum SetCond_ {
     SetCond   = 1 << 20,
     NoSetCond = 0 << 20
 };
 enum LoadStore {
     IsLoad  = 1 << 20,
     IsStore = 0 << 20
 };
-// You almost never want to use this directly.
-// Instead, you wantto pass in a signed constant,
-// and let this bit be implicitly set for you.
-// this is however, necessary if we want a negative index
+// You almost never want to use this directly. Instead, you want to pass in a
+// signed constant, and let this bit be implicitly set for you. This is however,
+// necessary if we want a negative index.
 enum IsUp_ {
     IsUp   = 1 << 23,
     IsDown = 0 << 23
 };
 enum ALUOp {
-    op_mov = 0xd << 21,
-    op_mvn = 0xf << 21,
-    op_and = 0x0 << 21,
-    op_bic = 0xe << 21,
-    op_eor = 0x1 << 21,
-    op_orr = 0xc << 21,
-    op_adc = 0x5 << 21,
-    op_add = 0x4 << 21,
-    op_sbc = 0x6 << 21,
-    op_sub = 0x2 << 21,
-    op_rsb = 0x3 << 21,
-    op_rsc = 0x7 << 21,
-    op_cmn = 0xb << 21,
-    op_cmp = 0xa << 21,
-    op_teq = 0x9 << 21,
-    op_tst = 0x8 << 21,
-    op_invalid = -1
+    OpMov = 0xd << 21,
+    OpMvn = 0xf << 21,
+    OpAnd = 0x0 << 21,
+    OpBic = 0xe << 21,
+    OpEor = 0x1 << 21,
+    OpOrr = 0xc << 21,
+    OpAdc = 0x5 << 21,
+    OpAdd = 0x4 << 21,
+    OpSbc = 0x6 << 21,
+    OpSub = 0x2 << 21,
+    OpRsb = 0x3 << 21,
+    OpRsc = 0x7 << 21,
+    OpCmn = 0xb << 21,
+    OpCmp = 0xa << 21,
+    OpTeq = 0x9 << 21,
+    OpTst = 0x8 << 21,
+    OpInvalid = -1
 };
 
 
 enum MULOp {
-    opm_mul   = 0 << 21,
-    opm_mla   = 1 << 21,
-    opm_umaal = 2 << 21,
-    opm_mls   = 3 << 21,
-    opm_umull = 4 << 21,
-    opm_umlal = 5 << 21,
-    opm_smull = 6 << 21,
-    opm_smlal = 7 << 21
+    OpmMul   = 0 << 21,
+    OpmMla   = 1 << 21,
+    OpmUmaal = 2 << 21,
+    OpmMls   = 3 << 21,
+    OpmUmull = 4 << 21,
+    OpmUmlal = 5 << 21,
+    OpmSmull = 6 << 21,
+    OpmSmlal = 7 << 21
 };
 enum BranchTag {
-    op_b = 0x0a000000,
-    op_b_mask = 0x0f000000,
-    op_b_dest_mask = 0x00ffffff,
-    op_bl = 0x0b000000,
-    op_blx = 0x012fff30,
-    op_bx  = 0x012fff10
+    OpB = 0x0a000000,
+    OpBMask = 0x0f000000,
+    OpBDestMask = 0x00ffffff,
+    OpBl = 0x0b000000,
+    OpBlx = 0x012fff30,
+    OpBx  = 0x012fff10
 };
 
 // Just like ALUOp, but for the vfp instruction set.
 enum VFPOp {
-    opv_mul  = 0x2 << 20,
-    opv_add  = 0x3 << 20,
-    opv_sub  = 0x3 << 20 | 0x1 << 6,
-    opv_div  = 0x8 << 20,
-    opv_mov  = 0xB << 20 | 0x1 << 6,
-    opv_abs  = 0xB << 20 | 0x3 << 6,
-    opv_neg  = 0xB << 20 | 0x1 << 6 | 0x1 << 16,
-    opv_sqrt = 0xB << 20 | 0x3 << 6 | 0x1 << 16,
-    opv_cmp  = 0xB << 20 | 0x1 << 6 | 0x4 << 16,
-    opv_cmpz  = 0xB << 20 | 0x1 << 6 | 0x5 << 16
+    OpvMul  = 0x2 << 20,
+    OpvAdd  = 0x3 << 20,
+    OpvSub  = 0x3 << 20 | 0x1 << 6,
+    OpvDiv  = 0x8 << 20,
+    OpvMov  = 0xB << 20 | 0x1 << 6,
+    OpvAbs  = 0xB << 20 | 0x3 << 6,
+    OpvNeg  = 0xB << 20 | 0x1 << 6 | 0x1 << 16,
+    OpvSqrt = 0xB << 20 | 0x3 << 6 | 0x1 << 16,
+    OpvCmp  = 0xB << 20 | 0x1 << 6 | 0x4 << 16,
+    OpvCmpz  = 0xB << 20 | 0x1 << 6 | 0x5 << 16
 };
 // Negate the operation, AND negate the immediate that we were passed in.
 ALUOp ALUNeg(ALUOp op, Register dest, Imm32 *imm, Register *negDest);
 bool can_dbl(ALUOp op);
 bool condsAreSafe(ALUOp op);
-// If there is a variant of op that has a dest (think cmp/sub)
-// return that variant of it.
+// If there is a variant of op that has a dest (think cmp/sub) return that
+// variant of it.
 ALUOp getDestVariant(ALUOp op);
 
 static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data);
 static const ValueOperand softfpReturnOperand = ValueOperand(r1, r0);
 // All of these classes exist solely to shuffle data into the various operands.
-// For example Operand2 can be an imm8, a register-shifted-by-a-constant or
-// a register-shifted-by-a-register.  I represent this in C++ by having a
-// base class Operand2, which just stores the 32 bits of data as they will be
-// encoded in the instruction.  You cannot directly create an Operand2
-// since it is tricky, and not entirely sane to do so.  Instead, you create
-// one of its child classes, e.g. Imm8.  Imm8's constructor takes a single
-// integer argument.  Imm8 will verify that its argument can be encoded
-// as an ARM 12 bit imm8, encode it using an Imm8data, and finally call
-// its parent's (Operand2) constructor with the Imm8data.  The Operand2
-// constructor will then call the Imm8data's encode() function to extract
-// the raw bits from it.  In the future, we should be able to extract
-// data from the Operand2 by asking it for its component Imm8data
-// structures.  The reason this is so horribly round-about is I wanted
-// to have Imm8 and RegisterShiftedRegister inherit directly from Operand2
-// but have all of them take up only a single word of storage.
-// I also wanted to avoid passing around raw integers at all
-// since they are error prone.
+// For example Operand2 can be an imm8, a register-shifted-by-a-constant or a
+// register-shifted-by-a-register. We represent this in C++ by having a base
+// class Operand2, which just stores the 32 bits of data as they will be encoded
+// in the instruction. You cannot directly create an Operand2 since it is
+// tricky, and not entirely sane to do so. Instead, you create one of its child
+// classes, e.g. Imm8. Imm8's constructor takes a single integer argument. Imm8
+// will verify that its argument can be encoded as an ARM 12 bit imm8, encode it
+// using an Imm8data, and finally call its parent's (Operand2) constructor with
+// the Imm8data. The Operand2 constructor will then call the Imm8data's encode()
+// function to extract the raw bits from it.
+//
+// In the future, we should be able to extract data from the Operand2 by asking
+// it for its component Imm8data structures. The reason this is so horribly
+// round-about is we wanted to have Imm8 and RegisterShiftedRegister inherit
+// directly from Operand2 but have all of them take up only a single word of
+// storage. We also wanted to avoid passing around raw integers at all since
+// they are error prone.
 class Op2Reg;
 class O2RegImmShift;
 class O2RegRegShift;
 namespace datastore {
 struct Reg
 {
-    // the "second register"
+    // The "second register".
     uint32_t RM : 4;
-    // do we get another register for shifting
+    // Do we get another register for shifting.
     uint32_t RRS : 1;
     ShiftType Type : 2;
-    // I'd like this to be a more sensible encoding, but that would
-    // need to be a struct and that would not pack :(
+    // We'd like this to be a more sensible encoding, but that would need to be
+    // a struct and that would not pack :(
     uint32_t ShiftAmount : 5;
     uint32_t pad : 20;
 
     Reg(uint32_t rm, ShiftType type, uint32_t rsr, uint32_t shiftamount)
       : RM(rm), RRS(rsr), Type(type), ShiftAmount(shiftamount), pad(0)
     { }
 
     uint32_t encode() {
         return RM | RRS << 4 | Type << 5 | ShiftAmount << 7;
     }
     explicit Reg(const Op2Reg &op) {
         memcpy(this, &op, sizeof(*this));
     }
 };
 
-// Op2 has a mode labelled "<imm8m>", which is arm's magical
-// immediate encoding.  Some instructions actually get 8 bits of
-// data, which is called Imm8Data below.  These should have edit
-// distance > 1, but this is how it is for now.
+// Op2 has a mode labelled "<imm8m>", which is arm's magical immediate encoding.
+// Some instructions actually get 8 bits of data, which is called Imm8Data
+// below. These should have edit distance > 1, but this is how it is for now.
 struct Imm8mData
 {
   private:
     uint32_t data : 8;
     uint32_t rot : 4;
-    // Throw in an extra bit that will be 1 if we can't encode this
-    // properly.  if we can encode it properly, a simple "|" will still
-    // suffice to meld it into the instruction.
+    // Throw in an extra bit that will be 1 if we can't encode this properly.
+    // If we can encode it properly, a simple "|" will still suffice to meld it
+    // into the instruction.
     uint32_t buff : 19;
   public:
     uint32_t invalid : 1;
 
     uint32_t encode() {
         JS_ASSERT(!invalid);
         return data | rot << 8;
     };
@@ -499,54 +490,53 @@ struct Imm8Data
     uint32_t imm4L : 4;
     uint32_t pad : 4;
     uint32_t imm4H : 4;
 
   public:
     uint32_t encode() {
         return imm4L | (imm4H << 8);
     };
-    Imm8Data(uint32_t imm) : imm4L(imm&0xf), imm4H(imm>>4) {
+    Imm8Data(uint32_t imm) : imm4L(imm & 0xf), imm4H(imm >> 4) {
         JS_ASSERT(imm <= 0xff);
     }
 };
 
-// VLDR/VSTR take an 8 bit offset, which is implicitly left shifted
-// by 2.
+// VLDR/VSTR take an 8 bit offset, which is implicitly left shifted by 2.
 struct Imm8VFPOffData
 {
   private:
     uint32_t data;
 
   public:
     uint32_t encode() {
         return data;
     };
     Imm8VFPOffData(uint32_t imm) : data (imm) {
         JS_ASSERT((imm & ~(0xff)) == 0);
     }
 };
 
-// ARM can magically encode 256 very special immediates to be moved
-// into a register.
+// ARM can magically encode 256 very special immediates to be moved into a
+// register.
 struct Imm8VFPImmData
 {
   private:
     uint32_t imm4L : 4;
     uint32_t pad : 12;
     uint32_t imm4H : 4;
     int32_t isInvalid : 12;
 
   public:
     Imm8VFPImmData()
       : imm4L(-1U & 0xf), imm4H(-1U & 0xf), isInvalid(-1)
     { }
 
     Imm8VFPImmData(uint32_t imm)
-      : imm4L(imm&0xf), imm4H(imm>>4), isInvalid(0)
+      : imm4L(imm&0xf), imm4H(imm >> 4), isInvalid(0)
     {
         JS_ASSERT(imm <= 0xff);
     }
 
     uint32_t encode() {
         if (isInvalid != 0)
             return -1;
         return imm4L | (imm4H << 16);
@@ -581,17 +571,17 @@ struct RIS
         JS_ASSERT(ShiftAmount == imm);
     }
     explicit RIS(Reg r) : ShiftAmount(r.ShiftAmount) {}
 };
 
 struct RRS
 {
     uint32_t MustZero : 1;
-    // the register that holds the shift amount
+    // The register that holds the shift amount.
     uint32_t RS : 4;
 
     RRS(uint32_t rs)
       : RS(rs)
     {
         JS_ASSERT(rs == RS);
     }
 
@@ -639,63 +629,63 @@ class Operand2
     uint32_t encode() {
         return oper;
     }
 };
 
 class Imm8 : public Operand2
 {
   public:
-    static datastore::Imm8mData encodeImm(uint32_t imm) {
+    static datastore::Imm8mData EncodeImm(uint32_t imm) {
         // mozilla::CountLeadingZeroes32(imm) requires imm != 0.
         if (imm == 0)
             return datastore::Imm8mData(0, 0);
         int left = mozilla::CountLeadingZeroes32(imm) & 30;
         // See if imm is a simple value that can be encoded with a rotate of 0.
         // This is effectively imm <= 0xff, but I assume this can be optimized
-        // more
+        // more.
         if (left >= 24)
             return datastore::Imm8mData(imm, 0);
 
         // Mask out the 8 bits following the first bit that we found, see if we
         // have 0 yet.
         int no_imm = imm & ~(0xff << (24 - left));
         if (no_imm == 0) {
-            return  datastore::Imm8mData(imm >> (24 - left), ((8+left) >> 1));
+            return  datastore::Imm8mData(imm >> (24 - left), ((8 + left) >> 1));
         }
         // Look for the most signifigant bit set, once again.
         int right = 32 - (mozilla::CountLeadingZeroes32(no_imm) & 30);
         // If it is in the bottom 8 bits, there is a chance that this is a
         // wraparound case.
         if (right >= 8)
             return datastore::Imm8mData();
         // Rather than masking out bits and checking for 0, just rotate the
         // immediate that we were passed in, and see if it fits into 8 bits.
         unsigned int mask = imm << (8 - right) | imm >> (24 + right);
         if (mask <= 0xff)
-            return datastore::Imm8mData(mask, (8-right) >> 1);
+            return datastore::Imm8mData(mask, (8 - right) >> 1);
         return datastore::Imm8mData();
     }
-    // pair template?
+    // Pair template?
     struct TwoImm8mData
     {
         datastore::Imm8mData fst, snd;
 
         TwoImm8mData()
           : fst(), snd()
         { }
 
         TwoImm8mData(datastore::Imm8mData _fst, datastore::Imm8mData _snd)
           : fst(_fst), snd(_snd)
         { }
     };
 
-    static TwoImm8mData encodeTwoImms(uint32_t);
+    static TwoImm8mData EncodeTwoImms(uint32_t);
     Imm8(uint32_t imm)
-      : Operand2(encodeImm(imm))
+      : Operand2(EncodeImm(imm))
     { }
 };
 
 class Op2Reg : public Operand2
 {
   public:
     Op2Reg(Register rm, ShiftType type, datastore::RIS shiftImm)
       : Operand2(datastore::Reg(rm.code(), type, 0, shiftImm.encode()))
@@ -758,21 +748,21 @@ O2RegImmShift asr (Register r, int amt);
 O2RegImmShift rol (Register r, int amt);
 O2RegImmShift ror (Register r, int amt);
 
 O2RegRegShift lsl (Register r, Register amt);
 O2RegRegShift lsr (Register r, Register amt);
 O2RegRegShift asr (Register r, Register amt);
 O2RegRegShift ror (Register r, Register amt);
 
-// An offset from a register to be used for ldr/str.  This should include
-// the sign bit, since ARM has "signed-magnitude" offsets.  That is it encodes
-// an unsigned offset, then the instruction specifies if the offset is positive
-// or negative.  The +/- bit is necessary if the instruction set wants to be
-// able to have a negative register offset e.g. ldr pc, [r1,-r2];
+// An offset from a register to be used for ldr/str. This should include the
+// sign bit, since ARM has "signed-magnitude" offsets. That is it encodes an
+// unsigned offset, then the instruction specifies if the offset is positive or
+// negative. The +/- bit is necessary if the instruction set wants to be able to
+// have a negative register offset e.g. ldr pc, [r1,-r2];
 class DtrOff
 {
     uint32_t data;
 
   protected:
     DtrOff(datastore::Imm12Data immdata, IsUp_ iu)
       : data(immdata.encode() | (uint32_t)IsImmDTR | ((uint32_t)iu))
     { }
@@ -793,17 +783,17 @@ class DtrOffImm : public DtrOff
     {
         JS_ASSERT(mozilla::Abs(imm) < 4096);
     }
 };
 
 class DtrOffReg : public DtrOff
 {
     // These are designed to be called by a constructor of a subclass.
-    // Constructing the necessary RIS/RRS structures are annoying
+    // Constructing the necessary RIS/RRS structures are annoying.
   protected:
     DtrOffReg(Register rn, ShiftType type, datastore::RIS shiftImm, IsUp_ iu = IsUp)
       : DtrOff(datastore::Reg(rn.code(), type, 0, shiftImm.encode()), iu)
     { }
 
     DtrOffReg(Register rn, ShiftType type, datastore::RRS shiftReg, IsUp_ iu = IsUp)
       : DtrOff(datastore::Reg(rn.code(), type, 1, shiftReg.encode()), iu)
     { }
@@ -820,17 +810,17 @@ class DtrRegImmShift : public DtrOffReg
 class DtrRegRegShift : public DtrOffReg
 {
   public:
     DtrRegRegShift(Register rn, ShiftType type, Register rs, IsUp_ iu = IsUp)
       : DtrOffReg(rn, type, datastore::RRS(rs.code()), iu)
     { }
 };
 
-// we will frequently want to bundle a register with its offset so that we have
+// We will frequently want to bundle a register with its offset so that we have
 // an "operand" to a load instruction.
 class DTRAddr
 {
     uint32_t data;
 
   public:
     DTRAddr(Register reg, DtrOff dtr)
       : data(dtr.encode() | (reg.code() << 16))
@@ -875,19 +865,18 @@ class EDtrOffImm : public EDtrOff
   public:
     EDtrOffImm(int32_t imm)
       : EDtrOff(datastore::Imm8Data(mozilla::Abs(imm)), (imm >= 0) ? IsUp : IsDown)
     {
         JS_ASSERT(mozilla::Abs(imm) < 256);
     }
 };
 
-// this is the most-derived class, since the extended data
-// transfer instructions don't support any sort of modifying the
-// "index" operand
+// This is the most-derived class, since the extended data transfer instructions
+// don't support any sort of modifying the "index" operand.
 class EDtrOffReg : public EDtrOff
 {
   public:
     EDtrOffReg(Register rm)
       : EDtrOff(rm)
     { }
 };
 
@@ -949,50 +938,51 @@ class VFPAddr
         return data;
     }
 };
 
 class VFPImm {
     uint32_t data;
 
   public:
-    static const VFPImm one;
+    static const VFPImm One;
 
     VFPImm(uint32_t topWordOfDouble);
 
     uint32_t encode() {
         return data;
     }
     bool isValid() {
         return data != -1U;
     }
 };
 
-// A BOffImm is an immediate that is used for branches. Namely, it is the offset that will
-// be encoded in the branch instruction. This is the only sane way of constructing a branch.
+// A BOffImm is an immediate that is used for branches. Namely, it is the offset
+// that will be encoded in the branch instruction. This is the only sane way of
+// constructing a branch.
 class BOffImm
 {
     uint32_t data;
 
   public:
     uint32_t encode() {
         return data;
     }
     int32_t decode() {
         return ((((int32_t)data) << 8) >> 6) + 8;
     }
 
     explicit BOffImm(int offset)
       : data ((offset - 8) >> 2 & 0x00ffffff)
     {
         JS_ASSERT((offset & 0x3) == 0);
-        if (!isInRange(offset))
+        if (!IsInRange(offset))
             CrashAtUnhandlableOOM("BOffImm");
     }
-    static bool isInRange(int offset)
+    static bool IsInRange(int offset)
     {
         if ((offset - 8) < -33554432)
             return false;
         if ((offset - 8) > 33554428)
             return false;
         return true;
     }
     static const int INVALID = 0x00800000;
@@ -1029,25 +1019,23 @@ class Imm16
         return lower | upper << 12;
     }
 
     bool isInvalid () {
         return invalid;
     }
 };
 
-/* I would preffer that these do not exist, since there are essentially
-* no instructions that would ever take more than one of these, however,
-* the MIR wants to only have one type of arguments to functions, so bugger.
-*/
+// I would prefer that these do not exist, since there are essentially no
+// instructions that would ever take more than one of these, however, the MIR
+// wants to only have one type of arguments to functions, so bugger.
 class Operand
 {
-    // the encoding of registers is the same for OP2, DTR and EDTR
-    // yet the type system doesn't let us express this, so choices
-    // must be made.
+    // The encoding of registers is the same for OP2, DTR and EDTR yet the type
+    // system doesn't let us express this, so choices must be made.
   public:
     enum Tag_ {
         OP2,
         MEM,
         FOP
     };
 
   private:
@@ -1123,17 +1111,17 @@ void
 PatchJump(CodeLocationJump &jump_, CodeLocationLabel label);
 class InstructionIterator;
 class Assembler;
 typedef js::jit::AssemblerBufferWithConstantPool<1024, 4, Instruction, Assembler, 1> ARMBuffer;
 
 class Assembler : public AssemblerShared
 {
   public:
-    // ARM conditional constants
+    // ARM conditional constants:
     enum ARMCondition {
         EQ = 0x00000000, // Zero
         NE = 0x10000000, // Non-zero
         CS = 0x20000000,
         CC = 0x30000000,
         MI = 0x40000000,
         PL = 0x50000000,
         VS = 0x60000000,
@@ -1180,17 +1168,18 @@ class Assembler : public AssemblerShared
     };
 
     // Bit set when a DoubleCondition does not map to a single ARM condition.
     // The macro assembler has to special-case these conditions, or else
     // ConditionFromDoubleCondition will complain.
     static const int DoubleConditionBitSpecial = 0x1;
 
     enum DoubleCondition {
-        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+        // These conditions will only evaluate to true if the comparison is
+        // ordered - i.e. neither operand is NaN.
         DoubleOrdered = VFP_NotUnordered,
         DoubleEqual = VFP_Equal,
         DoubleNotEqual = VFP_NotEqualOrUnordered | DoubleConditionBitSpecial,
         DoubleGreaterThan = VFP_GreaterThan,
         DoubleGreaterThanOrEqual = VFP_GreaterThanOrEqual,
         DoubleLessThan = VFP_LessThan,
         DoubleLessThanOrEqual = VFP_LessThanOrEqual,
         // If either operand is NaN, these conditions always evaluate to true.
@@ -1206,77 +1195,77 @@ class Assembler : public AssemblerShared
     Condition getCondition(uint32_t inst) {
         return (Condition) (0xf0000000 & inst);
     }
     static inline Condition ConditionFromDoubleCondition(DoubleCondition cond) {
         JS_ASSERT(!(cond & DoubleConditionBitSpecial));
         return static_cast<Condition>(cond);
     }
 
-    // :( this should be protected, but since CodeGenerator
-    // wants to use it, It needs to go out here :(
+    // This should be protected, but since CodeGenerator wants to use it, it
+    // needs to go out here :(
 
     BufferOffset nextOffset() {
         return m_buffer.nextOffset();
     }
 
   protected:
     BufferOffset labelOffset (Label *l) {
         return BufferOffset(l->bound());
     }
 
-    Instruction * editSrc (BufferOffset bo) {
+    Instruction *editSrc (BufferOffset bo) {
         return m_buffer.getInst(bo);
     }
   public:
     void resetCounter();
     uint32_t actualOffset(uint32_t) const;
     uint32_t actualIndex(uint32_t) const;
     static uint8_t *PatchableJumpAddress(JitCode *code, uint32_t index);
     BufferOffset actualOffset(BufferOffset) const;
     static uint32_t NopFill;
     static uint32_t GetNopFill();
   protected:
 
-    // structure for fixing up pc-relative loads/jumps when a the machine code
-    // gets moved (executable copy, gc, etc.)
+    // Structure for fixing up pc-relative loads/jumps when the machine code
+    // gets moved (executable copy, gc, etc.).
     struct RelativePatch
     {
         void *target;
         Relocation::Kind kind;
         RelativePatch(void *target, Relocation::Kind kind)
             : target(target), kind(kind)
         { }
     };
 
-    // TODO: this should actually be a pool-like object
-    //       It is currently a big hack, and probably shouldn't exist
+    // TODO: this should actually be a pool-like object. It is currently a big
+    // hack, and probably shouldn't exist.
     js::Vector<CodeLabel, 0, SystemAllocPolicy> codeLabels_;
     js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
     js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpJumpRelocations_;
     js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpDataRelocations_;
     js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpPreBarriers_;
 
     CompactBufferWriter jumpRelocations_;
     CompactBufferWriter dataRelocations_;
     CompactBufferWriter relocations_;
     CompactBufferWriter preBarriers_;
 
     ARMBuffer m_buffer;
 
-    // There is now a semi-unified interface for instruction generation.
-    // During assembly, there is an active buffer that instructions are
-    // being written into, but later, we may wish to modify instructions
-    // that have already been created.  In order to do this, we call the
-    // same assembly function, but pass it a destination address, which
-    // will be overwritten with a new instruction. In order to do this very
-    // after assembly buffers no longer exist, when calling with a third
-    // dest parameter, a this object is still needed.  dummy always happens
-    // to be null, but we shouldn't be looking at it in any case.
-    static Assembler *dummy;
+    // There is now a semi-unified interface for instruction generation. During
+    // assembly, there is an active buffer that instructions are being written
+    // into, but later, we may wish to modify instructions that have already
+    // been created. In order to do this, we call the same assembly function,
+    // but pass it a destination address, which will be overwritten with a new
+    // instruction. In order to do this even after assembly buffers no longer
+    // exist, when calling with a third dest parameter, a this object is still
+    // needed. Dummy always happens to be null, but we shouldn't be looking at
+    // it in any case.
+    static Assembler *Dummy;
     mozilla::Array<Pool, 4> pools_;
     Pool *int32Pool;
     Pool *doublePool;
 
   public:
     // For the nopFill use a branch to the next instruction: 0xeaffffff.
     Assembler()
       : m_buffer(4, 4, 0, &pools_[0], 8, 0xeaffffff, GetNopFill()),
@@ -1288,23 +1277,23 @@ class Assembler : public AssemblerShared
     {
     }
 
     // We need to wait until an AutoIonContextAlloc is created by the
     // IonMacroAssembler, before allocating any space.
     void initWithAllocator() {
         m_buffer.initWithAllocator();
 
-        // Set up the backwards double region
+        // Set up the backwards double region.
         new (&pools_[2]) Pool (1024, 8, 4, 8, 8, m_buffer.LifoAlloc_, true);
-        // Set up the backwards 32 bit region
+        // Set up the backwards 32 bit region.
         new (&pools_[3]) Pool (4096, 4, 4, 8, 4, m_buffer.LifoAlloc_, true, true);
-        // Set up the forwards double region
+        // Set up the forwards double region.
         new (doublePool) Pool (1024, 8, 4, 8, 8, m_buffer.LifoAlloc_, false, false, &pools_[2]);
-        // Set up the forwards 32 bit region
+        // Set up the forwards 32 bit region.
         new (int32Pool) Pool (4096, 4, 4, 8, 4, m_buffer.LifoAlloc_, false, true, &pools_[3]);
         for (int i = 0; i < 4; i++) {
             if (pools_[i].poolData == nullptr) {
                 m_buffer.fail_oom();
                 return;
             }
         }
     }
@@ -1335,25 +1324,25 @@ class Assembler : public AssemblerShared
     };
 
     enum RelocStyle {
         L_MOVWT,
         L_LDR
     };
 
   public:
-    // Given the start of a Control Flow sequence, grab the value that is finally branched to
-    // given the start of a function that loads an address into a register get the address that
-    // ends up in the register.
+    // Given the start of a Control Flow sequence, grab the value that is
+    // finally branched to given the start of a function that loads an address
+    // into a register get the address that ends up in the register.
     template <class Iter>
-    static const uint32_t * getCF32Target(Iter *iter);
+    static const uint32_t *GetCF32Target(Iter *iter);
 
-    static uintptr_t getPointer(uint8_t *);
+    static uintptr_t GetPointer(uint8_t *);
     template <class Iter>
-    static const uint32_t * getPtr32Target(Iter *iter, Register *dest = nullptr, RelocStyle *rs = nullptr);
+    static const uint32_t *GetPtr32Target(Iter *iter, Register *dest = nullptr, RelocStyle *rs = nullptr);
 
     bool oom() const;
 
     void setPrinter(Sprinter *sp) {
     }
 
   private:
     bool isFinished;
@@ -1377,77 +1366,77 @@ class Assembler : public AssemblerShared
     // Size of the jump relocation table, in bytes.
     size_t jumpRelocationTableBytes() const;
     size_t dataRelocationTableBytes() const;
     size_t preBarrierTableBytes() const;
 
     // Size of the data table, in bytes.
     size_t bytesNeeded() const;
 
-    // Write a blob of binary into the instruction stream *OR*
-    // into a destination address. If dest is nullptr (the default), then the
+    // Write a blob of binary into the instruction stream *OR* into a
+    // destination address. If dest is nullptr (the default), then the
     // instruction gets written into the instruction stream. If dest is not null
     // it is interpreted as a pointer to the location that we want the
     // instruction to be written.
     BufferOffset writeInst(uint32_t x, uint32_t *dest = nullptr);
 
     // As above, but also mark the instruction as a branch.
     BufferOffset writeBranchInst(uint32_t x);
 
     // A static variant for the cases where we don't want to have an assembler
     // object at all. Normally, you would use the dummy (nullptr) object.
-    static void writeInstStatic(uint32_t x, uint32_t *dest);
+    static void WriteInstStatic(uint32_t x, uint32_t *dest);
 
   public:
     void writeCodePointer(AbsoluteLabel *label);
 
     BufferOffset align(int alignment);
     BufferOffset as_nop();
     BufferOffset as_alu(Register dest, Register src1, Operand2 op2,
                 ALUOp op, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = nullptr);
 
     BufferOffset as_mov(Register dest,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = nullptr);
     BufferOffset as_mvn(Register dest, Operand2 op2,
                 SetCond_ sc = NoSetCond, Condition c = Always);
-    // logical operations
+    // Logical operations:
     BufferOffset as_and(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_bic(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_eor(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_orr(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
-    // mathematical operations
+    // Mathematical operations:
     BufferOffset as_adc(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_add(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_sbc(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_sub(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_rsb(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_rsc(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
-    // test operations
+    // Test operations:
     BufferOffset as_cmn(Register src1, Operand2 op2,
                 Condition c = Always);
     BufferOffset as_cmp(Register src1, Operand2 op2,
                 Condition c = Always);
     BufferOffset as_teq(Register src1, Operand2 op2,
                 Condition c = Always);
     BufferOffset as_tst(Register src1, Operand2 op2,
                 Condition c = Always);
 
-    // Not quite ALU worthy, but useful none the less:
-    // These also have the isue of these being formatted
-    // completly differently from the standard ALU operations.
+    // Not quite ALU worthy, but useful nonetheless: These also have the issue
+    // of these being formatted completely differently from the standard ALU
+    // operations.
     BufferOffset as_movw(Register dest, Imm16 imm, Condition c = Always, Instruction *pos = nullptr);
     BufferOffset as_movt(Register dest, Imm16 imm, Condition c = Always, Instruction *pos = nullptr);
 
     BufferOffset as_genmul(Register d1, Register d2, Register rm, Register rn,
                    MULOp op, SetCond_ sc, Condition c = Always);
     BufferOffset as_mul(Register dest, Register src1, Register src2,
                 SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_mla(Register dest, Register acc, Register src1, Register src2,
@@ -1464,78 +1453,74 @@ class Assembler : public AssemblerShared
                 SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_smlal(Register dest1, Register dest2, Register src1, Register src2,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
     BufferOffset as_sdiv(Register dest, Register num, Register div, Condition c = Always);
     BufferOffset as_udiv(Register dest, Register num, Register div, Condition c = Always);
 
     // Data transfer instructions: ldr, str, ldrb, strb.
-    // Using an int to differentiate between 8 bits and 32 bits is
-    // overkill, but meh
+    // Using an int to differentiate between 8 bits and 32 bits is overkill.
     BufferOffset as_dtr(LoadStore ls, int size, Index mode,
                 Register rt, DTRAddr addr, Condition c = Always, uint32_t *dest = nullptr);
     // Handles all of the other integral data transferring functions:
-    // ldrsb, ldrsh, ldrd, etc.
-    // size is given in bits.
+    // ldrsb, ldrsh, ldrd, etc. The size is given in bits.
     BufferOffset as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
                    Register rt, EDtrAddr addr, Condition c = Always, uint32_t *dest = nullptr);
 
     BufferOffset as_dtm(LoadStore ls, Register rn, uint32_t mask,
                 DTMMode mode, DTMWriteBack wb, Condition c = Always);
-    //overwrite a pool entry with new data.
+    // Overwrite a pool entry with new data.
     void as_WritePoolEntry(Instruction *addr, Condition c, uint32_t data);
-    // load a 32 bit immediate from a pool into a register
+    // Load a 32 bit immediate from a pool into a register.
     BufferOffset as_Imm32Pool(Register dest, uint32_t value, Condition c = Always);
-    // make a patchable jump that can target the entire 32 bit address space.
+    // Make a patchable jump that can target the entire 32 bit address space.
     BufferOffset as_BranchPool(uint32_t value, RepatchLabel *label, ARMBuffer::PoolEntry *pe = nullptr, Condition c = Always);
 
-    // load a 64 bit floating point immediate from a pool into a register
+    // Load a 64 bit floating point immediate from a pool into a register.
     BufferOffset as_FImm64Pool(VFPRegister dest, double value, Condition c = Always);
-    // load a 32 bit floating point immediate from a pool into a register
+    // Load a 32 bit floating point immediate from a pool into a register.
     BufferOffset as_FImm32Pool(VFPRegister dest, float value, Condition c = Always);
 
     // Control flow stuff:
 
-    // bx can *only* branch to a register
-    // never to an immediate.
+    // bx can *only* branch to a register, never to an immediate.
     BufferOffset as_bx(Register r, Condition c = Always, bool isPatchable = false);
 
-    // Branch can branch to an immediate *or* to a register.
-    // Branches to immediates are pc relative, branches to registers
-    // are absolute
+    // Branch can branch to an immediate *or* to a register. Branches to
+    // immediates are pc relative, branches to registers are absolute.
     BufferOffset as_b(BOffImm off, Condition c, bool isPatchable = false);
 
     BufferOffset as_b(Label *l, Condition c = Always, bool isPatchable = false);
     BufferOffset as_b(BOffImm off, Condition c, BufferOffset inst);
 
-    // blx can go to either an immediate or a register.
-    // When blx'ing to a register, we change processor mode
-    // depending on the low bit of the register
-    // when blx'ing to an immediate, we *always* change processor state.
+    // blx can go to either an immediate or a register. When blx'ing to a
+    // register, we change processor mode depending on the low bit of the
+    // register; when blx'ing to an immediate, we *always* change processor
+    // state.
     BufferOffset as_blx(Label *l);
 
     BufferOffset as_blx(Register r, Condition c = Always);
     BufferOffset as_bl(BOffImm off, Condition c);
-    // bl can only branch+link to an immediate, never to a register
-    // it never changes processor state
+    // bl can only branch+link to an immediate, never to a register; it never
+    // changes processor state.
     BufferOffset as_bl();
     // bl #imm can have a condition code, blx #imm cannot.
     // blx reg can be conditional.
     BufferOffset as_bl(Label *l, Condition c);
     BufferOffset as_bl(BOffImm off, Condition c, BufferOffset inst);
 
     BufferOffset as_mrs(Register r, Condition c = Always);
     BufferOffset as_msr(Register r, Condition c = Always);
     // VFP instructions!
   private:
 
     enum vfp_size {
-        isDouble = 1 << 8,
-        isSingle = 0 << 8
+        IsDouble = 1 << 8,
+        IsSingle = 0 << 8
     };
 
     BufferOffset writeVFPInst(vfp_size sz, uint32_t blob, uint32_t *dest=nullptr);
     // Unityped variants: all registers hold the same (ieee754 single/double)
     // notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
     BufferOffset as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                       VFPOp op, Condition c = Always);
 
@@ -1566,63 +1551,63 @@ class Assembler : public AssemblerShared
 
     BufferOffset as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                  Condition c = Always);
 
     BufferOffset as_vcmp(VFPRegister vd, VFPRegister vm,
                  Condition c = Always);
     BufferOffset as_vcmpz(VFPRegister vd,  Condition c = Always);
 
-    // specifically, a move between two same sized-registers
+    // Specifically, a move between two same-sized registers.
     BufferOffset as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c = Always);
-    /*xfer between Core and VFP*/
+    // Transfer between Core and VFP.
     enum FloatToCore_ {
         FloatToCore = 1 << 20,
         CoreToFloat = 0 << 20
     };
 
   private:
     enum VFPXferSize {
         WordTransfer   = 0x02000010,
         DoubleTransfer = 0x00400010
     };
 
   public:
     // Unlike the next function, moving between the core registers and vfp
-    // registers can't be *that* properly typed.  Namely, since I don't want to
-    // munge the type VFPRegister to also include core registers.  Thus, the core
+    // registers can't be *that* properly typed. Namely, since I don't want to
+    // munge the type VFPRegister to also include core registers. Thus, the core
     // and vfp registers are passed in based on their type, and src/dest is
     // determined by the float2core.
 
     BufferOffset as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c,
                   Condition c = Always, int idx = 0);
 
-    // our encoding actually allows just the src and the dest (and theiyr types)
+    // Our encoding actually allows just the src and the dest (and their types)
     // to uniquely specify the encoding that we are going to use.
     BufferOffset as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR = false,
                          Condition c = Always);
-    // hard coded to a 32 bit fixed width result for now
+    // Hard coded to a 32 bit fixed width result for now.
     BufferOffset as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint, bool toFixed, Condition c = Always);
 
-    /* xfer between VFP and memory*/
+    // Transfer between VFP and memory.
     BufferOffset as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
                  Condition c = Always /* vfp doesn't have a wb option*/,
                  uint32_t *dest = nullptr);
 
-    // VFP's ldm/stm work differently from the standard arm ones.
-    // You can only transfer a range
+    // VFP's ldm/stm work differently from the standard arm ones. You can only
+    // transfer a range.
 
     BufferOffset as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length,
                  /*also has update conditions*/Condition c = Always);
 
     BufferOffset as_vimm(VFPRegister vd, VFPImm imm, Condition c = Always);
 
     BufferOffset as_vmrs(Register r, Condition c = Always);
     BufferOffset as_vmsr(Register r, Condition c = Always);
-    // label operations
+    // Label operations.
     bool nextLink(BufferOffset b, BufferOffset *next);
     void bind(Label *label, BufferOffset boff = BufferOffset());
     void bind(RepatchLabel *label);
     uint32_t currentOffset() {
         return nextOffset().getOffset();
     }
     void retarget(Label *label, Label *target);
     // I'm going to pretend this doesn't exist for now.
@@ -1724,17 +1709,17 @@ class Assembler : public AssemblerShared
         }
         dtmLastReg = rn.code();
     }
     void finishFloatTransfer() {
         JS_ASSERT(dtmActive);
         dtmActive = false;
         JS_ASSERT(dtmLastReg != -1);
         dtmDelta = dtmDelta ? dtmDelta : 1;
-        // fencepost problem.
+        // Fencepost problem.
         int len = dtmDelta * (dtmLastReg - vdtmFirstReg) + 1;
         as_vdtm(dtmLoadStore, dtmBase,
                 VFPRegister(FloatRegister::FromCode(Min(vdtmFirstReg, dtmLastReg))),
                 len, dtmCond);
     }
 
   private:
     int dtmRegBitField;
@@ -1745,210 +1730,207 @@ class Assembler : public AssemblerShared
     DTMWriteBack dtmUpdate;
     DTMMode dtmMode;
     LoadStore dtmLoadStore;
     bool dtmActive;
     Condition dtmCond;
 
   public:
     enum {
-        padForAlign8  = (int)0x00,
-        padForAlign16 = (int)0x0000,
-        padForAlign32 = (int)0xe12fff7f  // 'bkpt 0xffff'
+        PadForAlign8  = (int)0x00,
+        PadForAlign16 = (int)0x0000,
+        PadForAlign32 = (int)0xe12fff7f  // 'bkpt 0xffff'
     };
 
-    // API for speaking with the IonAssemblerBufferWithConstantPools
-    // generate an initial placeholder instruction that we want to later fix up
-    static void insertTokenIntoTag(uint32_t size, uint8_t *load, int32_t token);
-    // take the stub value that was written in before, and write in an actual load
-    // using the index we'd computed previously as well as the address of the pool start.
-    static bool patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
-    // this is a callback for when we have filled a pool, and MUST flush it now.
-    // The pool requires the assembler to place a branch past the pool, and it
-    // calls this function.
-    static uint32_t placeConstantPoolBarrier(int offset);
+    // API for speaking with the IonAssemblerBufferWithConstantPools: generate
+    // an initial placeholder instruction that we want to later fix up.
+    static void InsertTokenIntoTag(uint32_t size, uint8_t *load, int32_t token);
+    // Take the stub value that was written in before, and write in an actual
+    // load using the index we'd computed previously as well as the address of
+    // the pool start.
+    static bool PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
     // END API
 
-    // move our entire pool into the instruction stream
-    // This is to force an opportunistic dump of the pool, prefferably when it
-    // is more convenient to do a dump.
-    void dumpPool();
+    // Move our entire pool into the instruction stream. This is to force an
+    // opportunistic dump of the pool, preferably when it is more convenient to
+    // do a dump.
     void flushBuffer();
     void enterNoPool();
     void leaveNoPool();
-    // this should return a BOffImm, but I didn't want to require everyplace that used the
-    // AssemblerBuffer to make that class.
-    static ptrdiff_t getBranchOffset(const Instruction *i);
-    static void retargetNearBranch(Instruction *i, int offset, Condition cond, bool final = true);
-    static void retargetNearBranch(Instruction *i, int offset, bool final = true);
-    static void retargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Condition cond);
+    // This should return a BOffImm, but we didn't want to require everyplace
+    // that used the AssemblerBuffer to make that class.
+    static ptrdiff_t GetBranchOffset(const Instruction *i);
+    static void RetargetNearBranch(Instruction *i, int offset, Condition cond, bool final = true);
+    static void RetargetNearBranch(Instruction *i, int offset, bool final = true);
+    static void RetargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Condition cond);
 
-    static void writePoolHeader(uint8_t *start, Pool *p, bool isNatural);
-    static void writePoolFooter(uint8_t *start, Pool *p, bool isNatural);
-    static void writePoolGuard(BufferOffset branch, Instruction *inst, BufferOffset dest);
+    static void WritePoolHeader(uint8_t *start, Pool *p, bool isNatural);
+    static void WritePoolFooter(uint8_t *start, Pool *p, bool isNatural);
+    static void WritePoolGuard(BufferOffset branch, Instruction *inst, BufferOffset dest);
 
 
-    static uint32_t patchWrite_NearCallSize();
-    static uint32_t nopSize() { return 4; }
-    static void patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
-    static void patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
+    static uint32_t PatchWrite_NearCallSize();
+    static uint32_t NopSize() { return 4; }
+    static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
+    static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                         PatchedImmPtr expectedValue);
-    static void patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+    static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
                                         ImmPtr expectedValue);
-    static void patchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
+    static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
 
-    static void patchInstructionImmediate(uint8_t *code, PatchedImmPtr imm) {
+    static void PatchInstructionImmediate(uint8_t *code, PatchedImmPtr imm) {
         MOZ_ASSUME_UNREACHABLE("Unused.");
     }
 
-    static uint32_t alignDoubleArg(uint32_t offset) {
-        return (offset+1)&~1;
+    static uint32_t AlignDoubleArg(uint32_t offset) {
+        return (offset + 1) & ~1;
     }
-    static uint8_t *nextInstruction(uint8_t *instruction, uint32_t *count = nullptr);
+    static uint8_t *NextInstruction(uint8_t *instruction, uint32_t *count = nullptr);
+
     // Toggle a jmp or cmp emitted by toggledJump().
-
     static void ToggleToJmp(CodeLocationLabel inst_);
     static void ToggleToCmp(CodeLocationLabel inst_);
 
     static uint8_t *BailoutTableStart(uint8_t *code);
 
     static size_t ToggledCallSize(uint8_t *code);
     static void ToggleCall(CodeLocationLabel inst_, bool enabled);
 
-    static void updateBoundsCheck(uint32_t logHeapSize, Instruction *inst);
+    static void UpdateBoundsCheck(uint32_t logHeapSize, Instruction *inst);
     void processCodeLabels(uint8_t *rawCode);
-    static int32_t extractCodeLabelOffset(uint8_t *code) {
+    static int32_t ExtractCodeLabelOffset(uint8_t *code) {
         return *(uintptr_t *)code;
     }
 
     bool bailed() {
         return m_buffer.bail();
     }
 }; // Assembler
 
-// An Instruction is a structure for both encoding and decoding any and all ARM instructions.
-// many classes have not been implemented thusfar.
+// An Instruction is a structure for both encoding and decoding any and all ARM
+// instructions. Many classes have not been implemented thus far.
 class Instruction
 {
     uint32_t data;
 
   protected:
     // This is not for defaulting to always, this is for instructions that
-    // cannot be made conditional, and have the usually invalid 4b1111 cond field
+    // cannot be made conditional, and have the usually invalid 4b1111 cond
+    // field.
     Instruction (uint32_t data_, bool fake = false) : data(data_ | 0xf0000000) {
         JS_ASSERT (fake || ((data_ & 0xf0000000) == 0));
     }
-    // Standard constructor
+    // Standard constructor.
     Instruction (uint32_t data_, Assembler::Condition c) : data(data_ | (uint32_t) c) {
         JS_ASSERT ((data_ & 0xf0000000) == 0);
     }
-    // You should never create an instruction directly.  You should create a
-    // more specific instruction which will eventually call one of these
-    // constructors for you.
+    // You should never create an instruction directly. You should create a more
+    // specific instruction which will eventually call one of these constructors
+    // for you.
   public:
     uint32_t encode() const {
         return data;
     }
-    // Check if this instruction is really a particular case
+    // Check if this instruction is really a particular case.
     template <class C>
-    bool is() const { return C::isTHIS(*this); }
+    bool is() const { return C::IsTHIS(*this); }
 
-    // safely get a more specific variant of this pointer
+    // Safely get a more specific variant of this pointer.
     template <class C>
-    C *as() const { return C::asTHIS(*this); }
+    C *as() const { return C::AsTHIS(*this); }
 
     const Instruction & operator=(const Instruction &src) {
         data = src.data;
         return *this;
     }
-    // Since almost all instructions have condition codes, the condition
-    // code extractor resides in the base class.
+    // Since almost all instructions have condition codes, the condition code
+    // extractor resides in the base class.
     void extractCond(Assembler::Condition *c) {
         if (data >> 28 != 0xf )
             *c = (Assembler::Condition)(data & 0xf0000000);
     }
     // Get the next instruction in the instruction stream.
     // This does neat things like ignoreconstant pools and their guards.
     Instruction *next();
 
     // Skipping pools with artificial guards.
     Instruction *skipPool();
 
-    // Sometimes, an api wants a uint32_t (or a pointer to it) rather than
-    // an instruction.  raw() just coerces this into a pointer to a uint32_t
+    // Sometimes, an api wants a uint32_t (or a pointer to it) rather than an
+    // instruction. raw() just coerces this into a pointer to a uint32_t.
     const uint32_t *raw() const { return &data; }
     uint32_t size() const { return 4; }
 }; // Instruction
 
-// make sure that it is the right size
+// Make sure that it is the right size.
 JS_STATIC_ASSERT(sizeof(Instruction) == 4);
 
-// Data Transfer Instructions
+// Data Transfer Instructions.
 class InstDTR : public Instruction
 {
   public:
     enum IsByte_ {
         IsByte = 0x00400000,
         IsWord = 0x00000000
     };
     static const int IsDTR     = 0x04000000;
     static const int IsDTRMask = 0x0c000000;
 
     // TODO: Replace the initialization with something that is safer.
     InstDTR(LoadStore ls, IsByte_ ib, Index mode, Register rt, DTRAddr addr, Assembler::Condition c)
       : Instruction(ls | ib | mode | RT(rt) | addr.encode() | IsDTR, c)
     { }
 
-    static bool isTHIS(const Instruction &i);
-    static InstDTR *asTHIS(const Instruction &i);
+    static bool IsTHIS(const Instruction &i);
+    static InstDTR *AsTHIS(const Instruction &i);
 
 };
 JS_STATIC_ASSERT(sizeof(InstDTR) == sizeof(Instruction));
 
 class InstLDR : public InstDTR
 {
   public:
     InstLDR(Index mode, Register rt, DTRAddr addr, Assembler::Condition c)
         : InstDTR(IsLoad, IsWord, mode, rt, addr, c)
     { }
-    static bool isTHIS(const Instruction &i);
-    static InstLDR *asTHIS(const Instruction &i);
+    static bool IsTHIS(const Instruction &i);
+    static InstLDR *AsTHIS(const Instruction &i);
 
 };
 JS_STATIC_ASSERT(sizeof(InstDTR) == sizeof(InstLDR));
 
 class InstNOP : public Instruction
 {
   public:
     static const uint32_t NopInst = 0x0320f000;
 
     InstNOP()
       : Instruction(NopInst, Assembler::Always)
     { }
 
-    static bool isTHIS(const Instruction &i);
-    static InstNOP *asTHIS(Instruction &i);
+    static bool IsTHIS(const Instruction &i);
+    static InstNOP *AsTHIS(Instruction &i);
 };
 
 // Branching to a register, or calling a register
 class InstBranchReg : public Instruction
 {
   protected:
     // Don't use BranchTag yourself, use a derived instruction.
     enum BranchTag {
         IsBX  = 0x012fff10,
         IsBLX = 0x012fff30
     };
     static const uint32_t IsBRegMask = 0x0ffffff0;
     InstBranchReg(BranchTag tag, Register rm, Assembler::Condition c)
       : Instruction(tag | rm.code(), c)
     { }
   public:
-    static bool isTHIS (const Instruction &i);
-    static InstBranchReg *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstBranchReg *AsTHIS (const Instruction &i);
     // Get the register that is being branched to
     void extractDest(Register *dest);
     // Make sure we are branching to a pre-known register
     bool checkDest(Register dest);
 };
 JS_STATIC_ASSERT(sizeof(InstBranchReg) == sizeof(Instruction));
 
 // Branching to an immediate offset, or calling an immediate offset
@@ -1961,58 +1943,58 @@ class InstBranchImm : public Instruction
     };
     static const uint32_t IsBImmMask = 0x0f000000;
 
     InstBranchImm(BranchTag tag, BOffImm off, Assembler::Condition c)
       : Instruction(tag | off.encode(), c)
     { }
 
   public:
-    static bool isTHIS (const Instruction &i);
-    static InstBranchImm *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstBranchImm *AsTHIS (const Instruction &i);
     void extractImm(BOffImm *dest);
 };
 JS_STATIC_ASSERT(sizeof(InstBranchImm) == sizeof(Instruction));
 
 // Very specific branching instructions.
 class InstBXReg : public InstBranchReg
 {
   public:
-    static bool isTHIS (const Instruction &i);
-    static InstBXReg *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstBXReg *AsTHIS (const Instruction &i);
 };
 class InstBLXReg : public InstBranchReg
 {
   public:
     InstBLXReg(Register reg, Assembler::Condition c)
       : InstBranchReg(IsBLX, reg, c)
     { }
 
-    static bool isTHIS (const Instruction &i);
-    static InstBLXReg *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstBLXReg *AsTHIS (const Instruction &i);
 };
 class InstBImm : public InstBranchImm
 {
   public:
     InstBImm(BOffImm off, Assembler::Condition c)
       : InstBranchImm(IsB, off, c)
     { }
 
-    static bool isTHIS (const Instruction &i);
-    static InstBImm *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstBImm *AsTHIS (const Instruction &i);
 };
 class InstBLImm : public InstBranchImm
 {
   public:
     InstBLImm(BOffImm off, Assembler::Condition c)
       : InstBranchImm(IsBL, off, c)
     { }
 
-    static bool isTHIS (const Instruction &i);
-    static InstBLImm *asTHIS (Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstBLImm *AsTHIS (Instruction &i);
 };
 
 // Both movw and movt. The layout of both the immediate and the destination
 // register is the same so the code is being shared.
 class InstMovWT : public Instruction
 {
   protected:
     enum WT {
@@ -2026,73 +2008,73 @@ class InstMovWT : public Instruction
     { }
 
   public:
     void extractImm(Imm16 *dest);
     void extractDest(Register *dest);
     bool checkImm(Imm16 dest);
     bool checkDest(Register dest);
 
-    static bool isTHIS (Instruction &i);
-    static InstMovWT *asTHIS (Instruction &i);
+    static bool IsTHIS (Instruction &i);
+    static InstMovWT *AsTHIS (Instruction &i);
 
 };
 JS_STATIC_ASSERT(sizeof(InstMovWT) == sizeof(Instruction));
 
 class InstMovW : public InstMovWT
 {
   public:
     InstMovW (Register rd, Imm16 imm, Assembler::Condition c)
       : InstMovWT(rd, imm, IsW, c)
     { }
 
-    static bool isTHIS (const Instruction &i);
-    static InstMovW *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstMovW *AsTHIS (const Instruction &i);
 };
 
 class InstMovT : public InstMovWT
 {
   public:
     InstMovT (Register rd, Imm16 imm, Assembler::Condition c)
       : InstMovWT(rd, imm, IsT, c)
     { }
-    static bool isTHIS (const Instruction &i);
-    static InstMovT *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstMovT *AsTHIS (const Instruction &i);
 };
 
 class InstALU : public Instruction
 {
     static const int32_t ALUMask = 0xc << 24;
   public:
     InstALU (Register rd, Register rn, Operand2 op2, ALUOp op, SetCond_ sc, Assembler::Condition c)
         : Instruction(maybeRD(rd) | maybeRN(rn) | op2.encode() | op | sc, c)
     { }
-    static bool isTHIS (const Instruction &i);
-    static InstALU *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstALU *AsTHIS (const Instruction &i);
     void extractOp(ALUOp *ret);
     bool checkOp(ALUOp op);
     void extractDest(Register *ret);
     bool checkDest(Register rd);
     void extractOp1(Register *ret);
     bool checkOp1(Register rn);
     Operand2 extractOp2();
 };
 
 class InstCMP : public InstALU
 {
   public:
-    static bool isTHIS (const Instruction &i);
-    static InstCMP *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstCMP *AsTHIS (const Instruction &i);
 };
 
 class InstMOV : public InstALU
 {
   public:
-    static bool isTHIS (const Instruction &i);
-    static InstMOV *asTHIS (const Instruction &i);
+    static bool IsTHIS (const Instruction &i);
+    static InstMOV *AsTHIS (const Instruction &i);
 };
 
 
 class InstructionIterator {
   private:
     Instruction *i;
   public:
     InstructionIterator(Instruction *i_);
@@ -2113,17 +2095,17 @@ GetIntArgReg(uint32_t usedIntArgs, uint3
 {
     if (usedIntArgs >= NumIntArgRegs)
         return false;
     *out = Register::FromCode(usedIntArgs);
     return true;
 }
 
 // Get a register in which we plan to put a quantity that will be used as an
-// integer argument.  This differs from GetIntArgReg in that if we have no more
+// integer argument. This differs from GetIntArgReg in that if we have no more
 // actual argument registers to use we will fall back on using whatever
 // CallTempReg* don't overlap the argument registers, and only fail once those
 // run out too.
 static inline bool
 GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register *out)
 {
     if (GetIntArgReg(usedIntArgs, usedFloatArgs, out))
         return true;
@@ -2138,68 +2120,68 @@ GetTempRegForIntArg(uint32_t usedIntArgs
 }
 
 
 #if !defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
 
 static inline uint32_t
 GetArgStackDisp(uint32_t arg)
 {
-    JS_ASSERT(!useHardFpABI());
+    JS_ASSERT(!UseHardFpABI());
     JS_ASSERT(arg >= NumIntArgRegs);
     return (arg - NumIntArgRegs) * sizeof(intptr_t);
 }
 
 #endif
 
 
 #if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
 
 static inline bool
 GetFloatArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, FloatRegister *out)
 {
-    JS_ASSERT(useHardFpABI());
+    JS_ASSERT(UseHardFpABI());
     if (usedFloatArgs >= NumFloatArgRegs)
         return false;
     *out = FloatRegister::FromCode(usedFloatArgs);
     return true;
 }
 
 static inline uint32_t
 GetIntArgStackDisp(uint32_t usedIntArgs, uint32_t usedFloatArgs, uint32_t *padding)
 {
-    JS_ASSERT(useHardFpABI());
+    JS_ASSERT(UseHardFpABI());
     JS_ASSERT(usedIntArgs >= NumIntArgRegs);
     uint32_t doubleSlots = Max(0, (int32_t)usedFloatArgs - (int32_t)NumFloatArgRegs);
     doubleSlots *= 2;
     int intSlots = usedIntArgs - NumIntArgRegs;
     return (intSlots + doubleSlots + *padding) * sizeof(intptr_t);
 }
 
 static inline uint32_t
 GetFloat32ArgStackDisp(uint32_t usedIntArgs, uint32_t usedFloatArgs, uint32_t *padding)
 {
-    JS_ASSERT(useHardFpABI());
+    JS_ASSERT(UseHardFpABI());
     JS_ASSERT(usedFloatArgs >= NumFloatArgRegs);
     uint32_t intSlots = 0;
     if (usedIntArgs > NumIntArgRegs)
         intSlots = usedIntArgs - NumIntArgRegs;
     uint32_t float32Slots = usedFloatArgs - NumFloatArgRegs;
     return (intSlots + float32Slots + *padding) * sizeof(intptr_t);
 }
 
 static inline uint32_t
 GetDoubleArgStackDisp(uint32_t usedIntArgs, uint32_t usedFloatArgs, uint32_t *padding)
 {
-    JS_ASSERT(useHardFpABI());
+    JS_ASSERT(UseHardFpABI());
     JS_ASSERT(usedFloatArgs >= NumFloatArgRegs);
     uint32_t intSlots = 0;
     if (usedIntArgs > NumIntArgRegs) {
         intSlots = usedIntArgs - NumIntArgRegs;
-        // update the amount of padding required.
+        // Update the amount of padding required.
         *padding += (*padding + usedIntArgs) % 2;
     }
     uint32_t doubleSlots = usedFloatArgs - NumFloatArgRegs;
     doubleSlots *= 2;
     return (intSlots + doubleSlots + *padding) * sizeof(intptr_t);
 }
 
 #endif
@@ -2210,28 +2192,27 @@ class DoubleEncoder {
     uint32_t rep(bool b, uint32_t count) {
         uint32_t ret = 0;
         for (uint32_t i = 0; i < count; i++)
             ret = (ret << 1) | b;
         return ret;
     }
 
     uint32_t encode(uint8_t value) {
-        //ARM ARM "VFP modified immediate constants"
-        // aBbbbbbb bbcdefgh 000...
-        // we want to return the top 32 bits of the double
-        // the rest are 0.
+        // ARM ARM "VFP modified immediate constants"
+        //  aBbbbbbb bbcdefgh 000...
+        // We want to return the top 32 bits of the double the rest are 0.
         bool a = value >> 7;
         bool b = value >> 6 & 1;
         bool B = !b;
         uint32_t cdefgh = value & 0x3f;
-        return a << 31 |
-            B << 30 |
-            rep(b, 8) << 22 |
-            cdefgh << 16;
+        return         a << 31 |
+                       B << 30 |
+               rep(b, 8) << 22 |
+                  cdefgh << 16;
     }
 
     struct DoubleEntry
     {
         uint32_t dblTop;
         datastore::Imm8VFPImmData data;
 
         DoubleEntry()
--- a/js/src/jit/arm/Bailouts-arm.cpp
+++ b/js/src/jit/arm/Bailouts-arm.cpp
@@ -15,19 +15,19 @@ using namespace js;
 using namespace js::jit;
 
 namespace js {
 namespace jit {
 
 class BailoutStack
 {
     uintptr_t frameClassId_;
-    // This is pushed in the bailout handler.  Both entry points into the handler
+    // This is pushed in the bailout handler. Both entry points into the handler
     // inserts their own value int lr, which is then placed onto the stack along
-    // with frameClassId_ above.  This should be migrated to ip.
+    // with frameClassId_ above. This should be migrated to ip.
   public:
     union {
         uintptr_t frameSize_;
         uintptr_t tableOffset_;
     };
 
   protected: // Silence Clang warning about unused private fields.
     mozilla::Array<double, FloatRegisters::Total> fpregs_;
--- a/js/src/jit/arm/BaselineHelpers-arm.h
+++ b/js/src/jit/arm/BaselineHelpers-arm.h
@@ -41,26 +41,26 @@ EmitCallIC(CodeOffsetLabel *patchOffset,
     // Load stub pointer into BaselineStubReg
     masm.loadPtr(Address(BaselineStubReg, ICEntry::offsetOfFirstStub()), BaselineStubReg);
 
     // Load stubcode pointer from BaselineStubEntry.
     // R2 won't be active when we call ICs, so we can use r0.
     JS_ASSERT(R2 == ValueOperand(r1, r0));
     masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfStubCode()), r0);
 
-    // Call the stubcode via a direct branch-and-link
+    // Call the stubcode via a direct branch-and-link.
     masm.ma_blx(r0);
 }
 
 inline void
 EmitEnterTypeMonitorIC(MacroAssembler &masm,
                        size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
 {
-    // This is expected to be called from within an IC, when BaselineStubReg
-    // is properly initialized to point to the stub.
+    // This is expected to be called from within an IC, when BaselineStubReg is
+    // properly initialized to point to the stub.
     masm.loadPtr(Address(BaselineStubReg, (uint32_t) monitorStubOffset), BaselineStubReg);
 
     // Load stubcode pointer from BaselineStubEntry.
     // R2 won't be active when we call ICs, so we can use r0.
     JS_ASSERT(R2 == ValueOperand(r1, r0));
     masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfStubCode()), r0);
 
     // Jump to the stubcode.
@@ -91,31 +91,31 @@ EmitTailCallVM(JitCode *target, MacroAss
     masm.ma_add(Imm32(BaselineFrame::FramePointerOffset), r0);
     masm.ma_sub(BaselineStackReg, r0);
 
     // Store frame size without VMFunction arguments for GC marking.
     masm.ma_sub(r0, Imm32(argSize), r1);
     masm.store32(r1, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
 
     // Push frame descriptor and perform the tail call.
-    // BaselineTailCallReg (lr) already contains the return address (as we keep it there through
-    // the stub calls), but the VMWrapper code being called expects the return address to also
-    // be pushed on the stack.
+    // BaselineTailCallReg (lr) already contains the return address (as we keep
+    // it there through the stub calls), but the VMWrapper code being called
+    // expects the return address to also be pushed on the stack.
     JS_ASSERT(BaselineTailCallReg == lr);
     masm.makeFrameDescriptor(r0, JitFrame_BaselineJS);
     masm.push(r0);
     masm.push(lr);
     masm.branch(target);
 }
 
 inline void
 EmitCreateStubFrameDescriptor(MacroAssembler &masm, Register reg)
 {
-    // Compute stub frame size. We have to add two pointers: the stub reg and previous
-    // frame pointer pushed by EmitEnterStubFrame.
+    // Compute stub frame size. We have to add two pointers: the stub reg and
+    // previous frame pointer pushed by EmitEnterStubFrame.
     masm.mov(BaselineFrameReg, reg);
     masm.ma_add(Imm32(sizeof(void *) * 2), reg);
     masm.ma_sub(BaselineStackReg, reg);
 
     masm.makeFrameDescriptor(reg, JitFrame_BaselineStub);
 }
 
 inline void
@@ -137,18 +137,18 @@ EmitEnterStubFrame(MacroAssembler &masm,
 
     // Compute frame size.
     masm.mov(BaselineFrameReg, scratch);
     masm.ma_add(Imm32(BaselineFrame::FramePointerOffset), scratch);
     masm.ma_sub(BaselineStackReg, scratch);
 
     masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
 
-    // Note: when making changes here,  don't forget to update STUB_FRAME_SIZE
-    // if needed.
+    // Note: when making changes here, don't forget to update STUB_FRAME_SIZE if
+    // needed.
 
     // Push frame descriptor and return address.
     masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS);
     masm.push(scratch);
     masm.push(BaselineTailCallReg);
 
     // Save old frame pointer, stack pointer and stub reg.
     masm.push(BaselineStubReg);
@@ -157,20 +157,20 @@ EmitEnterStubFrame(MacroAssembler &masm,
 
     // We pushed 4 words, so the stack is still aligned to 8 bytes.
     masm.checkStackAlignment();
 }
 
 inline void
 EmitLeaveStubFrame(MacroAssembler &masm, bool calledIntoIon = false)
 {
-    // Ion frames do not save and restore the frame pointer. If we called
-    // into Ion, we have to restore the stack pointer from the frame descriptor.
-    // If we performed a VM call, the descriptor has been popped already so
-    // in that case we use the frame pointer.
+    // Ion frames do not save and restore the frame pointer. If we called into
+    // Ion, we have to restore the stack pointer from the frame descriptor. If
+    // we performed a VM call, the descriptor has been popped already so in that
+    // case we use the frame pointer.
     if (calledIntoIon) {
         masm.pop(ScratchRegister);
         masm.ma_lsr(Imm32(FRAMESIZE_SHIFT), ScratchRegister, ScratchRegister);
         masm.ma_add(ScratchRegister, BaselineStackReg);
     } else {
         masm.mov(BaselineFrameReg, BaselineStackReg);
     }
 
@@ -185,67 +185,67 @@ EmitLeaveStubFrame(MacroAssembler &masm,
 }
 
 inline void
 EmitStowICValues(MacroAssembler &masm, int values)
 {
     JS_ASSERT(values >= 0 && values <= 2);
     switch(values) {
       case 1:
-        // Stow R0
+        // Stow R0.
         masm.pushValue(R0);
         break;
       case 2:
-        // Stow R0 and R1
+        // Stow R0 and R1.
         masm.pushValue(R0);
         masm.pushValue(R1);
         break;
     }
 }
 
 inline void
 EmitUnstowICValues(MacroAssembler &masm, int values, bool discard = false)
 {
     JS_ASSERT(values >= 0 && values <= 2);
     switch(values) {
       case 1:
-        // Unstow R0
+        // Unstow R0.
         if (discard)
             masm.addPtr(Imm32(sizeof(Value)), BaselineStackReg);
         else
             masm.popValue(R0);
         break;
       case 2:
-        // Unstow R0 and R1
+        // Unstow R0 and R1.
         if (discard) {
             masm.addPtr(Imm32(sizeof(Value) * 2), BaselineStackReg);
         } else {
             masm.popValue(R1);
             masm.popValue(R0);
         }
         break;
     }
 }
 
 inline void
 EmitCallTypeUpdateIC(MacroAssembler &masm, JitCode *code, uint32_t objectOffset)
 {
     JS_ASSERT(R2 == ValueOperand(r1, r0));
 
-    // R0 contains the value that needs to be typechecked.
-    // The object we're updating is a boxed Value on the stack, at offset
-    // objectOffset from esp, excluding the return address.
+    // R0 contains the value that needs to be typechecked. The object we're
+    // updating is a boxed Value on the stack, at offset objectOffset from esp,
+    // excluding the return address.
 
     // Save the current BaselineStubReg to stack, as well as the TailCallReg,
     // since on ARM, the LR is live.
     masm.push(BaselineStubReg);
     masm.push(BaselineTailCallReg);
 
-    // This is expected to be called from within an IC, when BaselineStubReg
-    // is properly initialized to point to the stub.
+    // This is expected to be called from within an IC, when BaselineStubReg is
+    // properly initialized to point to the stub.
     masm.loadPtr(Address(BaselineStubReg, ICUpdatedStub::offsetOfFirstUpdateStub()),
                  BaselineStubReg);
 
     // TODO: Change r0 uses below to use masm's configurable scratch register instead.
 
     // Load stubcode pointer from BaselineStubReg into BaselineTailCallReg.
     masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfStubCode()), r0);
 
@@ -281,33 +281,33 @@ EmitCallTypeUpdateIC(MacroAssembler &mas
     // Success at end.
     masm.bind(&success);
 }
 
 template <typename AddrType>
 inline void
 EmitPreBarrier(MacroAssembler &masm, const AddrType &addr, MIRType type)
 {
-    // on ARM, lr is clobbered by patchableCallPreBarrier.  Save it first.
+    // On ARM, lr is clobbered by patchableCallPreBarrier. Save it first.
     masm.push(lr);
     masm.patchableCallPreBarrier(addr, type);
     masm.pop(lr);
 }
 
 inline void
 EmitStubGuardFailure(MacroAssembler &masm)
 {
     JS_ASSERT(R2 == ValueOperand(r1, r0));
 
     // NOTE: This routine assumes that the stub guard code left the stack in the
     // same state it was in when it was entered.
 
     // BaselineStubEntry points to the current stub.
 
-    // Load next stub into BaselineStubReg
+    // Load next stub into BaselineStubReg.
     masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfNext()), BaselineStubReg);
 
     // Load stubcode pointer from BaselineStubEntry into scratch register.
     masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfStubCode()), r0);
 
     // Return address is already loaded, just jump to the next stubcode.
     JS_ASSERT(BaselineTailCallReg == lr);
     masm.branch(r0);
--- a/js/src/jit/arm/BaselineIC-arm.cpp
+++ b/js/src/jit/arm/BaselineIC-arm.cpp
@@ -31,17 +31,17 @@ ICCompare_Int32::Compiler::generateStubC
     masm.cmp32(R0.payloadReg(), R1.payloadReg());
     masm.ma_mov(Imm32(1), R0.payloadReg(), NoSetCond, cond);
     masm.ma_mov(Imm32(0), R0.payloadReg(), NoSetCond, Assembler::InvertCondition(cond));
 
     // Result is implicitly boxed already.
     masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.payloadReg(), R0);
     EmitReturnFromIC(masm);
 
-    // Failure case - jump to next stub
+    // Failure case - jump to next stub.
     masm.bind(&failure);
     EmitStubGuardFailure(masm);
 
     return true;
 }
 
 bool
 ICCompare_Double::Compiler::generateStubCode(MacroAssembler &masm)
@@ -57,17 +57,17 @@ ICCompare_Double::Compiler::generateStub
 
     masm.compareDouble(FloatReg0, FloatReg1);
     masm.ma_mov(Imm32(0), dest);
     masm.ma_mov(Imm32(1), dest, NoSetCond, cond);
 
     masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0);
     EmitReturnFromIC(masm);
 
-    // Failure case - jump to next stub
+    // Failure case - jump to next stub.
     masm.bind(&failure);
     EmitStubGuardFailure(masm);
     return true;
 }
 
 // ICBinaryArith_Int32
 
 extern "C" {
@@ -77,35 +77,35 @@ extern "C" {
 bool
 ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm)
 {
     // Guard that R0 is an integer and R1 is an integer.
     Label failure;
     masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
     masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
 
-    // Add R0 and R1.  Don't need to explicitly unbox, just use R2's payloadReg.
+    // Add R0 and R1. Don't need to explicitly unbox, just use R2's payloadReg.
     Register scratchReg = R2.payloadReg();
 
     // DIV and MOD need an extra non-volatile ValueOperand to hold R0.
     GeneralRegisterSet savedRegs = availableGeneralRegs(2);
     savedRegs = GeneralRegisterSet::Intersect(GeneralRegisterSet::NonVolatile(), savedRegs);
     ValueOperand savedValue = savedRegs.takeAnyValue();
 
     Label maybeNegZero, revertRegister;
     switch(op_) {
       case JSOP_ADD:
         masm.ma_add(R0.payloadReg(), R1.payloadReg(), scratchReg, SetCond);
 
-        // Just jump to failure on overflow.  R0 and R1 are preserved, so we can just jump to
-        // the next stub.
+        // Just jump to failure on overflow. R0 and R1 are preserved, so we can
+        // just jump to the next stub.
         masm.j(Assembler::Overflow, &failure);
 
-        // Box the result and return.  We know R0.typeReg() already contains the integer
-        // tag, so we just need to move the result value into place.
+        // Box the result and return. We know R0.typeReg() already contains the
+        // integer tag, so we just need to move the result value into place.
         masm.mov(scratchReg, R0.payloadReg());
         break;
       case JSOP_SUB:
         masm.ma_sub(R0.payloadReg(), R1.payloadReg(), scratchReg, SetCond);
         masm.j(Assembler::Overflow, &failure);
         masm.mov(scratchReg, R0.payloadReg());
         break;
       case JSOP_MUL: {
@@ -126,17 +126,18 @@ ICBinaryArith_Int32::Compiler::generateS
         masm.ma_cmp(R1.payloadReg(), Imm32(-1), Assembler::Equal);
         masm.j(Assembler::Equal, &failure);
 
         // Check for both division by zero and 0 / X with X < 0 (results in -0).
         masm.ma_cmp(R1.payloadReg(), Imm32(0));
         masm.ma_cmp(R0.payloadReg(), Imm32(0), Assembler::LessThan);
         masm.j(Assembler::Equal, &failure);
 
-        // The call will preserve registers r4-r11. Save R0 and the link register.
+        // The call will preserve registers r4-r11. Save R0 and the link
+        // register.
         JS_ASSERT(R1 == ValueOperand(r5, r4));
         JS_ASSERT(R0 == ValueOperand(r3, r2));
         masm.moveValue(R0, savedValue);
 
         masm.setupAlignedABICall(2);
         masm.passABIArg(R0.payloadReg());
         masm.passABIArg(R1.payloadReg());
         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_idivmod));
@@ -217,17 +218,17 @@ ICBinaryArith_Int32::Compiler::generateS
       case JSOP_MOD:
         masm.bind(&revertRegister);
         masm.moveValue(savedValue, R0);
         break;
       default:
         break;
     }
 
-    // Failure case - jump to next stub
+    // Failure case - jump to next stub.
     masm.bind(&failure);
     EmitStubGuardFailure(masm);
 
     return true;
 }
 
 bool
 ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm)
--- a/js/src/jit/arm/BaselineRegisters-arm.h
+++ b/js/src/jit/arm/BaselineRegisters-arm.h
@@ -18,40 +18,38 @@ namespace jit {
 // r14 = link-register
 
 // r13 = stack-pointer
 // r11 = frame-pointer
 static MOZ_CONSTEXPR_VAR Register BaselineFrameReg = r11;
 static MOZ_CONSTEXPR_VAR Register BaselineStackReg = sp;
 
 // ValueOperands R0, R1, and R2.
-// R0 == JSReturnReg, and R2 uses registers not
-// preserved across calls.  R1 value should be
-// preserved across calls.
+// R0 == JSReturnReg, and R2 uses registers not preserved across calls. R1 value
+// should be preserved across calls.
 static MOZ_CONSTEXPR_VAR ValueOperand R0(r3, r2);
 static MOZ_CONSTEXPR_VAR ValueOperand R1(r5, r4);
 static MOZ_CONSTEXPR_VAR ValueOperand R2(r1, r0);
 
 // BaselineTailCallReg and BaselineStubReg
-// These use registers that are not preserved across
-// calls.
+// These use registers that are not preserved across calls.
 static MOZ_CONSTEXPR_VAR Register BaselineTailCallReg = r14;
 static MOZ_CONSTEXPR_VAR Register BaselineStubReg     = r9;
 
 static MOZ_CONSTEXPR_VAR Register ExtractTemp0        = InvalidReg;
 static MOZ_CONSTEXPR_VAR Register ExtractTemp1        = InvalidReg;
 
 // Register used internally by MacroAssemblerARM.
 static MOZ_CONSTEXPR_VAR Register BaselineSecondScratchReg = r6;
 
 // R7 - R9 are generally available for use within stubcode.
 
-// Note that BaselineTailCallReg is actually just the link
-// register.  In ARM code emission, we do not clobber BaselineTailCallReg
-// since we keep the return address for calls there.
+// Note that BaselineTailCallReg is actually just the link register. In ARM code
+// emission, we do not clobber BaselineTailCallReg since we keep the return
+// address for calls there.
 
 // FloatReg0 must be equal to ReturnFloatReg.
 static MOZ_CONSTEXPR_VAR FloatRegister FloatReg0      = d0;
 static MOZ_CONSTEXPR_VAR FloatRegister FloatReg1      = d1;
 
 } // namespace jit
 } // namespace js
 
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -86,17 +86,17 @@ CodeGeneratorARM::generateEpilogue()
 #endif
 
     if (gen->compilingAsmJS())
         masm.freeStack(frameDepth_);
     else
         masm.freeStack(frameSize());
     JS_ASSERT(masm.framePushed() == 0);
     masm.pop(pc);
-    masm.dumpPool();
+    masm.flushBuffer();
     return true;
 }
 
 void
 CodeGeneratorARM::emitBranch(Assembler::Condition cond, MBasicBlock *mirTrue, MBasicBlock *mirFalse)
 {
     if (isNextBlock(mirFalse->lir())) {
         jumpToBlock(mirTrue, cond);
@@ -302,28 +302,32 @@ CodeGeneratorARM::visitMinMaxD(LMinMaxD 
     JS_ASSERT(first == output);
 
     Assembler::Condition cond = ins->mir()->isMax()
         ? Assembler::VFP_LessThanOrEqual
         : Assembler::VFP_GreaterThanOrEqual;
     Label nan, equal, returnSecond, done;
 
     masm.compareDouble(first, second);
-    masm.ma_b(&nan, Assembler::VFP_Unordered); // first or second is NaN, result is NaN.
-    masm.ma_b(&equal, Assembler::VFP_Equal); // make sure we handle -0 and 0 right.
+    // First or second is NaN, result is NaN.
+    masm.ma_b(&nan, Assembler::VFP_Unordered);
+    // Make sure we handle -0 and 0 right.
+    masm.ma_b(&equal, Assembler::VFP_Equal);
     masm.ma_b(&returnSecond, cond);
     masm.ma_b(&done);
 
     // Check for zero.
     masm.bind(&equal);
     masm.compareDouble(first, InvalidFloatReg);
-    masm.ma_b(&done, Assembler::VFP_NotEqualOrUnordered); // first wasn't 0 or -0, so just return it.
+    // First wasn't 0 or -0, so just return it.
+    masm.ma_b(&done, Assembler::VFP_NotEqualOrUnordered);
     // So now both operands are either -0 or 0.
     if (ins->mir()->isMax()) {
-        masm.ma_vadd(second, first, first); // -0 + -0 = -0 and -0 + 0 = 0.
+        // -0 + -0 = -0 and -0 + 0 = 0.
+        masm.ma_vadd(second, first, first);
     } else {
         masm.ma_vneg(first, first);
         masm.ma_vsub(first, second, first);
         masm.ma_vneg(first, first);
     }
     masm.ma_b(&done);
 
     masm.bind(&nan);
@@ -430,88 +434,90 @@ CodeGeneratorARM::visitMulI(LMulI *ins)
         }
         // TODO: move these to ma_mul.
         switch (constant) {
           case -1:
             masm.ma_rsb(ToRegister(lhs), Imm32(0), ToRegister(dest), SetCond);
             break;
           case 0:
             masm.ma_mov(Imm32(0), ToRegister(dest));
-            return true; // escape overflow check;
+            return true; // Escape overflow check.
           case 1:
-            // nop
+            // Nop
             masm.ma_mov(ToRegister(lhs), ToRegister(dest));
-            return true; // escape overflow check;
+            return true; // Escape overflow check.
           case 2:
             masm.ma_add(ToRegister(lhs), ToRegister(lhs), ToRegister(dest), SetCond);
             // Overflow is handled later.
             break;
           default: {
             bool handled = false;
             if (constant > 0) {
                 // Try shift and add sequences for a positive constant.
                 if (!mul->canOverflow()) {
-                    // If it cannot overflow, we can do lots of optimizations
+                    // If it cannot overflow, we can do lots of optimizations.
                     Register src = ToRegister(lhs);
                     uint32_t shift = FloorLog2(constant);
                     uint32_t rest = constant - (1 << shift);
-                    // See if the constant has one bit set, meaning it can be encoded as a bitshift
+                    // See if the constant has one bit set, meaning it can be
+                    // encoded as a bitshift.
                     if ((1 << shift) == constant) {
                         masm.ma_lsl(Imm32(shift), src, ToRegister(dest));
                         handled = true;
                     } else {
-                        // If the constant cannot be encoded as (1<<C1), see if it can be encoded as
-                        // (1<<C1) | (1<<C2), which can be computed using an add and a shift
+                        // If the constant cannot be encoded as (1 << C1), see
+                        // if it can be encoded as (1 << C1) | (1 << C2), which
+                        // can be computed using an add and a shift.
                         uint32_t shift_rest = FloorLog2(rest);
                         if ((1u << shift_rest) == rest) {
                             masm.as_add(ToRegister(dest), src, lsl(src, shift-shift_rest));
                             if (shift_rest != 0)
                                 masm.ma_lsl(Imm32(shift_rest), ToRegister(dest), ToRegister(dest));
                             handled = true;
                         }
                     }
                 } else if (ToRegister(lhs) != ToRegister(dest)) {
                     // To stay on the safe side, only optimize things that are a
                     // power of 2.
 
                     uint32_t shift = FloorLog2(constant);
                     if ((1 << shift) == constant) {
                         // dest = lhs * pow(2,shift)
                         masm.ma_lsl(Imm32(shift), ToRegister(lhs), ToRegister(dest));
-                        // At runtime, check (lhs == dest >> shift), if this does not hold,
-                        // some bits were lost due to overflow, and the computation should
-                        // be resumed as a double.
+                        // At runtime, check (lhs == dest >> shift), if this
+                        // does not hold, some bits were lost due to overflow,
+                        // and the computation should be resumed as a double.
                         masm.as_cmp(ToRegister(lhs), asr(ToRegister(dest), shift));
                         c = Assembler::NotEqual;
                         handled = true;
                     }
                 }
             }
 
             if (!handled) {
                 if (mul->canOverflow())
                     c = masm.ma_check_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), c);
                 else
                     masm.ma_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest));
             }
           }
         }
-        // Bailout on overflow
+        // Bailout on overflow.
         if (mul->canOverflow() && !bailoutIf(c, ins->snapshot()))
             return false;
     } else {
         Assembler::Condition c = Assembler::Overflow;
 
-        //masm.imull(ToOperand(rhs), ToRegister(lhs));
+        // masm.imull(ToOperand(rhs), ToRegister(lhs));
         if (mul->canOverflow())
             c = masm.ma_check_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), c);
         else
             masm.ma_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest));
 
-        // Bailout on overflow
+        // Bailout on overflow.
         if (mul->canOverflow() && !bailoutIf(c, ins->snapshot()))
             return false;
 
         if (mul->canBeNegativeZero()) {
             Label done;
             masm.ma_cmp(ToRegister(dest), Imm32(0));
             masm.ma_b(&done, Assembler::NotEqual);
 
@@ -529,18 +535,21 @@ CodeGeneratorARM::visitMulI(LMulI *ins)
 
 bool
 CodeGeneratorARM::divICommon(MDiv *mir, Register lhs, Register rhs, Register output,
                              LSnapshot *snapshot, Label &done)
 {
     if (mir->canBeNegativeOverflow()) {
         // Handle INT32_MIN / -1;
         // The integer division will give INT32_MIN, but we want -(double)INT32_MIN.
-        masm.ma_cmp(lhs, Imm32(INT32_MIN)); // sets EQ if lhs == INT32_MIN
-        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal); // if EQ (LHS == INT32_MIN), sets EQ if rhs == -1
+
+        // Sets EQ if lhs == INT32_MIN.
+        masm.ma_cmp(lhs, Imm32(INT32_MIN));
+        // If EQ (LHS == INT32_MIN), sets EQ if rhs == -1.
+        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal);
         if (mir->canTruncateOverflow()) {
             // (-INT32_MIN)|0 = INT32_MIN
             Label skip;
             masm.ma_b(&skip, Assembler::NotEqual);
             masm.ma_mov(Imm32(INT32_MIN), output);
             masm.ma_b(&done);
             masm.bind(&skip);
         } else {
@@ -580,17 +589,17 @@ CodeGeneratorARM::divICommon(MDiv *mir, 
     }
 
     return true;
 }
 
 bool
 CodeGeneratorARM::visitDivI(LDivI *ins)
 {
-    // Extract the registers from this instruction
+    // Extract the registers from this instruction.
     Register lhs = ToRegister(ins->lhs());
     Register rhs = ToRegister(ins->rhs());
     Register temp = ToRegister(ins->getTemp(0));
     Register output = ToRegister(ins->output());
     MDiv *mir = ins->mir();
 
     Label done;
     if (!divICommon(mir, lhs, rhs, output, ins->snapshot(), done))
@@ -615,17 +624,17 @@ CodeGeneratorARM::visitDivI(LDivI *ins)
 extern "C" {
     extern MOZ_EXPORT int64_t __aeabi_idivmod(int,int);
     extern MOZ_EXPORT int64_t __aeabi_uidivmod(int,int);
 }
 
 bool
 CodeGeneratorARM::visitSoftDivI(LSoftDivI *ins)
 {
-    // Extract the registers from this instruction
+    // Extract the registers from this instruction.
     Register lhs = ToRegister(ins->lhs());
     Register rhs = ToRegister(ins->rhs());
     Register output = ToRegister(ins->output());
     MDiv *mir = ins->mir();
 
     Label done;
     if (!divICommon(mir, lhs, rhs, output, ins->snapshot(), done))
         return false;
@@ -689,29 +698,30 @@ CodeGeneratorARM::visitDivPowTwoI(LDivPo
 
     return true;
 }
 
 bool
 CodeGeneratorARM::modICommon(MMod *mir, Register lhs, Register rhs, Register output,
                              LSnapshot *snapshot, Label &done)
 {
-    // 0/X (with X < 0) is bad because both of these values *should* be doubles, and
-    // the result should be -0.0, which cannot be represented in integers.
+    // 0/X (with X < 0) is bad because both of these values *should* be doubles,
+    // and the result should be -0.0, which cannot be represented in integers.
     // X/0 is bad because it will give garbage (or abort), when it should give
     // either \infty, -\infty or NAN.
 
     // Prevent 0 / X (with X < 0) and X / 0
-    // testing X / Y.  Compare Y with 0.
-    // There are three cases: (Y < 0), (Y == 0) and (Y > 0)
-    // If (Y < 0), then we compare X with 0, and bail if X == 0
-    // If (Y == 0), then we simply want to bail.  Since this does not set
-    // the flags necessary for LT to trigger, we don't test X, and take the
-    // bailout because the EQ flag is set.
-    // if (Y > 0), we don't set EQ, and we don't trigger LT, so we don't take the bailout.
+    // testing X / Y. Compare Y with 0.
+    // There are three cases: (Y < 0), (Y == 0) and (Y > 0).
+    // If (Y < 0), then we compare X with 0, and bail if X == 0.
+    // If (Y == 0), then we simply want to bail. Since this does not set the
+    // flags necessary for LT to trigger, we don't test X, and take the bailout
+    // because the EQ flag is set.
+    // If (Y > 0), we don't set EQ, and we don't trigger LT, so we don't take
+    // the bailout.
     if (mir->canBeDivideByZero() || mir->canBeNegativeDividend()) {
         masm.ma_cmp(rhs, Imm32(0));
         masm.ma_cmp(lhs, Imm32(0), Assembler::LessThan);
         if (mir->isTruncated()) {
             // NaN|0 == 0 and (0 % -X)|0 == 0
             Label skip;
             masm.ma_b(&skip, Assembler::NotEqual);
             masm.ma_mov(Imm32(0), output);
@@ -731,26 +741,26 @@ bool
 CodeGeneratorARM::visitModI(LModI *ins)
 {
     Register lhs = ToRegister(ins->lhs());
     Register rhs = ToRegister(ins->rhs());
     Register output = ToRegister(ins->output());
     Register callTemp = ToRegister(ins->callTemp());
     MMod *mir = ins->mir();
 
-    // save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
+    // Save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
     masm.ma_mov(lhs, callTemp);
 
     Label done;
     if (!modICommon(mir, lhs, rhs, output, ins->snapshot(), done))
         return false;
 
     masm.ma_smod(lhs, rhs, output);
 
-    // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
+    // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0.
     if (mir->canBeNegativeDividend()) {
         if (mir->isTruncated()) {
             // -0.0|0 == 0
         } else {
             JS_ASSERT(mir->fallible());
             // See if X < 0
             masm.ma_cmp(output, Imm32(0));
             masm.ma_b(&done, Assembler::NotEqual);
@@ -762,33 +772,35 @@ CodeGeneratorARM::visitModI(LModI *ins)
 
     masm.bind(&done);
     return true;
 }
 
 bool
 CodeGeneratorARM::visitSoftModI(LSoftModI *ins)
 {
-    // Extract the registers from this instruction
+    // Extract the registers from this instruction.
     Register lhs = ToRegister(ins->lhs());
     Register rhs = ToRegister(ins->rhs());
     Register output = ToRegister(ins->output());
     Register callTemp = ToRegister(ins->callTemp());
     MMod *mir = ins->mir();
     Label done;
 
-    // save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
+    // Save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
     JS_ASSERT(callTemp.code() > r3.code() && callTemp.code() < r12.code());
     masm.ma_mov(lhs, callTemp);
 
     // Prevent INT_MIN % -1;
     // The integer division will give INT_MIN, but we want -(double)INT_MIN.
     if (mir->canBeNegativeDividend()) {
-        masm.ma_cmp(lhs, Imm32(INT_MIN)); // sets EQ if lhs == INT_MIN
-        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal); // if EQ (LHS == INT_MIN), sets EQ if rhs == -1
+        // Sets EQ if lhs == INT_MIN
+        masm.ma_cmp(lhs, Imm32(INT_MIN));
+        // If EQ (LHS == INT_MIN), sets EQ if rhs == -1
+        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal);
         if (mir->isTruncated()) {
             // (INT_MIN % -1)|0 == 0
             Label skip;
             masm.ma_b(&skip, Assembler::NotEqual);
             masm.ma_mov(Imm32(0), output);
             masm.ma_b(&done);
             masm.bind(&skip);
         } else {
@@ -829,21 +841,22 @@ CodeGeneratorARM::visitSoftModI(LSoftMod
 
 bool
 CodeGeneratorARM::visitModPowTwoI(LModPowTwoI *ins)
 {
     Register in = ToRegister(ins->getOperand(0));
     Register out = ToRegister(ins->getDef(0));
     MMod *mir = ins->mir();
     Label fin;
-    // bug 739870, jbramley has a different sequence that may help with speed here
+    // bug 739870, jbramley has a different sequence that may help with speed
+    // here.
     masm.ma_mov(in, out, SetCond);
     masm.ma_b(&fin, Assembler::Zero);
     masm.ma_rsb(Imm32(0), out, NoSetCond, Assembler::Signed);
-    masm.ma_and(Imm32((1<<ins->shift())-1), out);
+    masm.ma_and(Imm32((1 << ins->shift()) - 1), out);
     masm.ma_rsb(Imm32(0), out, SetCond, Assembler::Signed);
     if (mir->canBeNegativeDividend()) {
         if (!mir->isTruncated()) {
             JS_ASSERT(mir->fallible());
             if (!bailoutIf(Assembler::Zero, ins->snapshot()))
                 return false;
         } else {
             // -0|0 == 0
@@ -873,32 +886,31 @@ CodeGeneratorARM::visitModMaskI(LModMask
     }
     return true;
 }
 bool
 CodeGeneratorARM::visitBitNotI(LBitNotI *ins)
 {
     const LAllocation *input = ins->getOperand(0);
     const LDefinition *dest = ins->getDef(0);
-    // this will not actually be true on arm.
-    // We can not an imm8m in order to get a wider range
-    // of numbers
+    // This will not actually be true on arm. We can not use an imm8m in order
+    // to get a wider range of numbers.
     JS_ASSERT(!input->isConstant());
 
     masm.ma_mvn(ToRegister(input), ToRegister(dest));
     return true;
 }
 
 bool
 CodeGeneratorARM::visitBitOpI(LBitOpI *ins)
 {
     const LAllocation *lhs = ins->getOperand(0);
     const LAllocation *rhs = ins->getOperand(1);
     const LDefinition *dest = ins->getDef(0);
-    // all of these bitops should be either imm32's, or integer registers.
+    // All of these bitops should be either imm32's, or integer registers.
     switch (ins->bitop()) {
       case JSOP_BITOR:
         if (rhs->isConstant())
             masm.ma_orr(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
         else
             masm.ma_orr(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
         break;
       case JSOP_BITXOR:
@@ -955,18 +967,18 @@ CodeGeneratorARM::visitShiftI(LShiftI *i
                 }
             }
             break;
           default:
             MOZ_ASSUME_UNREACHABLE("Unexpected shift op");
         }
     } else {
         // The shift amounts should be AND'ed into the 0-31 range since arm
-        // shifts by the lower byte of the register (it will attempt to shift
-        // by 250 if you ask it to).
+        // shifts by the lower byte of the register (it will attempt to shift by
+        // 250 if you ask it to).
         masm.ma_and(Imm32(0x1F), ToRegister(rhs), dest);
 
         switch (ins->bitop()) {
           case JSOP_LSH:
             masm.ma_lsl(dest, lhs, dest);
             break;
           case JSOP_RSH:
             masm.ma_asr(dest, lhs, dest);
@@ -1021,17 +1033,18 @@ CodeGeneratorARM::visitPowHalfD(LPowHalf
     Label done;
 
     // Masm.pow(-Infinity, 0.5) == Infinity.
     masm.ma_vimm(NegativeInfinity<double>(), ScratchFloatReg);
     masm.compareDouble(input, ScratchFloatReg);
     masm.ma_vneg(ScratchFloatReg, output, Assembler::Equal);
     masm.ma_b(&done, Assembler::Equal);
 
-    // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5). Adding 0 converts any -0 to 0.
+    // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
+    // Adding 0 converts any -0 to 0.
     masm.ma_vimm(0.0, ScratchFloatReg);
     masm.ma_vadd(ScratchFloatReg, input, output);
     masm.ma_vsqrt(output, output);
 
     masm.bind(&done);
     return true;
 }
 
@@ -1094,55 +1107,56 @@ CodeGeneratorARM::visitOutOfLineTableSwi
     }
 
     return true;
 }
 
 bool
 CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch *mir, Register index, Register base)
 {
-    // the code generated by this is utter hax.
-    // the end result looks something like:
+    // The code generated by this is utter hax.
+    // The end result looks something like:
     // SUBS index, input, #base
     // RSBSPL index, index, #max
     // LDRPL pc, pc, index lsl 2
     // B default
 
     // If the range of targets in N through M, we first subtract off the lowest
-    // case (N), which both shifts the arguments into the range 0 to (M-N) with
-    // and sets the MInus flag if the argument was out of range on the low end.
+    // case (N), which both shifts the arguments into the range 0 to (M - N)
+    // and sets the Minus flag if the argument was out of range on the low
+    // end.
 
     // Then we a reverse subtract with the size of the jump table, which will
     // reverse the order of range (It is size through 0, rather than 0 through
-    // size).  The main purpose of this is that we set the same flag as the lower
-    // bound check for the upper bound check.  Lastly, we do this conditionally
+    // size). The main purpose of this is that we set the same flag as the lower
+    // bound check for the upper bound check. Lastly, we do this conditionally
     // on the previous check succeeding.
 
     // Then we conditionally load the pc offset by the (reversed) index (times
-    // the address size) into the pc, which branches to the correct case.
-    // NOTE: when we go to read the pc, the value that we get back is the pc of
-    // the current instruction *PLUS 8*.  This means that ldr foo, [pc, +0]
-    // reads $pc+8.  In other words, there is an empty word after the branch into
-    // the switch table before the table actually starts.  Since the only other
-    // unhandled case is the default case (both out of range high and out of range low)
-    // I then insert a branch to default case into the extra slot, which ensures
-    // we don't attempt to execute the address table.
+    // the address size) into the pc, which branches to the correct case. NOTE:
+    // when we go to read the pc, the value that we get back is the pc of the
+    // current instruction *PLUS 8*. This means that ldr foo, [pc, +0] reads
+    // $pc+8. In other words, there is an empty word after the branch into the
+    // switch table before the table actually starts. Since the only other
+    // unhandled case is the default case (both out of range high and out of
+    // range low) I then insert a branch to default case into the extra slot,
+    // which ensures we don't attempt to execute the address table.
     Label *defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
 
     int32_t cases = mir->numCases();
-    // Lower value with low value
+    // Lower value with low value.
     masm.ma_sub(index, Imm32(mir->low()), index, SetCond);
     masm.ma_rsb(index, Imm32(cases - 1), index, SetCond, Assembler::NotSigned);
     AutoForbidPools afp(&masm);
     masm.ma_ldr(DTRAddr(pc, DtrRegImmShift(index, LSL, 2)), pc, Offset, Assembler::NotSigned);
     masm.ma_b(defaultcase);
 
-    // To fill in the CodeLabels for the case entries, we need to first
-    // generate the case entries (we don't yet know their offsets in the
-    // instruction stream).
+    // To fill in the CodeLabels for the case entries, we need to first generate
+    // the case entries (we don't yet know their offsets in the instruction
+    // stream).
     OutOfLineTableSwitch *ool = new(alloc()) OutOfLineTableSwitch(alloc(), mir);
     for (int32_t i = 0; i < cases; i++) {
         CodeLabel cl;
         masm.writeCodePointer(cl.dest());
         if (!ool->addCodeLabel(cl))
             return false;
     }
     if (!addOutOfLineCode(ool))
@@ -1253,33 +1267,33 @@ CodeGeneratorARM::visitCeilF(LCeilF *lir
 
 bool
 CodeGeneratorARM::visitRound(LRound *lir)
 {
     FloatRegister input = ToFloatRegister(lir->input());
     Register output = ToRegister(lir->output());
     FloatRegister tmp = ToFloatRegister(lir->temp());
     Label bail;
-    // Output is either correct, or clamped.  All -0 cases have been translated to a clamped
-    // case.a
+    // Output is either correct, or clamped. All -0 cases have been translated
+    // to a clamped case.
     masm.round(input, output, &bail, tmp);
     if (!bailoutFrom(&bail, lir->snapshot()))
         return false;
     return true;
 }
 
 bool
 CodeGeneratorARM::visitRoundF(LRoundF *lir)
 {
     FloatRegister input = ToFloatRegister(lir->input());
     Register output = ToRegister(lir->output());
     FloatRegister tmp = ToFloatRegister(lir->temp());
     Label bail;
-    // Output is either correct, or clamped.  All -0 cases have been translated to a clamped
-    // case.a
+    // Output is either correct, or clamped. All -0 cases have been translated
+    // to a clamped case.
     masm.roundf(input, output, &bail, tmp);
     if (!bailoutFrom(&bail, lir->snapshot()))
         return false;
     return true;
 }
 
 void
 CodeGeneratorARM::emitRoundDouble(FloatRegister src, Register dest, Label *fail)
@@ -1366,19 +1380,19 @@ CodeGeneratorARM::visitValue(LValue *val
 
 bool
 CodeGeneratorARM::visitBox(LBox *box)
 {
     const LDefinition *type = box->getDef(TYPE_INDEX);
 
     JS_ASSERT(!box->getOperand(0)->isConstant());
 
-    // On x86, the input operand and the output payload have the same
-    // virtual register. All that needs to be written is the type tag for
-    // the type definition.
+    // On x86, the input operand and the output payload have the same virtual
+    // register. All that needs to be written is the type tag for the type
+    // definition.
     masm.ma_mov(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
     return true;
 }
 
 bool
 CodeGeneratorARM::visitBoxFloatingPoint(LBoxFloatingPoint *box)
 {
     const LDefinition *payload = box->getDef(PAYLOAD_INDEX);
@@ -1441,40 +1455,38 @@ bool
 CodeGeneratorARM::visitTestDAndBranch(LTestDAndBranch *test)
 {
     const LAllocation *opd = test->input();
     masm.ma_vcmpz(ToFloatRegister(opd));
     masm.as_vmrs(pc);
 
     MBasicBlock *ifTrue = test->ifTrue();
     MBasicBlock *ifFalse = test->ifFalse();
-    // If the compare set the  0 bit, then the result
-    // is definately false.
+    // If the compare set the 0 bit, then the result is definitely false.
     jumpToBlock(ifFalse, Assembler::Zero);
-    // it is also false if one of the operands is NAN, which is
-    // shown as Overflow.
+    // It is also false if one of the operands is NAN, which is shown as
+    // Overflow.
     jumpToBlock(ifFalse, Assembler::Overflow);
     jumpToBlock(ifTrue);
     return true;
 }
 
 bool
 CodeGeneratorARM::visitTestFAndBranch(LTestFAndBranch *test)
 {
     const LAllocation *opd = test->input();
     masm.ma_vcmpz_f32(ToFloatRegister(opd));
     masm.as_vmrs(pc);
 
     MBasicBlock *ifTrue = test->ifTrue();
     MBasicBlock *ifFalse = test->ifFalse();
-    // If the compare set the  0 bit, then the result
-    // is definately false.
+    // If the compare set the 0 bit, then the result is definitely false.
     jumpToBlock(ifFalse, Assembler::Zero);
-    // it is also false if one of the operands is NAN, which is
-    // shown as Overflow.
+    // It is also false if one of the operands is NAN, which is shown as
+    // Overflow.
     jumpToBlock(ifFalse, Assembler::Overflow);
     jumpToBlock(ifTrue);
     return true;
 }
 
 bool
 CodeGeneratorARM::visitCompareD(LCompareD *comp)
 {
@@ -1656,61 +1668,61 @@ CodeGeneratorARM::visitNotI(LNotI *ins)
     masm.ma_cmp(ToRegister(ins->input()), Imm32(0));
     masm.emitSet(Assembler::Equal, ToRegister(ins->output()));
     return true;
 }
 
 bool
 CodeGeneratorARM::visitNotD(LNotD *ins)
 {
-    // Since this operation is not, we want to set a bit if
-    // the double is falsey, which means 0.0, -0.0 or NaN.
-    // when comparing with 0, an input of 0 will set the Z bit (30)
-    // and NaN will set the V bit (28) of the APSR.
+    // Since this operation is a logical not, we want to set a bit if the
+    // double is falsey, which means 0.0, -0.0 or NaN. When comparing with 0, an
+    // input of 0 will set the Z bit (30) and NaN will set the V bit (28) of the
+    // APSR.
     FloatRegister opd = ToFloatRegister(ins->input());
     Register dest = ToRegister(ins->output());
 
-    // Do the compare
+    // Do the compare.
     masm.ma_vcmpz(opd);
     // TODO There are three variations here to compare performance-wise.
     bool nocond = true;
     if (nocond) {
-        // Load the value into the dest register
+        // Load the value into the dest register.
         masm.as_vmrs(dest);
         masm.ma_lsr(Imm32(28), dest, dest);
-        masm.ma_alu(dest, lsr(dest, 2), dest, op_orr); // 28 + 2 = 30
+        // 28 + 2 = 30
+        masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
         masm.ma_and(Imm32(1), dest);
     } else {
         masm.as_vmrs(pc);
         masm.ma_mov(Imm32(0), dest);
         masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Equal);
         masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Overflow);
     }
     return true;
 }
 
 bool
 CodeGeneratorARM::visitNotF(LNotF *ins)
 {
-    // Since this operation is not, we want to set a bit if
-    // the double is falsey, which means 0.0, -0.0 or NaN.
-    // when comparing with 0, an input of 0 will set the Z bit (30)
-    // and NaN will set the V bit (28) of the APSR.
+    // Since this operation is a logical not, we want to set a bit if the
+    // double is falsey, which means 0.0, -0.0 or NaN. When comparing with 0, an
+    // input of 0 will set the Z bit (30) and NaN will set the V bit (28) of the
+    // APSR.
     FloatRegister opd = ToFloatRegister(ins->input());
     Register dest = ToRegister(ins->output());
 
-    // Do the compare
+    // Do the compare.
     masm.ma_vcmpz_f32(opd);
     // TODO There are three variations here to compare performance-wise.
     bool nocond = true;
     if (nocond) {
-        // Load the value into the dest register
+        // Load the value into the dest register.
         masm.as_vmrs(dest);
         masm.ma_lsr(Imm32(28), dest, dest);
-        masm.ma_alu(dest, lsr(dest, 2), dest, op_orr); // 28 + 2 = 30
+        // 28 + 2 = 30
+        masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
         masm.ma_and(Imm32(1), dest);
     } else {
         masm.as_vmrs(pc);
         masm.ma_mov(Imm32(0), dest);
         masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Equal);
         masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Overflow);
     }
     return true;
@@ -1753,35 +1765,34 @@ CodeGeneratorARM::visitGuardClass(LGuard
     if (!bailoutIf(Assembler::NotEqual, guard->snapshot()))
         return false;
     return true;
 }
 
 bool
 CodeGeneratorARM::generateInvalidateEpilogue()
 {
-    // Ensure that there is enough space in the buffer for the OsiPoint
-    // patching to occur. Otherwise, we could overwrite the invalidation
-    // epilogue.
-    for (size_t i = 0; i < sizeof(void *); i+= Assembler::nopSize())
+    // Ensure that there is enough space in the buffer for the OsiPoint patching
+    // to occur. Otherwise, we could overwrite the invalidation epilogue.
+    for (size_t i = 0; i < sizeof(void *); i += Assembler::NopSize())
         masm.nop();
 
     masm.bind(&invalidate_);
 
-    // Push the return address of the point that we bailed out at onto the stack
+    // Push the return address of the point that we bailed out at onto the stack.
     masm.Push(lr);
 
     // Push the Ion script onto the stack (when we determine what that pointer is).
     invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
     JitCode *thunk = gen->jitRuntime()->getInvalidationThunk();
 
     masm.branch(thunk);
 
-    // We should never reach this point in JIT code -- the invalidation thunk should
-    // pop the invalidated JS frame and return directly to its caller.
+    // We should never reach this point in JIT code -- the invalidation thunk
+    // should pop the invalidated JS frame and return directly to its caller.
     masm.assumeUnreachable("Should have returned directly to its caller instead of here.");
     return true;
 }
 
 void
 DispatchIonCache::initializeAddCacheState(LInstruction *ins, AddCacheState *addState)
 {
     // Can always use the scratch register on ARM.
--- a/js/src/jit/arm/CodeGenerator-arm.h
+++ b/js/src/jit/arm/CodeGenerator-arm.h
@@ -21,19 +21,19 @@ class CodeGeneratorARM : public CodeGene
     friend class MoveResolverARM;
 
     CodeGeneratorARM *thisFromCtor() {return this;}
 
   protected:
     // Label for the common return path.
     NonAssertingLabel returnLabel_;
     NonAssertingLabel deoptLabel_;
-    // ugh.  this is not going to be pretty to move over.
-    // stack slotted variables are not useful on arm.
-    // it looks like this will need to return one of two types.
+    // Ugh. This is not going to be pretty to move over. Stack slotted variables
+    // are not useful on arm. It looks like this will need to return one of two
+    // types.
     inline Operand ToOperand(const LAllocation &a) {
         if (a.isGeneralReg())
             return Operand(a.toGeneralReg()->reg());
         if (a.isFloatReg())
             return Operand(a.toFloatReg()->reg());
         return Operand(StackPointer, ToStackOffset(&a));
     }
     inline Operand ToOperand(const LAllocation *a) {
@@ -197,17 +197,17 @@ class CodeGeneratorARM : public CodeGene
     bool visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins);
     bool visitAsmJSPassStackArg(LAsmJSPassStackArg *ins);
 
     bool visitForkJoinGetSlice(LForkJoinGetSlice *ins);
 
     bool generateInvalidateEpilogue();
   protected:
     void postAsmJSCall(LAsmJSCall *lir) {
-        if (!useHardFpABI() && lir->mir()->callee().which() == MAsmJSCall::Callee::Builtin) {
+        if (!UseHardFpABI() && lir->mir()->callee().which() == MAsmJSCall::Callee::Builtin) {
             switch (lir->mir()->type()) {
               case MIRType_Double:
                 masm.ma_vxfer(r0, r1, d0);
                 break;
               case MIRType_Float32:
                 masm.as_vxfer(r0, InvalidReg, VFPRegister(d0).singleOverlay(),
                               Assembler::CoreToFloat);
                 break;
--- a/js/src/jit/arm/LIR-arm.h
+++ b/js/src/jit/arm/LIR-arm.h
@@ -139,20 +139,20 @@ class LDivI : public LBinaryMath<1>
 // LSoftDivI is a software divide for ARM cores that don't support a hardware
 // divide instruction.
 //
 // It is implemented as a proper C function so it trashes r0, r1, r2 and r3.
 // The call also trashes lr, and has the ability to trash ip. The function also
 // takes two arguments (dividend in r0, divisor in r1). The LInstruction gets
 // encoded such that the divisor and dividend are passed in their apropriate
 // registers and end their life at the start of the instruction by the use of
-// useFixedAtStart.  The result is returned in r0 and the other three registers
-// that can be trashed are marked as temps.  For the time being, the link
+// useFixedAtStart. The result is returned in r0 and the other three registers
+// that can be trashed are marked as temps. For the time being, the link
 // register is not marked as trashed because we never allocate to the link
-// register.  The FP registers are not trashed.
+// register. The FP registers are not trashed.
 class LSoftDivI : public LBinaryMath<3>
 {
   public:
     LIR_HEADER(SoftDivI);
 
     LSoftDivI(const LAllocation &lhs, const LAllocation &rhs,
               const LDefinition &temp1, const LDefinition &temp2, const LDefinition &temp3) {
         setOperand(0, lhs);
@@ -299,17 +299,17 @@ class LPowHalfD : public LInstructionHel
     const LAllocation *input() {
         return getOperand(0);
     }
     const LDefinition *output() {
         return getDef(0);
     }
 };
 
-// Takes a tableswitch with an integer to decide
+// Takes a tableswitch with an integer to decide.
 class LTableSwitch : public LInstructionHelper<0, 1, 1>
 {
   public:
     LIR_HEADER(TableSwitch);
 
     LTableSwitch(const LAllocation &in, const LDefinition &inputCopy, MTableSwitch *ins) {
         setOperand(0, in);
         setTemp(0, inputCopy);
@@ -327,17 +327,17 @@ class LTableSwitch : public LInstruction
         return getTemp(0);
     }
     // This is added to share the same CodeGenerator prefixes.
     const LDefinition *tempPointer() {
         return nullptr;
     }
 };
 
-// Takes a tableswitch with an integer to decide
+// Takes a tableswitch with an integer to decide.
 class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 2>
 {
   public:
     LIR_HEADER(TableSwitchV);
 
     LTableSwitchV(const LDefinition &inputCopy, const LDefinition &floatCopy,
                   MTableSwitch *ins)
     {
@@ -428,18 +428,16 @@ class LUMod : public LBinaryMath<0>
   public:
     LIR_HEADER(UMod);
 
     MMod *mir() {
         return mir_->toMod();
     }
 };
 
-// This class performs a simple x86 'div', yielding either a quotient or remainder depending on
-// whether this instruction is defined to output eax (quotient) or edx (remainder).
 class LSoftUDivOrMod : public LBinaryMath<3>
 {
   public:
     LIR_HEADER(SoftUDivOrMod);
 
     LSoftUDivOrMod(const LAllocation &lhs, const LAllocation &rhs, const LDefinition &temp1,
                    const LDefinition &temp2, const LDefinition &temp3) {
         setOperand(0, lhs);
--- a/js/src/jit/arm/Lowering-arm.cpp
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -278,17 +278,17 @@ LIRGeneratorARM::lowerDivI(MDiv *div)
         if (rhs > 0 && 1 << shift == rhs) {
             LDivPowTwoI *lir = new(alloc()) LDivPowTwoI(useRegisterAtStart(div->lhs()), shift);
             if (div->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
                 return false;
             return define(lir, div);
         }
     }
 
-    if (hasIDIV()) {
+    if (HasIDIV()) {
         LDivI *lir = new(alloc()) LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
         if (div->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
             return false;
         return define(lir, div);
     }
 
     LSoftDivI *lir = new(alloc()) LSoftDivI(useFixedAtStart(div->lhs(), r0), useFixedAtStart(div->rhs(), r1),
                                             tempFixed(r1), tempFixed(r2), tempFixed(r3));
@@ -323,17 +323,17 @@ LIRGeneratorARM::lowerModI(MMod *mod)
         } else if (shift < 31 && (1 << (shift+1)) - 1 == rhs) {
             LModMaskI *lir = new(alloc()) LModMaskI(useRegister(mod->lhs()), temp(), temp(), shift+1);
             if (mod->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
                 return false;
             return define(lir, mod);
         }
     }
 
-    if (hasIDIV()) {
+    if (HasIDIV()) {
         LModI *lir = new(alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
         if (mod->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
             return false;
         return define(lir, mod);
     }
 
     LSoftModI *lir = new(alloc()) LSoftModI(useFixedAtStart(mod->lhs(), r0), useFixedAtStart(mod->rhs(), r1),
                                             tempFixed(r0), tempFixed(r2), tempFixed(r3),
@@ -420,17 +420,17 @@ LIRGeneratorARM::visitAsmJSNeg(MAsmJSNeg
 }
 
 bool
 LIRGeneratorARM::lowerUDiv(MDiv *div)
 {
     MDefinition *lhs = div->getOperand(0);
     MDefinition *rhs = div->getOperand(1);
 
-    if (hasIDIV()) {
+    if (HasIDIV()) {
         LUDiv *lir = new(alloc()) LUDiv;
         lir->setOperand(0, useRegister(lhs));
         lir->setOperand(1, useRegister(rhs));
         if (div->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
             return false;
         return define(lir, div);
     } else {
         LSoftUDivOrMod *lir = new(alloc()) LSoftUDivOrMod(useFixedAtStart(lhs, r0), useFixedAtStart(rhs, r1),
@@ -442,17 +442,17 @@ LIRGeneratorARM::lowerUDiv(MDiv *div)
 }
 
 bool
 LIRGeneratorARM::lowerUMod(MMod *mod)
 {
     MDefinition *lhs = mod->getOperand(0);
     MDefinition *rhs = mod->getOperand(1);
 
-    if (hasIDIV()) {
+    if (HasIDIV()) {
         LUMod *lir = new(alloc()) LUMod;
         lir->setOperand(0, useRegister(lhs));
         lir->setOperand(1, useRegister(rhs));
         if (mod->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
             return false;
         return define(lir, mod);
     } else {
         LSoftUDivOrMod *lir = new(alloc()) LSoftUDivOrMod(useFixedAtStart(lhs, r0), useFixedAtStart(rhs, r1),
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -21,18 +21,18 @@ using namespace jit;
 
 using mozilla::Abs;
 using mozilla::BitwiseCast;
 
 bool
 isValueDTRDCandidate(ValueOperand &val)
 {
     // In order to be used for a DTRD memory function, the two target registers
-    // need to be a) Adjacent, with the tag larger than the payload, and
-    // b) Aligned to a multiple of two.
+    // need to be a) Adjacent, with the tag larger than the payload, and b)
+    // Aligned to a multiple of two.
     if ((val.typeReg().code() != (val.payloadReg().code() + 1)))
         return false;
     if ((val.payloadReg().code() & 1) != 0)
         return false;
     return true;
 }
 
 void
@@ -41,61 +41,62 @@ MacroAssemblerARM::convertBoolToInt32(Re
     // Note that C++ bool is only 1 byte, so zero extend it to clear the
     // higher-order bits.
     ma_and(Imm32(0xff), source, dest);
 }
 
 void
 MacroAssemblerARM::convertInt32ToDouble(Register src, FloatRegister dest_)
 {
-    // direct conversions aren't possible.
+    // Direct conversions aren't possible.
     VFPRegister dest = VFPRegister(dest_);
     as_vxfer(src, InvalidReg, dest.sintOverlay(),
              CoreToFloat);
     as_vcvt(dest, dest.sintOverlay());
 }
 
 void
 MacroAssemblerARM::convertInt32ToDouble(const Address &src, FloatRegister dest)
 {
     ma_vldr(Operand(src), ScratchFloatReg);
     as_vcvt(dest, VFPRegister(ScratchFloatReg).sintOverlay());
 }
 
 void
 MacroAssemblerARM::convertUInt32ToDouble(Register src, FloatRegister dest_)
 {
-    // direct conversions aren't possible.
+    // Direct conversions aren't possible.
     VFPRegister dest = VFPRegister(dest_);
     as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat);
     as_vcvt(dest, dest.uintOverlay());
 }
 
 void
 MacroAssemblerARM::convertUInt32ToFloat32(Register src, FloatRegister dest_)
 {
-    // direct conversions aren't possible.
+    // Direct conversions aren't possible.
     VFPRegister dest = VFPRegister(dest_);
     as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat);
     as_vcvt(VFPRegister(dest).singleOverlay(), dest.uintOverlay());
 }
 
 void MacroAssemblerARM::convertDoubleToFloat32(FloatRegister src, FloatRegister dest,
                                                Condition c)
 {
     as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src), false, c);
 }
 
-// there are two options for implementing emitTruncateDouble.
-// 1) convert the floating point value to an integer, if it did not fit,
-//        then it was clamped to INT_MIN/INT_MAX, and we can test it.
-//        NOTE: if the value really was supposed to be INT_MAX / INT_MIN
-//        then it will be wrong.
-// 2) convert the floating point value to an integer, if it did not fit,
-//        then it set one or two bits in the fpcsr.  Check those.
+// There are two options for implementing emitTruncateDouble:
+//
+// 1. Convert the floating point value to an integer, if it did not fit, then it
+// was clamped to INT_MIN/INT_MAX, and we can test it. NOTE: if the value
+// really was supposed to be INT_MAX / INT_MIN then it will be wrong.
+//
+// 2. Convert the floating point value to an integer, if it did not fit, then it
+// set one or two bits in the fpcsr. Check those.
 void
 MacroAssemblerARM::branchTruncateDouble(FloatRegister src, Register dest, Label *fail)
 {
     ma_vcvt_F64_I32(src, ScratchFloatReg);
     ma_vxfer(ScratchFloatReg, dest);
     ma_cmp(dest, Imm32(0x7fffffff));
     ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual);
     ma_b(fail, Assembler::Equal);
@@ -103,61 +104,61 @@ MacroAssemblerARM::branchTruncateDouble(
 
 // Checks whether a double is representable as a 32-bit integer. If so, the
 // integer is written to the output register. Otherwise, a bailout is taken to
 // the given snapshot. This function overwrites the scratch float register.
 void
 MacroAssemblerARM::convertDoubleToInt32(FloatRegister src, Register dest,
                                         Label *fail, bool negativeZeroCheck)
 {
-    // convert the floating point value to an integer, if it did not fit,
-    //     then when we convert it *back* to  a float, it will have a
-    //     different value, which we can test.
+    // Convert the floating point value to an integer, if it did not fit, then
+    // when we convert it *back* to a float, it will have a different value,
+    // which we can test.
     ma_vcvt_F64_I32(src, ScratchFloatReg);
-    // move the value into the dest register.
+    // Move the value into the dest register.
     ma_vxfer(ScratchFloatReg, dest);
     ma_vcvt_I32_F64(ScratchFloatReg, ScratchFloatReg);
     ma_vcmp(src, ScratchFloatReg);
     as_vmrs(pc);
     ma_b(fail, Assembler::VFP_NotEqualOrUnordered);
 
     if (negativeZeroCheck) {
         ma_cmp(dest, Imm32(0));
-        // Test and bail for -0.0, when integer result is 0
-        // Move the top word of the double into the output reg, if it is non-zero,
-        // then the original value was -0.0
+        // Test and bail for -0.0, when integer result is 0. Move the top word
+        // of the double into the output reg, if it is non-zero, then the
+        // original value was -0.0.
         as_vxfer(dest, InvalidReg, src, FloatToCore, Assembler::Equal, 1);
         ma_cmp(dest, Imm32(0x80000000), Assembler::Equal);
         ma_b(fail, Assembler::Equal);
     }
 }
 
 // Checks whether a float32 is representable as a 32-bit integer. If so, the
 // integer is written to the output register. Otherwise, a bailout is taken to
 // the given snapshot. This function overwrites the scratch float register.
 void
 MacroAssemblerARM::convertFloat32ToInt32(FloatRegister src, Register dest,
                                          Label *fail, bool negativeZeroCheck)
 {
-    // convert the floating point value to an integer, if it did not fit,
-    //     then when we convert it *back* to  a float, it will have a
-    //     different value, which we can test.
+    // Convert the floating point value to an integer, if it did not fit, then
+    // when we convert it *back* to a float, it will have a different value,
+    // which we can test.
     ma_vcvt_F32_I32(src, ScratchFloatReg);
-    // move the value into the dest register.
+    // Move the value into the dest register.
     ma_vxfer(ScratchFloatReg, dest);
     ma_vcvt_I32_F32(ScratchFloatReg, ScratchFloatReg);
     ma_vcmp_f32(src, ScratchFloatReg);
     as_vmrs(pc);
     ma_b(fail, Assembler::VFP_NotEqualOrUnordered);
 
     if (negativeZeroCheck) {
         ma_cmp(dest, Imm32(0));
-        // Test and bail for -0.0, when integer result is 0
-        // Move the float into the output reg, and if it is non-zero then
-        // the original value was -0.0
+        // Test and bail for -0.0, when integer result is 0. Move the float into
+        // the output reg, and if it is non-zero then the original value was
+        // -0.0.
         as_vxfer(dest, InvalidReg, VFPRegister(src).singleOverlay(), FloatToCore, Assembler::Equal, 0);
         ma_cmp(dest, Imm32(0x80000000), Assembler::Equal);
         ma_b(fail, Assembler::Equal);
     }
 }
 
 void
 MacroAssemblerARM::convertFloat32ToDouble(FloatRegister src, FloatRegister dest) {
@@ -170,17 +171,17 @@ MacroAssemblerARM::branchTruncateFloat32
     ma_vxfer(ScratchFloatReg, dest);
     ma_cmp(dest, Imm32(0x7fffffff));
     ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual);
     ma_b(fail, Assembler::Equal);
 }
 
 void
 MacroAssemblerARM::convertInt32ToFloat32(Register src, FloatRegister dest_) {
-    // direct conversions aren't possible.
+    // Direct conversions aren't possible.
     VFPRegister dest = VFPRegister(dest_).singleOverlay();
     as_vxfer(src, InvalidReg, dest.sintOverlay(),
              CoreToFloat);
     as_vcvt(dest, dest.sintOverlay());
 }
 
 void
 MacroAssemblerARM::convertInt32ToFloat32(const Address &src, FloatRegister dest) {
@@ -239,156 +240,146 @@ MacroAssemblerARM::inc64(AbsoluteAddress
 
 bool
 MacroAssemblerARM::alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op,
                            SetCond_ sc, Condition c)
 {
     if ((sc == SetCond && ! condsAreSafe(op)) || !can_dbl(op))
         return false;
     ALUOp interop = getDestVariant(op);
-    Imm8::TwoImm8mData both = Imm8::encodeTwoImms(imm.value);
+    Imm8::TwoImm8mData both = Imm8::EncodeTwoImms(imm.value);
     if (both.fst.invalid)
         return false;
-    // for the most part, there is no good reason to set the condition
-    // codes for the first instruction.
-    // we can do better things if the second instruction doesn't
-    // have a dest, such as check for overflow by doing first operation
-    // don't do second operation if first operation overflowed.
-    // this preserves the overflow condition code.
-    // unfortunately, it is horribly brittle.
+    // For the most part, there is no good reason to set the condition codes
+    // for the first instruction. We can do better things if the second
+    // instruction doesn't have a dest, such as checking for overflow by doing
+    // the first operation and skipping the second if the first overflowed.
+    // This preserves the overflow condition code. Unfortunately, it is horribly brittle.
     as_alu(ScratchRegister, src1, both.fst, interop, NoSetCond, c);
     as_alu(dest, ScratchRegister, both.snd, op, sc, c);
     return true;
 }
 
 
 void
 MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest,
                           ALUOp op,
                           SetCond_ sc, Condition c)
 {
-    // As it turns out, if you ask for a compare-like instruction
-    // you *probably* want it to set condition codes.
+    // As it turns out, if you ask for a compare-like instruction you *probably*
+    // want it to set condition codes.
     if (dest == InvalidReg)
         JS_ASSERT(sc == SetCond);
 
-    // The operator gives us the ability to determine how
-    // this can be used.
+    // The operator gives us the ability to determine how this can be used.
     Imm8 imm8 = Imm8(imm.value);
-    // ONE INSTRUCTION:
-    // If we can encode it using an imm8m, then do so.
+    // One instruction: If we can encode it using an imm8m, then do so.
     if (!imm8.invalid) {
         as_alu(dest, src1, imm8, op, sc, c);
         return;
     }
-    // ONE INSTRUCTION, NEGATED:
+    // One instruction, negated:
     Imm32 negImm = imm;
     Register negDest;
     ALUOp negOp = ALUNeg(op, dest, &negImm, &negDest);
     Imm8 negImm8 = Imm8(negImm.value);
-    // add r1, r2, -15 can be replaced with
-    // sub r1, r2, 15
-    // for bonus points, dest can be replaced (nearly always invalid => ScratchRegister)
-    // This is useful if we wish to negate tst.  tst has an invalid (aka not used) dest,
-    // but its negation is bic *requires* a dest.  We can accomodate, but it will need to clobber
-    // *something*, and the scratch register isn't being used, so...
-    if (negOp != op_invalid && !negImm8.invalid) {
+    // 'add r1, r2, -15' can be replaced with 'sub r1, r2, 15'. For bonus
+    // points, dest can be replaced (nearly always invalid => ScratchRegister).
+    // This is useful if we wish to negate tst. tst has an invalid (aka not
+    // used) dest, but its negation, bic, *requires* a dest. We can accommodate,
+    // but it will need to clobber *something*, and the scratch register isn't
+    // being used, so...
+    if (negOp != OpInvalid && !negImm8.invalid) {
         as_alu(negDest, src1, negImm8, negOp, sc, c);
         return;
     }
 
-    if (hasMOVWT()) {
-        // If the operation is a move-a-like then we can try to use movw to
-        // move the bits into the destination.  Otherwise, we'll need to
-        // fall back on a multi-instruction format :(
-        // movw/movt don't set condition codes, so don't hold your breath.
-        if (sc == NoSetCond && (op == op_mov || op == op_mvn)) {
-            // ARMv7 supports movw/movt. movw zero-extends
-            // its 16 bit argument, so we can set the register
-            // this way.
-            // movt leaves the bottom 16 bits in tact, so
-            // it is unsuitable to move a constant that
-            if (op == op_mov && ((imm.value & ~ 0xffff) == 0)) {
+    if (HasMOVWT()) {
+        // If the operation is a move-a-like then we can try to use movw to move
+        // the bits into the destination. Otherwise, we'll need to fall back on
+        // a multi-instruction format :(
+        // movw/movt does not set condition codes, so don't hold your breath.
+        if (sc == NoSetCond && (op == OpMov || op == OpMvn)) {
+            // ARMv7 supports movw/movt. movw zero-extends its 16 bit argument,
+            // so we can set the register this way. movt leaves the bottom 16
+            // bits intact, so it is unsuitable to move a constant that
+            if (op == OpMov && ((imm.value & ~ 0xffff) == 0)) {
                 JS_ASSERT(src1 == InvalidReg);
                 as_movw(dest, (uint16_t)imm.value, c);
                 return;
             }
 
             // If they asked for a mvn rfoo, imm, where ~imm fits into 16 bits
             // then do it.
-            if (op == op_mvn && (((~imm.value) & ~ 0xffff) == 0)) {
+            if (op == OpMvn && (((~imm.value) & ~ 0xffff) == 0)) {
                 JS_ASSERT(src1 == InvalidReg);
                 as_movw(dest, (uint16_t)~imm.value, c);
                 return;
             }
 
-            // TODO: constant dedup may enable us to add dest, r0, 23 *if*
-            // we are attempting to load a constant that looks similar to one
-            // that already exists
-            // If it can't be done with a single movw
-            // then we *need* to use two instructions
-            // since this must be some sort of a move operation, we can just use
-            // a movw/movt pair and get the whole thing done in two moves.  This
-            // does not work for ops like add, sinc we'd need to do
-            // movw tmp; movt tmp; add dest, tmp, src1
-            if (op == op_mvn)
+            // TODO: constant dedup may enable us to add dest, r0, 23 *if* we
+            // are attempting to load a constant that looks similar to one that
+            // already exists. If it can't be done with a single movw, then we
+            // *need* to use two instructions. Since this must be some sort of
+            // a move operation, we can just use a movw/movt pair and get the
+            // whole thing done in two moves. This does not work for ops like
+            // add, since we'd need to do: movw tmp; movt tmp; add dest, tmp,
+            // src1.
+            if (op == OpMvn)
                 imm.value = ~imm.value;
             as_movw(dest, imm.value & 0xffff, c);
             as_movt(dest, (imm.value >> 16) & 0xffff, c);
             return;
         }
-        // If we weren't doing a movalike, a 16 bit immediate
-        // will require 2 instructions.  With the same amount of
-        // space and (less)time, we can do two 8 bit operations, reusing
-        // the dest register.  e.g.
-        // movw tmp, 0xffff; add dest, src, tmp ror 4
+        // If we weren't doing a movalike, a 16 bit immediate will require 2
+        // instructions. With the same amount of space and (less)time, we can do
+        // two 8 bit operations, reusing the dest register. e.g.
+        //  movw tmp, 0xffff; add dest, src, tmp ror 4
         // vs.
-        // add dest, src, 0xff0; add dest, dest, 0xf000000f
-        // it turns out that there are some immediates that we miss with the
-        // second approach.  A sample value is: add dest, src, 0x1fffe
-        // this can be done by movw tmp, 0xffff; add dest, src, tmp lsl 1
-        // since imm8m's only get even offsets, we cannot encode this.
-        // I'll try to encode as two imm8's first, since they are faster.
-        // Both operations should take 1 cycle, where as add dest, tmp ror 4
-        // takes two cycles to execute.
+        //  add dest, src, 0xff0; add dest, dest, 0xf000000f
+        //
+        // It turns out that there are some immediates that we miss with the
+        // second approach. A sample value is: add dest, src, 0x1fffe. This can
+        // be done by movw tmp, 0xffff; add dest, src, tmp lsl 1. Since imm8m's
+        // only get even offsets, we cannot encode this. I'll try to encode as
+        // two imm8's first, since they are faster. Both operations should take
+        // 1 cycle, whereas add dest, tmp ror 4 takes two cycles to execute.
     }
 
-    // Either a) this isn't ARMv7 b) this isn't a move
-    // start by attempting to generate a two instruction form.
-    // Some things cannot be made into two-inst forms correctly.
-    // namely, adds dest, src, 0xffff.
-    // Since we want the condition codes (and don't know which ones will
-    // be checked), we need to assume that the overflow flag will be checked
-    // and add{,s} dest, src, 0xff00; add{,s} dest, dest, 0xff is not
-    // guaranteed to set the overflow flag the same as the (theoretical)
-    // one instruction variant.
+    // Either a) this isn't ARMv7, or b) this isn't a move. Start by attempting
+    // to generate a two instruction form. Some things cannot be made into
+    // two-inst forms correctly. Namely, adds dest, src, 0xffff. Since we want the
+    // condition codes (and don't know which ones will be checked), we need to
+    // assume that the overflow flag will be checked and add{,s} dest, src,
+    // 0xff00; add{,s} dest, dest, 0xff is not guaranteed to set the overflow
+    // flag the same as the (theoretical) one instruction variant.
     if (alu_dbl(src1, imm, dest, op, sc, c))
         return;
 
     // And try with its negative.
-    if (negOp != op_invalid &&
+    if (negOp != OpInvalid &&
         alu_dbl(src1, negImm, negDest, negOp, sc, c))
         return;
 
-    // Well, damn. We can use two 16 bit mov's, then do the op
-    // or we can do a single load from a pool then op.
-    if (hasMOVWT()) {
-        // Try to load the immediate into a scratch register
-        // then use that
+    // Well, damn. We can use two 16 bit mov's, then do the op or we can do a
+    // single load from a pool then op.
+    if (HasMOVWT()) {
+        // Try to load the immediate into a scratch register, then use that.
         as_movw(ScratchRegister, imm.value & 0xffff, c);
         if ((imm.value >> 16) != 0)
             as_movt(ScratchRegister, (imm.value >> 16) & 0xffff, c);
     } else {
-        // Going to have to use a load.  If the operation is a move, then just move it into the
-        // destination register
-        if (op == op_mov) {
+        // Going to have to use a load. If the operation is a move, then just
+        // move it into the destination register.
+        if (op == OpMov) {
             as_Imm32Pool(dest, imm.value, c);
             return;
         } else {
-            // If this isn't just going into a register, then stick it in a temp, and then proceed.
+            // If this isn't just going into a register, then stick it in a
+            // temp, and then proceed.
             as_Imm32Pool(ScratchRegister, imm.value, c);
         }
     }
     as_alu(dest, src1, O2Reg(ScratchRegister), op, sc, c);
 }
 
 void
 MacroAssemblerARM::ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
@@ -419,26 +410,26 @@ NextInst(Instruction *i)
 }
 
 void
 MacroAssemblerARM::ma_movPatchable(Imm32 imm_, Register dest, Assembler::Condition c,
                                    RelocStyle rs, Instruction *i)
 {
     int32_t imm = imm_.value;
     if (i) {
-        // Make sure the current instruction is not an artificial guard
-        // inserted by the assembler buffer.
+        // Make sure the current instruction is not an artificial guard inserted
+        // by the assembler buffer.
         i = i->skipPool();
     }
     switch(rs) {
       case L_MOVWT:
         as_movw(dest, Imm16(imm & 0xffff), c, i);
-        // i can be nullptr here.  that just means "insert in the next in sequence."
-        // NextInst is special cased to not do anything when it is passed nullptr, so
-        // two consecutive instructions will be inserted.
+        // 'i' can be nullptr here. That just means "insert in the next in
+        // sequence." NextInst is special cased to not do anything when it is
+        // passed nullptr, so two consecutive instructions will be inserted.
         i = NextInst(i);
         as_movt(dest, Imm16(imm >> 16 & 0xffff), c, i);
         break;
       case L_LDR:
         if(i == nullptr)
             as_Imm32Pool(dest, imm, c);
         else
             as_WritePoolEntry(i, c, imm);
@@ -460,42 +451,42 @@ MacroAssemblerARM::ma_mov(Register src, 
     if (sc == SetCond || dest != src)
         as_mov(dest, O2Reg(src), sc, c);
 }
 
 void
 MacroAssemblerARM::ma_mov(Imm32 imm, Register dest,
                           SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(InvalidReg, imm, dest, op_mov, sc, c);
+    ma_alu(InvalidReg, imm, dest, OpMov, sc, c);
 }
 
 void
 MacroAssemblerARM::ma_mov(ImmWord imm, Register dest,
                           SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(InvalidReg, Imm32(imm.value), dest, op_mov, sc, c);
+    ma_alu(InvalidReg, Imm32(imm.value), dest, OpMov, sc, c);
 }
 
 void
 MacroAssemblerARM::ma_mov(ImmGCPtr ptr, Register dest)
 {
     // As opposed to x86/x64 version, the data relocation has to be executed
     // before to recover the pointer, and not after.
     writeDataRelocation(ptr);
     RelocStyle rs;
-    if (hasMOVWT())
+    if (HasMOVWT())
         rs = L_MOVWT;
     else
         rs = L_LDR;
 
     ma_movPatchable(Imm32(uintptr_t(ptr.value)), dest, Always, rs);
 }
 
-    // Shifts (just a move with a shifting op2)
+// Shifts (just a move with a shifting op2)
 void
 MacroAssemblerARM::ma_lsl(Imm32 shift, Register src, Register dst)
 {
     as_mov(dst, lsl(src, shift.value));
 }
 void
 MacroAssemblerARM::ma_lsr(Imm32 shift, Register src, Register dst)
 {
@@ -511,17 +502,18 @@ MacroAssemblerARM::ma_ror(Imm32 shift, R
 {
     as_mov(dst, ror(src, shift.value));
 }
 void
 MacroAssemblerARM::ma_rol(Imm32 shift, Register src, Register dst)
 {
     as_mov(dst, rol(src, shift.value));
 }
-    // Shifts (just a move with a shifting op2)
+
+// Shifts (just a move with a shifting op2)
 void
 MacroAssemblerARM::ma_lsl(Register shift, Register src, Register dst)
 {
     as_mov(dst, lsl(src, shift));
 }
 void
 MacroAssemblerARM::ma_lsr(Register shift, Register src, Register dst)
 {
@@ -539,28 +531,27 @@ MacroAssemblerARM::ma_ror(Register shift
 }
 void
 MacroAssemblerARM::ma_rol(Register shift, Register src, Register dst)
 {
     ma_rsb(shift, Imm32(32), ScratchRegister);
     as_mov(dst, ror(src, ScratchRegister));
 }
 
-    // Move not (dest <- ~src)
-
+// Move not (dest <- ~src)
 void
 MacroAssemblerARM::ma_mvn(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(InvalidReg, imm, dest, op_mvn, sc, c);
+    ma_alu(InvalidReg, imm, dest, OpMvn, sc, c);
 }
 
 void
 MacroAssemblerARM::ma_mvn(Register src1, Register dest, SetCond_ sc, Assembler::Condition c)
 {
-    as_alu(dest, InvalidReg, O2Reg(src1), op_mvn, sc, c);
+    as_alu(dest, InvalidReg, O2Reg(src1), OpMvn, sc, c);
 }
 
 // Negate (dest <- -src), src is a register, rather than a general op2.
 void
 MacroAssemblerARM::ma_neg(Register src1, Register dest, SetCond_ sc, Assembler::Condition c)
 {
     as_rsb(dest, src1, Imm8(0), sc, c);
 }
@@ -575,31 +566,30 @@ void
 MacroAssemblerARM::ma_and(Register src1, Register src2, Register dest,
                           SetCond_ sc, Assembler::Condition c)
 {
     as_and(dest, src1, O2Reg(src2), sc, c);
 }
 void
 MacroAssemblerARM::ma_and(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(dest, imm, dest, op_and, sc, c);
+    ma_alu(dest, imm, dest, OpAnd, sc, c);
 }
 void
 MacroAssemblerARM::ma_and(Imm32 imm, Register src1, Register dest,
                           SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(src1, imm, dest, op_and, sc, c);
-}
-
+    ma_alu(src1, imm, dest, OpAnd, sc, c);
+}
 
 // Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2).
 void
 MacroAssemblerARM::ma_bic(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(dest, imm, dest, op_bic, sc, c);
+    ma_alu(dest, imm, dest, OpBic, sc, c);
 }
 
 // Exclusive or.
 void
 MacroAssemblerARM::ma_eor(Register src, Register dest, SetCond_ sc, Assembler::Condition c)
 {
     ma_eor(dest, src, dest, sc, c);
 }
@@ -607,23 +597,23 @@ void
 MacroAssemblerARM::ma_eor(Register src1, Register src2, Register dest,
                           SetCond_ sc, Assembler::Condition c)
 {
     as_eor(dest, src1, O2Reg(src2), sc, c);
 }
 void
 MacroAssemblerARM::ma_eor(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(dest, imm, dest, op_eor, sc, c);
+    ma_alu(dest, imm, dest, OpEor, sc, c);
 }
 void
 MacroAssemblerARM::ma_eor(Imm32 imm, Register src1, Register dest,
        SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(src1, imm, dest, op_eor, sc, c);
+    ma_alu(src1, imm, dest, OpEor, sc, c);
 }
 
 // Or.
 void
 MacroAssemblerARM::ma_orr(Register src, Register dest, SetCond_ sc, Assembler::Condition c)
 {
     ma_orr(dest, src, dest, sc, c);
 }
@@ -631,177 +621,177 @@ void
 MacroAssemblerARM::ma_orr(Register src1, Register src2, Register dest,
                           SetCond_ sc, Assembler::Condition c)
 {
     as_orr(dest, src1, O2Reg(src2), sc, c);
 }
 void
 MacroAssemblerARM::ma_orr(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(dest, imm, dest, op_orr, sc, c);
+    ma_alu(dest, imm, dest, OpOrr, sc, c);
 }
 void
 MacroAssemblerARM::ma_orr(Imm32 imm, Register src1, Register dest,
                           SetCond_ sc, Assembler::Condition c)
 {
-    ma_alu(src1, imm, dest, op_orr, sc, c);
+    ma_alu(src1, imm, dest, OpOrr, sc, c);
 }
 
 // Arithmetic-based ops.
 // Add with carry.
 void
 MacroAssemblerARM::ma_adc(Imm32 imm, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(dest, imm, dest, op_adc, sc, c);
+    ma_alu(dest, imm, dest, OpAdc, sc, c);
 }
 void
 MacroAssemblerARM::ma_adc(Register src, Register dest, SetCond_ sc, Condition c)
 {
-    as_alu(dest, dest, O2Reg(src), op_adc, sc, c);
+    as_alu(dest, dest, O2Reg(src), OpAdc, sc, c);
 }
 void
 MacroAssemblerARM::ma_adc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
 {
-    as_alu(dest, src1, O2Reg(src2), op_adc, sc, c);
+    as_alu(dest, src1, O2Reg(src2), OpAdc, sc, c);
 }
 
 // Add.
 void
 MacroAssemblerARM::ma_add(Imm32 imm, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(dest, imm, dest, op_add, sc, c);
+    ma_alu(dest, imm, dest, OpAdd, sc, c);
 }
 
 void
 MacroAssemblerARM::ma_add(Register src1, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(dest, O2Reg(src1), dest, op_add, sc, c);
+    ma_alu(dest, O2Reg(src1), dest, OpAdd, sc, c);
 }
 void
 MacroAssemblerARM::ma_add(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
 {
-    as_alu(dest, src1, O2Reg(src2), op_add, sc, c);
+    as_alu(dest, src1, O2Reg(src2), OpAdd, sc, c);
 }
 void
 MacroAssemblerARM::ma_add(Register src1, Operand op, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(src1, op, dest, op_add, sc, c);
+    ma_alu(src1, op, dest, OpAdd, sc, c);
 }
 void
 MacroAssemblerARM::ma_add(Register src1, Imm32 op, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(src1, op, dest, op_add, sc, c);
+    ma_alu(src1, op, dest, OpAdd, sc, c);
 }
 
 // Subtract with carry.
 void
 MacroAssemblerARM::ma_sbc(Imm32 imm, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(dest, imm, dest, op_sbc, sc, c);
+    ma_alu(dest, imm, dest, OpSbc, sc, c);
 }
 void
 MacroAssemblerARM::ma_sbc(Register src1, Register dest, SetCond_ sc, Condition c)
 {
-    as_alu(dest, dest, O2Reg(src1), op_sbc, sc, c);
+    as_alu(dest, dest, O2Reg(src1), OpSbc, sc, c);
 }
 void
 MacroAssemblerARM::ma_sbc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
 {
-    as_alu(dest, src1, O2Reg(src2), op_sbc, sc, c);
+    as_alu(dest, src1, O2Reg(src2), OpSbc, sc, c);
 }
 
 // Subtract.
 void
 MacroAssemblerARM::ma_sub(Imm32 imm, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(dest, imm, dest, op_sub, sc, c);
+    ma_alu(dest, imm, dest, OpSub, sc, c);
 }
 void
 MacroAssemblerARM::ma_sub(Register src1, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(dest, Operand(src1), dest, op_sub, sc, c);
+    ma_alu(dest, Operand(src1), dest, OpSub, sc, c);
 }
 void
 MacroAssemblerARM::ma_sub(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(src1, Operand(src2), dest, op_sub, sc, c);
+    ma_alu(src1, Operand(src2), dest, OpSub, sc, c);
 }
 void
 MacroAssemblerARM::ma_sub(Register src1, Operand op, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(src1, op, dest, op_sub, sc, c);
+    ma_alu(src1, op, dest, OpSub, sc, c);
 }
 void
 MacroAssemblerARM::ma_sub(Register src1, Imm32 op, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(src1, op, dest, op_sub, sc, c);
+    ma_alu(src1, op, dest, OpSub, sc, c);
 }
 
-// Severse subtract.
+// Reverse subtract.
 void
 MacroAssemblerARM::ma_rsb(Imm32 imm, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(dest, imm, dest, op_rsb, sc, c);
+    ma_alu(dest, imm, dest, OpRsb, sc, c);
 }
 void
 MacroAssemblerARM::ma_rsb(Register src1, Register dest, SetCond_ sc, Condition c)
 {
-    as_alu(dest, dest, O2Reg(src1), op_add, sc, c);
+    as_alu(dest, dest, O2Reg(src1), OpAdd, sc, c);
 }
 void
 MacroAssemblerARM::ma_rsb(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
 {
-    as_alu(dest, src1, O2Reg(src2), op_rsb, sc, c);
+    as_alu(dest, src1, O2Reg(src2), OpRsb, sc, c);
 }
 void
 MacroAssemblerARM::ma_rsb(Register src1, Imm32 op2, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(src1, op2, dest, op_rsb, sc, c);
+    ma_alu(src1, op2, dest, OpRsb, sc, c);
 }
 
 // Reverse subtract with carry.
 void
 MacroAssemblerARM::ma_rsc(Imm32 imm, Register dest, SetCond_ sc, Condition c)
 {
-    ma_alu(dest, imm, dest, op_rsc, sc, c);
+    ma_alu(dest, imm, dest, OpRsc, sc, c);
 }
 void
 MacroAssemblerARM::ma_rsc(Register src1, Register dest, SetCond_ sc, Condition c)
 {
-    as_alu(dest, dest, O2Reg(src1), op_rsc, sc, c);
+    as_alu(dest, dest, O2Reg(src1), OpRsc, sc, c);
 }
 void
 MacroAssemblerARM::ma_rsc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
 {
-    as_alu(dest, src1, O2Reg(src2), op_rsc, sc, c);
+    as_alu(dest, src1, O2Reg(src2), OpRsc, sc, c);
 }
 
 // Compares/tests.
 // Compare negative (sets condition codes as src1 + src2 would).
 void
 MacroAssemblerARM::ma_cmn(Register src1, Imm32 imm, Condition c)
 {
-    ma_alu(src1, imm, InvalidReg, op_cmn, SetCond, c);
+    ma_alu(src1, imm, InvalidReg, OpCmn, SetCond, c);
 }
 void
 MacroAssemblerARM::ma_cmn(Register src1, Register src2, Condition c)
 {
-    as_alu(InvalidReg, src2, O2Reg(src1), op_cmn, SetCond, c);
+    as_alu(InvalidReg, src2, O2Reg(src1), OpCmn, SetCond, c);
 }
 void
 MacroAssemblerARM::ma_cmn(Register src1, Operand op, Condition c)
 {
     MOZ_ASSUME_UNREACHABLE("Feature NYI");
 }
 
 // Compare (src - src2).
 void
 MacroAssemblerARM::ma_cmp(Register src1, Imm32 imm, Condition c)
 {
-    ma_alu(src1, imm, InvalidReg, op_cmp, SetCond, c);
+    ma_alu(src1, imm, InvalidReg, OpCmp, SetCond, c);
 }
 
 void
 MacroAssemblerARM::ma_cmp(Register src1, ImmWord ptr, Condition c)
 {
     ma_cmp(src1, Imm32(ptr.value), c);
 }
 
@@ -827,21 +817,21 @@ MacroAssemblerARM::ma_cmp(Register src1,
     }
 }
 void
 MacroAssemblerARM::ma_cmp(Register src1, Register src2, Condition c)
 {
     as_cmp(src1, O2Reg(src2), c);
 }
 
-// Test for equality, (src1^src2).
+// Test for equality, (src1 ^ src2).
 void
 MacroAssemblerARM::ma_teq(Register src1, Imm32 imm, Condition c)
 {
-    ma_alu(src1, imm, InvalidReg, op_teq, SetCond, c);
+    ma_alu(src1, imm, InvalidReg, OpTeq, SetCond, c);
 }
 void
 MacroAssemblerARM::ma_teq(Register src1, Register src2, Condition c)
 {
     as_tst(src1, O2Reg(src2), c);
 }
 void
 MacroAssemblerARM::ma_teq(Register src1, Operand op, Condition c)
@@ -849,17 +839,17 @@ MacroAssemblerARM::ma_teq(Register src1,
     as_teq(src1, op.toOp2(), c);
 }
 
 
 // Test (src1 & src2).
 void
 MacroAssemblerARM::ma_tst(Register src1, Imm32 imm, Condition c)
 {
-    ma_alu(src1, imm, InvalidReg, op_tst, SetCond, c);
+    ma_alu(src1, imm, InvalidReg, OpTst, SetCond, c);
 }
 void
 MacroAssemblerARM::ma_tst(Register src1, Register src2, Condition c)
 {
     as_tst(src1, O2Reg(src2), c);
 }
 void
 MacroAssemblerARM::ma_tst(Register src1, Operand op, Condition c)
@@ -878,18 +868,18 @@ MacroAssemblerARM::ma_mul(Register src1,
 
     ma_mov(imm, ScratchRegister);
     as_mul( dest, src1, ScratchRegister);
 }
 
 Assembler::Condition
 MacroAssemblerARM::ma_check_mul(Register src1, Register src2, Register dest, Condition cond)
 {
-    // TODO: this operation is illegal on armv6 and earlier if src2 == ScratchRegister
-    //       or src2 == dest.
+    // TODO: this operation is illegal on armv6 and earlier if src2 ==
+    // ScratchRegister or src2 == dest.
     if (cond == Equal || cond == NotEqual) {
         as_smull(ScratchRegister, dest, src1, src2, SetCond);
         return cond;
     }
 
     if (cond == Overflow) {
         as_smull(ScratchRegister, dest, src1, src2);
         as_cmp(ScratchRegister, asr(dest, 31));
@@ -916,91 +906,95 @@ MacroAssemblerARM::ma_check_mul(Register
 
     MOZ_ASSUME_UNREACHABLE("Condition NYI");
 }
 
 void
 MacroAssemblerARM::ma_mod_mask(Register src, Register dest, Register hold, Register tmp,
                                int32_t shift)
 {
-    // MATH:
-    // We wish to compute x % (1<<y) - 1 for a known constant, y.
+    // We wish to compute x % ((1<<y) - 1) for a known constant, y.
-    // first, let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit dividend as
+    //
+    // 1. Let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit dividend as
     // a number in base b, namely c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
-    // now, since both addition and multiplication commute with modulus,
-    // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
-    // (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)...
-    // now, since b == C + 1, b % C == 1, and b^n % C == 1
-    // this means that the whole thing simplifies to:
-    // c_0 + c_1 + c_2 ... c_n % C
-    // each c_n can easily be computed by a shift/bitextract, and the modulus can be maintained
-    // by simply subtracting by C whenever the number gets over C.
+    //
+    // 2. Since both addition and multiplication commute with modulus:
+    //   x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
+    //    (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)...
+    //
+    // 3. Since b == C + 1, b % C == 1, and b^n % C == 1 the whole thing
+    // simplifies to: c_0 + c_1 + c_2 ... c_n % C
+    //
+    // Each c_n can easily be computed by a shift/bitextract, and the modulus
+    // can be maintained by simply subtracting by C whenever the number gets
+    // over C.
     int32_t mask = (1 << shift) - 1;
     Label head;
 
-    // hold holds -1 if the value was negative, 1 otherwise.
-    // ScratchRegister holds the remaining bits that have not been processed
-    // lr serves as a temporary location to store extracted bits into as well
-    //    as holding the trial subtraction as a temp value
-    // dest is the accumulator (and holds the final result)
-
-    // move the whole value into tmp, setting the codition codes so we can
-    // muck with them later.
+    // Register 'hold' holds -1 if the value was negative, 1 otherwise. The
+    // ScratchRegister holds the remaining bits that have not been processed.
+    // lr serves as a temporary location to store extracted bits into, as well
+    // as holding the trial subtraction as a temp value. dest is the
+    // accumulator (and holds the final result).
+    //
+    // Move the whole value into tmp, setting the condition codes so we can
+    // muck with them later.
     //
     // Note that we cannot use ScratchRegister in place of tmp here, as ma_and
-    // below on certain architectures move the mask into ScratchRegister
-    // before performing the bitwise and.
+    // below on certain architectures moves the mask into ScratchRegister
+    // before performing the bitwise and.
     as_mov(tmp, O2Reg(src), SetCond);
     // Zero out the dest.
     ma_mov(Imm32(0), dest);
     // Set the hold appropriately.
     ma_mov(Imm32(1), hold);
     ma_mov(Imm32(-1), hold, NoSetCond, Signed);
     ma_rsb(Imm32(0), tmp, SetCond, Signed);
     // Begin the main loop.
     bind(&head);
 
     // Extract the bottom bits into lr.
     ma_and(Imm32(mask), tmp, secondScratchReg_);
     // Add those bits to the accumulator.
     ma_add(secondScratchReg_, dest, dest);
-    // Do a trial subtraction, this is the same operation as cmp, but we store the dest
+    // Do a trial subtraction, this is the same operation as cmp, but we store
+    // the dest.
     ma_sub(dest, Imm32(mask), secondScratchReg_, SetCond);
     // If (sum - C) > 0, store sum - C back into sum, thus performing a modulus.
     ma_mov(secondScratchReg_, dest, NoSetCond, NotSigned);
-    // Get rid of the bits that we extracted before, and set the condition codes
+    // Get rid of the bits that we extracted before, and set the condition codes.
     as_mov(tmp, lsr(tmp, shift), SetCond);
     // If the shift produced zero, finish, otherwise, continue in the loop.
     ma_b(&head, NonZero);
-    // Check the hold to see if we need to negate the result.  Hold can only be 1 or -1,
-    // so this will never set the 0 flag.
+    // Check the hold to see if we need to negate the result. Hold can only be
+    // 1 or -1, so this will never set the 0 flag.
     ma_cmp(hold, Imm32(0));
-    // If the hold was non-zero, negate the result to be in line with what JS wants
-    // this will set the condition codes if we try to negate
+    // If the hold was non-zero, negate the result to be in line with what JS
+    // wants. This will set the condition codes if we try to negate.
     ma_rsb(Imm32(0), dest, SetCond, Signed);
-    // Since the Zero flag is not set by the compare, we can *only* set the Zero flag
-    // in the rsb, so Zero is set iff we negated zero (e.g. the result of the computation was -0.0).
-
+    // Since the Zero flag is not set by the compare, we can *only* set the Zero
+    // flag in the rsb, so Zero is set iff we negated zero (e.g. the result of
+    // the computation was -0.0).
 }
 
 void
 MacroAssemblerARM::ma_smod(Register num, Register div, Register dest)
 {
     as_sdiv(ScratchRegister, num, div);
     as_mls(dest, num, ScratchRegister, div);
 }
 
 void
 MacroAssemblerARM::ma_umod(Register num, Register div, Register dest)
 {
     as_udiv(ScratchRegister, num, div);
     as_mls(dest, num, ScratchRegister, div);
 }
 
-// division
+// Division
 void
 MacroAssemblerARM::ma_sdiv(Register num, Register div, Register dest, Condition cond)
 {
     as_sdiv(dest, num, div, cond);
 }
 
 void
 MacroAssemblerARM::ma_udiv(Register num, Register div, Register dest, Condition cond)
@@ -1124,144 +1118,169 @@ MacroAssemblerARM::ma_dataTransferN(Load
 }
 
 BufferOffset
 MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
                                     Register rn, Imm32 offset, Register rt,
                                     Index mode, Assembler::Condition cc)
 {
     int off = offset.value;
-    // we can encode this as a standard ldr... MAKE IT SO
+    // We can encode this as a standard ldr.
     if (size == 32 || (size == 8 && !IsSigned) ) {
         if (off < 4096 && off > -4096) {
             // This encodes as a single instruction, Emulating mode's behavior
             // in a multi-instruction sequence is not necessary.
             return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrOffImm(off)), cc);
         }
 
         // We cannot encode this offset in a a single ldr. For mode == index,
         // try to encode it as |add scratch, base, imm; ldr dest, [scratch, +offset]|.
         // This does not wark for mode == PreIndex or mode == PostIndex.
-        // PreIndex is simple, just do the add into the base register first, then do
-        // a PreIndex'ed load. PostIndexed loads can be tricky.  Normally, doing the load with
-        // an index of 0, then doing an add would work, but if the destination is the PC,
-        // you don't get to execute the instruction after the branch, which will lead to
-        // the base register not being updated correctly. Explicitly handle this case, without
+        // PreIndex is simple, just do the add into the base register first,
+        // then do a PreIndex'ed load. PostIndexed loads can be tricky.
+        // Normally, doing the load with an index of 0, then doing an add would
+        // work, but if the destination is the PC, you don't get to execute the
+        // instruction after the branch, which will lead to the base register
+        // not being updated correctly. Explicitly handle this case, without
         // doing anything fancy, then handle all of the other cases.
 
         // mode == Offset
         //  add   scratch, base, offset_hi
         //  ldr   dest, [scratch, +offset_lo]
         //
         // mode == PreIndex
         //  add   base, base, offset_hi
         //  ldr   dest, [base, +offset_lo]!
         //
         // mode == PostIndex, dest == pc
         //  ldr   scratch, [base]
         //  add   base, base, offset_hi
         //  add   base, base, offset_lo
         //  mov   dest, scratch
         // PostIndex with the pc as the destination needs to be handled
-        // specially, since in the code below, the write into 'dest'
-        // is going to alter the control flow, so the following instruction would
-        // never get emitted.
+        // specially, since in the code below, the write into 'dest' is going to
+        // alter the control flow, so the following instruction would never get
+        // emitted.
         //
         // mode == PostIndex, dest != pc
         //  ldr   dest, [base], offset_lo
         //  add   base, base, offset_hi
 
         if (rt == pc && mode == PostIndex && ls == IsLoad) {
             ma_mov(rn, ScratchRegister);
-            ma_alu(rn, offset, rn, op_add);
+            ma_alu(rn, offset, rn, OpAdd);
             return as_dtr(IsLoad, size, Offset, pc, DTRAddr(ScratchRegister, DtrOffImm(0)), cc);
         }
 
         int bottom = off & 0xfff;
         int neg_bottom = 0x1000 - bottom;
-        // For a regular offset, base == ScratchRegister does what we want.  Modify the
-        // scratch register, leaving the actual base unscathed.
+        // For a regular offset, base == ScratchRegister does what we want.
+        // Modify the scratch register, leaving the actual base unscathed.
         Register base = ScratchRegister;
-        // For the preindex case, we want to just re-use rn as the base register, so when
-        // the base register is updated *before* the load, rn is updated.
+        // For the preindex case, we want to just re-use rn as the base
+        // register, so when the base register is updated *before* the load, rn
+        // is updated.
         if (mode == PreIndex)
             base = rn;
         JS_ASSERT(mode != PostIndex);
-        // At this point, both off - bottom and off + neg_bottom will be reasonable-ish quantities.
+        // At this point, both off - bottom and off + neg_bottom will be
+        // reasonable-ish quantities.
         //
-        // Note a neg_bottom of 0x1000 can not be encoded as an immediate negative offset in the
-        // instruction and this occurs when bottom is zero, so this case is guarded against below.
+        // Note a neg_bottom of 0x1000 can not be encoded as an immediate
+        // negative offset in the instruction and this occurs when bottom is
+        // zero, so this case is guarded against below.
         if (off < 0) {
-            Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off
+            Operand2 sub_off = Imm8(-(off - bottom)); // sub_off = bottom - off
             if (!sub_off.invalid) {
-                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = off - bottom
+                // - sub_off = off - bottom
+                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc);
                 return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc);
             }
-            sub_off = Imm8(-(off+neg_bottom));// sub_off = -neg_bottom - off
+            // sub_off = -neg_bottom - off
+            sub_off = Imm8(-(off + neg_bottom));
             if (!sub_off.invalid && bottom != 0) {
-                JS_ASSERT(neg_bottom < 0x1000);  // Guarded against by: bottom != 0
-                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off
+                // Guarded against by: bottom != 0
+                JS_ASSERT(neg_bottom < 0x1000);
+                // - sub_off = neg_bottom + off
+                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc);
                 return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc);
             }
         } else {
-            Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom
+            // sub_off = off - bottom
+            Operand2 sub_off = Imm8(off - bottom);
             if (!sub_off.invalid) {
-                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); //  sub_off = off - bottom
+                // sub_off = off - bottom
+                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc);
                 return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc);
             }
-            sub_off = Imm8(off+neg_bottom);// sub_off = neg_bottom + off
+            // sub_off = neg_bottom + off
+            sub_off = Imm8(off + neg_bottom);
             if (!sub_off.invalid && bottom != 0) {
-                JS_ASSERT(neg_bottom < 0x1000);  // Guarded against by: bottom != 0
-                as_add(ScratchRegister, rn, sub_off, NoSetCond,  cc); // sub_off = neg_bottom + off
+                // Guarded against by: bottom != 0
+                JS_ASSERT(neg_bottom < 0x1000);
+                // sub_off = neg_bottom + off
+                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc);
                 return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc);
             }
         }
         ma_mov(offset, ScratchRegister);
         return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(ScratchRegister, LSL, 0)));
     } else {
-        // should attempt to use the extended load/store instructions
+        // Should attempt to use the extended load/store instructions.
         if (off < 256 && off > -256)
             return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffImm(off)), cc);
 
-        // We cannot encode this offset in a single extldr.  Try to encode it as
+        // We cannot encode this offset in a single extldr. Try to encode it as
         // an add scratch, base, imm; extldr dest, [scratch, +offset].
         int bottom = off & 0xff;
         int neg_bottom = 0x100 - bottom;
-        // At this point, both off - bottom and off + neg_bottom will be reasonable-ish quantities.
+        // At this point, both off - bottom and off + neg_bottom will be
+        // reasonable-ish quantities.
         //
-        // Note a neg_bottom of 0x100 can not be encoded as an immediate negative offset in the
-        // instruction and this occurs when bottom is zero, so this case is guarded against below.
+        // Note a neg_bottom of 0x100 can not be encoded as an immediate
+        // negative offset in the instruction and this occurs when bottom is
+        // zero, so this case is guarded against below.
         if (off < 0) {
-            Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off
+            // sub_off = bottom - off
+            Operand2 sub_off = Imm8(-(off - bottom));
             if (!sub_off.invalid) {
-                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = off - bottom
+                // - sub_off = off - bottom
+                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc);
                 return as_extdtr(ls, size, IsSigned, Offset, rt,
                                  EDtrAddr(ScratchRegister, EDtrOffImm(bottom)),
                                  cc);
             }
-            sub_off = Imm8(-(off+neg_bottom));// sub_off = -neg_bottom - off
+            // sub_off = -neg_bottom - off
+            sub_off = Imm8(-(off + neg_bottom));
             if (!sub_off.invalid && bottom != 0) {
-                JS_ASSERT(neg_bottom < 0x100);  // Guarded against by: bottom != 0
-                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off
+                // Guarded against by: bottom != 0
+                JS_ASSERT(neg_bottom < 0x100);
+                // - sub_off = neg_bottom + off
+                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc);
                 return as_extdtr(ls, size, IsSigned, Offset, rt,
                                  EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)),
                                  cc);
             }
         } else {
-            Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom
+            // sub_off = off - bottom
+            Operand2 sub_off = Imm8(off - bottom);
             if (!sub_off.invalid) {
-                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); //  sub_off = off - bottom
+                // sub_off = off - bottom
+                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc);
                 return as_extdtr(ls, size, IsSigned, Offset, rt,
                                  EDtrAddr(ScratchRegister, EDtrOffImm(bottom)),
                                  cc);
             }
-            sub_off = Imm8(off+neg_bottom);// sub_off = neg_bottom + off
+            // sub_off = neg_bottom + off
+            sub_off = Imm8(off + neg_bottom);
             if (!sub_off.invalid && bottom != 0) {
-                JS_ASSERT(neg_bottom < 0x100);  // Guarded against by: bottom != 0
-                as_add(ScratchRegister, rn, sub_off, NoSetCond,  cc); // sub_off = neg_bottom + off
+                // Guarded against by: bottom != 0
+                JS_ASSERT(neg_bottom < 0x100);
+                // sub_off = neg_bottom + off
+                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc);
                 return as_extdtr(ls, size, IsSigned, Offset, rt,
                                  EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)),
                                  cc);
             }
         }
         ma_mov(offset, ScratchRegister);
         return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(ScratchRegister)), cc);
     }
@@ -1316,26 +1335,25 @@ MacroAssemblerARM::ma_bx(Register dest, 
 static Assembler::RelocBranchStyle
 b_type()
 {
     return Assembler::B_LDR;
 }
 void
 MacroAssemblerARM::ma_b(void *target, Relocation::Kind reloc, Assembler::Condition c)
 {
-    // we know the absolute address of the target, but not our final
-    // location (with relocating GC, we *can't* know our final location)
-    // for now, I'm going to be conservative, and load this with an
-    // absolute address
+    // We know the absolute address of the target, but not our final location
+    // (with relocating GC, we *can't* know our final location). For now, be
+    // conservative, and load this with an absolute address.
     uint32_t trg = (uint32_t)target;
     switch (b_type()) {
       case Assembler::B_MOVWT:
         as_movw(ScratchRegister, Imm16(trg & 0xffff), c);
         as_movt(ScratchRegister, Imm16(trg >> 16), c);
-        // this is going to get the branch predictor pissed off.
+        // This is going to get the branch predictor pissed off.
         as_bx(ScratchRegister, c);
         break;
       case Assembler::B_LDR_BX:
         as_Imm32Pool(ScratchRegister, trg, c);
         as_bx(ScratchRegister, c);
         break;
       case Assembler::B_LDR:
         as_Imm32Pool(pc, trg, c);
@@ -1472,21 +1490,21 @@ static inline uint32_t
 DoubleLowWord(const double value)
 {
     return BitwiseCast<uint64_t>(value) & uint32_t(0xffffffff);
 }
 
 void
 MacroAssemblerARM::ma_vimm(double value, FloatRegister dest, Condition cc)
 {
-    if (hasVFPv3()) {
+    if (HasVFPv3()) {
         if (DoubleLowWord(value) == 0) {
             if (DoubleHighWord(value) == 0) {
                 // To zero a register, load 1.0, then execute dN <- dN - dN
-                as_vimm(dest, VFPImm::one, cc);
+                as_vimm(dest, VFPImm::One, cc);
                 as_vsub(dest, dest, dest, cc);
                 return;
             }
 
             VFPImm enc(DoubleHighWord(value));
             if (enc.isValid()) {
                 as_vimm(dest, enc, cc);
                 return;
@@ -1503,30 +1521,31 @@ Float32Word(const float value)
 {
     return BitwiseCast<uint32_t>(value);
 }
 
 void
 MacroAssemblerARM::ma_vimm_f32(float value, FloatRegister dest, Condition cc)
 {
     VFPRegister vd = VFPRegister(dest).singleOverlay();
-    if (hasVFPv3()) {
+    if (HasVFPv3()) {
         if (Float32Word(value) == 0) {
-            // To zero a register, load 1.0, then execute sN <- sN - sN
-            as_vimm(vd, VFPImm::one, cc);
+            // To zero a register, load 1.0, then execute sN <- sN - sN.
+            as_vimm(vd, VFPImm::One, cc);
             as_vsub(vd, vd, vd, cc);
             return;
         }
 
-        // Note that the vimm immediate float32 instruction encoding differs from the
-        // vimm immediate double encoding, but this difference matches the difference
-        // in the floating point formats, so it is possible to convert the float32 to
-        // a double and then use the double encoding paths.  It is still necessary to
-        // firstly check that the double low word is zero because some float32
-        // numbers set these bits and this can not be ignored.
+        // Note that the vimm immediate float32 instruction encoding differs
+        // from the vimm immediate double encoding, but this difference matches
+        // the difference in the floating point formats, so it is possible to
+        // convert the float32 to a double and then use the double encoding
+        // paths. It is still necessary to firstly check that the double low
+        // word is zero because some float32 numbers set these bits and this can
+        // not be ignored.
         double doubleValue = value;
         if (DoubleLowWord(value) == 0) {
             VFPImm enc(DoubleHighWord(doubleValue));
             if (enc.isValid()) {
                 as_vimm(vd, enc, cc);
                 return;
             }
         }
@@ -1632,46 +1651,58 @@ BufferOffset
 MacroAssemblerARM::ma_vdtr(LoadStore ls, const Operand &addr, VFPRegister rt, Condition cc)
 {
     int off = addr.disp();
     JS_ASSERT((off & 3) == 0);
     Register base = Register::FromCode(addr.base());
     if (off > -1024 && off < 1024)
         return as_vdtr(ls, rt, addr.toVFPAddr(), cc);
 
-    // We cannot encode this offset in a a single ldr.  Try to encode it as
-    // an add scratch, base, imm; ldr dest, [scratch, +offset].
+    // We cannot encode this offset in a single ldr. Try to encode it as an
+    // add scratch, base, imm; ldr dest, [scratch, +offset].
     int bottom = off & (0xff << 2);
     int neg_bottom = (0x100 << 2) - bottom;
-    // At this point, both off - bottom and off + neg_bottom will be reasonable-ish quantities.
+    // At this point, both off - bottom and off + neg_bottom will be
+    // reasonable-ish quantities.
     //
-    // Note a neg_bottom of 0x400 can not be encoded as an immediate negative offset in the
-    // instruction and this occurs when bottom is zero, so this case is guarded against below.
+    // Note a neg_bottom of 0x400 can not be encoded as an immediate negative
+    // offset in the instruction and this occurs when bottom is zero, so this
+    // case is guarded against below.
     if (off < 0) {
-        Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off
+        // sub_off = bottom - off
+        Operand2 sub_off = Imm8(-(off - bottom));
         if (!sub_off.invalid) {
-            as_sub(ScratchRegister, base, sub_off, NoSetCond, cc); // - sub_off = off - bottom
+            // - sub_off = off - bottom
+            as_sub(ScratchRegister, base, sub_off, NoSetCond, cc);
             return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(bottom)), cc);
         }
-        sub_off = Imm8(-(off+neg_bottom));// sub_off = -neg_bottom - off
+        // sub_off = -neg_bottom - off
+        sub_off = Imm8(-(off + neg_bottom));
         if (!sub_off.invalid && bottom != 0) {
-            JS_ASSERT(neg_bottom < 0x400);  // Guarded against by: bottom != 0
-            as_sub(ScratchRegister, base, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off
+            // Guarded against by: bottom != 0
+            JS_ASSERT(neg_bottom < 0x400);
+            // - sub_off = neg_bottom + off
+            as_sub(ScratchRegister, base, sub_off, NoSetCond, cc);
             return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(-neg_bottom)), cc);
         }
     } else {
-        Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom
+        // sub_off = off - bottom
+        Operand2 sub_off = Imm8(off - bottom);
         if (!sub_off.invalid) {
-            as_add(ScratchRegister, base, sub_off, NoSetCond, cc); //  sub_off = off - bottom
+            // sub_off = off - bottom
+            as_add(ScratchRegister, base, sub_off, NoSetCond, cc);
             return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(bottom)), cc);
         }
-        sub_off = Imm8(off+neg_bottom);// sub_off = neg_bottom + off
+        // sub_off = neg_bottom + off
+        sub_off = Imm8(off + neg_bottom);
         if (!sub_off.invalid && bottom != 0) {
-            JS_ASSERT(neg_bottom < 0x400);  // Guarded against by: bottom != 0
-            as_add(ScratchRegister, base, sub_off, NoSetCond,  cc); // sub_off = neg_bottom + off
+            // Guarded against by: bottom != 0
+            JS_ASSERT(neg_bottom < 0x400);
+            // sub_off = neg_bottom + off
+            as_add(ScratchRegister, base, sub_off, NoSetCond, cc);
             return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(-neg_bottom)), cc);
         }
     }
     ma_add(base, Imm32(off), ScratchRegister, NoSetCond, cc);
     return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(0)), cc);
 }
 
 BufferOffset
@@ -1750,17 +1781,17 @@ MacroAssemblerARMCompat::buildOOLFakeExi
 void
 MacroAssemblerARMCompat::callWithExitFrame(JitCode *target)
 {
     uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);
     Push(Imm32(descriptor)); // descriptor
 
     addPendingJump(m_buffer.nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE);
     RelocStyle rs;
-    if (hasMOVWT())
+    if (HasMOVWT())
         rs = L_MOVWT;
     else
         rs = L_LDR;
 
     ma_movPatchable(ImmPtr(target->raw()), ScratchRegister, Always, rs);
     ma_callIonHalfPush(ScratchRegister);
 }
 
@@ -1768,17 +1799,17 @@ void
 MacroAssemblerARMCompat::callWithExitFrame(JitCode *target, Register dynStack)
 {
     ma_add(Imm32(framePushed()), dynStack);
     makeFrameDescriptor(dynStack, JitFrame_IonJS);
     Push(dynStack); // descriptor
 
     addPendingJump(m_buffer.nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE);
     RelocStyle rs;
-    if (hasMOVWT())
+    if (HasMOVWT())
         rs = L_MOVWT;
     else
         rs = L_LDR;
 
     ma_movPatchable(ImmPtr(target->raw()), ScratchRegister, Always, rs);
     ma_callIonHalfPush(ScratchRegister);
 }
 
@@ -2043,17 +2074,17 @@ void
 MacroAssemblerARMCompat::movePtr(ImmPtr imm, Register dest)
 {
     movePtr(ImmWord(uintptr_t(imm.value)), dest);
 }
 void
 MacroAssemblerARMCompat::movePtr(AsmJSImmPtr imm, Register dest)
 {
     RelocStyle rs;
-    if (hasMOVWT())
+    if (HasMOVWT())
         rs = L_MOVWT;
     else
         rs = L_LDR;
 
     enoughMemory_ &= append(AsmJSAbsoluteLink(CodeOffsetLabel(nextOffset().getOffset()), imm.kind()));
     ma_movPatchable(Imm32(-1), dest, Always, rs);
 }
 void
@@ -2224,18 +2255,18 @@ void
 MacroAssemblerARMCompat::loadDouble(const Address &address, FloatRegister dest)
 {
     ma_vldr(Operand(address), dest);
 }
 
 void
 MacroAssemblerARMCompat::loadDouble(const BaseIndex &src, FloatRegister dest)
 {
-    // VFP instructions don't even support register Base + register Index modes, so
-    // just add the index, then handle the offset like normal
+    // VFP instructions don't even support register Base + register Index modes,
+    // so just add the index, then handle the offset like normal.
     Register base = src.base;
     Register index = src.index;
     uint32_t scale = Imm32::ShiftOf(src.scale).value;
     int32_t offset = src.offset;
     as_add(ScratchRegister, base, lsl(index, scale));
 
     ma_vldr(Operand(ScratchRegister, offset), dest);
 }
@@ -2246,18 +2277,18 @@ MacroAssemblerARMCompat::loadFloatAsDoub
     VFPRegister rt = dest;
     ma_vldr(Operand(address), rt.singleOverlay());
     as_vcvt(rt, rt.singleOverlay());
 }
 
 void
 MacroAssemblerARMCompat::loadFloatAsDouble(const BaseIndex &src, FloatRegister dest)
 {
-    // VFP instructions don't even support register Base + register Index modes, so
-    // just add the index, then handle the offset like normal
+    // VFP instructions don't even support register Base + register Index modes,
+    // so just add the index, then handle the offset like normal.
     Register base = src.base;
     Register index = src.index;
     uint32_t scale = Imm32::ShiftOf(src.scale).value;
     int32_t offset = src.offset;
     VFPRegister rt = dest;
     as_add(ScratchRegister, base, lsl(index, scale));
 
     ma_vldr(Operand(ScratchRegister, offset), rt.singleOverlay());
@@ -2268,18 +2299,18 @@ void
 MacroAssemblerARMCompat::loadFloat32(const Address &address, FloatRegister dest)
 {
     ma_vldr(Operand(address), VFPRegister(dest).singleOverlay());
 }
 
 void
 MacroAssemblerARMCompat::loadFloat32(const BaseIndex &src, FloatRegister dest)
 {
-    // VFP instructions don't even support register Base + register Index modes, so
-    // just add the index, then handle the offset like normal
+    // VFP instructions don't even support register Base + register Index modes,
+    // so just add the index, then handle the offset like normal.
     Register base = src.base;
     Register index = src.index;
     uint32_t scale = Imm32::ShiftOf(src.scale).value;
     int32_t offset = src.offset;
     as_add(ScratchRegister, base, lsl(index, scale));
 
     ma_vldr(Operand(ScratchRegister, offset), VFPRegister(dest).singleOverlay());
 }
@@ -2432,57 +2463,57 @@ MacroAssemblerARMCompat::storePtr(Regist
 }
 
 // Note: this function clobbers the input register.
 void
 MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
 {
     JS_ASSERT(input != ScratchFloatReg);
     ma_vimm(0.5, ScratchFloatReg);
-    if (hasVFPv3()) {
+    if (HasVFPv3()) {
         Label notSplit;
         ma_vadd(input, ScratchFloatReg, ScratchFloatReg);
         // Convert the double into an unsigned fixed point value with 24 bits of
         // precision. The resulting number will look like 0xII.DDDDDD
         as_vcvtFixed(ScratchFloatReg, false, 24, true);
-        // Move the fixed point value into an integer register
+        // Move the fixed point value into an integer register.
         as_vxfer(output, InvalidReg, ScratchFloatReg, FloatToCore);
-        // see if this value *might* have been an exact integer after adding 0.5
-        // This tests the 1/2 through 1/16,777,216th places, but 0.5 needs to be tested out to
-        // the 1/140,737,488,355,328th place.
+        // See if this value *might* have been an exact integer after adding
+        // 0.5. This tests the 1/2 through 1/16,777,216th places, but 0.5 needs
+        // to be tested out to the 1/140,737,488,355,328th place.
         ma_tst(output, Imm32(0x00ffffff));
-        // convert to a uint8 by shifting out all of the fraction bits
+        // Convert to a uint8 by shifting out all of the fraction bits.
         ma_lsr(Imm32(24), output, output);
-        // If any of the bottom 24 bits were non-zero, then we're good, since this number
-        // can't be exactly XX.0
+        // If any of the bottom 24 bits were non-zero, then we're good, since
+        // this number can't be exactly XX.0
         ma_b(&notSplit, NonZero);
         as_vxfer(ScratchRegister, InvalidReg, input, FloatToCore);
         ma_cmp(ScratchRegister, Imm32(0));
         // If the lower 32 bits of the double were 0, then this was an exact number,
         // and it should be even.
         ma_bic(Imm32(1), output, NoSetCond, Zero);
         bind(&notSplit);
     } else {
         Label outOfRange;
         ma_vcmpz(input);
-        // do the add, in place so we can reference it later
+        // Do the add, in place so we can reference it later.
         ma_vadd(input, ScratchFloatReg, input);
-        // do the conversion to an integer.
+        // Do the conversion to an integer.
         as_vcvt(VFPRegister(ScratchFloatReg).uintOverlay(), VFPRegister(input));
-        // copy the converted value out
+        // Copy the converted value out.
         as_vxfer(output, InvalidReg, ScratchFloatReg, FloatToCore);
         as_vmrs(pc);
         ma_mov(Imm32(0), output, NoSetCond, Overflow);  // NaN => 0
         ma_b(&outOfRange, Overflow);  // NaN
         ma_cmp(output, Imm32(0xff));
         ma_mov(Imm32(0xff), output, NoSetCond, Above);
         ma_b(&outOfRange, Above);
-        // convert it back to see if we got the same value back
+        // Convert it back to see if we got the same value back.
         as_vcvt(ScratchFloatReg, VFPRegister(ScratchFloatReg).uintOverlay());
-        // do the check
+        // Do the check.
         as_vcmp(ScratchFloatReg, input);
         as_vmrs(pc);
         ma_bic(Imm32(1), output, NoSetCond, Zero);
         bind(&outOfRange);
     }
 }
 
 void
@@ -2997,23 +3028,23 @@ MacroAssemblerARMCompat::testGCThing(Con
     ma_cmp(ScratchRegister, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET));
     return cond == Equal ? AboveOrEqual : Below;
 }
 
 void
 MacroAssemblerARMCompat::branchTestValue(Condition cond, const ValueOperand &value, const Value &v,
                                          Label *label)
 {
-    // If cond == NotEqual, branch when a.payload != b.payload || a.tag != b.tag.
-    // If the payloads are equal, compare the tags. If the payloads are not equal,
-    // short circuit true (NotEqual).
+    // If cond == NotEqual, branch when a.payload != b.payload || a.tag !=
+    // b.tag. If the payloads are equal, compare the tags. If the payloads are
+    // not equal, short circuit true (NotEqual).
     //
     // If cand == Equal, branch when a.payload == b.payload && a.tag == b.tag.
-    // If the payloads are equal, compare the tags. If the payloads are not equal,
-    // short circuit false (NotEqual).
+    // If the payloads are equal, compare the tags. If the payloads are not
+    // equal, short circuit false (NotEqual).
     jsval_layout jv = JSVAL_TO_IMPL(v);
     if (v.isMarkable())
         ma_cmp(value.payloadReg(), ImmGCPtr(reinterpret_cast<gc::Cell *>(v.toGCThing())));
     else
         ma_cmp(value.payloadReg(), Imm32(jv.s.payload.i32));
     ma_cmp(value.typeReg(), Imm32(jv.s.tag), Equal);
     ma_b(label, cond);
 }
@@ -3040,17 +3071,17 @@ MacroAssemblerARMCompat::branchTestValue
 
         ma_ldr(tagOf(valaddr), ScratchRegister);
         branchPtr(Equal, ScratchRegister, value.typeReg(), label);
 
         bind(&fallthrough);
     }
 }
 
-// unboxing code
+// Unboxing code.
 void
 MacroAssemblerARMCompat::unboxInt32(const ValueOperand &operand, Register dest)
 {
     ma_mov(operand.payloadReg(), dest);
 }
 
 void
 MacroAssemblerARMCompat::unboxInt32(const Address &src, Register dest)
@@ -3151,42 +3182,42 @@ MacroAssemblerARMCompat::boolValueToDoub
     ma_cmp(operand.payloadReg(), Imm32(0));
     // If the source is 0, then subtract the dest from itself, producing 0.
     as_vsub(d, d, d, Equal);
 }
 
 void
 MacroAssemblerARMCompat::int32ValueToDouble(const ValueOperand &operand, FloatRegister dest)
 {
-    // transfer the integral value to a floating point register
+    // Transfer the integral value to a floating point register.
     VFPRegister vfpdest = VFPRegister(dest);
     as_vxfer(operand.payloadReg(), InvalidReg,
              vfpdest.sintOverlay(), CoreToFloat);
-    // convert the value to a double.
+    // Convert the value to a double.
     as_vcvt(vfpdest, vfpdest.sintOverlay());
 }
 
 void
 MacroAssemblerARMCompat::boolValueToFloat32(const ValueOperand &operand, FloatRegister dest)
 {
     VFPRegister d = VFPRegister(dest).singleOverlay();
     ma_vimm_f32(1.0, dest);
     ma_cmp(operand.payloadReg(), Imm32(0));
     // If the source is 0, then subtract the dest from itself, producing 0.
     as_vsub(d, d, d, Equal);
 }
 
 void
 MacroAssemblerARMCompat::int32ValueToFloat32(const ValueOperand &operand, FloatRegister dest)
 {
-    // transfer the integral value to a floating point register
+    // Transfer the integral value to a floating point register.
     VFPRegister vfpdest = VFPRegister(dest).singleOverlay();
     as_vxfer(operand.payloadReg(), InvalidReg,
              vfpdest.sintOverlay(), CoreToFloat);
-    // convert the value to a float.
+    // Convert the value to a float.
     as_vcvt(vfpdest, vfpdest.sintOverlay());
 }
 
 void
 MacroAssemblerARMCompat::loadConstantFloat32(float f, FloatRegister dest)
 {
     ma_vimm_f32(f, dest);
 }
@@ -3211,43 +3242,44 @@ MacroAssemblerARMCompat::loadInt32OrDoub
 void
 MacroAssemblerARMCompat::loadInt32OrDouble(Register base, Register index, FloatRegister dest, int32_t shift)
 {
     Label notInt32, end;
 
     JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0);
 
     // If it's an int, convert it to double.
-    ma_alu(base, lsl(index, shift), ScratchRegister, op_add);
-
-    // Since we only have one scratch register, we need to stomp over it with the tag
+    ma_alu(base, lsl(index, shift), ScratchRegister, OpAdd);
+
+    // Since we only have one scratch register, we need to stomp over it with
+    // the tag.
     ma_ldr(Address(ScratchRegister, NUNBOX32_TYPE_OFFSET), ScratchRegister);
     branchTestInt32(Assembler::NotEqual, ScratchRegister, &notInt32);
 
     // Implicitly requires NUNBOX32_PAYLOAD_OFFSET == 0: no offset provided
     ma_ldr(DTRAddr(base, DtrRegImmShift(index, LSL, shift)), ScratchRegister);
     convertInt32ToDouble(ScratchRegister, dest);
     ma_b(&end);
 
     // Not an int, just load as double.
     bind(&notInt32);
     // First, recompute the offset that had been stored in the scratch register
     // since the scratch register was overwritten loading in the type.
-    ma_alu(base, lsl(index, shift), ScratchRegister, op_add);
+    ma_alu(base, lsl(index, shift), ScratchRegister, OpAdd);
     ma_vldr(Address(ScratchRegister, 0), dest);
     bind(&end);
 }
 
 void
 MacroAssemblerARMCompat::loadConstantDouble(double dp, FloatRegister dest)
 {
     as_FImm64Pool(dest, dp);
 }
 
-    // treat the value as a boolean, and set condition codes accordingly
+// Treat the value as a boolean, and set condition codes accordingly.
 
 Assembler::Condition
 MacroAssemblerARMCompat::testInt32Truthy(bool truthy, const ValueOperand &operand)
 {
     ma_tst(operand.payloadReg(), operand.payloadReg());
     return truthy ? NonZero : Zero;
 }
 
@@ -3279,17 +3311,17 @@ MacroAssemblerARMCompat::extractTag(cons
 {
     ma_ldr(tagOf(address), scratch);
     return scratch;
 }
 
 Register
 MacroAssemblerARMCompat::extractTag(const BaseIndex &address, Register scratch)
 {
-    ma_alu(address.base, lsl(address.index, address.scale), scratch, op_add, NoSetCond);
+    ma_alu(address.base, lsl(address.index, address.scale), scratch, OpAdd, NoSetCond);
     return extractTag(Address(scratch, address.offset), scratch);
 }
 
 template <typename T>
 void
 MacroAssemblerARMCompat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T &dest,
                                            MIRType slotType)
 {
@@ -3352,22 +3384,22 @@ MacroAssemblerARMCompat::storeValue(Valu
             if (dest.scale == TimesOne) {
                 tmpIdx = dest.index;
             } else {
                 ma_lsl(Imm32(dest.scale), dest.index, ScratchRegister);
                 tmpIdx = ScratchRegister;
             }
             ma_strd(val.payloadReg(), val.typeReg(), EDtrAddr(dest.base, EDtrOffReg(tmpIdx)));
         } else {
-            ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add);
+            ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, OpAdd);
             ma_strd(val.payloadReg(), val.typeReg(),
                     EDtrAddr(ScratchRegister, EDtrOffImm(dest.offset)));
         }
     } else {
-        ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add);
+        ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, OpAdd);
         storeValue(val, Address(ScratchRegister, dest.offset));
     }
 }
 
 void
 MacroAssemblerARMCompat::loadValue(const BaseIndex &addr, ValueOperand val)
 {
     if (isValueDTRDCandidate(val) && Abs(addr.offset) <= 255) {
@@ -3376,48 +3408,50 @@ MacroAssemblerARMCompat::loadValue(const
             if (addr.scale == TimesOne) {
                 tmpIdx = addr.index;
             } else {
                 ma_lsl(Imm32(addr.scale), addr.index, ScratchRegister);
                 tmpIdx = ScratchRegister;
             }
             ma_ldrd(EDtrAddr(addr.base, EDtrOffReg(tmpIdx)), val.payloadReg(), val.typeReg());
         } else {
-            ma_alu(addr.base, lsl(addr.index, addr.scale), ScratchRegister, op_add);
+            ma_alu(addr.base, lsl(addr.index, addr.scale), ScratchRegister, OpAdd);
             ma_ldrd(EDtrAddr(ScratchRegister, EDtrOffImm(addr.offset)),
                     val.payloadReg(), val.typeReg());
         }
     } else {
-        ma_alu(addr.base, lsl(addr.index, addr.scale), ScratchRegister, op_add);
+        ma_alu(addr.base, lsl(addr.index, addr.scale), ScratchRegister, OpAdd);
         loadValue(Address(ScratchRegister, addr.offset), val);
     }
 }
 
 void
 MacroAssemblerARMCompat::loadValue(Address src, ValueOperand val)
 {
     Operand srcOp = Operand(src);
     Operand payload = ToPayload(srcOp);
     Operand type = ToType(srcOp);
-    // TODO: copy this code into a generic function that acts on all sequences of memory accesses
+    // TODO: copy this code into a generic function that acts on all sequences
+    // of memory accesses
     if (isValueDTRDCandidate(val)) {
-        // If the value we want is in two consecutive registers starting with an even register,
-        // they can be combined as a single ldrd.
+        // If the value we want is in two consecutive registers starting with an
+        // even register, they can be combined as a single ldrd.
         int offset = srcOp.disp();
         if (offset < 256 && offset > -256) {
             ma_ldrd(EDtrAddr(Register::FromCode(srcOp.base()), EDtrOffImm(srcOp.disp())), val.payloadReg(), val.typeReg());
             return;
         }
     }
-    // if the value is lower than the type, then we may be able to use an ldm instruction
+    // If the value is lower than the type, then we may be able to use an ldm
+    // instruction.
 
     if (val.payloadReg().code() < val.typeReg().code()) {
         if (srcOp.disp() <= 4 && srcOp.disp() >= -8 && (srcOp.disp() & 3) == 0) {
-            // turns out each of the 4 value -8, -4, 0, 4 corresponds exactly with one of
-            // LDM{DB, DA, IA, IB}
+            // Turns out each of the 4 values -8, -4, 0, 4 corresponds exactly
+            // with one of LDM{DB, DA, IA, IB}.
             DTMMode mode;
             switch(srcOp.disp()) {
               case -8:
                 mode = DB;
                 break;
               case -4:
                 mode = DA;
                 break;
@@ -3432,18 +3466,18 @@ MacroAssemblerARMCompat::loadValue(Addre
             }
             startDataTransferM(IsLoad, Register::FromCode(srcOp.base()), mode);
             transferReg(val.payloadReg());
             transferReg(val.typeReg());
             finishDataTransfer();
             return;
         }
     }
-    // Ensure that loading the payload does not erase the pointer to the
-    // Value in memory.
+    // Ensure that loading the payload does not erase the pointer to the Value
+    // in memory.
     if (Register::FromCode(type.base()) != val.payloadReg()) {
         ma_ldr(payload, val.payloadReg());
         ma_ldr(type, val.typeReg());
     } else {
         ma_ldr(type, val.typeReg());
         ma_ldr(payload, val.payloadReg());
     }
 }
@@ -3509,37 +3543,39 @@ MacroAssemblerARMCompat::storePayload(co
     MOZ_ASSERT(dest.offset == 0);
 
     jsval_layout jv = JSVAL_TO_IMPL(val);
     if (val.isMarkable())
         ma_mov(ImmGCPtr((gc::Cell *)jv.s.payload.ptr), ScratchRegister);
     else
         ma_mov(Imm32(jv.s.payload.i32), ScratchRegister);
 
-    // If NUNBOX32_PAYLOAD_OFFSET is not zero, the memory operand [base + index << shift + imm]
-    // cannot be encoded into a single instruction, and cannot be integrated into the as_dtr call.
+    // If NUNBOX32_PAYLOAD_OFFSET is not zero, the memory operand [base + index
+    // << shift + imm] cannot be encoded into a single instruction, and cannot
+    // be integrated into the as_dtr call.
     JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0);
 
     as_dtr(IsStore, 32, Offset, ScratchRegister,
            DTRAddr(dest.base, DtrRegImmShift(dest.index, LSL, shift)));
 }
 
 void
 MacroAssemblerARMCompat::storePayload(Register src, const BaseIndex &dest)
 {
     unsigned shift = ScaleToShift(dest.scale);
-    MOZ_ASSERT(shift < 32 && shift >= 0);
+    MOZ_ASSERT(shift < 32);
     MOZ_ASSERT(dest.offset == 0);
 
-    // If NUNBOX32_PAYLOAD_OFFSET is not zero, the memory operand [base + index << shift + imm]
-    // cannot be encoded into a single instruction, and cannot be integrated into the as_dtr call.
+    // If NUNBOX32_PAYLOAD_OFFSET is not zero, the memory operand [base + index
+    // << shift + imm] cannot be encoded into a single instruction, and cannot
+    // be integrated into the as_dtr call.
     JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0);
 
-    // Technically, shift > -32 can be handle by changing LSL to ASR, but should never come up,
-    // and this is one less code path to get wrong.
+    // Technically, shift > -32 can be handled by changing LSL to ASR, but
+    // should never come up, and this is one less code path to get wrong.
     as_dtr(IsStore, 32, Offset, src, DTRAddr(dest.base, DtrRegImmShift(dest.index, LSL, shift)));
 }
 
 void
 MacroAssemblerARMCompat::storeTypeTag(ImmTag tag, Operand dest) {
     if (dest.getTag() == Operand::MEM) {
         ma_mov(tag, secondScratchReg_);
         ma_str(secondScratchReg_, ToType(dest));
@@ -3556,72 +3592,72 @@ MacroAssemblerARMCompat::storeTypeTag(Im
     Register base = dest.base;
     Register index = dest.index;
     unsigned shift = ScaleToShift(dest.scale);
     MOZ_ASSERT(dest.offset == 0);
     MOZ_ASSERT(base != ScratchRegister);
     MOZ_ASSERT(index != ScratchRegister);
 
     // A value needs to be store a value int base + index << shift + 4.
-    // Arm cannot handle this in a single operand, so a temp register is required.
-    // However, the scratch register is presently in use to hold the immediate that
-    // is being stored into said memory location. Work around this by modifying
-    // the base so the valid [base + index << shift] format can be used, then
-    // restore it.
+    // ARM cannot handle this in a single operand, so a temp register is
+    // required. However, the scratch register is presently in use to hold the
+    // immediate that is being stored into said memory location. Work around
+    // this by modifying the base so the valid [base + index << shift] format
+    // can be used, then restore it.
     ma_add(base, Imm32(NUNBOX32_TYPE_OFFSET), base);
     ma_mov(tag, ScratchRegister);
     ma_str(ScratchRegister, DTRAddr(base, DtrRegImmShift(index, LSL, shift)));
     ma_sub(base, Imm32(NUNBOX32_TYPE_OFFSET), base);
 }
 
-// ARM says that all reads of pc will return 8 higher than the
-// address of the currently executing instruction.  This means we are
-// correctly storing the address of the instruction after the call
-// in the register.
-// Also ION is breaking the ARM EABI here (sort of). The ARM EABI
-// says that a function call should move the pc into the link register,
-// then branch to the function, and *sp is data that is owned by the caller,
-// not the callee.  The ION ABI says *sp should be the address that
-// we will return to when leaving this function
+// ARM says that all reads of pc will return 8 higher than the address of the
+// currently executing instruction. This means we are correctly storing the
+// address of the instruction after the call in the register.
+//
+// Also ION is breaking the ARM EABI here (sort of). The ARM EABI says that a
+// function call should move the pc into the link register, then branch to the
+// function, and *sp is data that is owned by the caller, not the callee. The
+// ION ABI says *sp should be the address that we will return to when leaving
+// this function.
 void
 MacroAssemblerARM::ma_callIon(const Register r)
 {
-    // When the stack is 8 byte aligned,
-    // we want to decrement sp by 8, and write pc+8 into the new sp.
-    // when we return from this call, sp will be its present value minus 4.
+    // When the stack is 8 byte aligned, we want to decrement sp by 8, and write
+    // pc + 8 into the new sp. When we return from this call, sp will be its
+    // present value minus 4.
     AutoForbidPools afp(this);
     as_dtr(IsStore, 32, PreIndex, pc, DTRAddr(sp, DtrOffImm(-8)));
     as_blx(r);
 }
 void
 MacroAssemblerARM::ma_callIonNoPush(const Register r)
 {
-    // Since we just write the return address into the stack, which is
-    // popped on return, the net effect is removing 4 bytes from the stack
+    // Since we just write the return address into the stack, which is popped on
+    // return, the net effect is removing 4 bytes from the stack.
     AutoForbidPools afp(this);
     as_dtr(IsStore, 32, Offset, pc, DTRAddr(sp, DtrOffImm(0)));
     as_blx(r);
 }
 
 void
 MacroAssemblerARM::ma_callIonHalfPush(const Register r)
 {
-    // The stack is unaligned by 4 bytes.
-    // We push the pc to the stack to align the stack before the call, when we
-    // return the pc is poped and the stack is restored to its unaligned state.
+    // The stack is unaligned by 4 bytes. We push the pc to the stack to align
+    // the stack before the call, when we return the pc is popped and the stack
+    // is restored to its unaligned state.
     AutoForbidPools afp(this);
     ma_push(pc);
     as_blx(r);
 }
 
 void
 MacroAssemblerARM::ma_call(ImmPtr dest)
 {
     RelocStyle rs;
-    if (hasMOVWT())
+    if (HasMOVWT())
         rs = L_MOVWT;
     else
         rs = L_LDR;
 
     ma_movPatchable(dest, CallReg, Always, rs);
     as_blx(CallReg);
 }
 
@@ -3697,54 +3733,55 @@ MacroAssemblerARMCompat::setupAlignedABI
 void
 MacroAssemblerARMCompat::setupUnalignedABICall(uint32_t args, Register scratch)
 {
     setupABICall(args);
     dynamicAlignment_ = true;
 
     ma_mov(sp, scratch);
 
-    // Force sp to be aligned
+    // Force sp to be aligned.
     ma_and(Imm32(~(StackAlignment - 1)), sp, sp);
     ma_push(scratch);
 }
 
 #if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
 void
 MacroAssemblerARMCompat::passHardFpABIArg(const MoveOperand &from, MoveOp::Type type)
 {
     MoveOperand to;
     ++passedArgs_;
     if (!enoughMemory_)
         return;
     switch (type) {
       case MoveOp::FLOAT32:
       case MoveOp::DOUBLE: {
-        // N.B. this isn't a limitation of the ABI, it is a limitation of the compiler right now.
-        // There isn't a good way to handle odd numbered single registers, so everything goes to hell
-        // when we try.  Current fix is to never use more than one float in a function call.
-        // Fix coming along with complete float32 support in bug 957504.
+        // N.B. This isn't a limitation of the ABI, it is a limitation of the
+        // compiler right now. There isn't a good way to handle odd numbered
+        // single registers, so everything goes to hell when we try. Current fix
+        // is to never use more than one float in a function call. Fix coming
+        // along with complete float32 support in bug 957504.
         JS_ASSERT(!usedFloat32_);
         if (type == MoveOp::FLOAT32)
             usedFloat32_ = true;
         FloatRegister fr;
         if (GetFloatArgReg(usedIntSlots_, usedFloatSlots_, &fr)) {
             if (from.isFloatReg() && from.floatReg() == fr) {
-                // Nothing to do; the value is in the right register already
+                // Nothing to do; the value is in the right register already.
                 usedFloatSlots_++;
                 if (type == MoveOp::FLOAT32)
                     passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Float32;
                 else
                     passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Double;
                 return;
             }
             to = MoveOperand(fr);
         } else {
-            // If (and only if) the integer registers have started spilling, do we
-            // need to take the register's alignment into account
+            // If (and only if) the integer registers have started spilling, do
+            // we need to take the register's alignment into account.
             uint32_t disp = INT_MAX;
             if (type == MoveOp::FLOAT32)
                 disp = GetFloat32ArgStackDisp(usedIntSlots_, usedFloatSlots_, &padding_);
             else
                 disp = GetDoubleArgStackDisp(usedIntSlots_, usedFloatSlots_, &padding_);
             to = MoveOperand(sp, disp);
         }
         usedFloatSlots_++;
@@ -3753,17 +3790,17 @@ MacroAssemblerARMCompat::passHardFpABIAr
         else
             passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Double;
         break;
       }
       case MoveOp::GENERAL: {
         Register r;
         if (GetIntArgReg(usedIntSlots_, usedFloatSlots_, &r)) {
             if (from.isGeneralReg() && from.reg() == r) {
-                // Nothing to do; the value is in the right register already
+                // Nothing to do; the value is in the right register already.
                 usedIntSlots_++;
                 passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_General;
                 return;
             }
             to = MoveOperand(r);
         } else {
             uint32_t disp = GetIntArgStackDisp(usedIntSlots_, usedFloatSlots_, &padding_);
             to = MoveOperand(sp, disp);
@@ -3809,17 +3846,17 @@ MacroAssemblerARMCompat::passSoftFpABIAr
     Register destReg;
     MoveOperand dest;
     if (GetIntArgReg(usedIntSlots_, 0, &destReg)) {
         if (type == MoveOp::DOUBLE || type == MoveOp::FLOAT32) {
             floatArgsInGPR[destReg.code() >> 1] = from;
             floatArgsInGPRValid[destReg.code() >> 1] = true;
             useResolver = false;
         } else if (from.isGeneralReg() && from.reg() == destReg) {
-            // No need to move anything
+            // No need to move anything.
             useResolver = false;
         } else {
             dest = MoveOperand(destReg);
         }
     } else {
         uint32_t disp = GetArgStackDisp(usedIntSlots_);
         dest = MoveOperand(sp, disp);
     }
@@ -3829,17 +3866,17 @@ MacroAssemblerARMCompat::passSoftFpABIAr
     usedIntSlots_ += increment;
 }
 #endif
 
 void
 MacroAssemblerARMCompat::passABIArg(const MoveOperand &from, MoveOp::Type type)
 {
 #if defined(JS_ARM_SIMULATOR)
-    if (useHardFpABI())
+    if (UseHardFpABI())
         MacroAssemblerARMCompat::passHardFpABIArg(from, type);
     else
         MacroAssemblerARMCompat::passSoftFpABIArg(from, type);
 #elif defined(JS_CODEGEN_ARM_HARDFP)
     MacroAssemblerARMCompat::passHardFpABIArg(from, type);
 #else
     MacroAssemblerARMCompat::passSoftFpABIArg(from, type);
 #endif
@@ -3867,26 +3904,27 @@ void MacroAssemblerARMCompat::checkStack
 
 void
 MacroAssemblerARMCompat::callWithABIPre(uint32_t *stackAdjust, bool callFromAsmJS)
 {
     JS_ASSERT(inCall_);
 
     *stackAdjust = ((usedIntSlots_ > NumIntArgRegs) ? usedIntSlots_ - NumIntArgRegs : 0) * sizeof(intptr_t);
 #if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
-    if (useHardFpABI())
+    if (UseHardFpABI())
         *stackAdjust += 2*((usedFloatSlots_ > NumFloatArgRegs) ? usedFloatSlots_ - NumFloatArgRegs : 0) * sizeof(intptr_t);
 #endif
     uint32_t alignmentAtPrologue = callFromAsmJS ? AlignmentAtAsmJSPrologue : 0;
 
     if (!dynamicAlignment_) {
         *stackAdjust += ComputeByteAlignment(framePushed_ + *stackAdjust + alignmentAtPrologue,
                                              StackAlignment);
     } else {
-        // sizeof(intptr_t) account for the saved stack pointer pushed by setupUnalignedABICall
+        // sizeof(intptr_t) accounts for the saved stack pointer pushed by
+        // setupUnalignedABICall.
         *stackAdjust += ComputeByteAlignment(*stackAdjust + sizeof(intptr_t), StackAlignment);
     }
 
     reserveStack(*stackAdjust);
 
     // Position all arguments.
     {
         enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
@@ -3904,18 +3942,18 @@ MacroAssemblerARMCompat::callWithABIPre(
 
             if (from.isFloatReg()) {
                 ma_vxfer(VFPRegister(from.floatReg()), to0, to1);
             } else {
                 JS_ASSERT(from.isMemory());
                 // Note: We can safely use the MoveOperand's displacement here,
                 // even if the base is SP: MoveEmitter::toOperand adjusts
                 // SP-relative operands by the difference between the current
-                // stack usage and stackAdjust, which emitter.finish() resets
-                // to 0.
+                // stack usage and stackAdjust, which emitter.finish() resets to
+                // 0.
                 //
                 // Warning: if the offset isn't within [-255,+255] then this
                 // will assert-fail (or, if non-debug, load the wrong words).
                 // Nothing uses such an offset at the time of this writing.
                 ma_ldrd(EDtrAddr(from.base(), EDtrOffImm(from.disp())), to0, to1);
             }
         }
     }
@@ -3929,39 +3967,39 @@ MacroAssemblerARMCompat::callWithABIPre(
 void
 MacroAssemblerARMCompat::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
 {
     if (secondScratchReg_ != lr)
         ma_mov(secondScratchReg_, lr);
 
     switch (result) {
       case MoveOp::DOUBLE:
-        if (!useHardFpABI()) {
+        if (!UseHardFpABI()) {
             // Move double from r0/r1 to ReturnFloatReg.
             as_vxfer(r0, r1, ReturnFloatReg, CoreToFloat);
             break;
         }
       case MoveOp::FLOAT32:
-        if (!useHardFpABI()) {
+        if (!UseHardFpABI()) {
             // Move float32 from r0 to ReturnFloatReg.
             as_vxfer(r0, InvalidReg, VFPRegister(d0).singleOverlay(), CoreToFloat);
             break;
         }
       case MoveOp::GENERAL:
         break;
 
       default:
         MOZ_ASSUME_UNREACHABLE("unexpected callWithABI result");
     }
 
     freeStack(stackAdjust);
 
     if (dynamicAlignment_) {
-        // x86 supports pop esp.  on arm, that isn't well defined, so just
-        // do it manually
+        // While the x86 supports pop esp, on ARM that isn't well defined, so
+        // just do it manually.
         as_dtr(IsLoad, 32, Offset, sp, DTRAddr(sp, DtrOffImm(0)));
     }
 
     JS_ASSERT(inCall_);
     inCall_ = false;
 }
 
 #if defined(DEBUG) && defined(JS_ARM_SIMULATOR)
@@ -4026,19 +4064,19 @@ MacroAssemblerARMCompat::callWithABI(Asm
     callWithABIPre(&stackAdjust, /* callFromAsmJS = */ true);
     call(imm);
     callWithABIPost(stackAdjust, result);
 }
 
 void
 MacroAssemblerARMCompat::callWithABI(const Address &fun, MoveOp::Type result)
 {
-    // Load the callee in r12, no instruction between the ldr and call
-    // should clobber it. Note that we can't use fun.base because it may
-    // be one of the IntArg registers clobbered before the call.
+    // Load the callee in r12, no instruction between the ldr and call should
+    // clobber it. Note that we can't use fun.base because it may be one of the
+    // IntArg registers clobbered before the call.
     ma_ldr(fun, r12);
     uint32_t stackAdjust;
     callWithABIPre(&stackAdjust);
     call(r12);
     callWithABIPost(stackAdjust, result);
 }
 
 void
@@ -4089,42 +4127,42 @@ MacroAssemblerARMCompat::handleFailureWi
     // If we found a catch handler, this must be a baseline frame. Restore state
     // and jump to the catch block.
     bind(&catch_);
     ma_ldr(Operand(sp, offsetof(ResumeFromException, target)), r0);
     ma_ldr(Operand(sp, offsetof(ResumeFromException, framePointer)), r11);
     ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp);
     jump(r0);
 
-    // If we found a finally block, this must be a baseline frame. Push
-    // two values expected by JSOP_RETSUB: BooleanValue(true) and the
-    // exception.
+    // If we found a finally block, this must be a baseline frame. Push two
+    // values expected by JSOP_RETSUB: BooleanValue(true) and the exception.
     bind(&finally);
     ValueOperand exception = ValueOperand(r1, r2);
     loadValue(Operand(sp, offsetof(ResumeFromException, exception)), exception);
 
     ma_ldr(Operand(sp, offsetof(ResumeFromException, target)), r0);
     ma_ldr(Operand(sp, offsetof(ResumeFromException, framePointer)), r11);
     ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp);
 
     pushValue(BooleanValue(true));
     pushValue(exception);
     jump(r0);
 
-    // Only used in debug mode. Return BaselineFrame->returnValue() to the caller.
+    // Only used in debug mode. Return BaselineFrame->returnValue() to the
+    // caller.
     bind(&return_);
     ma_ldr(Operand(sp, offsetof(ResumeFromException, framePointer)), r11);
     ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp);
     loadValue(Address(r11, BaselineFrame::reverseOffsetOfReturnValue()), JSReturnOperand);
     ma_mov(r11, sp);
     pop(r11);
     ret();
 
-    // If we are bailing out to baseline to handle an exception, jump to
-    // the bailout tail stub.
+    // If we are bailing out to baseline to handle an exception, jump to the
+    // bailout tail stub.
     bind(&bailout);
     ma_ldr(Operand(sp, offsetof(ResumeFromException, bailoutInfo)), r2);
     ma_mov(Imm32(BAILOUT_RETURN_OK), r0);
     ma_ldr(Operand(sp, offsetof(ResumeFromException, target)), r1);
     jump(r1);
 }
 
 Assembler::Condition
@@ -4143,51 +4181,50 @@ MacroAssemblerARMCompat::floor(FloatRegi
     Label handleNeg;
     Label fin;
     compareDouble(input, InvalidFloatReg);
     ma_b(&handleZero, Assembler::Equal);
     ma_b(&handleNeg, Assembler::Signed);
     // NaN is always a bail condition, just bail directly.
     ma_b(bail, Assembler::Overflow);
 
-    // The argument is a positive number, truncation is the path to glory;
-    // Since it is known to be > 0.0, explicitly convert to a larger range,
-    // then a value that rounds to INT_MAX is explicitly different from an
-    // argument that clamps to INT_MAX
+    // The argument is a positive number, truncation is the path to glory. Since
+    // it is known to be > 0.0, explicitly convert to a larger range, then a
+    // value that rounds to INT_MAX is explicitly different from an argument
+    // that clamps to INT_MAX.
     ma_vcvt_F64_U32(input, ScratchFloatReg);
     ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);
     ma_mov(output, output, SetCond);
     ma_b(bail, Signed);
     ma_b(&fin);
 
     bind(&handleZero);
     // Move the top word of the double into the output reg, if it is non-zero,
-    // then the original value was -0.0
+    // then the original value was -0.0.
     as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1);
     ma_cmp(output, Imm32(0));
     ma_b(bail, NonZero);
     ma_b(&fin);
 
     bind(&handleNeg);
-    // Negative case, negate, then start dancing
+    // Negative case, negate, then start dancing.
     ma_vneg(input, input);
     ma_vcvt_F64_U32(input, ScratchFloatReg);
     ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);
     ma_vcvt_U32_F64(ScratchFloatReg, ScratchFloatReg);
     compareDouble(ScratchFloatReg, input);
     ma_add(output, Imm32(1), output, NoSetCond, NotEqual);
-    // Negate the output.  Since INT_MIN < -INT_MAX, even after adding 1,
-    // the result will still be a negative number
+    // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
+    // result will still be a negative number.
     ma_rsb(output, Imm32(0), output, SetCond);
     // Flip the negated input back to its original value.
     ma_vneg(input, input);
-    // If the result looks non-negative, then this value didn't actually fit into
-    // the int range, and special handling is required.
-    // zero is also caught by this case, but floor of a negative number
-    // should never be zero.
+    // If the result looks non-negative, then this value didn't actually fit
+    // into the int range, and special handling is required. Zero is also caught
+    // by this case, but floor of a negative number should never be zero.
     ma_b(bail, NotSigned);
 
     bind(&fin);
 }
 
 void
 MacroAssemblerARMCompat::floorf(FloatRegister input, Register output, Label *bail)
 {
@@ -4195,51 +4232,50 @@ MacroAssemblerARMCompat::floorf(FloatReg
     Label handleNeg;
     Label fin;
     compareFloat(input, InvalidFloatReg);
     ma_b(&handleZero, Assembler::Equal);
     ma_b(&handleNeg, Assembler::Signed);
     // NaN is always a bail condition, just bail directly.
     ma_b(bail, Assembler::Overflow);
 
-    // The argument is a positive number, truncation is the path to glory;
-    // Since it is known to be > 0.0, explicitly convert to a larger range,
-    // then a value that rounds to INT_MAX is explicitly different from an
-    // argument that clamps to INT_MAX
+    // The argument is a positive number, truncation is the path to glory. Since
+    // it is known to be > 0.0, explicitly convert to a larger range, then a
+    // value that rounds to INT_MAX is explicitly different from an argument
+    // that clamps to INT_MAX.
     ma_vcvt_F32_U32(input, ScratchFloatReg);
     ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);
     ma_mov(output, output, SetCond);
     ma_b(bail, Signed);
     ma_b(&fin);
 
     bind(&handleZero);
     // Move the top word of the double into the output reg, if it is non-zero,
-    // then the original value was -0.0
+    // then the original value was -0.0.
     as_vxfer(output, InvalidReg, VFPRegister(input).singleOverlay(), FloatToCore, Always, 0);
     ma_cmp(output, Imm32(0));
     ma_b(bail, NonZero);
     ma_b(&fin);
 
     bind(&handleNeg);
-    // Negative case, negate, then start dancing
+    // Negative case, negate, then start dancing.
     ma_vneg_f32(input, input);
     ma_vcvt_F32_U32(input, ScratchFloatReg);
     ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);
     ma_vcvt_U32_F32(ScratchFloatReg, ScratchFloatReg);
     compareFloat(ScratchFloatReg, input);
     ma_add(output, Imm32(1), output, NoSetCond, NotEqual);
-    // Negate the output.  Since INT_MIN < -INT_MAX, even after adding 1,
-    // the result will still be a negative number
+    // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
+    // result will still be a negative number.
     ma_rsb(output, Imm32(0), output, SetCond);
     // Flip the negated input back to its original value.
     ma_vneg_f32(input, input);
-    // If the result looks non-negative, then this value didn't actually fit into
-    // the int range, and special handling is required.
-    // zero is also caught by this case, but floor of a negative number
-    // should never be zero.
+    // If the result looks non-negative, then this value didn't actually fit
+    // into the int range, and special handling is required. Zero is also caught
+    // by this case, but floor of a negative number should never be zero.
     ma_b(bail, NotSigned);
 
     bind(&fin);
 }
 
 void
 MacroAssemblerARMCompat::ceil(FloatRegister input, Register output, Label *bail)
 {
@@ -4254,18 +4290,18 @@ MacroAssemblerARMCompat::ceil(FloatRegis
     ma_b(&handlePos, Assembler::NotSigned);
 
     // We are in the ]-Inf; 0[ range
     // If we are in the ]-1; 0[ range => bailout
     ma_vimm(-1.0, ScratchFloatReg);
     compareDouble(input, ScratchFloatReg);
     ma_b(bail, Assembler::GreaterThan);
 
-    // We are in the ]-Inf; -1] range: ceil(x) == -floor(-x) and floor can
-    // be computed with direct truncation here (x > 0).
+    // We are in the ]-Inf; -1] range: ceil(x) == -floor(-x) and floor can be
+    // computed with direct truncation here (x > 0).
     ma_vneg(input, ScratchFloatReg);
     ma_vcvt_F64_U32(ScratchFloatReg, ScratchFloatReg);
     ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);
     ma_neg(output, output, SetCond);
     ma_b(bail, NotSigned);
     ma_b(&fin);
 
     // Test for 0.0 / -0.0: if the top word of the input double is not zero,
@@ -4279,17 +4315,17 @@ MacroAssemblerARMCompat::ceil(FloatRegis
     // We are in the ]0; +inf] range: truncate integer values, maybe add 1 for
     // non integer values, maybe bail if overflow.
     bind(&handlePos);
     ma_vcvt_F64_U32(input, ScratchFloatReg);
     ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);
     ma_vcvt_U32_F64(ScratchFloatReg, ScratchFloatReg);
     compareDouble(ScratchFloatReg, input);
     ma_add(output, Imm32(1), output, NoSetCond, NotEqual);
-    // Bail out if the add overflowed or the result is non positive
+    // Bail out if the add overflowed or the result is non positive.
     ma_mov(output, output, SetCond);
     ma_b(bail, Signed);
     ma_b(bail, Zero);
 
     bind(&fin);
 }
 
 void
@@ -4306,18 +4342,18 @@ MacroAssemblerARMCompat::ceilf(FloatRegi
     ma_b(&handlePos, Assembler::NotSigned);
 
     // We are in the ]-Inf; 0[ range
     // If we are in the ]-1; 0[ range => bailout
     ma_vimm_f32(-1.f, ScratchFloatReg);
     compareFloat(input, ScratchFloatReg);
     ma_b(bail, Assembler::GreaterThan);
 
-    // We are in the ]-Inf; -1] range: ceil(x) == -floor(-x) and floor can
-    // be computed with direct truncation here (x > 0).
+    // We are in the ]-Inf; -1] range: ceil(x) == -floor(-x) and floor can be
+    // computed with direct truncation here (x > 0).
     ma_vneg_f32(input, ScratchFloatReg);
     ma_vcvt_F32_U32(ScratchFloatReg, ScratchFloatReg);
     ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);
     ma_neg(output, output, SetCond);
     ma_b(bail, NotSigned);
     ma_b(&fin);
 
     // Test for 0.0 / -0.0: if the top word of the input double is not zero,
@@ -4331,180 +4367,187 @@ MacroAssemblerARMCompat::ceilf(FloatRegi
     // We are in the ]0; +inf] range: truncate integer values, maybe add 1 for
     // non integer values, maybe bail if overflow.
     bind(&handlePos);
     ma_vcvt_F32_U32(input, ScratchFloatReg);
     ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);
     ma_vcvt_U32_F32(ScratchFloatReg, ScratchFloatReg);
     compareFloat(ScratchFloatReg, input);
     ma_add(output, Imm32(1), output, NoSetCond, NotEqual);
-    // Bail out if the add overflowed or the result is non positive
+    // Bail out if the add overflowed or the result is non positive.
     ma_mov(output, output, SetCond);
     ma_b(bail, Signed);
     ma_b(bail, Zero);
 
     bind(&fin);
 }
 
 CodeOffsetLabel
 MacroAssemblerARMCompat::toggledJump(Label *label)
 {
     // Emit a B that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp().
-    
     BufferOffset b = ma_b(label, Always, true);
     CodeOffsetLabel ret(b.getOffset());
     return ret;
 }
 
 CodeOffsetLabel
 MacroAssemblerARMCompat::toggledCall(JitCode *target, bool enabled)
 {
     BufferOffset bo = nextOffset();
     addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
-    ma_movPatchable(ImmPtr(target->raw()), ScratchRegister, Always, hasMOVWT() ? L_MOVWT : L_LDR);
+    ma_movPatchable(ImmPtr(target->raw()), ScratchRegister, Always, HasMOVWT() ? L_MOVWT : L_LDR);
     if (enabled)
         ma_blx(ScratchRegister);
     else
         ma_nop();
     return CodeOffsetLabel(bo.getOffset());
 }
 
 void
 MacroAssemblerARMCompat::round(FloatRegister input, Register output, Label *bail, FloatRegister tmp)
 {
     Label handleZero;
     Label handleNeg;
     Label fin;
-    // Do a compare based on the original value, then do most other things based on the
-    // shifted value.
+    // Do a compare based on the original value, then do most other things based
+    // on the shifted value.
     ma_vcmpz(input);
     // Adding 0.5 is technically incorrect!
-    // We want to add 0.5 to negative numbers, and 0.49999999999999999 to positive numbers.
+    // We want to add 0.5 to negative numbers, and 0.49999999999999999 to
+    // positive numbers.
     ma_vimm(0.5, ScratchFloatReg);
-    // Since we already know the sign bit, flip all numbers to be positive, stored in tmp.
+    // Since we already know the sign bit, flip all numbers to be positive,
+    // stored in tmp.
     ma_vabs(input, tmp);
     // Add 0.5, storing the result into tmp.
     ma_vadd(ScratchFloatReg, tmp, tmp);
     as_vmrs(pc);
     ma_b(&handleZero, Assembler::Equal);
     ma_b(&handleNeg, Assembler::Signed);
     // NaN is always a bail condition, just bail directly.
     ma_b(bail, Assembler::Overflow);
 
-    // The argument is a positive number, truncation is the path to glory;
-    // Since it is known to be > 0.0, explicitly convert to a larger range,
-    // then a value that rounds to INT_MAX is explicitly different from an
-    // argument that clamps to INT_MAX
+    // The argument is a positive number, truncation is the path to glory; since
+    // it is known to be > 0.0, explicitly convert to a larger range, then a
+    // value that rounds to INT_MAX is explicitly different from an argument
+    // that clamps to INT_MAX.
     ma_vcvt_F64_U32(tmp, ScratchFloatReg);
     ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);
     ma_mov(output, output, SetCond);
     ma_b(bail, Signed);
     ma_b(&fin);
 
     bind(&handleZero);
     // Move the top word of the double into the output reg, if it is non-zero,
     // then the original value was -0.0
     as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1);
     ma_cmp(output, Imm32(0));
     ma_b(bail, NonZero);
     ma_b(&fin);
 
     bind(&handleNeg);
-    // Negative case, negate, then start dancing.  This number may be positive, since we added 0.5
+    // Negative case, negate, then start dancing. This number may be positive,
+    // since we added 0.5.
     ma_vcvt_F64_U32(tmp, ScratchFloatReg);
     ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);
 
-    // -output is now a correctly rounded value, unless the original value was exactly
-    // halfway between two integers, at which point, it has been rounded away from zero, when
-    // it should be rounded towards \infty.
+    // -output is now a correctly rounded value, unless the original value was
+    // exactly halfway between two integers, at which point, it has been rounded
+    // away from zero, when it should be rounded towards \infty.
     ma_vcvt_U32_F64(ScratchFloatReg, ScratchFloatReg);
     compareDouble(ScratchFloatReg, tmp);
     ma_sub(output, Imm32(1), output, NoSetCond, Equal);
-    // Negate the output.  Since INT_MIN < -INT_MAX, even after adding 1,
-    // the result will still be a negative number
+    // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
+    // result will still be a negative number.
     ma_rsb(output, Imm32(0), output, SetCond);
 
-    // If the result looks non-negative, then this value didn't actually fit into
-    // the int range, and special handling is required, or it was zero, which means
-    // the result is actually -0.0 which also requires special handling.
+    // If the result looks non-negative, then this value didn't actually fit
+    // into the int range, and special handling is required, or it was zero,
+    // which means the result is actually -0.0 which also requires special
+    // handling.
     ma_b(bail, NotSigned);
 
     bind(&fin);
 }
 
 void
 MacroAssemblerARMCompat::roundf(FloatRegister input, Register output, Label *bail, FloatRegister tmp)
 {
     Label handleZero;
     Label handleNeg;
     Label fin;
-    // Do a compare based on the original value, then do most other things based on the
-    // shifted value.
+    // Do a compare based on the original value, then do most other things based
+    // on the shifted value.
     ma_vcmpz_f32(input);
     // Adding 0.5 is technically incorrect!
-    // We want to add 0.5 to negative numbers, and 0.49999999999999999 to positive numbers.
+    // We want to add 0.5 to negative numbers, and 0.49999999999999999 to
+    // positive numbers.
     ma_vimm_f32(0.5f, ScratchFloatReg);
-    // Since we already know the sign bit, flip all numbers to be positive, stored in tmp.
+    // Since we already know the sign bit, flip all numbers to be positive,
+    // stored in tmp.
     ma_vabs_f32(input, tmp);
     // Add 0.5, storing the result into tmp.
     ma_vadd_f32(ScratchFloatReg, tmp, tmp);
     as_vmrs(pc);
     ma_b(&handleZero, Assembler::Equal);
     ma_b(&handleNeg, Assembler::Signed);
     // NaN is always a bail condition, just bail directly.
     ma_b(bail, Assembler::Overflow);
 
-    // The argument is a positive number, truncation is the path to glory;
-    // Since it is known to be > 0.0, explicitly convert to a larger range,
-    // then a value that rounds to INT_MAX is explicitly different from an
-    // argument that clamps to INT_MAX
+    // The argument is a positive number, truncation is the path to glory; since
+    // it is known to be > 0.0, explicitly convert to a larger range, then a
+    // value that rounds to INT_MAX is explicitly different from an argument
+    // that clamps to INT_MAX.
     ma_vcvt_F32_U32(tmp, ScratchFloatReg);
     ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);
     ma_mov(output, output, SetCond);
     ma_b(bail, Signed);
     ma_b(&fin);
 
     bind(&handleZero);
-    // Move the top word of the double into the output reg, if it is non-zero,
-    // then the original value was -0.0
+    // Move the whole float32 into the output reg, if it is non-zero, then the
+    // original value was -0.0.
     as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1);
     ma_cmp(output, Imm32(0));
     ma_b(bail, NonZero);
     ma_b(&fin);
 
     bind(&handleNeg);
-    // Negative case, negate, then start dancing.  This number may be positive, since we added 0.5
+    // Negative case, negate, then start dancing. This number may be positive,
+    // since we added 0.5.
     ma_vcvt_F32_U32(tmp, ScratchFloatReg);
     ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);
 
-    // -output is now a correctly rounded value, unless the original value was exactly
-    // halfway between two integers, at which point, it has been rounded away from zero, when
-    // it should be rounded towards \infty.
+    // -output is now a correctly rounded value, unless the original value was
+    // exactly halfway between two integers, at which point, it has been rounded
+    // away from zero, when it should be rounded towards \infty.
     ma_vcvt_U32_F32(ScratchFloatReg, ScratchFloatReg);
     compareFloat(ScratchFloatReg, tmp);
     ma_sub(output, Imm32(1), output, NoSetCond, Equal);
-    // Negate the output.  Since INT_MIN < -INT_MAX, even after adding 1,
-    // the result will still be a negative number
+    // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, the
+    // result will still be a negative number.
     ma_rsb(output, Imm32(0), output, SetCond);
 
-    // If the result looks non-negative, then this value didn't actually fit into
-    // the int range, and special handling is required, or it was zero, which means
-    // the result is actually -0.0 which also requires special handling.
+    // If the result looks non-negative, then this value didn't actually fit
+    // into the int range, and special handling is required, or it was zero,
+    // which means the result is actually -0.0 which also requires special
+    // handling.
     ma_b(bail, NotSigned);
 
     bind(&fin);
 }
 
 CodeOffsetJump
 MacroAssemblerARMCompat::jumpWithPatch(RepatchLabel *label, Condition cond)
 {
     ARMBuffer::PoolEntry pe;
     BufferOffset bo = as_BranchPool(0xdeadbeef, label, &pe, cond);
-    // Fill in a new CodeOffset with both the load and the
-    // pool entry that the instruction loads from.
+    // Fill in a new CodeOffset with both the load and the pool entry that the
+    // instruction loads from.
     CodeOffsetJump ret(bo.getOffset(), pe.encode());
     return ret;
 }
 
 #ifdef JSGC_GENERATIONAL
 
 void
 MacroAssemblerARMCompat::branchPtrInNurseryRange(Condition cond, Register ptr, Register temp,
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -20,27 +20,29 @@ using mozilla::DebugOnly;
 
 namespace js {
 namespace jit {
 
 static Register CallReg = ip;
 static const int defaultShift = 3;
 JS_STATIC_ASSERT(1 << defaultShift == sizeof(jsval));
 
-// MacroAssemblerARM is inheriting form Assembler defined in Assembler-arm.{h,cpp}
+// MacroAssemblerARM inherits from Assembler, defined in
+// Assembler-arm.{h,cpp}.
 class MacroAssemblerARM : public Assembler
 {
   protected:
-    // On ARM, some instructions require a second scratch register. This register
-    // defaults to lr, since it's non-allocatable (as it can be clobbered by some
-    // instructions). Allow the baseline compiler to override this though, since
-    // baseline IC stubs rely on lr holding the return address.
+    // On ARM, some instructions require a second scratch register. This
+    // register defaults to lr, since it's non-allocatable (as it can be
+    // clobbered by some instructions). Allow the baseline compiler to override
+    // this though, since baseline IC stubs rely on lr holding the return
+    // address.
     Register secondScratchReg_;
 
-    // higher level tag testing code
+    // Higher level tag testing code.
     Operand ToPayload(Operand base) {
         return Operand(Register::FromCode(base.base()), base.disp());
     }
     Address ToPayload(Address base) {
         return ToPayload(Operand(base)).toAddress();
     }
     Operand ToType(Operand base) {
         return Operand(Register::FromCode(base.base()), base.disp() + sizeof(void *));
@@ -80,20 +82,19 @@ class MacroAssemblerARM : public Assembl
     void addDouble(FloatRegister src, FloatRegister dest);
     void subDouble(FloatRegister src, FloatRegister dest);
     void mulDouble(FloatRegister src, FloatRegister dest);
     void divDouble(FloatRegister src, FloatRegister dest);
 
     void negateDouble(FloatRegister reg);
     void inc64(AbsoluteAddress dest);
 
-    // somewhat direct wrappers for the low-level assembler funcitons
-    // bitops
-    // attempt to encode a virtual alu instruction using
-    // two real instructions.
+    // Somewhat direct wrappers for the low-level assembler functions
+    // (bitops). Attempt to encode a virtual ALU instruction using two real
+    // instructions.
   private:
     bool alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op,
                  SetCond_ sc, Condition c);
 
   public:
     void ma_alu(Register src1, Operand2 op2, Register dest, ALUOp op,
                 SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_alu(Register src1, Imm32 imm, Register dest,
@@ -102,19 +103,21 @@ class MacroAssemblerARM : public Assembl
 
     void ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
                 SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_nop();
     void ma_movPatchable(Imm32 imm, Register dest, Assembler::Condition c,
                          RelocStyle rs, Instruction *i = nullptr);
     void ma_movPatchable(ImmPtr imm, Register dest, Assembler::Condition c,
                          RelocStyle rs, Instruction *i = nullptr);
-    // These should likely be wrapped up as a set of macros
-    // or something like that.  I cannot think of a good reason
-    // to explicitly have all of this code.
+
+    // These should likely be wrapped up as a set of macros or something like
+    // that. I cannot think of a good reason to explicitly have all of this
+    // code.
+
     // ALU based ops
     // mov
     void ma_mov(Register src, Register dest,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
     void ma_mov(Imm32 imm, Register dest,
                 SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_mov(ImmWord imm, Register dest,
@@ -142,145 +145,146 @@ class MacroAssemblerARM : public Assembl
 
     void ma_mvn(Register src1, Register dest,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
     // Negate (dest <- -src) implemented as rsb dest, src, 0
     void ma_neg(Register src, Register dest,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
-    // and
+    // And
     void ma_and(Register src, Register dest,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
     void ma_and(Register src1, Register src2, Register dest,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
     void ma_and(Imm32 imm, Register dest,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
     void ma_and(Imm32 imm, Register src1, Register dest,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
 
 
-    // bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2)
+    // Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2)
     void ma_bic(Imm32 imm, Register dest,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
-    // exclusive or
+    // Exclusive or
     void ma_eor(Register src, Register dest,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
     void ma_eor(Register src1, Register src2, Register dest,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
     void ma_eor(Imm32 imm, Register dest,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
     void ma_eor(Imm32 imm, Register src1, Register dest,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
 
-    // or
+    // Or
     void ma_orr(Register src, Register dest,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
     void ma_orr(Register src1, Register src2, Register dest,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
     void ma_orr(Imm32 imm, Register dest,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
     void ma_orr(Imm32 imm, Register src1, Register dest,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
 
-    // arithmetic based ops
-    // add with carry
+    // Arithmetic based ops.
+    // Add with carry:
     void ma_adc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_adc(Register src, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_adc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
 
-    // add
+    // Add:
     void ma_add(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_add(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_add(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_add(Register src1, Operand op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_add(Register src1, Imm32 op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
 
-    // subtract with carry
+    // Subtract with carry:
     void ma_sbc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_sbc(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_sbc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
 
-    // subtract
+    // Subtract:
     void ma_sub(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_sub(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_sub(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_sub(Register src1, Operand op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_sub(Register src1, Imm32 op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
 
-    // reverse subtract
+    // Reverse subtract:
     void ma_rsb(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_rsb(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_rsb(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_rsb(Register src1, Imm32 op2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
 
-    // reverse subtract with carry
+    // Reverse subtract with carry:
     void ma_rsc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_rsc(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_rsc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
 
-    // compares/tests
-    // compare negative (sets condition codes as src1 + src2 would)
+    // Compares/tests.
+    // Compare negative (sets condition codes as src1 + src2 would):
     void ma_cmn(Register src1, Imm32 imm, Condition c = Always);
     void ma_cmn(Register src1, Register src2, Condition c = Always);
     void ma_cmn(Register src1, Operand op, Condition c = Always);
 
-    // compare (src - src2)
+    // Compare (src - src2):
     void ma_cmp(Register src1, Imm32 imm, Condition c = Always);
     void ma_cmp(Register src1, ImmWord ptr, Condition c = Always);
     void ma_cmp(Register src1, ImmGCPtr ptr, Condition c = Always);
     void ma_cmp(Register src1, Operand op, Condition c = Always);
     void ma_cmp(Register src1, Register src2, Condition c = Always);
 
 
-    // test for equality, (src1^src2)
+    // Test for equality, (src1 ^ src2):
     void ma_teq(Register src1, Imm32 imm, Condition c = Always);
     void ma_teq(Register src1, Register src2, Condition c = Always);
     void ma_teq(Register src1, Operand op, Condition c = Always);
 
 
-    // test (src1 & src2)
+    // Test (src1 & src2):
     void ma_tst(Register src1, Imm32 imm, Condition c = Always);
     void ma_tst(Register src1, Register src2, Condition c = Always);
     void ma_tst(Register src1, Operand op, Condition c = Always);
 
-    // multiplies.  For now, there are only two that we care about.
+    // Multiplies. For now, there are only two that we care about.
     void ma_mul(Register src1, Register src2, Register dest);
     void ma_mul(Register src1, Imm32 imm, Register dest);
     Condition ma_check_mul(Register src1, Register src2, Register dest, Condition cond);
     Condition ma_check_mul(Register src1, Imm32 imm, Register dest, Condition cond);
 
-    // fast mod, uses scratch registers, and thus needs to be in the assembler
-    // implicitly assumes that we can overwrite dest at the beginning of the sequence
+    // Fast mod, uses scratch registers, and thus needs to be in the assembler;
+    // implicitly assumes that we can overwrite dest at the beginning of the
+    // sequence.
     void ma_mod_mask(Register src, Register dest, Register hold, Register tmp,
                      int32_t shift);
 
-    // mod, depends on integer divide instructions being supported
+    // Mod - depends on integer divide instructions being supported.
     void ma_smod(Register num, Register div, Register dest);
     void ma_umod(Register num, Register div, Register dest);
 
-    // division, depends on integer divide instructions being supported
+    // Division - depends on integer divide instructions being supported.
     void ma_sdiv(Register num, Register div, Register dest, Condition cond = Always);
     void ma_udiv(Register num, Register div, Register dest, Condition cond = Always);
 
-    // memory
-    // shortcut for when we know we're transferring 32 bits of data
+    // Memory:
+    // Shortcut for when we know we're transferring 32 bits of data.
     void ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt,
                 Index mode = Offset, Condition cc = Always);
 
     void ma_dtr(LoadStore ls, Register rn, Register rm, Register rt,
                 Index mode = Offset, Condition cc = Always);
 
 
     void ma_str(Register rt, DTRAddr addr, Index mode = Offset, Condition cc = Always);
@@ -293,43 +297,43 @@ class MacroAssemblerARM : public Assembl
     void ma_ldrb(DTRAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
     void ma_ldrh(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
     void ma_ldrsh(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
     void ma_ldrsb(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
     void ma_ldrd(EDtrAddr addr, Register rt, DebugOnly<Register> rt2, Index mode = Offset, Condition cc = Always);
     void ma_strb(Register rt, DTRAddr addr, Index mode = Offset, Condition cc = Always);
     void ma_strh(Register rt, EDtrAddr addr, Index mode = Offset, Condition cc = Always);
     void ma_strd(Register rt, DebugOnly<Register> rt2, EDtrAddr addr, Index mode = Offset, Condition cc = Always);
-    // specialty for moving N bits of data, where n == 8,16,32,64
+    // Specialty for moving N bits of data, where n == 8,16,32,64.
     BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
                           Register rn, Register rm, Register rt,
                           Index mode = Offset, Condition cc = Always, unsigned scale = TimesOne);
 
     BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
                           Register rn, Imm32 offset, Register rt,
                           Index mode = Offset, Condition cc = Always);
     void ma_pop(Register r);
     void ma_push(Register r);
 
     void ma_vpop(VFPRegister r);
     void ma_vpush(VFPRegister r);
 
-    // branches when done from within arm-specific code
+    // Branches when done from within arm-specific code.
     BufferOffset ma_b(Label *dest, Condition c = Always, bool isPatchable = false);
     void ma_bx(Register dest, Condition c = Always);
 
     void ma_b(void *target, Relocation::Kind reloc, Condition c = Always);
 
-    // this is almost NEVER necessary, we'll basically never be calling a label
+    // This is almost NEVER necessary, we'll basically never be calling a label
     // except, possibly in the crazy bailout-table case.
     void ma_bl(Label *dest, Condition c = Always);
 
     void ma_blx(Register dest, Condition c = Always);
 
-    //VFP/ALU
+    // VFP/ALU:
     void ma_vadd(FloatRegister src1, FloatRegister src2, FloatRegister dst);
     void ma_vsub(FloatRegister src1, FloatRegister src2, FloatRegister dst);
 
     void ma_vmul(FloatRegister src1, FloatRegister src2, FloatRegister dst);
     void ma_vdiv(FloatRegister src1, FloatRegister src2, FloatRegister dst);
 
     void ma_vneg(FloatRegister src, FloatRegister dest, Condition cc = Always);
     void ma_vmov(FloatRegister src, FloatRegister dest, Condition cc = Always);
@@ -351,29 +355,29 @@ class MacroAssemblerARM : public Assembl
     void ma_vadd_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
     void ma_vsub_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
 
     void ma_vmul_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
     void ma_vdiv_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
 
     void ma_vneg_f32(FloatRegister src, FloatRegister dest, Condition cc = Always);
 
-    // source is F64, dest is I32
+    // Source is F64, dest is I32:
     void ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest, Condition cc = Always);
     void ma_vcvt_F64_U32(FloatRegister src, FloatRegister dest, Condition cc = Always);
 
-    // source is I32, dest is F64
+    // Source is I32, dest is F64:
     void ma_vcvt_I32_F64(FloatRegister src, FloatRegister dest, Condition cc = Always);
     void ma_vcvt_U32_F64(FloatRegister src, FloatRegister dest, Condition cc = Always);
 
-    // source is F32, dest is I32
+    // Source is F32, dest is I32:
     void ma_vcvt_F32_I32(FloatRegister src, FloatRegister dest, Condition cc = Always);
     void ma_vcvt_F32_U32(FloatRegister src, FloatRegister dest, Condition cc = Always);
 
-    // source is I32, dest is F32
+    // Source is I32, dest is F32:
     void ma_vcvt_I32_F32(FloatRegister src, FloatRegister dest, Condition cc = Always);
     void ma_vcvt_U32_F32(FloatRegister src, FloatRegister dest, Condition cc = Always);
 
     void ma_vxfer(FloatRegister src, Register dest, Condition cc = Always);
     void ma_vxfer(FloatRegister src, Register dest1, Register dest2, Condition cc = Always);
 
     void ma_vxfer(VFPRegister src, Register dest, Condition cc = Always);
     void ma_vxfer(VFPRegister src, Register dest1, Register dest2, Condition cc = Always);
@@ -386,53 +390,53 @@ class MacroAssemblerARM : public Assembl
     BufferOffset ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc = Always);
     BufferOffset ma_vldr(const Operand &addr, VFPRegister dest, Condition cc = Always);
     BufferOffset ma_vldr(VFPRegister src, Register base, Register index, int32_t shift = defaultShift, Condition cc = Always);
 
     BufferOffset ma_vstr(VFPRegister src, VFPAddr addr, Condition cc = Always);
     BufferOffset ma_vstr(VFPRegister src, const Operand &addr, Condition cc = Always);
 
     BufferOffset ma_vstr(VFPRegister src, Register base, Register index, int32_t shift = defaultShift, Condition cc = Always);
-    // calls an Ion function, assumes that the stack is untouched (8 byte alinged)
+    // Calls an Ion function, assumes that the stack is untouched (8 byte
+    // aligned).
     void ma_callIon(const Register reg);
-    // callso an Ion function, assuming that sp has already been decremented
+    // Calls an Ion function, assuming that sp has already been decremented.
     void ma_callIonNoPush(const Register reg);
-    // calls an ion function, assuming that the stack is currently not 8 byte aligned
+    // Calls an ion function, assuming that the stack is currently not 8 byte
+    // aligned.
     void ma_callIonHalfPush(const Register reg);
 
     void ma_call(ImmPtr dest);
 
     // calls reg, storing the return address into sp[0]
     void ma_callAndStoreRet(const Register reg, uint32_t stackArgBytes);
 
-    // Float registers can only be loaded/stored in continuous runs
-    // when using vstm/vldm.
-    // This function breaks set into continuous runs and loads/stores
-    // them at [rm]. rm will be modified and left in a state logically
-    // suitable for the next load/store.
-    // Returns the offset from [dm] for the logical next load/store.
+    // Float registers can only be loaded/stored in continuous runs when using
+    // vstm/vldm. This function breaks set into continuous runs and loads/stores
+    // them at [rm]. rm will be modified and left in a state logically suitable
+    // for the next load/store. Returns the offset from [dm] for the logical
+    // next load/store.
     int32_t transferMultipleByRuns(FloatRegisterSet set, LoadStore ls,
                                    Register rm, DTMMode mode)
     {
         if (mode == IA) {
             return transferMultipleByRunsImpl
                 <FloatRegisterForwardIterator>(set, ls, rm, mode, 1);
         }
         if (mode == DB) {
             return transferMultipleByRunsImpl
                 <FloatRegisterBackwardIterator>(set, ls, rm, mode, -1);
         }
         MOZ_ASSUME_UNREACHABLE("Invalid data transfer addressing mode");
     }
 
 private:
     // Implementation for transferMultipleByRuns so we can use different
-    // iterators for forward/backward traversals.
-    // The sign argument should be 1 if we traverse forwards, -1 if we
-    // traverse backwards.
+    // iterators for forward/backward traversals. The sign argument should be 1
+    // if we traverse forwards, -1 if we traverse backwards.
     template<typename RegisterIterator> int32_t
     transferMultipleByRunsImpl(FloatRegisterSet set, LoadStore ls,
                                Register rm, DTMMode mode, int32_t sign)
     {
         JS_ASSERT(sign == 1 || sign == -1);
 
         int32_t delta = sign * sizeof(double);
         int32_t offset = 0;
@@ -453,71 +457,70 @@ private:
 };
 
 class MacroAssemblerARMCompat : public MacroAssemblerARM
 {
     bool inCall_;
     // Number of bytes the stack is adjusted inside a call to C. Calls to C may
     // not be nested.
     uint32_t args_;
-    // The actual number of arguments that were passed, used to assert that
-    // the initial number of arguments declared was correct.
+    // The actual number of arguments that were passed, used to assert that the
+    // initial number of arguments declared was correct.
     uint32_t passedArgs_;
     uint32_t passedArgTypes_;
 
     // ARM treats arguments as a vector in registers/memory, that looks like:
     // { r0, r1, r2, r3, [sp], [sp,+4], [sp,+8] ... }
-    // usedIntSlots_ keeps track of how many of these have been used.
-    // It bears a passing resemblance to passedArgs_, but a single argument
-    // can effectively use between one and three slots depending on its size and
-    // alignment requirements
+    // usedIntSlots_ keeps track of how many of these have been used. It bears a
+    // passing resemblance to passedArgs_, but a single argument can effectively
+    // use between one and three slots depending on its size and alignment
+    // requirements.
     uint32_t usedIntSlots_;
 #if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
     uint32_t usedFloatSlots_;
     bool usedFloat32_;
     uint32_t padding_;
 #endif
     bool dynamicAlignment_;
 
-    // Used to work around the move resolver's lack of support for
-    // moving into register pairs, which the softfp ABI needs.
+    // Used to work around the move resolver's lack of support for moving into
+    // register pairs, which the softfp ABI needs.
     mozilla::Array<MoveOperand, 2> floatArgsInGPR;
     mozilla::Array<bool, 2> floatArgsInGPRValid;
 
     // Compute space needed for the function call and set the properties of the
-    // callee.  It returns the space which has to be allocated for calling the
+    // callee. It returns the space which has to be allocated for calling the
     // function.
     //
     // arg            Number of arguments of the function.
     void setupABICall(uint32_t arg);
 
   protected:
     MoveResolver moveResolver_;
 
     // Extra bytes currently pushed onto the frame beyond frameDepth_. This is
     // needed to compute offsets to stack slots while temporary space has been
-    // reserved for unexpected spills or C++ function calls. It is maintained
-    // by functions which track stack alignment, which for clear distinction
-    // use StudlyCaps (for example, Push, Pop).
+    // reserved for unexpected spills or C++ function calls. It is maintained by
+    // functions which track stack alignment, which for clear distinction use
+    // StudlyCaps (for example, Push, Pop).
     uint32_t framePushed_;
     void adjustFrame(int value) {
         setFramePushed(framePushed_ + value);
     }
   public:
     MacroAssemblerARMCompat()
       : inCall_(false),
         framePushed_(0)
     { }
 
   public:
     using MacroAssemblerARM::call;
 
-    // jumps + other functions that should be called from
-    // non-arm specific code...
-    // basically, an x86 front end on top of the ARM code.
+    // Jumps + other functions that should be called from non-arm specific
+    // code. Basically, an x86 front end on top of the ARM code.
     void j(Condition code , Label *dest)
     {
         as_b(dest, code);
     }
     void j(Label *dest)
     {
         as_b(dest, Always);
     }
@@ -537,17 +540,17 @@ class MacroAssemblerARMCompat : public M
     void mov(Address src, Register dest) {
         MOZ_ASSUME_UNREACHABLE("NYI-IC");
     }
 
     void call(const Register reg) {
         as_blx(reg);
     }
     void call(Label *label) {
-        // for now, assume that it'll be nearby?
+        // For now, assume that it'll be nearby?
         as_bl(label, Always);
     }
     void call(ImmWord imm) {
         call(ImmPtr((void*)imm.value));
     }
     void call(ImmPtr imm) {
         BufferOffset bo = m_buffer.nextOffset();
         addPendingJump(bo, imm, Relocation::HARDCODED);
@@ -556,17 +559,17 @@ class MacroAssemblerARMCompat : public M
     void call(AsmJSImmPtr imm) {
         movePtr(imm, CallReg);
         call(CallReg);
     }
     void call(JitCode *c) {
         BufferOffset bo = m_buffer.nextOffset();
         addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
         RelocStyle rs;
-        if (hasMOVWT())
+        if (HasMOVWT())
             rs = L_MOVWT;
         else
             rs = L_LDR;
 
         ma_movPatchable(ImmPtr(c->raw()), ScratchRegister, Always, rs);
         ma_callIonHalfPush(ScratchRegister);
     }
 
@@ -602,17 +605,17 @@ class MacroAssemblerARMCompat : public M
         // the stack.
         subPtr(Imm32(sizeof(void*)), sp);
     }
 
     void branch(JitCode *c) {
         BufferOffset bo = m_buffer.nextOffset();
         addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
         RelocStyle rs;
-        if (hasMOVWT())
+        if (HasMOVWT())
             rs = L_MOVWT;
         else
             rs = L_LDR;
 
         ma_movPatchable(ImmPtr(c->raw()), ScratchRegister, Always, rs);
         ma_bx(ScratchRegister);
     }
     void branch(const Register reg) {
@@ -672,29 +675,29 @@ class MacroAssemblerARMCompat : public M
 
     void popN(Register reg, Imm32 extraSpace) {
         Imm32 totSpace = Imm32(extraSpace.value + 4);
         ma_dtr(IsLoad, sp, totSpace, reg, PostIndex);
     }
 
     CodeOffsetLabel toggledJump(Label *label);
 
-    // Emit a BLX or NOP instruction. ToggleCall can be used to patch
-    // this instruction.
+    // Emit a BLX or NOP instruction. ToggleCall can be used to patch this
+    // instruction.
     CodeOffsetLabel toggledCall(JitCode *target, bool enabled);
 
     CodeOffsetLabel pushWithPatch(ImmWord imm) {
         CodeOffsetLabel label = movWithPatch(imm, ScratchRegister);
         ma_push(ScratchRegister);
         return label;
     }
 
     CodeOffsetLabel movWithPatch(ImmWord imm, Register dest) {
         CodeOffsetLabel label = CodeOffsetLabel(currentOffset());
-        ma_movPatchable(Imm32(imm.value), dest, Always, hasMOVWT() ? L_MOVWT : L_LDR);
+        ma_movPatchable(Imm32(imm.value), dest, Always, HasMOVWT() ? L_MOVWT : L_LDR);
         return label;
     }
     CodeOffsetLabel movWithPatch(ImmPtr imm, Register dest) {
         return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
     }
 
     void jump(Label *label) {
         as_b(label);
@@ -727,30 +730,30 @@ class MacroAssemblerARMCompat : public M
         test32(lhs, rhs);
     }
 
     // Returns the register containing the type tag.
     Register splitTagForTest(const ValueOperand &value) {
         return value.typeReg();
     }
 
-    // higher level tag testing code
+    // Higher level tag testing code.
     Condition testInt32(Condition cond, const ValueOperand &value);
     Condition testBoolean(Condition cond, const ValueOperand &value);
     Condition testDouble(Condition cond, const ValueOperand &value);
     Condition testNull(Condition cond, const ValueOperand &value);
     Condition testUndefined(Condition cond, const ValueOperand &value);
     Condition testString(Condition cond, const ValueOperand &value);
     Condition testObject(Condition cond, const ValueOperand &value);
     Condition testNumber(Condition cond, const ValueOperand &value);
     Condition testMagic(Condition cond, const ValueOperand &value);
 
     Condition testPrimitive(Condition cond, const ValueOperand &value);
 
-    // register-based tests
+    // Register-based tests.
     Condition testInt32(Condition cond, Register tag);
     Condition testBoolean(Condition cond, Register tag);
     Condition testNull(Condition cond, Register tag);
     Condition testUndefined(Condition cond, Register tag);
     Condition testString(Condition cond, Register tag);
     Condition testObject(Condition cond, Register tag);
     Condition testDouble(Condition cond, Register tag);
     Condition testNumber(Condition cond, Register tag);
@@ -788,17 +791,17 @@ class MacroAssemblerARMCompat : public M
         Condition c = testPrimitive(cond, t);
         ma_b(label, c);
     }
 
     void branchTestValue(Condition cond, const ValueOperand &value, const Value &v, Label *label);
     void branchTestValue(Condition cond, const Address &valaddr, const ValueOperand &value,
                          Label *label);
 
-    // unboxing code
+    // Unboxing code.
     void unboxInt32(const ValueOperand &operand, Register dest);
     void unboxInt32(const Address &src, Register dest);
     void unboxBoolean(const ValueOperand &operand, Register dest);
     void unboxBoolean(const Address &src, Register dest);
     void unboxDouble(const ValueOperand &operand, FloatRegister dest);
     void unboxDouble(const Address &src, FloatRegister dest);
     void unboxString(const ValueOperand &operand, Register dest);
     void unboxString(const Address &src, Register dest);
@@ -806,17 +809,17 @@ class MacroAssemblerARMCompat : public M
     void unboxObject(const Address &src, Register dest);
     void unboxValue(const ValueOperand &src, AnyRegister dest);
     void unboxPrivate(const ValueOperand &src, Register dest);
 
     void notBoolean(const ValueOperand &val) {
         ma_eor(Imm32(1), val.payloadReg());
     }
 
-    // boxing code
+    // Boxing code.
     void boxDouble(FloatRegister src, const ValueOperand &dest);
     void boxNonDouble(JSValueType type, Register src, const ValueOperand &dest);
 
     // Extended unboxing API. If the payload is already in a register, returns
     // that register. Otherwise, provides a move to the given scratch register,
     // and returns that.
     Register extractObject(const Address &address, Register scratch);
     Register extractObject(const ValueOperand &value, Register scratch) {
@@ -835,17 +838,17 @@ class MacroAssemblerARMCompat : public M
     }
 
     void boolValueToDouble(const ValueOperand &operand, FloatRegister dest);
     void int32ValueToDouble(const ValueOperand &operand, FloatRegister dest);
     void loadInt32OrDouble(const Operand &src, FloatRegister dest);
     void loadInt32OrDouble(Register base, Register index,
                            FloatRegister dest, int32_t shift = defaultShift);
     void loadConstantDouble(double dp, FloatRegister dest);
-    // treat the value as a boolean, and set condition codes accordingly
+    // Treat the value as a boolean, and set condition codes accordingly.
     Condition testInt32Truthy(bool truthy, const ValueOperand &operand);
     Condition testBooleanTruthy(bool truthy, const ValueOperand &operand);
     Condition testDoubleTruthy(bool truthy, FloatRegister reg);
     Condition testStringTruthy(bool truthy, const ValueOperand &value);
 
     void boolValueToFloat32(const ValueOperand &operand, FloatRegister dest);
     void int32ValueToFloat32(const ValueOperand &operand, FloatRegister dest);
     void loadConstantFloat32(float f, FloatRegister dest);
@@ -1119,17 +1122,17 @@ class MacroAssemblerARMCompat : public M
             ma_mov(s1, d1);
     }
 
     void storeValue(ValueOperand val, Operand dst);
     void storeValue(ValueOperand val, const BaseIndex &dest);
     void storeValue(JSValueType type, Register reg, BaseIndex dest) {
         // Harder cases not handled yet.
         JS_ASSERT(dest.offset == 0);
-        ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add);
+        ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, OpAdd);
         storeValue(type, reg, Address(ScratchRegister, 0));
     }
     void storeValue(ValueOperand val, const Address &dest) {
         storeValue(val, Operand(dest));
     }
     void storeValue(JSValueType type, Register reg, Address dest) {
         ma_str(reg, dest);
         ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), secondScratchReg_);
@@ -1143,17 +1146,17 @@ class MacroAssemblerARMCompat : public M
             ma_mov(ImmGCPtr(reinterpret_cast<gc::Cell *>(val.toGCThing())), secondScratchReg_);
         else
             ma_mov(Imm32(jv.s.payload.i32), secondScratchReg_);
         ma_str(secondScratchReg_, dest);
     }
     void storeValue(const Value &val, BaseIndex dest) {
         // Harder cases not handled yet.
         JS_ASSERT(dest.offset == 0);
-        ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add);
+        ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, OpAdd);
         storeValue(val, Address(ScratchRegister, 0));
     }
 
     void loadValue(Address src, ValueOperand val);
     void loadValue(Operand dest, ValueOperand val) {
         loadValue(dest.toAddress(), val);
     }
     void loadValue(const BaseIndex &addr, ValueOperand val);
@@ -1262,17 +1265,17 @@ class MacroAssemblerARMCompat : public M
     // Builds an exit frame on the stack, with a return address to an internal
     // non-function. Returns offset to be passed to markSafepointAt().
     bool buildFakeExitFrame(Register scratch, uint32_t *offset);
 
     void callWithExitFrame(JitCode *target);
     void callWithExitFrame(JitCode *target, Register dynStack);
 
     // Makes an Ion call using the only two methods that it is sane for
-    // indep code to make a call
+    // independent code to make a call.
     void callIon(Register callee);
 
     void reserveStack(uint32_t amount);
     void freeStack(uint32_t amount);
     void freeStack(Register amount);
 
     void add32(Register src, Register dest);
     void add32(Imm32 imm, Register dest);
@@ -1389,18 +1392,18 @@ class MacroAssemblerARMCompat : public M
     void storeFloat32(FloatRegister src, BaseIndex addr) {
         // Harder cases not handled yet.
         JS_ASSERT(addr.offset == 0);
         uint32_t scale = Imm32::ShiftOf(addr.scale).value;
         ma_vstr(VFPRegister(src).singleOverlay(), addr.base, addr.index, scale);
     }
 
     void clampIntToUint8(Register reg) {
-        // look at (reg >> 8) if it is 0, then reg shouldn't be clamped
-        // if it is <0, then we want to clamp to 0, otherwise, we wish to clamp to 255
+        // Look at (reg >> 8) if it is 0, then reg shouldn't be clamped if it is
+        // <0, then we want to clamp to 0, otherwise, we wish to clamp to 255
         as_mov(ScratchRegister, asr(reg, 8), SetCond);
         ma_mov(Imm32(0xff), reg, NoSetCond, NotEqual);
         ma_mov(Imm32(0), reg, NoSetCond, Signed);
     }
 
     void incrementInt32Value(const Address &addr) {
         add32(Imm32(1), ToPayload(addr));
     }
@@ -1433,17 +1436,17 @@ class MacroAssemblerARMCompat : public M
     }
     void mulBy3(const Register &src, const Register &dest) {
         as_add(dest, src, lsl(src, 1));
     }
 
     void setStackArg(Register reg, uint32_t arg);
 
     void breakpoint();
-    // conditional breakpoint
+    // Conditional breakpoint.
     void breakpoint(Condition cc);
 
     void compareDouble(FloatRegister lhs, FloatRegister rhs);
     void branchDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
                       Label *label);
 
     void compareFloat(FloatRegister lhs, FloatRegister rhs);
     void branchFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
@@ -1456,18 +1459,18 @@ class MacroAssemblerARMCompat : public M
     }
     void rshiftPtrArithmetic(Imm32 imm, Register dest) {
         ma_asr(imm, dest, dest);
     }
     void lshiftPtr(Imm32 imm, Register dest) {
         ma_lsl(imm, dest, dest);
     }
 
-    // If source is a double, load it into dest. If source is int32,
-    // convert it to double. Else, branch to failure.
+    // If source is a double, load it into dest. If source is int32, convert it
+    // to double. Else, branch to failure.
     void ensureDouble(const ValueOperand &source, FloatRegister dest, Label *failure);
 
     void
     emitSet(Assembler::Condition cond, Register dest)
     {
         ma_mov(Imm32(0), dest);
         ma_mov(Imm32(1), dest, NoSetCond, cond);
     }
@@ -1505,18 +1508,18 @@ class MacroAssemblerARMCompat : public M
 
     // Sets up an ABI call for when the alignment is not known. This may need a
     // scratch register.
     void setupUnalignedABICall(uint32_t args, Register scratch);
 
     // Arguments must be assigned in a left-to-right order. This process may
     // temporarily use more stack, in which case esp-relative addresses will be
     // automatically adjusted. It is extremely important that esp-relative
-    // addresses are computed *after* setupABICall(). Furthermore, no
-    // operations should be emitted while setting arguments.
+    // addresses are computed *after* setupABICall(). Furthermore, no operations
+    // should be emitted while setting arguments.
     void passABIArg(const MoveOperand &from, MoveOp::Type type);
     void passABIArg(Register reg);
     void passABIArg(FloatRegister reg, MoveOp::Type type);
     void passABIArg(const ValueOperand &regs);
 
   private:
     void passHardFpABIArg(const MoveOperand &from, MoveOp::Type type);
     void passSoftFpABIArg(const MoveOperand &from, MoveOp::Type type);
@@ -1537,30 +1540,30 @@ class MacroAssemblerARMCompat : public M
     CodeOffsetLabel labelForPatch() {
         return CodeOffsetLabel(nextOffset().getOffset());
     }
 
     void computeEffectiveAddress(const Address &address, Register dest) {
         ma_add(address.base, Imm32(address.offset), dest, NoSetCond);
     }
     void computeEffectiveAddress(const BaseIndex &address, Register dest) {
-        ma_alu(address.base, lsl(address.index, address.scale), dest, op_add, NoSetCond);
+        ma_alu(address.base, lsl(address.index, address.scale), dest, OpAdd, NoSetCond);
         if (address.offset)
             ma_add(dest, Imm32(address.offset), dest, NoSetCond);
     }
     void floor(FloatRegister input, Register output, Label *handleNotAnInt);
     void floorf(FloatRegister input, Register output, Label *handleNotAnInt);
     void ceil(FloatRegister input, Register output, Label *handleNotAnInt);
     void ceilf(FloatRegister input, Register output, Label *handleNotAnInt);
     void round(FloatRegister input, Register output, Label *handleNotAnInt, FloatRegister tmp);
     void roundf(FloatRegister input, Register output, Label *handleNotAnInt, FloatRegister tmp);
 
     void clampCheck(Register r, Label *handleNotAnInt) {
-        // check explicitly for r == INT_MIN || r == INT_MAX
-        // this is the instruction sequence that gcc generated for this
+        // Check explicitly for r == INT_MIN || r == INT_MAX
+        // This is the instruction sequence that gcc generated for this
         // operation.
         ma_sub(r, Imm32(0x80000001), ScratchRegister);
         ma_cmn(ScratchRegister, Imm32(3));
         ma_b(handleNotAnInt, Above);
     }
 
     void memIntToValue(Address Source, Address Dest) {
         load32(Source, lr);
--- a/js/src/jit/arm/MoveEmitter-arm.cpp
+++ b/js/src/jit/arm/MoveEmitter-arm.cpp
@@ -41,17 +41,17 @@ MoveEmitterARM::~MoveEmitterARM()
 Operand
 MoveEmitterARM::cycleSlot() const
 {
     int offset =  masm.framePushed() - pushedAtCycle_;
     JS_ASSERT(offset < 4096 && offset > -4096);
     return Operand(StackPointer, offset);
 }
 
-// THIS IS ALWAYS AN LDRAddr.  It should not be wrapped in an operand, methinks
+// THIS IS ALWAYS AN LDRAddr. It should not be wrapped in an operand, methinks.
 Operand
 MoveEmitterARM::spillSlot() const
 {
     int offset =  masm.framePushed() - pushedAtSpill_;
     JS_ASSERT(offset < 4096 && offset > -4096);
     return Operand(StackPointer, offset);
 }
 
@@ -78,22 +78,22 @@ MoveEmitterARM::toOperand(const MoveOper
 }
 
 Register
 MoveEmitterARM::tempReg()
 {
     if (spilledReg_ != InvalidReg)
         return spilledReg_;
 
-    // For now, just pick r12/ip as the eviction point. This is totally
-    // random, and if it ends up being bad, we can use actual heuristics later.
-    // r12 is actually a bad choice.  it is the scratch register, which is frequently
-    // used for address computations, such as those found when we attempt to access
-    // values more than 4096 off of the stack pointer.
-    // instead, use lr, the LinkRegister.
+    // For now, just pick r12/ip as the eviction point. This is totally random,
+    // and if it ends up being bad, we can use actual heuristics later. r12 is
+    // actually a bad choice. It is the scratch register, which is frequently
+    // used for address computations, such as those found when we attempt to
+    // access values more than 4096 off of the stack pointer. Instead, use lr,
+    // the LinkRegister.
     spilledReg_ = r14;
     if (pushedAtSpill_ == -1) {
         masm.Push(spilledReg_);
         pushedAtSpill_ = masm.framePushed();
     } else {
         masm.ma_str(spilledReg_, spillSlot());
     }
     return spilledReg_;
--- a/js/src/jit/arm/Simulator-arm.cpp
+++ b/js/src/jit/arm/Simulator-arm.cpp
@@ -186,64 +186,64 @@ class SimInstruction {
     inline int VLValue() const { return bit(20); }
     inline int VCValue() const { return bit(8); }
     inline int VAValue() const { return bits(23, 21); }
     inline int VBValue() const { return bits(6, 5); }
     inline int VFPNRegValue(VFPRegPrecision pre) { return VFPGlueRegValue(pre, 16, 7); }
     inline int VFPMRegValue(VFPRegPrecision pre) { return VFPGlueRegValue(pre, 0, 5); }
     inline int VFPDRegValue(VFPRegPrecision pre) { return VFPGlueRegValue(pre, 12, 22); }
 
-    // Fields used in Data processing instructions
+    // Fields used in Data processing instructions.
     inline int opcodeValue() const { return static_cast<ALUOp>(bits(24, 21)); }
     inline ALUOp opcodeField() const { return static_cast<ALUOp>(bitField(24, 21)); }
     inline int sValue() const { return bit(20); }
 
-    // with register
+    // With register.
     inline int rmValue() const { return bits(3, 0); }
     inline ShiftType shifttypeValue() const { return static_cast<ShiftType>(bits(6, 5)); }
     inline int rsValue() const { return bits(11, 8); }
     inline int shiftAmountValue() const { return bits(11, 7); }
 
-    // with immediate
+    // With immediate.
     inline int rotateValue() const { return bits(11, 8); }
     inline int immed8Value() const { return bits(7, 0); }
     inline int immed4Value() const { return bits(19, 16); }
     inline int immedMovwMovtValue() const { return immed4Value() << 12 | offset12Value(); }
 
-    // Fields used in Load/Store instructions
+    // Fields used in Load/Store instructions.
     inline int PUValue() const { return bits(24, 23); }
     inline int PUField() const { return bitField(24, 23); }
     inline int bValue() const { return bit(22); }
     inline int wValue() const { return bit(21); }
     inline int lValue() const { return bit(20); }
 
-    // with register uses same fields as Data processing instructions above
-    // with immediate
+    // With register uses same fields as Data processing instructions above with
+    // immediate.
     inline int offset12Value() const { return bits(11, 0); }
 
-    // multiple
+    // Multiple.
     inline int rlistValue() const { return bits(15, 0); }
 
-    // extra loads and stores
+    // Extra loads and stores.
     inline int signValue() const { return bit(6); }
     inline int hValue() const { return bit(5); }
     inline int immedHValue() const { return bits(11, 8); }
     inline int immedLValue() const { return bits(3, 0); }
 
-    // Fields used in Branch instructions
+    // Fields used in Branch instructions.
     inline int linkValue() const { return bit(24); }
     inline int sImmed24Value() const { return ((instructionBits() << 8) >> 8); }
 
-    // Fields used in Software interrupt instructions
+    // Fields used in Software interrupt instructions.
     inline SoftwareInterruptCodes svcValue() const {
         return static_cast<SoftwareInterruptCodes>(bits(23, 0));
     }
 
-    // Test for special encodings of type 0 instructions (extra loads and stores,
-    // as well as multiplications).
+    // Test for special encodings of type 0 instructions (extra loads and
+    // stores, as well as multiplications).
     inline bool isSpecialType0() const { return (bit(7) == 1) && (bit(4) == 1); }
 
     // Test for miscellaneous instructions encodings of type 0 instructions.
     inline bool isMiscType0() const {
         return bit(24) == 1 && bit(23) == 0 && bit(20) == 0 && (bit(7) == 0);
     }
 
     // Test for a nop instruction, which falls under type 1.
@@ -626,22 +626,23 @@ ReadLine(const char *prompt)
         if (fgets(line_buf, sizeof(line_buf), stdin) == nullptr) {
             // fgets got an error. Just give up.
             if (result)
                 js_delete(result);
             return nullptr;
         }
         int len = strlen(line_buf);
         if (len > 0 && line_buf[len - 1] == '\n') {
-            // Since we read a new line we are done reading the line. This
-            // will exit the loop after copying this buffer into the result.
+            // Since we read a new line we are done reading the line. This will
+            // exit the loop after copying this buffer into the result.
             keep_going = false;
         }
         if (!result) {
-            // Allocate the initial result and make room for the terminating '\0'
+            // Allocate the initial result and make room for the terminating
+            // '\0'.
             result = (char *)js_malloc(len + 1);
             if (!result)
                 return nullptr;
         } else {
             // Allocate a new result with enough room for the new addition.
             int new_len = offset + len + 1;
             char *new_result = (char *)js_malloc(new_len);
             if (!new_result)
@@ -688,17 +689,17 @@ ArmDebugger::debug()
 #define STR(a) #a
 #define XSTR(a) STR(a)
 
     char cmd[COMMAND_SIZE + 1];
     char arg1[ARG_SIZE + 1];
     char arg2[ARG_SIZE + 1];
     char *argv[3] = { cmd, arg1, arg2 };
 
-    // make sure to have a proper terminating character if reaching the limit
+    // Make sure to have a proper terminating character if reaching the limit.
     cmd[COMMAND_SIZE] = 0;
     arg1[ARG_SIZE] = 0;
     arg2[ARG_SIZE] = 0;
 
     // Undo all set breakpoints while running in the debugger shell. This will
     // make them invisible to all commands.
     undoBreakpoints();
 
@@ -730,17 +731,18 @@ ArmDebugger::debug()
                 continue;
             } else if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
                 sim_->instructionDecode(reinterpret_cast<SimInstruction *>(sim_->get_pc()));
                 sim_->icount_++;
             } else if ((strcmp(cmd, "skip") == 0)) {
                 sim_->set_pc(sim_->get_pc() + 4);
                 sim_->icount_++;
             } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
-                // Execute the one instruction we broke at with breakpoints disabled.
+                // Execute the one instruction we broke at with breakpoints
+                // disabled.
                 sim_->instructionDecode(reinterpret_cast<SimInstruction *>(sim_->get_pc()));
                 sim_->icount_++;
                 // Leave the debugger shell.
                 done = true;
             } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
                 if (argc == 2 || (argc == 3 && strcmp(arg2, "fp") == 0)) {
                     int32_t value;
                     double dvalue;
@@ -1071,17 +1073,17 @@ CheckICache(SimulatorRuntime::ICacheMap 
     bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
     char *cached_line = cache_page->cachedData(offset & ~CachePage::kLineMask);
     if (cache_hit) {
         // Check that the data in memory matches the contents of the I-cache.
         MOZ_ASSERT(memcmp(reinterpret_cast<void*>(instr),
                           cache_page->cachedData(offset),
                           SimInstruction::kInstrSize) == 0);
     } else {
-        // Cache miss.  Load memory into the cache.
+        // Cache miss. Load memory into the cache.
         memcpy(cached_line, line, CachePage::kLineLength);
         *cache_valid_byte = CachePage::LINE_VALID;
     }
 }
 
 HashNumber
 SimulatorRuntime::ICacheHasher::hash(const Lookup &l)
 {
@@ -1175,18 +1177,18 @@ Simulator::Simulator(SimulatorRuntime *s
     registers_[pc] = bad_lr;
     registers_[lr] = bad_lr;
 
     lastDebuggerInput_ = nullptr;
 }
 
 // When the generated code calls a VM function (masm.callWithABI) we need to
 // call that function instead of trying to execute it with the simulator
-// (because it's x86 code instead of arm code). We do that by redirecting the
-// VM call to a svc (Supervisor Call) instruction that is handled by the