Replace index-based for loops with range-based for loops
author Sylvestre Ledru <sledru@mozilla.com>
Thu, 27 Jul 2017 10:43:05 +0200
changeset 618114 5e1aec769707e9367ffec52425d53280411e2c57
parent 618023 49a53627ac1b541d74fa31e1488211573c7a3ee7
child 618115 267c72a9f0c6d9d05d8c70e7ecd0c88eb91603db
child 621082 bfcd96000643f7f7d80a582251b8a73ca5394275
push id 71228
push user bmo:sledru@mozilla.com
push date Sun, 30 Jul 2017 13:08:00 +0000
milestone 56.0a1
Replace index-based for loops with range-based for loops. MozReview-Commit-ID: aU56KefqZI
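The pattern applied throughout this patch, shown on a minimal stand-alone
example (hypothetical code, not taken from any of the files below; the
mozilla/ArrayUtils.h include for mozilla::ArrayLength is an assumption):

    #include <cstdint>
    #include <cstdio>
    #include "mozilla/ArrayUtils.h"

    static const char* const kNames[] = { "alpha", "beta", "gamma" };

    // Before: index-based iteration over a fixed-size array.
    void PrintNamesIndexed() {
      for (uint32_t i = 0; i < mozilla::ArrayLength(kNames); ++i) {
        printf("%s\n", kNames[i]);
      }
    }

    // After: range-based loop; the element type is deduced and no index
    // bookkeeping or ArrayLength() call is needed.
    void PrintNamesRanged() {
      for (const char* name : kNames) {
        printf("%s\n", name);
      }
    }

Loops that only read each element take the loop variable by value or by
const reference; loops that mutate elements in place take it by non-const
reference (auto &).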
accessible/base/ARIAMap.cpp
accessible/base/Logging.cpp
accessible/base/TextAttrs.cpp
accessible/base/nsAccessibilityService.cpp
accessible/generic/DocAccessible.cpp
accessible/xpcom/xpcAccessible.cpp
browser/components/shell/nsGNOMEShellService.cpp
caps/nsScriptSecurityManager.cpp
docshell/base/nsAboutRedirector.cpp
dom/base/CustomElementRegistry.cpp
dom/base/DOMException.cpp
dom/base/FragmentOrElement.cpp
dom/base/nsDOMClassInfo.cpp
dom/base/nsGlobalWindowCommands.cpp
dom/canvas/WebGL2Context.cpp
dom/canvas/WebGLShaderValidator.cpp
dom/events/DataTransfer.cpp
dom/events/DataTransferItem.cpp
dom/events/UIEvent.cpp
dom/events/WheelHandlingHelper.cpp
dom/file/MemoryBlobImpl.cpp
dom/gamepad/linux/udev.h
dom/ipc/ContentProcessManager.cpp
dom/media/DecoderDoctorDiagnostics.cpp
dom/media/MediaCache.cpp
dom/media/gtest/TestIntervalSet.cpp
dom/media/gtest/TestVPXDecoding.cpp
dom/media/gtest/TestVideoTrackEncoder.cpp
dom/media/mediasource/gtest/TestContainerParser.cpp
dom/media/ogg/OggDemuxer.cpp
dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp
dom/media/webaudio/DelayBuffer.cpp
dom/media/webaudio/ScriptProcessorNode.cpp
dom/media/webrtc/MediaTrackConstraints.h
dom/media/webspeech/synth/speechd/SpeechDispatcherService.cpp
dom/media/webspeech/synth/test/nsFakeSynthServices.cpp
dom/plugins/ipc/PluginModuleParent.cpp
dom/plugins/test/testplugin/nptest.cpp
dom/security/nsCSPParser.cpp
dom/security/nsCSPUtils.cpp
dom/smil/nsSMILParserUtils.cpp
dom/svg/SVGTests.cpp
dom/svg/nsSVGTransform.cpp
dom/svg/nsSVGTransform.h
dom/xhr/XMLHttpRequestMainThread.cpp
editor/libeditor/CompositionTransaction.cpp
editor/libeditor/EditorCommands.cpp
editor/libeditor/HTMLEditRules.cpp
extensions/auth/nsAuthGSSAPI.cpp
extensions/cookie/nsPermissionManager.cpp
extensions/universalchardet/src/base/nsLatin1Prober.cpp
extensions/universalchardet/src/base/nsMBCSGroupProber.cpp
extensions/universalchardet/src/base/nsUniversalDetector.cpp
gfx/2d/DrawTargetCairo.cpp
gfx/2d/DrawTargetTiled.cpp
gfx/2d/DrawTargetTiled.h
gfx/2d/FilterNodeSoftware.cpp
gfx/2d/Path.cpp
gfx/2d/PathCairo.cpp
gfx/2d/PathRecording.cpp
gfx/2d/RecordedEventImpl.h
gfx/2d/SFNTData.cpp
gfx/2d/SFNTNameTable.cpp
gfx/2d/SVGTurbulenceRenderer-inl.h
gfx/2d/unittest/TestBase.cpp
gfx/gl/GLContext.h
gfx/gl/GLLibraryLoader.cpp
gfx/layers/apz/src/CheckerboardEvent.cpp
gfx/layers/client/TiledContentClient.cpp
gfx/layers/composite/FPSCounter.cpp
gfx/layers/composite/FrameUniformityData.cpp
gfx/layers/composite/TextRenderer.cpp
gfx/layers/ipc/CompositorBridgeParent.cpp
gfx/layers/ipc/CrossProcessCompositorBridgeParent.cpp
gfx/layers/ipc/ISurfaceAllocator.cpp
gfx/layers/mlgpu/MaskOperation.cpp
gfx/layers/mlgpu/RenderViewMLGPU.cpp
gfx/layers/opengl/CompositorOGL.cpp
gfx/layers/opengl/OGLShaderProgram.cpp
gfx/layers/wr/WebRenderBridgeParent.cpp
gfx/src/FilterSupport.cpp
gfx/tests/gtest/TestBufferRotation.cpp
gfx/tests/gtest/TestColorNames.cpp
gfx/tests/gtest/TestCompositor.cpp
gfx/tests/gtest/TestGfxPrefs.cpp
gfx/tests/gtest/TestTextures.cpp
gfx/thebes/gfxPlatformFontList.cpp
gfx/vr/ipc/VRMessageUtils.h
intl/locale/nsLocaleService.cpp
intl/locale/tests/gtest/TestOSPreferences.cpp
ipc/glue/MessageChannel.cpp
ipc/glue/ProtocolUtils.cpp
js/src/gc/RootMarking.cpp
js/src/gc/Statistics.cpp
js/src/gc/Tracer.cpp
js/src/irregexp/NativeRegExpMacroAssembler.cpp
js/src/irregexp/RegExpEngine.cpp
js/src/jit/AliasAnalysis.cpp
js/src/jit/BacktrackingAllocator.cpp
js/src/jit/BaselineCompiler.cpp
js/src/jit/BytecodeAnalysis.cpp
js/src/jit/CacheIRCompiler.cpp
js/src/jit/CodeGenerator.cpp
js/src/jit/ExecutableAllocator.cpp
js/src/jit/FlowAliasAnalysis.cpp
js/src/jit/Ion.cpp
js/src/jit/IonAnalysis.cpp
js/src/jit/IonBuilder.cpp
js/src/jit/IonControlFlow.cpp
js/src/jit/JitcodeMap.cpp
js/src/jit/JitcodeMap.h
js/src/jit/LICM.cpp
js/src/jit/LIR.cpp
js/src/jit/LIR.h
js/src/jit/LoopUnroller.cpp
js/src/jit/Lowering.cpp
js/src/jit/MCallOptimize.cpp
js/src/jit/MIR.cpp
js/src/jit/MIRGraph.cpp
js/src/jit/MacroAssembler.cpp
js/src/jit/MoveResolver.cpp
js/src/jit/OptimizationTracking.cpp
js/src/jit/RangeAnalysis.cpp
js/src/jit/Recover.cpp
js/src/jit/RegisterAllocator.cpp
js/src/jit/RematerializedFrame.cpp
js/src/jit/Safepoints.cpp
js/src/jit/StupidAllocator.cpp
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/x64/MacroAssembler-x64.cpp
js/src/jit/x86-shared/Assembler-x86-shared.cpp
js/src/jsapi-tests/testGCAllocator.cpp
js/src/jsapi-tests/testJitRangeAnalysis.cpp
js/src/jscompartment.cpp
js/src/jsgc.cpp
js/src/jsscript.cpp
js/src/jsstr.cpp
js/src/vm/ArrayBufferObject.cpp
js/src/vm/Debugger.cpp
js/src/vm/HelperThreads.cpp
js/src/vm/JSONParser.cpp
js/src/vm/MemoryMetrics.cpp
js/src/vm/RegExpObject.cpp
js/src/vm/SavedStacks.cpp
js/src/vm/Stack.cpp
js/src/vm/String.cpp
js/src/vm/TypeInference.cpp
js/src/vm/UnboxedObject.cpp
js/src/vm/UnboxedObject.h
js/src/wasm/WasmBaselineCompile.cpp
js/src/wasm/WasmBinaryToText.cpp
js/src/wasm/WasmDebug.cpp
js/src/wasm/WasmInstance.cpp
js/src/wasm/WasmTextToBinary.cpp
js/xpconnect/src/XPCJSRuntime.cpp
js/xpconnect/src/XPCWrappedNativeInfo.cpp
layout/base/GeometryUtils.cpp
layout/base/nsLayoutUtils.cpp
layout/generic/TextOverflow.cpp
layout/generic/nsColumnSetFrame.cpp
layout/generic/nsFrameSelection.cpp
layout/mathml/nsMathMLChar.cpp
layout/painting/nsCSSRenderingGradients.cpp
layout/painting/nsDisplayList.h
layout/printing/nsPrintPreviewListener.cpp
layout/style/Declaration.cpp
layout/style/StyleAnimationValue.cpp
layout/style/nsRuleData.cpp
layout/style/nsStyleSet.cpp
layout/tables/nsTableFrame.cpp
media/gmp-clearkey/0.1/ClearKeyBase64.cpp
media/gmp-clearkey/0.1/ClearKeyDecryptionManager.cpp
media/gmp-clearkey/0.1/ClearKeySessionManager.cpp
media/mtransport/rlogconnector.cpp
media/mtransport/test/gtest_ringbuffer_dumper.h
media/webrtc/signaling/gtest/jsep_session_unittest.cpp
media/webrtc/signaling/gtest/sdp_unittests.cpp
media/webrtc/signaling/src/jsep/JsepTrack.cpp
media/webrtc/signaling/src/media-conduit/CodecConfig.h
media/webrtc/signaling/src/media-conduit/WebrtcGmpVideoCodec.cpp
media/webrtc/signaling/src/sdp/SdpAttribute.cpp
media/webrtc/signaling/src/sdp/SdpAttribute.h
media/webrtc/signaling/src/sdp/SdpHelper.cpp
media/webrtc/signaling/src/sdp/SipccSdp.cpp
media/webrtc/signaling/src/sdp/SipccSdpAttributeList.cpp
media/webrtc/signaling/src/sdp/SipccSdpMediaSection.cpp
mfbt/tests/TestFastBernoulliTrial.cpp
mfbt/tests/TestSplayTree.cpp
mfbt/tests/TestXorShift128PlusRNG.cpp
netwerk/base/Dashboard.cpp
netwerk/base/nsProtocolProxyService.cpp
netwerk/cache/nsCacheService.cpp
netwerk/cache/nsDiskCacheMap.cpp
netwerk/cache/nsMemoryCacheDevice.cpp
netwerk/cache2/CacheIOThread.cpp
netwerk/streamconv/converters/nsFTPDirListingConv.cpp
netwerk/test/gtest/TestStandardURL.cpp
netwerk/wifi/nsWifiScannerDBus.cpp
parser/html/nsHtml5AtomTable.h
security/manager/ssl/TransportSecurityInfo.cpp
security/manager/ssl/nsKeygenHandler.cpp
security/manager/ssl/nsNSSComponent.cpp
security/sandbox/linux/LinuxCapabilities.h
security/sandbox/linux/SandboxInfo.cpp
storage/TelemetryVFS.cpp
storage/mozStorageService.cpp
toolkit/components/downloads/ApplicationReputation.cpp
toolkit/components/mediasniffer/nsMediaSniffer.cpp
toolkit/components/perfmonitoring/nsPerformanceStats.cpp
toolkit/components/url-classifier/nsUrlClassifierDBService.cpp
toolkit/components/url-classifier/nsUrlClassifierUtils.cpp
toolkit/components/url-classifier/tests/gtest/TestChunkSet.cpp
toolkit/components/url-classifier/tests/gtest/TestFindFullHash.cpp
toolkit/crashreporter/breakpad-client/linux/handler/exception_handler.cc
toolkit/crashreporter/breakpad-client/linux/microdump_writer/microdump_writer.cc
toolkit/crashreporter/breakpad-client/linux/minidump_writer/cpu_set.h
toolkit/crashreporter/breakpad-client/linux/minidump_writer/minidump_writer.cc
toolkit/crashreporter/client/crashreporter.cpp
toolkit/crashreporter/client/crashreporter_linux.cpp
tools/profiler/core/ProfileJSONWriter.cpp
tools/profiler/core/platform.cpp
tools/profiler/gecko/ProfilerParent.cpp
tools/profiler/gecko/nsProfiler.cpp
tools/profiler/lul/LulCommon.cpp
tools/profiler/lul/LulDwarf.cpp
tools/profiler/lul/LulMain.cpp
uriloader/prefetch/nsPrefetchService.cpp
widget/GfxInfoBase.cpp
widget/InputData.cpp
widget/WidgetEventImpl.cpp
widget/gtk/WidgetStyleCache.cpp
widget/gtk/nsPrintDialogGTK.cpp
widget/nsXPLookAndFeel.cpp
xpcom/base/nsSystemInfo.cpp
xpcom/build/IOInterposer.cpp
xpcom/build/LateWriteChecks.cpp
xpcom/ds/nsAtomTable.cpp
xpcom/io/nsBinaryStream.cpp
xpcom/reflect/xptinfo/ShimInterfaceInfo.cpp
xpcom/tests/gtest/TestAtoms.cpp
xpcom/tests/gtest/TestNsDeque.cpp
xpcom/tests/gtest/TestRWLock.cpp
xpcom/tests/gtest/TestTArray.cpp
xpcom/tests/gtest/TestTimers.cpp
xpcom/tests/gtest/TestUTF.cpp
xpcom/threads/BackgroundHangMonitor.cpp
xpcom/threads/HangAnnotations.cpp
--- a/accessible/base/ARIAMap.cpp
+++ b/accessible/base/ARIAMap.cpp
@@ -1358,19 +1358,19 @@ aria::UniversalStatesFor(mozilla::dom::E
     index++;
 
   return state;
 }
 
 uint8_t
 aria::AttrCharacteristicsFor(nsIAtom* aAtom)
 {
-  for (uint32_t i = 0; i < ArrayLength(gWAIUnivAttrMap); i++)
-    if (*gWAIUnivAttrMap[i].attributeName == aAtom)
-      return gWAIUnivAttrMap[i].characteristics;
+  for (auto attr : gWAIUnivAttrMap)
+    if (*attr.attributeName == aAtom)
+      return attr.characteristics;
 
   return 0;
 }
 
 bool
 aria::HasDefinedARIAHidden(nsIContent* aContent)
 {
   return aContent &&
--- a/accessible/base/Logging.cpp
+++ b/accessible/base/Logging.cpp
@@ -63,25 +63,25 @@ EnableLogging(const char* aModulesStr)
 {
   sModules = 0;
   if (!aModulesStr)
     return;
 
   const char* token = aModulesStr;
   while (*token != '\0') {
     size_t tokenLen = strcspn(token, ",");
-    for (unsigned int idx = 0; idx < ArrayLength(sModuleMap); idx++) {
-      if (strncmp(token, sModuleMap[idx].mStr, tokenLen) == 0) {
+    for (auto & idx : sModuleMap) {
+      if (strncmp(token, idx.mStr, tokenLen) == 0) {
 #if !defined(MOZ_PROFILING) && (!defined(DEBUG) || defined(MOZ_OPTIMIZE))
         // Stack tracing on profiling enabled or debug not optimized builds.
         if (strncmp(token, "stack", tokenLen) == 0)
           break;
 #endif
-        sModules |= sModuleMap[idx].mModule;
-        printf("\n\nmodule enabled: %s\n", sModuleMap[idx].mStr);
+        sModules |= idx.mModule;
+        printf("\n\nmodule enabled: %s\n", idx.mStr);
         break;
       }
     }
     token += tokenLen;
 
     if (*token == ',')
       token++; // skip ',' char
   }
@@ -1012,19 +1012,19 @@ bool
 logging::IsEnabledAll(uint32_t aModules)
 {
   return (sModules & aModules) == aModules;
 }
 
 bool
 logging::IsEnabled(const nsAString& aModuleStr)
 {
-  for (unsigned int idx = 0; idx < ArrayLength(sModuleMap); idx++) {
-    if (aModuleStr.EqualsASCII(sModuleMap[idx].mStr))
-      return sModules & sModuleMap[idx].mModule;
+  for (auto & idx : sModuleMap) {
+    if (aModuleStr.EqualsASCII(idx.mStr))
+      return sModules & idx.mModule;
   }
 
   return false;
 }
 
 void
 logging::Enable(const nsCString& aModules)
 {
--- a/accessible/base/TextAttrs.cpp
+++ b/accessible/base/TextAttrs.cpp
@@ -135,18 +135,18 @@ TextAttrsMgr::GetAttributes(nsIPersisten
     &fontWeightTextAttr,
     &autoGenTextAttr,
     &textDecorTextAttr,
     &textPosTextAttr
   };
 
   // Expose text attributes if applicable.
   if (aAttributes) {
-    for (uint32_t idx = 0; idx < ArrayLength(attrArray); idx++)
-      attrArray[idx]->Expose(aAttributes, mIncludeDefAttrs);
+    for (auto & idx : attrArray)
+      idx->Expose(aAttributes, mIncludeDefAttrs);
   }
 
   // Expose text attributes range where they are applied if applicable.
   if (mOffsetAcc)
     GetRange(attrArray, ArrayLength(attrArray), aStartOffset, aEndOffset);
 }
 
 void
--- a/accessible/base/nsAccessibilityService.cpp
+++ b/accessible/base/nsAccessibilityService.cpp
@@ -1246,18 +1246,18 @@ nsAccessibilityService::Init()
   // Subscribe to EventListenerService.
   nsCOMPtr<nsIEventListenerService> eventListenerService =
     do_GetService("@mozilla.org/eventlistenerservice;1");
   if (!eventListenerService)
     return false;
 
   eventListenerService->AddListenerChangeListener(this);
 
-  for (uint32_t i = 0; i < ArrayLength(sMarkupMapList); i++)
-    mMarkupMaps.Put(*sMarkupMapList[i].tag, &sMarkupMapList[i]);
+  for (const auto & markup : sMarkupMapList)
+    mMarkupMaps.Put(*markup.tag, &markup);
 
 #ifdef A11Y_LOG
   logging::CheckEnv();
 #endif
 
   gAccessibilityService = this;
   NS_ADDREF(gAccessibilityService); // will release in Shutdown()
 
--- a/accessible/generic/DocAccessible.cpp
+++ b/accessible/generic/DocAccessible.cpp
@@ -1573,18 +1573,18 @@ DocAccessible::ProcessLoad()
 
 void
 DocAccessible::AddDependentIDsFor(Accessible* aRelProvider, nsIAtom* aRelAttr)
 {
   dom::Element* relProviderEl = aRelProvider->Elm();
   if (!relProviderEl)
     return;
 
-  for (uint32_t idx = 0; idx < kRelationAttrsLen; idx++) {
-    nsIAtom* relAttr = *kRelationAttrs[idx];
+  for (auto & kRelationAttr : kRelationAttrs) {
+    nsIAtom* relAttr = *kRelationAttr;
     if (aRelAttr && aRelAttr != relAttr)
       continue;
 
     if (relAttr == nsGkAtoms::_for) {
       if (!relProviderEl->IsAnyOfHTMLElements(nsGkAtoms::label,
                                                nsGkAtoms::output))
         continue;
 
@@ -1641,19 +1641,19 @@ DocAccessible::AddDependentIDsFor(Access
 void
 DocAccessible::RemoveDependentIDsFor(Accessible* aRelProvider,
                                      nsIAtom* aRelAttr)
 {
   dom::Element* relProviderElm = aRelProvider->Elm();
   if (!relProviderElm)
     return;
 
-  for (uint32_t idx = 0; idx < kRelationAttrsLen; idx++) {
-    nsIAtom* relAttr = *kRelationAttrs[idx];
-    if (aRelAttr && aRelAttr != *kRelationAttrs[idx])
+  for (auto & kRelationAttr : kRelationAttrs) {
+    nsIAtom* relAttr = *kRelationAttr;
+    if (aRelAttr && aRelAttr != *kRelationAttr)
       continue;
 
     IDRefsIterator iter(this, relProviderElm, relAttr);
     while (true) {
       const nsDependentSubstring id = iter.NextID();
       if (id.IsEmpty())
         break;
 
--- a/accessible/xpcom/xpcAccessible.cpp
+++ b/accessible/xpcom/xpcAccessible.cpp
@@ -542,19 +542,19 @@ xpcAccessible::GetRelations(nsIArray** a
     nsIAccessibleRelation::RELATION_POPUP_FOR,
     nsIAccessibleRelation::RELATION_PARENT_WINDOW_OF,
     nsIAccessibleRelation::RELATION_DEFAULT_BUTTON,
     nsIAccessibleRelation::RELATION_CONTAINING_DOCUMENT,
     nsIAccessibleRelation::RELATION_CONTAINING_TAB_PANE,
     nsIAccessibleRelation::RELATION_CONTAINING_APPLICATION
   };
 
-  for (uint32_t idx = 0; idx < ArrayLength(relationTypes); idx++) {
+  for (unsigned int relationType : relationTypes) {
     nsCOMPtr<nsIAccessibleRelation> relation;
-    nsresult rv = GetRelationByType(relationTypes[idx], getter_AddRefs(relation));
+    nsresult rv = GetRelationByType(relationType, getter_AddRefs(relation));
 
     if (NS_SUCCEEDED(rv) && relation) {
       uint32_t targets = 0;
       relation->GetTargetsCount(&targets);
       if (targets)
         relations->AppendElement(relation, false);
     }
   }
--- a/browser/components/shell/nsGNOMEShellService.cpp
+++ b/browser/components/shell/nsGNOMEShellService.cpp
@@ -209,32 +209,32 @@ nsGNOMEShellService::IsDefaultBrowser(bo
 
   nsCOMPtr<nsIGConfService> gconf = do_GetService(NS_GCONFSERVICE_CONTRACTID);
   nsCOMPtr<nsIGIOService> giovfs = do_GetService(NS_GIOSERVICE_CONTRACTID);
 
   bool enabled;
   nsAutoCString handler;
   nsCOMPtr<nsIGIOMimeApp> gioApp;
 
-  for (unsigned int i = 0; i < ArrayLength(appProtocols); ++i) {
-    if (!appProtocols[i].essential)
+  for (auto appProtocol : appProtocols) {
+    if (!appProtocol.essential)
       continue;
 
     if (gconf) {
       handler.Truncate();
-      gconf->GetAppForProtocol(nsDependentCString(appProtocols[i].name),
+      gconf->GetAppForProtocol(nsDependentCString(appProtocol.name),
                                &enabled, handler);
 
       if (!CheckHandlerMatchesAppName(handler) || !enabled)
         return NS_OK; // the handler is disabled or set to another app
     }
 
     if (giovfs) {
       handler.Truncate();
-      giovfs->GetAppForURIScheme(nsDependentCString(appProtocols[i].name),
+      giovfs->GetAppForURIScheme(nsDependentCString(appProtocol.name),
                                  getter_AddRefs(gioApp));
       if (!gioApp)
         return NS_OK;
 
       gioApp->GetCommand(handler);
 
       if (!CheckHandlerMatchesAppName(handler))
         return NS_OK; // the handler is set to another app
@@ -265,19 +265,19 @@ nsGNOMEShellService::SetDefaultBrowser(b
       appKeyValue = tmp;
       g_free(tmp);
     } else {
       appKeyValue = mAppPath;
     }
 
     appKeyValue.AppendLiteral(" %s");
 
-    for (unsigned int i = 0; i < ArrayLength(appProtocols); ++i) {
-      if (appProtocols[i].essential || aClaimAllTypes) {
-        gconf->SetAppForProtocol(nsDependentCString(appProtocols[i].name),
+    for (auto appProtocol : appProtocols) {
+      if (appProtocol.essential || aClaimAllTypes) {
+        gconf->SetAppForProtocol(nsDependentCString(appProtocol.name),
                                  appKeyValue);
       }
     }
   }
 
   if (giovfs) {
     nsresult rv;
     nsCOMPtr<nsIStringBundleService> bundleService =
@@ -296,28 +296,28 @@ nsGNOMEShellService::SetDefaultBrowser(b
     NS_ConvertUTF16toUTF8 id(brandShortName);
     nsCOMPtr<nsIGIOMimeApp> appInfo;
     rv = giovfs->CreateAppFromCommand(mAppPath,
                                       id,
                                       getter_AddRefs(appInfo));
     NS_ENSURE_SUCCESS(rv, rv);
 
     // set handler for the protocols
-    for (unsigned int i = 0; i < ArrayLength(appProtocols); ++i) {
-      if (appProtocols[i].essential || aClaimAllTypes) {
-        appInfo->SetAsDefaultForURIScheme(nsDependentCString(appProtocols[i].name));
+    for (auto appProtocol : appProtocols) {
+      if (appProtocol.essential || aClaimAllTypes) {
+        appInfo->SetAsDefaultForURIScheme(nsDependentCString(appProtocol.name));
       }
     }
 
     // set handler for .html and xhtml files and MIME types:
     if (aClaimAllTypes) {
       // Add mime types for html, xhtml extension and set app to just created appinfo.
-      for (unsigned int i = 0; i < ArrayLength(appTypes); ++i) {
-        appInfo->SetAsDefaultForMimeType(nsDependentCString(appTypes[i].mimeType));
-        appInfo->SetAsDefaultForFileExtensions(nsDependentCString(appTypes[i].extensions));
+      for (auto appType : appTypes) {
+        appInfo->SetAsDefaultForMimeType(nsDependentCString(appType.mimeType));
+        appInfo->SetAsDefaultForFileExtensions(nsDependentCString(appType.extensions));
       }
     }
   }
 
   nsCOMPtr<nsIPrefBranch> prefs(do_GetService(NS_PREFSERVICE_CONTRACTID));
   if (prefs) {
     (void) prefs->SetBoolPref(PREF_CHECKDEFAULTBROWSER, true);
     // Reset the number of times the dialog should be shown
--- a/caps/nsScriptSecurityManager.cpp
+++ b/caps/nsScriptSecurityManager.cpp
@@ -1074,18 +1074,18 @@ nsScriptSecurityManager::CheckLoadURIStr
         nsIURIFixup::FIXUP_FLAG_NONE,
         nsIURIFixup::FIXUP_FLAG_FIX_SCHEME_TYPOS,
         nsIURIFixup::FIXUP_FLAG_ALLOW_KEYWORD_LOOKUP,
         nsIURIFixup::FIXUP_FLAGS_MAKE_ALTERNATE_URI,
         nsIURIFixup::FIXUP_FLAG_ALLOW_KEYWORD_LOOKUP |
         nsIURIFixup::FIXUP_FLAGS_MAKE_ALTERNATE_URI
     };
 
-    for (uint32_t i = 0; i < ArrayLength(flags); ++i) {
-        rv = fixup->CreateFixupURI(aTargetURIStr, flags[i], nullptr,
+    for (unsigned int flag : flags) {
+        rv = fixup->CreateFixupURI(aTargetURIStr, flag, nullptr,
                                    getter_AddRefs(target));
         NS_ENSURE_SUCCESS(rv, rv);
 
         rv = CheckLoadURIWithPrincipal(aPrincipal, target, aFlags);
         if (rv == NS_ERROR_DOM_BAD_URI) {
             // Don't warn because NS_ERROR_DOM_BAD_URI is one of the expected
             // return values.
             return rv;
--- a/docshell/base/nsAboutRedirector.cpp
+++ b/docshell/base/nsAboutRedirector.cpp
@@ -157,21 +157,21 @@ nsAboutRedirector::NewChannel(nsIURI* aU
 
   nsAutoCString path;
   nsresult rv = NS_GetAboutModuleName(aURI, path);
   NS_ENSURE_SUCCESS(rv, rv);
 
   nsCOMPtr<nsIIOService> ioService = do_GetIOService(&rv);
   NS_ENSURE_SUCCESS(rv, rv);
 
-  for (int i = 0; i < kRedirTotal; i++) {
-    if (!strcmp(path.get(), kRedirMap[i].id)) {
+  for (const auto & redir : kRedirMap) {
+    if (!strcmp(path.get(), redir.id)) {
       nsCOMPtr<nsIChannel> tempChannel;
       nsCOMPtr<nsIURI> tempURI;
-      rv = NS_NewURI(getter_AddRefs(tempURI), kRedirMap[i].url);
+      rv = NS_NewURI(getter_AddRefs(tempURI), redir.url);
       NS_ENSURE_SUCCESS(rv, rv);
 
       rv = NS_NewChannelInternal(getter_AddRefs(tempChannel),
                                  tempURI,
                                  aLoadInfo);
       NS_ENSURE_SUCCESS(rv, rv);
 
       // If tempURI links to an external URI (i.e. something other than
@@ -204,19 +204,19 @@ NS_IMETHODIMP
 nsAboutRedirector::GetURIFlags(nsIURI* aURI, uint32_t* aResult)
 {
   NS_ENSURE_ARG_POINTER(aURI);
 
   nsAutoCString name;
   nsresult rv = NS_GetAboutModuleName(aURI, name);
   NS_ENSURE_SUCCESS(rv, rv);
 
-  for (int i = 0; i < kRedirTotal; i++) {
-    if (name.EqualsASCII(kRedirMap[i].id)) {
-      *aResult = kRedirMap[i].flags;
+  for (const auto & redir : kRedirMap) {
+    if (name.EqualsASCII(redir.id)) {
+      *aResult = redir.flags;
       return NS_OK;
     }
   }
 
   NS_ERROR("nsAboutRedirector called for unknown case");
   return NS_ERROR_ILLEGAL_VALUE;
 }
 
--- a/dom/base/CustomElementRegistry.cpp
+++ b/dom/base/CustomElementRegistry.cpp
@@ -477,18 +477,17 @@ static const char* kLifeCycleCallbackNam
   "detachedCallback"
 };
 
 static void
 CheckLifeCycleCallbacks(JSContext* aCx,
                         JS::Handle<JSObject*> aConstructor,
                         ErrorResult& aRv)
 {
-  for (size_t i = 0; i < ArrayLength(kLifeCycleCallbackNames); ++i) {
-    const char* callbackName = kLifeCycleCallbackNames[i];
+  for (auto callbackName : kLifeCycleCallbackNames) {
     JS::Rooted<JS::Value> callbackValue(aCx);
     if (!JS_GetProperty(aCx, aConstructor, callbackName, &callbackValue)) {
       aRv.StealExceptionFromJSContext(aCx);
       return;
     }
     if (!callbackValue.isUndefined()) {
       if (!callbackValue.isObject()) {
         aRv.ThrowTypeError<MSG_NOT_OBJECT>(NS_ConvertASCIItoUTF16(callbackName));
--- a/dom/base/DOMException.cpp
+++ b/dom/base/DOMException.cpp
@@ -97,23 +97,23 @@ static void
 NSResultToNameAndMessage(nsresult aNSResult,
                          nsCString& aName,
                          nsCString& aMessage,
                          uint16_t* aCode)
 {
   aName.Truncate();
   aMessage.Truncate();
   *aCode = 0;
-  for (uint32_t idx = 0; idx < ArrayLength(sDOMErrorMsgMap); idx++) {
-    if (aNSResult == sDOMErrorMsgMap[idx].mNSResult) {
-      aName.Rebind(sDOMErrorMsgMap[idx].mName,
-                   strlen(sDOMErrorMsgMap[idx].mName));
-      aMessage.Rebind(sDOMErrorMsgMap[idx].mMessage,
-                   strlen(sDOMErrorMsgMap[idx].mMessage));
-      *aCode = sDOMErrorMsgMap[idx].mCode;
+  for (const auto & idx : sDOMErrorMsgMap) {
+    if (aNSResult == idx.mNSResult) {
+      aName.Rebind(idx.mName,
+                   strlen(idx.mName));
+      aMessage.Rebind(idx.mMessage,
+                   strlen(idx.mMessage));
+      *aCode = idx.mCode;
       return;
     }
   }
 
   NS_WARNING("Huh, someone is throwing non-DOM errors using the DOM module!");
 }
 
 nsresult
@@ -585,20 +585,20 @@ DOMException::Constructor(GlobalObject& 
                           ErrorResult& aError)
 {
   nsresult exceptionResult = NS_OK;
   uint16_t exceptionCode = 0;
   nsCString name(NS_LITERAL_CSTRING("Error"));
 
   if (aName.WasPassed()) {
     CopyUTF16toUTF8(aName.Value(), name);
-    for (uint32_t idx = 0; idx < ArrayLength(sDOMErrorMsgMap); idx++) {
-      if (name.EqualsASCII(sDOMErrorMsgMap[idx].mName)) {
-        exceptionResult = sDOMErrorMsgMap[idx].mNSResult;
-        exceptionCode = sDOMErrorMsgMap[idx].mCode;
+    for (const auto & idx : sDOMErrorMsgMap) {
+      if (name.EqualsASCII(idx.mName)) {
+        exceptionResult = idx.mNSResult;
+        exceptionCode = idx.mCode;
         break;
       }
     }
   }
 
   RefPtr<DOMException> retval =
     new DOMException(exceptionResult,
                      NS_ConvertUTF16toUTF8(aMessage),
--- a/dom/base/FragmentOrElement.cpp
+++ b/dom/base/FragmentOrElement.cpp
@@ -2258,24 +2258,24 @@ IsVoidTag(nsIAtom* aTag)
     nsGkAtoms::param, nsGkAtoms::source, nsGkAtoms::track,
     nsGkAtoms::wbr
   };
 
   static mozilla::BloomFilter<12, nsIAtom> sFilter;
   static bool sInitialized = false;
   if (!sInitialized) {
     sInitialized = true;
-    for (uint32_t i = 0; i < ArrayLength(voidElements); ++i) {
-      sFilter.add(voidElements[i]);
+    for (auto & voidElement : voidElements) {
+      sFilter.add(voidElement);
     }
   }
 
   if (sFilter.mightContain(aTag)) {
-    for (uint32_t i = 0; i < ArrayLength(voidElements); ++i) {
-      if (aTag == voidElements[i]) {
+    for (auto & voidElement : voidElements) {
+      if (aTag == voidElement) {
         return true;
       }
     }
   }
   return false;
 }
 
 /* static */
--- a/dom/base/nsDOMClassInfo.cpp
+++ b/dom/base/nsDOMClassInfo.cpp
@@ -1778,21 +1778,21 @@ LookupComponentsShim(JSContext *cx, JS::
   bool ok =
     JS_DefineProperty(cx, components, "interfaces", interfaces,
                       JSPROP_ENUMERATE | JSPROP_PERMANENT | JSPROP_READONLY,
                       JS_STUBGETTER, JS_STUBSETTER);
   NS_ENSURE_TRUE(ok, NS_ERROR_OUT_OF_MEMORY);
 
   // Define a bunch of shims from the Ci.nsIDOMFoo to window.Foo for DOM
   // interfaces with constants.
-  for (uint32_t i = 0; i < ArrayLength(kInterfaceShimMap); ++i) {
+  for (auto shim : kInterfaceShimMap) {
 
     // Grab the names from the table.
-    const char *geckoName = kInterfaceShimMap[i].geckoName;
-    const char *domName = kInterfaceShimMap[i].domName;
+    const char *geckoName = shim.geckoName;
+    const char *domName = shim.domName;
 
     // Look up the appopriate interface object on the global.
     JS::Rooted<JS::Value> v(cx, JS::UndefinedValue());
     ok = JS_GetProperty(cx, global, domName, &v);
     NS_ENSURE_TRUE(ok, NS_ERROR_OUT_OF_MEMORY);
     if (!v.isObject()) {
       NS_WARNING("Unable to find interface object on global");
       continue;
--- a/dom/base/nsGlobalWindowCommands.cpp
+++ b/dom/base/nsGlobalWindowCommands.cpp
@@ -320,25 +320,25 @@ nsSelectMoveScrollCommand::DoCommand(con
 {
   nsCOMPtr<nsPIDOMWindowOuter> piWindow(do_QueryInterface(aCommandContext));
   nsCOMPtr<nsISelectionController> selCont;
   GetSelectionControllerFromWindow(piWindow, getter_AddRefs(selCont));
   NS_ENSURE_TRUE(selCont, NS_ERROR_NOT_INITIALIZED);
 
   bool caretOn = IsCaretOnInWindow(piWindow, selCont);
 
-  for (size_t i = 0; i < ArrayLength(browseCommands); i++) {
-    bool forward = !strcmp(aCommandName, browseCommands[i].forward);
-    if (forward || !strcmp(aCommandName, browseCommands[i].reverse)) {
-      if (caretOn && browseCommands[i].move &&
-          NS_SUCCEEDED((selCont->*(browseCommands[i].move))(forward, false))) {
+  for (const auto & browseCommand : browseCommands) {
+    bool forward = !strcmp(aCommandName, browseCommand.forward);
+    if (forward || !strcmp(aCommandName, browseCommand.reverse)) {
+      if (caretOn && browseCommand.move &&
+          NS_SUCCEEDED((selCont->*(browseCommand.move))(forward, false))) {
         AdjustFocusAfterCaretMove(piWindow);
         return NS_OK;
       }
-      return (selCont->*(browseCommands[i].scroll))(forward);
+      return (selCont->*(browseCommand.scroll))(forward);
     }
   }
 
   return NS_ERROR_NOT_IMPLEMENTED;
 }
 
 // XXX It's not clear to me yet how we should handle the "scroll" option
 // for these commands; for now, I'm mapping them back to ScrollCharacter,
@@ -382,18 +382,17 @@ nsPhysicalSelectMoveScrollCommand::DoCom
 {
   nsCOMPtr<nsPIDOMWindowOuter> piWindow(do_QueryInterface(aCommandContext));
   nsCOMPtr<nsISelectionController> selCont;
   GetSelectionControllerFromWindow(piWindow, getter_AddRefs(selCont));
   NS_ENSURE_TRUE(selCont, NS_ERROR_NOT_INITIALIZED);
 
   bool caretOn = IsCaretOnInWindow(piWindow, selCont);
 
-  for (size_t i = 0; i < ArrayLength(physicalBrowseCommands); i++) {
-    const PhysicalBrowseCommand& cmd = physicalBrowseCommands[i];
+  for (const auto & cmd : physicalBrowseCommands) {
     if (!strcmp(aCommandName, cmd.command)) {
       int16_t dir = cmd.direction;
       if (caretOn &&
           NS_SUCCEEDED(selCont->PhysicalMove(dir, cmd.amount, false))) {
         AdjustFocusAfterCaretMove(piWindow);
         return NS_OK;
       }
 
@@ -434,20 +433,20 @@ nsSelectCommand::DoCommand(const char *a
 {
   nsCOMPtr<nsPIDOMWindowOuter> piWindow(do_QueryInterface(aCommandContext));
   nsCOMPtr<nsISelectionController> selCont;
   GetSelectionControllerFromWindow(piWindow, getter_AddRefs(selCont));
   NS_ENSURE_TRUE(selCont, NS_ERROR_NOT_INITIALIZED);
 
   // These commands are so the browser can use caret navigation key bindings -
   // Helps with accessibility - aaronl@netscape.com
-  for (size_t i = 0; i < ArrayLength(selectCommands); i++) {
-    bool forward = !strcmp(aCommandName, selectCommands[i].forward);
-    if (forward || !strcmp(aCommandName, selectCommands[i].reverse)) {
-      return (selCont->*(selectCommands[i].select))(forward, true);
+  for (const auto & selectCommand : selectCommands) {
+    bool forward = !strcmp(aCommandName, selectCommand.forward);
+    if (forward || !strcmp(aCommandName, selectCommand.reverse)) {
+      return (selCont->*(selectCommand.select))(forward, true);
     }
   }
   return NS_ERROR_NOT_IMPLEMENTED;
 }
 
 #if 0
 #pragma mark -
 #endif
@@ -470,20 +469,20 @@ nsresult
 nsPhysicalSelectCommand::DoCommand(const char *aCommandName,
                                    nsISupports *aCommandContext)
 {
   nsCOMPtr<nsPIDOMWindowOuter> piWindow(do_QueryInterface(aCommandContext));
   nsCOMPtr<nsISelectionController> selCont;
   GetSelectionControllerFromWindow(piWindow, getter_AddRefs(selCont));
   NS_ENSURE_TRUE(selCont, NS_ERROR_NOT_INITIALIZED);
 
-  for (size_t i = 0; i < ArrayLength(physicalSelectCommands); i++) {
-    if (!strcmp(aCommandName, physicalSelectCommands[i].command)) {
-      return selCont->PhysicalMove(physicalSelectCommands[i].direction,
-                                   physicalSelectCommands[i].amount,
+  for (auto physicalSelectCommand : physicalSelectCommands) {
+    if (!strcmp(aCommandName, physicalSelectCommand.command)) {
+      return selCont->PhysicalMove(physicalSelectCommand.direction,
+                                   physicalSelectCommand.amount,
                                    true);
     }
   }
 
   return NS_ERROR_NOT_IMPLEMENTED;
 }
 
 #if 0
@@ -1288,28 +1287,26 @@ nsWindowCommandRegistration::RegisterWin
 /* static */ bool
 nsGlobalWindowCommands::FindScrollCommand(const char* aCommandName,
                                           KeyboardScrollAction* aOutAction)
 {
   // Search for a keyboard scroll action to do for this command in browseCommands
   // and physicalBrowseCommands. Each command exists in only one of them, so the
   // order we examine browseCommands and physicalBrowseCommands doesn't matter.
 
-  for (size_t i = 0; i < ArrayLength(browseCommands); i++) {
-    const BrowseCommand& cmd = browseCommands[i];
+  for (const auto & cmd : browseCommands) {
     bool forward = !strcmp(aCommandName, cmd.forward);
     bool reverse = !strcmp(aCommandName, cmd.reverse);
     if (forward || reverse) {
       *aOutAction = KeyboardScrollAction(cmd.scrollAction, forward);
       return true;
     }
   }
 
-  for (size_t i = 0; i < ArrayLength(physicalBrowseCommands); i++) {
-    const PhysicalBrowseCommand& cmd = physicalBrowseCommands[i];
+  for (const auto & cmd : physicalBrowseCommands) {
     if (!strcmp(aCommandName, cmd.command)) {
       int16_t dir = cmd.direction;
       bool forward = (dir == nsISelectionController::MOVE_RIGHT ||
                       dir == nsISelectionController::MOVE_DOWN);
 
       *aOutAction = KeyboardScrollAction(cmd.scrollAction, forward);
       return true;
     }
--- a/dom/canvas/WebGL2Context.cpp
+++ b/dom/canvas/WebGL2Context.cpp
@@ -135,19 +135,19 @@ WebGLContext::InitWebGL2(FailureReason* 
 
     fnGatherMissing2(gl::GLFeature::prim_restart_fixed,
                      gl::GLFeature::prim_restart);
 
     ////
 
     if (missingList.size()) {
         nsAutoCString exts;
-        for (auto itr = missingList.begin(); itr != missingList.end(); ++itr) {
+        for (auto & itr : missingList) {
             exts.AppendLiteral("\n  ");
-            exts.Append(gl::GLContext::GetFeatureName(*itr));
+            exts.Append(gl::GLContext::GetFeatureName(itr));
         }
 
         const nsPrintfCString reason("WebGL 2 requires support for the following"
                                      " features: %s",
                                      exts.BeginReading());
         *out_failReason = FailureReason("FEATURE_FAILURE_WEBGL2_OCCL", reason);
         return false;
     }
--- a/dom/canvas/WebGLShaderValidator.cpp
+++ b/dom/canvas/WebGLShaderValidator.cpp
@@ -265,25 +265,25 @@ ShaderValidator::CanLinkTo(const ShaderV
         const std::vector<sh::Uniform>* vertPtr = ShGetUniforms(prev->mHandle);
         const std::vector<sh::Uniform>* fragPtr = ShGetUniforms(mHandle);
         if (!vertPtr || !fragPtr) {
             nsPrintfCString error("Could not create uniform list.");
             *out_log = error;
             return false;
         }
 
-        for (auto itrFrag = fragPtr->begin(); itrFrag != fragPtr->end(); ++itrFrag) {
-            for (auto itrVert = vertPtr->begin(); itrVert != vertPtr->end(); ++itrVert) {
-                if (itrVert->name != itrFrag->name)
+        for (const auto & itrFrag : *fragPtr) {
+            for (const auto & itrVert : *vertPtr) {
+                if (itrVert.name != itrFrag.name)
                     continue;
 
-                if (!itrVert->isSameUniformAtLinkTime(*itrFrag)) {
+                if (!itrVert.isSameUniformAtLinkTime(itrFrag)) {
                     nsPrintfCString error("Uniform `%s` is not linkable between"
                                           " attached shaders.",
-                                          itrFrag->name.c_str());
+                                          itrFrag.name.c_str());
                     *out_log = error;
                     return false;
                 }
 
                 break;
             }
         }
     }
@@ -426,22 +426,22 @@ ShaderValidator::CanLinkTo(const ShaderV
 
 size_t
 ShaderValidator::CalcNumSamplerUniforms() const
 {
     size_t accum = 0;
 
     const std::vector<sh::Uniform>& uniforms = *ShGetUniforms(mHandle);
 
-    for (auto itr = uniforms.begin(); itr != uniforms.end(); ++itr) {
-        GLenum type = itr->type;
+    for (const auto & uniform : uniforms) {
+        GLenum type = uniform.type;
         if (type == LOCAL_GL_SAMPLER_2D ||
             type == LOCAL_GL_SAMPLER_CUBE)
         {
-            accum += itr->arraySize;
+            accum += uniform.arraySize;
         }
     }
 
     return accum;
 }
 
 size_t
 ShaderValidator::NumAttributes() const
@@ -451,83 +451,83 @@ ShaderValidator::NumAttributes() const
 
 // Attribs cannot be structs or arrays, and neither can vertex inputs in ES3.
 // Therefore, attrib names are always simple.
 bool
 ShaderValidator::FindAttribUserNameByMappedName(const std::string& mappedName,
                                                 const std::string** const out_userName) const
 {
     const std::vector<sh::Attribute>& attribs = *ShGetAttributes(mHandle);
-    for (auto itr = attribs.begin(); itr != attribs.end(); ++itr) {
-        if (itr->mappedName == mappedName) {
-            *out_userName = &(itr->name);
+    for (const auto & attrib : attribs) {
+        if (attrib.mappedName == mappedName) {
+            *out_userName = &(attrib.name);
             return true;
         }
     }
 
     return false;
 }
 
 bool
 ShaderValidator::FindAttribMappedNameByUserName(const std::string& userName,
                                                 const std::string** const out_mappedName) const
 {
     const std::vector<sh::Attribute>& attribs = *ShGetAttributes(mHandle);
-    for (auto itr = attribs.begin(); itr != attribs.end(); ++itr) {
-        if (itr->name == userName) {
-            *out_mappedName = &(itr->mappedName);
+    for (const auto & attrib : attribs) {
+        if (attrib.name == userName) {
+            *out_mappedName = &(attrib.mappedName);
             return true;
         }
     }
 
     return false;
 }
 
 bool
 ShaderValidator::FindVaryingByMappedName(const std::string& mappedName,
                                          std::string* const out_userName,
                                          bool* const out_isArray) const
 {
     const std::vector<sh::Varying>& varyings = *ShGetVaryings(mHandle);
-    for (auto itr = varyings.begin(); itr != varyings.end(); ++itr) {
+    for (const auto & varying : varyings) {
         const sh::ShaderVariable* found;
-        if (!itr->findInfoByMappedName(mappedName, &found, out_userName))
+        if (!varying.findInfoByMappedName(mappedName, &found, out_userName))
             continue;
 
         *out_isArray = found->isArray();
         return true;
     }
 
     return false;
 }
 
 bool
 ShaderValidator::FindVaryingMappedNameByUserName(const std::string& userName,
                                                  const std::string** const out_mappedName) const
 {
     const std::vector<sh::Varying>& attribs = *ShGetVaryings(mHandle);
-    for (auto itr = attribs.begin(); itr != attribs.end(); ++itr) {
-        if (itr->name == userName) {
-            *out_mappedName = &(itr->mappedName);
+    for (const auto & attrib : attribs) {
+        if (attrib.name == userName) {
+            *out_mappedName = &(attrib.mappedName);
             return true;
         }
     }
 
     return false;
 }
 // This must handle names like "foo.bar[0]".
 bool
 ShaderValidator::FindUniformByMappedName(const std::string& mappedName,
                                          std::string* const out_userName,
                                          bool* const out_isArray) const
 {
     const std::vector<sh::Uniform>& uniforms = *ShGetUniforms(mHandle);
-    for (auto itr = uniforms.begin(); itr != uniforms.end(); ++itr) {
+    for (const auto & uniform : uniforms) {
         const sh::ShaderVariable* found;
-        if (!itr->findInfoByMappedName(mappedName, &found, out_userName))
+        if (!uniform.findInfoByMappedName(mappedName, &found, out_userName))
             continue;
 
         *out_isArray = found->isArray();
         return true;
     }
 
     const size_t dotPos = mappedName.find(".");
 
--- a/dom/events/DataTransfer.cpp
+++ b/dom/events/DataTransfer.cpp
@@ -993,18 +993,18 @@ DataTransfer::GetTransferable(uint32_t a
         continue;
       }
 
       nsAutoString type;
       formatitem->GetInternalType(type);
 
       // If the data is of one of the well-known formats, use it directly.
       bool isCustomFormat = true;
-      for (uint32_t f = 0; f < ArrayLength(knownFormats); f++) {
-        if (type.EqualsASCII(knownFormats[f])) {
+      for (auto & knownFormat : knownFormats) {
+        if (type.EqualsASCII(knownFormat)) {
           isCustomFormat = false;
           break;
         }
       }
 
       uint32_t lengthInBytes;
       nsCOMPtr<nsISupports> convertedData;
 
--- a/dom/events/DataTransferItem.cpp
+++ b/dom/events/DataTransferItem.cpp
@@ -81,18 +81,18 @@ DataTransferItem::SetData(nsIVariant* aD
 
   if (!aData) {
     // We are holding a temporary null which will later be filled.
     // These are provided by the system, and have guaranteed properties about
     // their kind based on their type.
     MOZ_ASSERT(!mType.IsEmpty());
 
     mKind = KIND_STRING;
-    for (uint32_t i = 0; i < ArrayLength(kFileMimeNameMap); ++i) {
-      if (mType.EqualsASCII(kFileMimeNameMap[i].mMimeName)) {
+    for (auto & mime : kFileMimeNameMap) {
+      if (mType.EqualsASCII(mime.mMimeName)) {
         mKind = KIND_FILE;
         break;
       }
     }
 
     mData = nullptr;
     return;
   }
@@ -372,19 +372,19 @@ DataTransferItem::GetAsEntry(nsIPrincipa
   fs->CreateRoot(entries);
   return entry.forget();
 }
 
 already_AddRefed<File>
 DataTransferItem::CreateFileFromInputStream(nsIInputStream* aStream)
 {
   const char* key = nullptr;
-  for (uint32_t i = 0; i < ArrayLength(kFileMimeNameMap); ++i) {
-    if (mType.EqualsASCII(kFileMimeNameMap[i].mMimeName)) {
-      key = kFileMimeNameMap[i].mFileName;
+  for (auto & mime : kFileMimeNameMap) {
+    if (mType.EqualsASCII(mime.mMimeName)) {
+      key = mime.mFileName;
       break;
     }
   }
   if (!key) {
     MOZ_ASSERT_UNREACHABLE("Unsupported mime type");
     key = "GenericFileName";
   }
 
--- a/dom/events/UIEvent.cpp
+++ b/dom/events/UIEvent.cpp
@@ -443,27 +443,27 @@ UIEvent::ComputeModifierState(const nsAS
   // XXX Should we abort if aModifiersList is too long?
 
   Modifiers modifiers = 0;
 
   nsAString::const_iterator listStart, listEnd;
   aModifiersList.BeginReading(listStart);
   aModifiersList.EndReading(listEnd);
 
-  for (uint32_t i = 0; i < ArrayLength(kPairs); i++) {
+  for (auto kPair : kPairs) {
     nsAString::const_iterator start(listStart), end(listEnd);
-    if (!FindInReadable(NS_ConvertASCIItoUTF16(kPairs[i].name), start, end)) {
+    if (!FindInReadable(NS_ConvertASCIItoUTF16(kPair.name), start, end)) {
       continue;
     }
 
     if ((start != listStart && !NS_IsAsciiWhitespace(*(--start))) ||
         (end != listEnd && !NS_IsAsciiWhitespace(*(end)))) {
       continue;
     }
-    modifiers |= kPairs[i].modifier;
+    modifiers |= kPair.modifier;
   }
 
   return modifiers;
 }
 
 bool
 UIEvent::GetModifierStateInternal(const nsAString& aKey)
 {
--- a/dom/events/WheelHandlingHelper.cpp
+++ b/dom/events/WheelHandlingHelper.cpp
@@ -473,18 +473,18 @@ ScrollbarsForWheel::Inactivate()
 }
 
 /* static */ bool
 ScrollbarsForWheel::IsActive()
 {
   if (sActiveOwner) {
     return true;
   }
-  for (size_t i = 0; i < kNumberOfTargets; ++i) {
-    if (sActivatedScrollTargets[i]) {
+  for (auto & sActivatedScrollTarget : sActivatedScrollTargets) {
+    if (sActivatedScrollTarget) {
       return true;
     }
   }
   return false;
 }
 
 /* static */ void
 ScrollbarsForWheel::OwnWheelTransaction(bool aOwn)
@@ -512,18 +512,18 @@ ScrollbarsForWheel::TemporarilyActivateA
       scrollbarMediator->ScrollbarActivityStarted();
     }
   }
 }
 
 /* static */ void
 ScrollbarsForWheel::DeactivateAllTemporarilyActivatedScrollTargets()
 {
-  for (size_t i = 0; i < kNumberOfTargets; i++) {
-    AutoWeakFrame* scrollTarget = &sActivatedScrollTargets[i];
+  for (auto & sActivatedScrollTarget : sActivatedScrollTargets) {
+    AutoWeakFrame* scrollTarget = &sActivatedScrollTarget;
     if (*scrollTarget) {
       nsIScrollbarMediator* scrollbarMediator = do_QueryFrame(*scrollTarget);
       if (scrollbarMediator) {
         scrollbarMediator->ScrollbarActivityStopped();
       }
       *scrollTarget = nullptr;
     }
   }
--- a/dom/file/MemoryBlobImpl.cpp
+++ b/dom/file/MemoryBlobImpl.cpp
@@ -113,18 +113,18 @@ public:
         smallObjectsTotal += size;
       } else {
         SHA1Sum sha1;
         sha1.update(owner->mData, owner->mLength);
         uint8_t digest[SHA1Sum::kHashSize]; // SHA1 digests are 20 bytes long.
         sha1.finish(digest);
 
         nsAutoCString digestString;
-        for (size_t i = 0; i < sizeof(digest); i++) {
-          digestString.AppendPrintf("%02x", digest[i]);
+        for (unsigned char dig : digest) {
+          digestString.AppendPrintf("%02x", dig);
         }
 
         aHandleReport->Callback(
           /* process */ NS_LITERAL_CSTRING(""),
           nsPrintfCString(
             "explicit/dom/memory-file-data/large/file(length=%" PRIu64 ", sha1=%s)",
             owner->mLength, aAnonymize ? "<anonymized>" : digestString.get()),
           KIND_HEAP, UNITS_BYTES, size,
--- a/dom/gamepad/linux/udev.h
+++ b/dom/gamepad/linux/udev.h
@@ -29,27 +29,27 @@ class udev_lib {
   udev_lib() : lib(nullptr),
                udev(nullptr) {
     // Be careful about ABI compat! 0 -> 1 didn't change any
     // symbols this code relies on, per:
     // https://lists.fedoraproject.org/pipermail/devel/2012-June/168227.html
     const char* lib_names[] = {"libudev.so.0", "libudev.so.1"};
     // Check whether a library is already loaded so we don't load two
     // conflicting libs.
-    for (unsigned i = 0; i < ArrayLength(lib_names); i++) {
-      lib = dlopen(lib_names[i], RTLD_NOLOAD | RTLD_LAZY | RTLD_GLOBAL);
+    for (auto & lib_name : lib_names) {
+      lib = dlopen(lib_name, RTLD_NOLOAD | RTLD_LAZY | RTLD_GLOBAL);
       if (lib) {
         break;
       }
     }
     // If nothing loads the first time through, it means no version of libudev
     // was already loaded.
     if (!lib) {
-      for (unsigned i = 0; i < ArrayLength(lib_names); i++) {
-        lib = dlopen(lib_names[i], RTLD_LAZY | RTLD_GLOBAL);
+      for (auto & lib_name : lib_names) {
+        lib = dlopen(lib_name, RTLD_LAZY | RTLD_GLOBAL);
         if (lib) {
           break;
         }
       }
     }
     if (lib && LoadSymbols()) {
       udev = udev_new();
     }
--- a/dom/ipc/ContentProcessManager.cpp
+++ b/dom/ipc/ContentProcessManager.cpp
@@ -59,21 +59,19 @@ ContentProcessManager::AddContentProcess
 
 void
 ContentProcessManager::RemoveContentProcess(const ContentParentId& aChildCpId)
 {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mContentParentMap.find(aChildCpId) != mContentParentMap.end());
 
   mContentParentMap.erase(aChildCpId);
-  for (auto iter = mContentParentMap.begin();
-       iter != mContentParentMap.end();
-       ++iter) {
-    if (!iter->second.mChildrenCpId.empty()) {
-      iter->second.mChildrenCpId.erase(aChildCpId);
+  for (auto & iter : mContentParentMap) {
+    if (!iter.second.mChildrenCpId.empty()) {
+      iter.second.mChildrenCpId.erase(aChildCpId);
     }
   }
 }
 
 bool
 ContentProcessManager::AddGrandchildProcess(const ContentParentId& aParentCpId,
                                             const ContentParentId& aChildCpId)
 {
@@ -123,20 +121,18 @@ ContentProcessManager::GetAllChildProces
 
   nsTArray<ContentParentId> cpIdArray;
   auto iter = mContentParentMap.find(aParentCpId);
   if (NS_WARN_IF(iter == mContentParentMap.end())) {
     ASSERT_UNLESS_FUZZING();
     return Move(cpIdArray);
   }
 
-  for (auto cpIter = iter->second.mChildrenCpId.begin();
-       cpIter != iter->second.mChildrenCpId.end();
-       ++cpIter) {
-    cpIdArray.AppendElement(*cpIter);
+  for (auto cpIter : iter->second.mChildrenCpId) {
+    cpIdArray.AppendElement(cpIter);
   }
 
   return Move(cpIdArray);
 }
 
 bool
 ContentProcessManager::RegisterRemoteFrame(const TabId& aTabId,
                                            const ContentParentId& aOpenerCpId,
@@ -234,20 +230,18 @@ ContentProcessManager::GetTabContextByCo
 
   nsTArray<TabContext> tabContextArray;
   auto iter = mContentParentMap.find(aChildCpId);
   if (NS_WARN_IF(iter == mContentParentMap.end())) {
     ASSERT_UNLESS_FUZZING();
     return Move(tabContextArray);
   }
 
-  for (auto remoteFrameIter = iter->second.mRemoteFrames.begin();
-       remoteFrameIter != iter->second.mRemoteFrames.end();
-       ++remoteFrameIter) {
-    tabContextArray.AppendElement(remoteFrameIter->second.mContext);
+  for (auto & mRemoteFrame : iter->second.mRemoteFrames) {
+    tabContextArray.AppendElement(mRemoteFrame.second.mContext);
   }
 
   return Move(tabContextArray);
 }
 
 bool
 ContentProcessManager::GetRemoteFrameOpenerTabId(const ContentParentId& aChildCpId,
                                                  const TabId& aChildTabId,
@@ -335,20 +329,18 @@ ContentProcessManager::GetTabParentsByPr
 
   nsTArray<TabId> tabIdList;
   auto iter = mContentParentMap.find(aChildCpId);
   if (NS_WARN_IF(iter == mContentParentMap.end())) {
     ASSERT_UNLESS_FUZZING();
     return Move(tabIdList);
   }
 
-  for (auto remoteFrameIter = iter->second.mRemoteFrames.begin();
-      remoteFrameIter != iter->second.mRemoteFrames.end();
-      ++remoteFrameIter) {
-    tabIdList.AppendElement(remoteFrameIter->first);
+  for (auto & mRemoteFrame : iter->second.mRemoteFrames) {
+    tabIdList.AppendElement(mRemoteFrame.first);
   }
 
   return Move(tabIdList);
 }
 
 uint32_t
 ContentProcessManager::GetTabParentCountByProcessId(const ContentParentId& aChildCpId)
 {
--- a/dom/media/DecoderDoctorDiagnostics.cpp
+++ b/dom/media/DecoderDoctorDiagnostics.cpp
@@ -459,21 +459,21 @@ ReportAnalysis(nsIDocument* aDocument,
                                                          aDecodeIssueIsError));
   }
 
   // Report non-solved issues to console.
   if (!aIsSolved) {
     // Build parameter array needed by console message.
     AutoTArray<const char16_t*,
                NotificationAndReportStringId::maxReportParams> params;
-    for (int i = 0; i < NotificationAndReportStringId::maxReportParams; ++i) {
-      if (aNotification.mReportParams[i] == ReportParam::None) {
+    for (auto mReportParam : aNotification.mReportParams) {
+      if (mReportParam == ReportParam::None) {
         break;
       }
-      switch (aNotification.mReportParams[i]) {
+      switch (mReportParam) {
       case ReportParam::Formats:
         params.AppendElement(aFormats.Data());
         break;
       case ReportParam::DecodeIssue:
         params.AppendElement(decodeIssueDescription.Data());
         break;
       case ReportParam::DocURL:
         params.AppendElement(NS_ConvertUTF8toUTF16(aDocURL).Data());
--- a/dom/media/MediaCache.cpp
+++ b/dom/media/MediaCache.cpp
@@ -967,19 +967,19 @@ MediaCache::SwapBlocks(int32_t aBlockInd
     }
   }
 
   // Now update references to blocks in block lists.
   mFreeBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2);
 
   nsTHashtable<nsPtrHashKey<MediaCacheStream> > visitedStreams;
 
-  for (int32_t i = 0; i < 2; ++i) {
-    for (uint32_t j = 0; j < blocks[i]->mOwners.Length(); ++j) {
-      MediaCacheStream* stream = blocks[i]->mOwners[j].mStream;
+  for (auto & block : blocks) {
+    for (uint32_t j = 0; j < block->mOwners.Length(); ++j) {
+      MediaCacheStream* stream = block->mOwners[j].mStream;
       // Make sure that we don't update the same stream twice --- that
       // would result in swapping the block references back again!
       if (visitedStreams.GetEntry(stream))
         continue;
       visitedStreams.PutEntry(stream);
       stream->mReadaheadBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2);
       stream->mPlayedBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2);
       stream->mMetadataBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2);
--- a/dom/media/gtest/TestIntervalSet.cpp
+++ b/dom/media/gtest/TestIntervalSet.cpp
@@ -207,24 +207,24 @@ static void GeneratePermutations(const I
   for (uint32_t i = 0; i < aI2.Length(); i++) {
     comb2.push_back(i);
   }
 
   do {
     do {
       // Create intervals according to new indexes.
       IntIntervals i_0;
-      for (uint32_t i = 0; i < comb1.size(); i++) {
-        i_0 += aI1[comb1[i]];
+      for (unsigned int c : comb1) {
+        i_0 += aI1[c];
       }
       // Test that intervals are always normalized.
       Compare(aI1, i_0);
       IntIntervals i_1;
-      for (uint32_t i = 0; i < comb2.size(); i++) {
-        i_1 += aI2[comb2[i]];
+      for (unsigned int c : comb2) {
+        i_1 += aI2[c];
       }
       Compare(aI2, i_1);
       // Check intersections yield the same result.
       Compare(i_0.Intersection(i_1), i_ref);
     } while (std::next_permutation(comb2.begin(), comb2.end()));
   } while (std::next_permutation(comb1.begin(), comb1.end()));
 }
 
--- a/dom/media/gtest/TestVPXDecoding.cpp
+++ b/dom/media/gtest/TestVPXDecoding.cpp
@@ -73,31 +73,31 @@ struct TestFileData {
 static const TestFileData testFiles[] = {
   { "test_case_1224361.vp8.ivf", VPX_CODEC_OK },
   { "test_case_1224363.vp8.ivf", VPX_CODEC_CORRUPT_FRAME },
   { "test_case_1224369.vp8.ivf", VPX_CODEC_CORRUPT_FRAME }
 };
 
 TEST(libvpx, test_cases)
 {
-  for (size_t test = 0; test < ArrayLength(testFiles); ++test) {
+  for (auto testFile : testFiles) {
     nsTArray<uint8_t> data;
-    ReadVPXFile(testFiles[test].mFilename, data);
+    ReadVPXFile(testFile.mFilename, data);
     ASSERT_GT(data.Length(), 0u);
 
     vpx_codec_dec_cfg_t config;
     vpx_codec_iface_t* dx = ParseIVFConfig(data, config);
     ASSERT_TRUE(dx);
     config.threads = 2;
 
     vpx_codec_ctx_t ctx;
     PodZero(&ctx);
     vpx_codec_err_t r = vpx_codec_dec_init(&ctx, dx, &config, 0);
     ASSERT_EQ(VPX_CODEC_OK, r);
 
     r = vpx_codec_decode(&ctx, data.Elements(), data.Length(), nullptr, 0);
     // This test case is known to be corrupt.
-    EXPECT_EQ(testFiles[test].mDecodeResult, r);
+    EXPECT_EQ(testFile.mDecodeResult, r);
 
     r = vpx_codec_destroy(&ctx);
     EXPECT_EQ(VPX_CODEC_OK, r);
   }
 }
--- a/dom/media/gtest/TestVideoTrackEncoder.cpp
+++ b/dom/media/gtest/TestVideoTrackEncoder.cpp
@@ -220,45 +220,45 @@ TEST(VP8VideoTrackEncoder, Initializatio
 
     // Success cases
     { true, 640, 480},    // Standard VGA
     { true, 800, 480},    // Standard WVGA
     { true, 960, 540},    // Standard qHD
     { true, 1280, 720}    // Standard HD
   };
 
-  for (size_t i = 0; i < ArrayLength(params); i++)
+  for (auto param : params)
   {
     TestVP8TrackEncoder encoder;
-    EXPECT_TRUE(encoder.TestInit(params[i]));
+    EXPECT_TRUE(encoder.TestInit(param));
   }
 }
 
 // Get MetaData test
 TEST(VP8VideoTrackEncoder, FetchMetaData)
 {
   InitParam params[] = {
     // Success cases
     { true, 640, 480},    // Standard VGA
     { true, 800, 480},    // Standard WVGA
     { true, 960, 540},    // Standard qHD
     { true, 1280, 720}    // Standard HD
   };
 
-  for (size_t i = 0; i < ArrayLength(params); i++)
+  for (auto & param : params)
   {
     TestVP8TrackEncoder encoder;
-    EXPECT_TRUE(encoder.TestInit(params[i]));
+    EXPECT_TRUE(encoder.TestInit(param));
 
     RefPtr<TrackMetadataBase> meta = encoder.GetMetadata();
     RefPtr<VP8Metadata> vp8Meta(static_cast<VP8Metadata*>(meta.get()));
 
     // METADATA should be depend on how to initiate encoder.
-    EXPECT_TRUE(vp8Meta->mWidth == params[i].mWidth);
-    EXPECT_TRUE(vp8Meta->mHeight == params[i].mHeight);
+    EXPECT_TRUE(vp8Meta->mWidth == param.mWidth);
+    EXPECT_TRUE(vp8Meta->mHeight == param.mHeight);
   }
 }
 
 // Encode test
 TEST(VP8VideoTrackEncoder, FrameEncode)
 {
   // Initiate VP8 encoder
   TestVP8TrackEncoder encoder;
--- a/dom/media/mediasource/gtest/TestContainerParser.cpp
+++ b/dom/media/mediasource/gtest/TestContainerParser.cpp
@@ -16,18 +16,18 @@ TEST(ContainerParser, MIMETypes) {
   const char* containerTypes[] = {
     "video/webm",
     "audio/webm",
     "video/mp4",
     "audio/mp4",
     "audio/aac"
   };
   nsAutoPtr<ContainerParser> parser;
-  for (size_t i = 0; i < ArrayLength(containerTypes); ++i) {
-    Maybe<MediaContainerType> containerType = MakeMediaContainerType(containerTypes[i]);
+  for (auto & cont : containerTypes) {
+    Maybe<MediaContainerType> containerType = MakeMediaContainerType(cont);
     ASSERT_TRUE(containerType.isSome());
     parser = ContainerParser::CreateForMIMEType(*containerType);
     ASSERT_NE(parser, nullptr);
   }
 }
 
 
 already_AddRefed<MediaByteBuffer> make_adts_header()
--- a/dom/media/ogg/OggDemuxer.cpp
+++ b/dom/media/ogg/OggDemuxer.cpp
@@ -468,21 +468,21 @@ OggDemuxer::ReadMetadata()
   // @fixme fixme
 
   TrackInfo::TrackType tracks[2] =
     { TrackInfo::kAudioTrack, TrackInfo::kVideoTrack };
 
   nsTArray<OggCodecState*> bitstreams;
   nsTArray<uint32_t> serials;
 
-  for (uint32_t i = 0; i < ArrayLength(tracks); i++) {
+  for (auto & track : tracks) {
     ogg_page page;
     bool readAllBOS = false;
     while (!readAllBOS) {
-      if (!ReadOggPage(tracks[i], &page)) {
+      if (!ReadOggPage(track, &page)) {
         // Some kind of error...
         OGG_DEBUG("OggDemuxer::ReadOggPage failed? leaving ReadMetadata...");
         return NS_ERROR_FAILURE;
       }
 
       int serial = ogg_page_serialno(&page);
 
       if (!ogg_page_bos(&page)) {
@@ -494,17 +494,17 @@ OggDemuxer::ReadMetadata()
         // We've not encountered a stream with this serial number before. Create
         // an OggCodecState to demux it, and map that to the OggCodecState
         // in mCodecStates.
         OggCodecState* codecState = OggCodecState::Create(&page);
         mCodecStore.Add(serial, codecState);
         bitstreams.AppendElement(codecState);
         serials.AppendElement(serial);
       }
-      if (NS_FAILED(DemuxOggPage(tracks[i], &page))) {
+      if (NS_FAILED(DemuxOggPage(track, &page))) {
         return NS_ERROR_FAILURE;
       }
     }
   }
 
   // We've read all BOS pages, so we know the streams contained in the media.
   // 1. Find the first encountered Theora/Vorbis/Opus bitstream, and configure
   //    it as the target A/V bitstream.
--- a/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp
@@ -48,18 +48,17 @@ FFmpegRuntimeLinker::Init()
   if (sLinkStatus != LinkStatus_INIT) {
     return sLinkStatus == LinkStatus_SUCCEEDED;
   }
 
   // While going through all possible libs, this status will be updated with a
   // more precise error if possible.
   sLinkStatus = LinkStatus_NOT_FOUND;
 
-  for (size_t i = 0; i < ArrayLength(sLibs); i++) {
-    const char* lib = sLibs[i];
+  for (auto lib : sLibs) {
     PRLibSpec lspec;
     lspec.type = PR_LibSpec_Pathname;
     lspec.value.pathname = lib;
     sLibAV.mAVCodecLib =
       PR_LoadLibraryWithFlags(lspec, PR_LD_NOW | PR_LD_LOCAL);
     if (sLibAV.mAVCodecLib) {
       sLibAV.mAVUtilLib = sLibAV.mAVCodecLib;
       switch (sLibAV.Link()) {
--- a/dom/media/webaudio/DelayBuffer.cpp
+++ b/dom/media/webaudio/DelayBuffer.cpp
@@ -139,22 +139,22 @@ DelayBuffer::ReadChannels(const double a
     // Use the larger delay, for the older frame, first, as this is more
     // likely to use the cached upmixed channel arrays.
     int floorDelay = int(currentDelay);
     double interpolationFactor = currentDelay - floorDelay;
     int positions[2];
     positions[1] = PositionForDelay(floorDelay) + i;
     positions[0] = positions[1] - 1;
 
-    for (unsigned tick = 0; tick < ArrayLength(positions); ++tick) {
-      int readChunk = ChunkForPosition(positions[tick]);
+    for (int position : positions) {
+      int readChunk = ChunkForPosition(position);
       // mVolume is not set on default initialized chunks so handle null
       // chunks specially.
       if (!mChunks[readChunk].IsNull()) {
-        int readOffset = OffsetForPosition(positions[tick]);
+        int readOffset = OffsetForPosition(position);
         UpdateUpmixChannels(readChunk, totalChannelCount,
                             aChannelInterpretation);
         double multiplier = interpolationFactor * mChunks[readChunk].mVolume;
         for (uint32_t channel = aFirstChannel;
              channel < readChannelsEnd; ++channel) {
           aOutputChunk->ChannelFloatsForWrite(channel)[i] += multiplier *
             mUpmixChannels[channel][readOffset];
         }
@@ -169,20 +169,20 @@ void
 DelayBuffer::Read(double aDelayTicks, AudioBlock* aOutputChunk,
                   ChannelInterpretation aChannelInterpretation)
 {
   const bool firstTime = mCurrentDelay < 0.0;
   double currentDelay = firstTime ? aDelayTicks : mCurrentDelay;
 
   double computedDelay[WEBAUDIO_BLOCK_SIZE];
 
-  for (unsigned i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
+  for (double & delay : computedDelay) {
     // If the value has changed, smoothly approach it
     currentDelay += (aDelayTicks - currentDelay) * mSmoothingRate;
-    computedDelay[i] = currentDelay;
+    delay = currentDelay;
   }
 
   Read(computedDelay, aOutputChunk, aChannelInterpretation);
 }
 
 bool
 DelayBuffer::EnsureBuffer()
 {
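
The DelayBuffer hunk above is the write-side variant of the conversion: the loop variable has to be a mutable reference because each element is assigned rather than read. A self-contained sketch of that shape, with the smoothing constant and names made up for illustration:

#include <cstdio>

int main() {
  const double target = 10.0;
  const double smoothingRate = 0.5;
  double current = 0.0;
  double computed[8];

  // Each iteration updates `current` and stores it through the reference;
  // with a by-value loop variable the writes would be lost.
  for (double& value : computed) {
    current += (target - current) * smoothingRate;
    value = current;
  }

  for (double value : computed) {
    std::printf("%f\n", value);
  }
  return 0;
}
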
--- a/dom/media/webaudio/ScriptProcessorNode.cpp
+++ b/dom/media/webaudio/ScriptProcessorNode.cpp
@@ -39,18 +39,18 @@ private:
       : mMutex(aName)
     {}
 
     size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
     {
       mMutex.AssertCurrentThreadOwns();
 
       size_t amount = 0;
-      for (size_t i = 0; i < mBufferList.size(); i++) {
-        amount += mBufferList[i].SizeOfExcludingThis(aMallocSizeOf, false);
+      for (const auto & buff : mBufferList) {
+        amount += buff.SizeOfExcludingThis(aMallocSizeOf, false);
       }
 
       return amount;
     }
 
     Mutex& Lock() const { return const_cast<OutputQueue*>(this)->mMutex; }
 
     size_t ReadyToConsume() const
--- a/dom/media/webrtc/MediaTrackConstraints.h
+++ b/dom/media/webrtc/MediaTrackConstraints.h
@@ -362,18 +362,18 @@ public:
     // Order devices by shortest distance
     for (auto& ordinal : ordered) {
       aDevices.RemoveElement(ordinal.second);
       aDevices.AppendElement(ordinal.second);
     }
 
     // Then apply advanced constraints.
 
-    for (int i = 0; i < int(c.mAdvanced.size()); i++) {
-      aggregateConstraints.AppendElement(&c.mAdvanced[i]);
+    for (const auto & constraint : c.mAdvanced) {
+      aggregateConstraints.AppendElement(&constraint);
       nsTArray<RefPtr<DeviceType>> rejects;
       for (uint32_t j = 0; j < aDevices.Length();) {
         if (aDevices[j]->GetBestFitnessDistance(aggregateConstraints,
                                                 aIsChrome) == UINT32_MAX) {
           rejects.AppendElement(aDevices[j]);
           aDevices.RemoveElementAt(j);
         } else {
           ++j;
--- a/dom/media/webspeech/synth/speechd/SpeechDispatcherService.cpp
+++ b/dom/media/webspeech/synth/speechd/SpeechDispatcherService.cpp
@@ -351,21 +351,21 @@ SpeechDispatcherService::Setup()
 
   if (!PR_FindFunctionSymbol(speechdLib, "spd_get_volume")) {
     // There is no version getter function, so we rely on a symbol that was
     // introduced in release 0.8.2 in order to check for ABI compatibility.
     NS_WARNING("Unsupported version of speechd detected");
     return;
   }
 
-  for (uint32_t i = 0; i < ArrayLength(kSpeechDispatcherSymbols); i++) {
-    *kSpeechDispatcherSymbols[i].function =
-      PR_FindFunctionSymbol(speechdLib, kSpeechDispatcherSymbols[i].functionName);
+  for (auto kSpeechDispatcherSymbol : kSpeechDispatcherSymbols) {
+    *kSpeechDispatcherSymbol.function =
+      PR_FindFunctionSymbol(speechdLib, kSpeechDispatcherSymbol.functionName);
 
-    if (!*kSpeechDispatcherSymbols[i].function) {
+    if (!*kSpeechDispatcherSymbol.function) {
       NS_WARNING(nsPrintfCString("Failed to find speechd symbol for'%s'",
                                  kSpeechDispatcherSymbols[i].functionName).get());
       return;
     }
   }
 
   mSpeechdClient = spd_open("firefox", "web speech api", "who", SPD_MODE_THREADED);
   if (!mSpeechdClient) {
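
One pitfall the hunk above illustrates: after converting to a range-based loop, every use of the old index in the body has to be rewritten too, which is why the NS_WARNING call can no longer reference kSpeechDispatcherSymbols[i]. A hedged sketch of the symbol-table shape, with invented names and a stubbed lookup:

#include <cstdio>

// Hypothetical stand-ins for the PR_FindFunctionSymbol-style lookup.
using FuncPtr = void (*)();

struct SymbolEntry {
  FuncPtr* function;        // where to store the resolved symbol
  const char* functionName; // symbol name to look up
};

static FuncPtr sOpen = nullptr;
static FuncPtr sClose = nullptr;

static FuncPtr LookupSymbol(const char* /*aName*/) {
  return nullptr; // pretend the lookup failed so the warning path runs
}

int main() {
  const SymbolEntry kSymbols[] = {
    { &sOpen, "spd_open" },
    { &sClose, "spd_close" },
  };

  for (const auto& symbol : kSymbols) {
    *symbol.function = LookupSymbol(symbol.functionName);
    if (!*symbol.function) {
      // The body must use the loop variable, not kSymbols[i].
      std::printf("Failed to find symbol for '%s'\n", symbol.functionName);
      return 1;
    }
  }
  return 0;
}
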
--- a/dom/media/webspeech/synth/test/nsFakeSynthServices.cpp
+++ b/dom/media/webspeech/synth/test/nsFakeSynthServices.cpp
@@ -263,19 +263,19 @@ FakeIndirectAudioSynth::Speak(const nsAS
     }
 
   private:
     nsCOMPtr<nsISpeechTask> mTask;
     nsString mText;
   };
 
   uint32_t flags = 0;
-  for (uint32_t i = 0; i < ArrayLength(sIndirectVoices); i++) {
-    if (aUri.EqualsASCII(sIndirectVoices[i].uri)) {
-      flags = sIndirectVoices[i].flags;
+  for (const auto & sIndirectVoice : sIndirectVoices) {
+    if (aUri.EqualsASCII(sIndirectVoice.uri)) {
+      flags = sIndirectVoice.flags;
     }
   }
 
   if (flags & eFailAtStart) {
     return NS_ERROR_FAILURE;
   }
 
   RefPtr<FakeSynthCallback> cb = new FakeSynthCallback(
--- a/dom/plugins/ipc/PluginModuleParent.cpp
+++ b/dom/plugins/ipc/PluginModuleParent.cpp
@@ -2024,32 +2024,32 @@ PluginOfflineObserver::Observe(nsISuppor
 
 static const char* kSettingsPrefs[] =
     {"javascript.enabled",
      "dom.ipc.plugins.nativeCursorSupport"};
 
 void
 PluginModuleChromeParent::RegisterSettingsCallbacks()
 {
-    for (size_t i = 0; i < ArrayLength(kSettingsPrefs); i++) {
-        Preferences::RegisterCallback(CachedSettingChanged, kSettingsPrefs[i], this);
+    for (auto & kSettingsPref : kSettingsPrefs) {
+        Preferences::RegisterCallback(CachedSettingChanged, kSettingsPref, this);
     }
 
     nsCOMPtr<nsIObserverService> observerService = mozilla::services::GetObserverService();
     if (observerService) {
         mPluginOfflineObserver = new PluginOfflineObserver(this);
         observerService->AddObserver(mPluginOfflineObserver, "ipc:network:set-offline", false);
     }
 }
 
 void
 PluginModuleChromeParent::UnregisterSettingsCallbacks()
 {
-    for (size_t i = 0; i < ArrayLength(kSettingsPrefs); i++) {
-        Preferences::UnregisterCallback(CachedSettingChanged, kSettingsPrefs[i], this);
+    for (auto & kSettingsPref : kSettingsPrefs) {
+        Preferences::UnregisterCallback(CachedSettingChanged, kSettingsPref, this);
     }
 
     nsCOMPtr<nsIObserverService> observerService = mozilla::services::GetObserverService();
     if (observerService) {
         observerService->RemoveObserver(mPluginOfflineObserver, "ipc:network:set-offline");
         mPluginOfflineObserver = nullptr;
     }
 }
--- a/dom/plugins/test/testplugin/nptest.cpp
+++ b/dom/plugins/test/testplugin/nptest.cpp
@@ -754,18 +754,18 @@ NPError OSCALL NP_GetEntryPoints(NPPlugi
 #if defined(XP_UNIX)
 NP_EXPORT(NPError) NP_Shutdown()
 #elif defined(XP_WIN)
 NPError OSCALL NP_Shutdown()
 #endif
 {
   clearIdentifiers();
 
-  for (unsigned int i = 0; i < MOZ_ARRAY_LENGTH(sPluginPropertyValues); i++) {
-    NPN_ReleaseVariantValue(&sPluginPropertyValues[i]);
+  for (auto & sPluginPropertyValue : sPluginPropertyValues) {
+    NPN_ReleaseVariantValue(&sPluginPropertyValue);
   }
 
   return NPERR_NO_ERROR;
 }
 
 NPError
 NPP_New(NPMIMEType pluginType, NPP instance, uint16_t mode, int16_t argc, char* argn[], char* argv[], NPSavedData* saved)
 {
@@ -1959,18 +1959,18 @@ scriptableDeallocate(NPObject* npobj)
 void
 scriptableInvalidate(NPObject* npobj)
 {
 }
 
 bool
 scriptableHasMethod(NPObject* npobj, NPIdentifier name)
 {
-  for (int i = 0; i < int(MOZ_ARRAY_LENGTH(sPluginMethodIdentifiers)); i++) {
-    if (name == sPluginMethodIdentifiers[i])
+  for (auto & sPluginMethodIdentifier : sPluginMethodIdentifiers) {
+    if (name == sPluginMethodIdentifier)
       return true;
   }
   return false;
 }
 
 bool
 scriptableInvoke(NPObject* npobj, NPIdentifier name, const NPVariant* args, uint32_t argCount, NPVariant* result)
 {
@@ -2007,18 +2007,18 @@ scriptableHasProperty(NPObject* npobj, N
     }
     NPN_MemFree(asUTF8);
   }
   else {
     if (NPN_GetIntIdentifier(NPN_IntFromIdentifier(name)) != name) {
       Crash();
     }
   }
-  for (int i = 0; i < int(MOZ_ARRAY_LENGTH(sPluginPropertyIdentifiers)); i++) {
-    if (name == sPluginPropertyIdentifiers[i]) {
+  for (auto & sPluginPropertyIdentifier : sPluginPropertyIdentifiers) {
+    if (name == sPluginPropertyIdentifier) {
       return true;
     }
   }
   return false;
 }
 
 bool
 scriptableGetProperty(NPObject* npobj, NPIdentifier name, NPVariant* result)
--- a/dom/security/nsCSPParser.cpp
+++ b/dom/security/nsCSPParser.cpp
@@ -714,18 +714,18 @@ nsCSPParser::hashSource()
 
   if (!isValidBase64Value(expr.BeginReading() + dashIndex + 1, expr.EndReading())) {
     return nullptr;
   }
 
   nsAutoString algo(Substring(expr, 0, dashIndex));
   nsAutoString hash(Substring(expr, dashIndex + 1, expr.Length() - dashIndex + 1));
 
-  for (uint32_t i = 0; i < kHashSourceValidFnsLen; i++) {
-    if (algo.LowerCaseEqualsASCII(kHashSourceValidFns[i])) {
+  for (auto kHashSourceValidFn : kHashSourceValidFns) {
+    if (algo.LowerCaseEqualsASCII(kHashSourceValidFn)) {
       // cache if encountering hash or nonce to invalidate unsafe-inline
       mHasHashOrNonce = true;
       return new nsCSPHashSrc(algo, hash);
     }
   }
   return nullptr;
 }
 
--- a/dom/security/nsCSPUtils.cpp
+++ b/dom/security/nsCSPUtils.cpp
@@ -322,19 +322,19 @@ CSP_IsQuotelessKeyword(const nsAString& 
   nsString lowerKey = PromiseFlatString(aKey);
   ToLowerCase(lowerKey);
 
   static_assert(CSP_LAST_KEYWORD_VALUE ==
                 (sizeof(CSPStrKeywords) / sizeof(CSPStrKeywords[0])),
                 "CSP_LAST_KEYWORD_VALUE does not match length of CSPStrKeywords");
 
   nsAutoString keyword;
-  for (uint32_t i = 0; i < CSP_LAST_KEYWORD_VALUE; i++) {
+  for (auto & CSPStrKeyword : CSPStrKeywords) {
     // skipping the leading ' and trimming the trailing '
-    keyword.AssignASCII(CSPStrKeywords[i] + 1);
+    keyword.AssignASCII(CSPStrKeyword + 1);
     keyword.Trim("'", false, true);
     if (lowerKey.Equals(keyword)) {
       return true;
     }
   }
   return false;
 }
 
--- a/dom/smil/nsSMILParserUtils.cpp
+++ b/dom/smil/nsSMILParserUtils.cpp
@@ -518,20 +518,20 @@ nsSMILParserUtils::ParseKeySplines(const
   nsCharSeparatedTokenizerTemplate<IsSVGWhitespace> controlPointTokenizer(aSpec, ';');
   while (controlPointTokenizer.hasMoreTokens()) {
 
     nsCharSeparatedTokenizerTemplate<IsSVGWhitespace>
       tokenizer(controlPointTokenizer.nextToken(), ',',
                 nsCharSeparatedTokenizer::SEPARATOR_OPTIONAL);
 
     double values[4];
-    for (int i = 0 ; i < 4; i++) {
+    for (double & value : values) {
       if (!tokenizer.hasMoreTokens() ||
-          !SVGContentUtils::ParseNumber(tokenizer.nextToken(), values[i]) ||
-          values[i] > 1.0 || values[i] < 0.0) {
+          !SVGContentUtils::ParseNumber(tokenizer.nextToken(), value) ||
+          value > 1.0 || value < 0.0) {
         return false;
       }
     }
     if (tokenizer.hasMoreTokens() ||
         tokenizer.separatorAfterCurrentToken() ||
         !aKeySplines.AppendElement(nsSMILKeySpline(values[0],
                                                    values[1],
                                                    values[2],
--- a/dom/svg/SVGTests.cpp
+++ b/dom/svg/SVGTests.cpp
@@ -58,18 +58,18 @@ bool
 SVGTests::HasExtension(const nsAString& aExtension)
 {
   return nsSVGFeatures::HasExtension(aExtension, IsInChromeDoc());
 }
 
 bool
 SVGTests::IsConditionalProcessingAttribute(const nsIAtom* aAttribute) const
 {
-  for (uint32_t i = 0; i < ArrayLength(sStringListNames); i++) {
-    if (aAttribute == *sStringListNames[i]) {
+  for (auto & sStringListName : sStringListNames) {
+    if (aAttribute == *sStringListName) {
       return true;
     }
   }
   return false;
 }
 
 int32_t
 SVGTests::GetBestLanguagePreferenceRank(const nsAString& aAcceptLangs) const
--- a/dom/svg/nsSVGTransform.cpp
+++ b/dom/svg/nsSVGTransform.cpp
@@ -149,18 +149,18 @@ nsSVGTransform::SetSkewY(float aAngle)
 
 SVGTransformSMILData::SVGTransformSMILData(const nsSVGTransform& aTransform)
   : mTransformType(aTransform.Type())
 {
   MOZ_ASSERT(mTransformType >= SVG_TRANSFORM_MATRIX &&
              mTransformType <= SVG_TRANSFORM_SKEWY,
              "Unexpected transform type");
 
-  for (uint32_t i = 0; i < NUM_STORED_PARAMS; ++i) {
-    mParams[i] = 0.f;
+  for (float & mParam : mParams) {
+    mParam = 0.f;
   }
 
   switch (mTransformType) {
     case SVG_TRANSFORM_MATRIX: {
       const gfxMatrix& mx = aTransform.GetMatrix();
       mParams[0] = static_cast<float>(mx._11);
       mParams[1] = static_cast<float>(mx._12);
       mParams[2] = static_cast<float>(mx._21);
--- a/dom/svg/nsSVGTransform.h
+++ b/dom/svg/nsSVGTransform.h
@@ -124,18 +124,18 @@ public:
   // This is also the number of params we actually store, regardless of type.
   static const uint32_t NUM_STORED_PARAMS = 6;
 
   explicit SVGTransformSMILData(uint16_t aType)
   : mTransformType(aType)
   {
     MOZ_ASSERT(aType >= SVG_TRANSFORM_MATRIX && aType <= SVG_TRANSFORM_SKEWY,
                "Unexpected transform type");
-    for (uint32_t i = 0; i < NUM_STORED_PARAMS; ++i) {
-      mParams[i] = 0.f;
+    for (float & mParam : mParams) {
+      mParam = 0.f;
     }
   }
 
   SVGTransformSMILData(uint16_t aType, float (&aParams)[NUM_SIMPLE_PARAMS])
   : mTransformType(aType)
   {
     MOZ_ASSERT(aType >= SVG_TRANSFORM_TRANSLATE && aType <= SVG_TRANSFORM_SKEWY,
                "Expected 'simple' transform type");
--- a/dom/xhr/XMLHttpRequestMainThread.cpp
+++ b/dom/xhr/XMLHttpRequestMainThread.cpp
@@ -1179,18 +1179,18 @@ XMLHttpRequestMainThread::IsSafeHeader(c
     if (NS_FAILED(status)) {
       return false;
     }
   }
   const char* kCrossOriginSafeHeaders[] = {
     "cache-control", "content-language", "content-type", "expires",
     "last-modified", "pragma"
   };
-  for (uint32_t i = 0; i < ArrayLength(kCrossOriginSafeHeaders); ++i) {
-    if (aHeader.LowerCaseEqualsASCII(kCrossOriginSafeHeaders[i])) {
+  for (auto & kCrossOriginSafeHeader : kCrossOriginSafeHeaders) {
+    if (aHeader.LowerCaseEqualsASCII(kCrossOriginSafeHeader)) {
       return true;
     }
   }
   nsAutoCString headerVal;
   // The "Access-Control-Expose-Headers" header contains a comma separated
   // list of method names.
   Unused << aHttpChannel->
       GetResponseHeader(NS_LITERAL_CSTRING("Access-Control-Expose-Headers"),
@@ -4294,22 +4294,20 @@ RequestHeaders::ApplyToChannel(nsIHttpCh
 
 void
 RequestHeaders::GetCORSUnsafeHeaders(nsTArray<nsCString>& aArray) const
 {
   static const char *kCrossOriginSafeHeaders[] = {
     "accept", "accept-language", "content-language", "content-type",
     "last-event-id"
   };
-  const uint32_t kCrossOriginSafeHeadersLength =
-    ArrayLength(kCrossOriginSafeHeaders);
   for (const RequestHeader& header : mHeaders) {
     bool safe = false;
-    for (uint32_t i = 0; i < kCrossOriginSafeHeadersLength; ++i) {
-      if (header.mName.LowerCaseEqualsASCII(kCrossOriginSafeHeaders[i])) {
+    for (auto & kCrossOriginSafeHeader : kCrossOriginSafeHeaders) {
+      if (header.mName.LowerCaseEqualsASCII(kCrossOriginSafeHeader)) {
         safe = true;
         break;
       }
     }
     if (!safe) {
       aArray.AppendElement(header.mName);
     }
   }
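
In the second XMLHttpRequest hunk the separate kCrossOriginSafeHeadersLength constant becomes dead once the inner loop is range-based, so it is dropped rather than left unused. A minimal sketch of the safelist membership test, with an abbreviated header list:

#include <cstring>
#include <cstdio>

// Returns true if aHeader (already lower-cased) is in the safelist.
static bool IsSafeHeader(const char* aHeader) {
  static const char* kSafeHeaders[] = {
    "accept", "content-type", "last-event-id"
  };
  for (const char* safeHeader : kSafeHeaders) {
    if (std::strcmp(aHeader, safeHeader) == 0) {
      return true;
    }
  }
  return false;
}

int main() {
  std::printf("%d\n", IsSafeHeader("content-type")); // 1
  std::printf("%d\n", IsSafeHeader("x-custom"));     // 0
  return 0;
}
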
--- a/editor/libeditor/CompositionTransaction.cpp
+++ b/editor/libeditor/CompositionTransaction.cpp
@@ -208,19 +208,19 @@ CompositionTransaction::SetIMESelection(
     nsISelectionController::SELECTION_IME_CONVERTEDTEXT,
     nsISelectionController::SELECTION_IME_SELECTEDCONVERTEDTEXT
   };
 
   nsCOMPtr<nsISelectionController> selCon;
   aEditorBase.GetSelectionController(getter_AddRefs(selCon));
   NS_ENSURE_TRUE(selCon, NS_ERROR_NOT_INITIALIZED);
 
-  for (uint32_t i = 0; i < ArrayLength(kIMESelections); ++i) {
+  for (short kIMESelection : kIMESelections) {
     nsCOMPtr<nsISelection> selectionOfIME;
-    if (NS_FAILED(selCon->GetSelection(kIMESelections[i],
+    if (NS_FAILED(selCon->GetSelection(kIMESelection,
                                        getter_AddRefs(selectionOfIME)))) {
       continue;
     }
     rv = selectionOfIME->RemoveAllRanges();
     NS_ASSERTION(NS_SUCCEEDED(rv),
                  "Failed to remove all ranges of IME selection");
   }
 
--- a/editor/libeditor/EditorCommands.cpp
+++ b/editor/libeditor/EditorCommands.cpp
@@ -871,42 +871,39 @@ SelectionMoveCommands::DoCommand(const c
   }
 
   nsCOMPtr<nsISelectionController> selCont;
   nsresult rv = editor->GetSelectionController(getter_AddRefs(selCont));
   NS_ENSURE_SUCCESS(rv, rv);
   NS_ENSURE_TRUE(selCont, NS_ERROR_FAILURE);
 
   // scroll commands
-  for (size_t i = 0; i < mozilla::ArrayLength(scrollCommands); i++) {
-    const ScrollCommand &cmd = scrollCommands[i];
+  for (const auto & cmd : scrollCommands) {
     if (!nsCRT::strcmp(aCommandName, cmd.reverseScroll)) {
       return (selCont->*(cmd.scroll))(false);
     } else if (!nsCRT::strcmp(aCommandName, cmd.forwardScroll)) {
       return (selCont->*(cmd.scroll))(true);
     }
   }
 
   // caret movement/selection commands
-  for (size_t i = 0; i < mozilla::ArrayLength(moveCommands); i++) {
-    const MoveCommand &cmd = moveCommands[i];
+  for (const auto & cmd : moveCommands) {
     if (!nsCRT::strcmp(aCommandName, cmd.reverseMove)) {
       return (selCont->*(cmd.move))(false, false);
     } else if (!nsCRT::strcmp(aCommandName, cmd.forwardMove)) {
       return (selCont->*(cmd.move))(true, false);
     } else if (!nsCRT::strcmp(aCommandName, cmd.reverseSelect)) {
       return (selCont->*(cmd.move))(false, true);
     } else if (!nsCRT::strcmp(aCommandName, cmd.forwardSelect)) {
       return (selCont->*(cmd.move))(true, true);
     }
   }
 
   // physical-direction movement/selection
-  for (size_t i = 0; i < mozilla::ArrayLength(physicalCommands); i++) {
-    const PhysicalCommand &cmd = physicalCommands[i];
+  for (const auto & cmd : physicalCommands) {
     if (!nsCRT::strcmp(aCommandName, cmd.move)) {
       return selCont->PhysicalMove(cmd.direction, cmd.amount, false);
     } else if (!nsCRT::strcmp(aCommandName, cmd.select)) {
       return selCont->PhysicalMove(cmd.direction, cmd.amount, true);
     }
   }
 
   return NS_ERROR_FAILURE;
--- a/editor/libeditor/HTMLEditRules.cpp
+++ b/editor/libeditor/HTMLEditRules.cpp
@@ -7356,19 +7356,19 @@ HTMLEditRules::ReapplyCachedStyles()
 
   return NS_OK;
 }
 
 void
 HTMLEditRules::ClearCachedStyles()
 {
   // clear the mPresent bits in mCachedStyles array
-  for (size_t j = 0; j < SIZE_STYLE_TABLE; j++) {
-    mCachedStyles[j].mPresent = false;
-    mCachedStyles[j].value.Truncate();
+  for (auto & mCachedStyle : mCachedStyles) {
+    mCachedStyle.mPresent = false;
+    mCachedStyle.value.Truncate();
   }
 }
 
 void
 HTMLEditRules::AdjustSpecialBreaks()
 {
   NS_ENSURE_TRUE_VOID(mHTMLEditor);
 
--- a/extensions/auth/nsAuthGSSAPI.cpp
+++ b/extensions/auth/nsAuthGSSAPI.cpp
@@ -201,20 +201,20 @@ gssInit()
 
     if (!lib) {
         LOG(("Fail to load gssapi library\n"));
         return NS_ERROR_FAILURE;
     }
 
     LOG(("Attempting to load gss functions\n"));
 
-    for (size_t i = 0; i < ArrayLength(gssFuncs); ++i) {
-        gssFuncs[i].func = PR_FindFunctionSymbol(lib, gssFuncs[i].str);
-        if (!gssFuncs[i].func) {
-            LOG(("Fail to load %s function from gssapi library\n", gssFuncs[i].str));
+    for (auto & gssFunc : gssFuncs) {
+        gssFunc.func = PR_FindFunctionSymbol(lib, gssFunc.str);
+        if (!gssFunc.func) {
+            LOG(("Fail to load %s function from gssapi library\n", gssFunc.str));
             PR_UnloadLibrary(lib);
             return NS_ERROR_FAILURE;
         }
     }
 #ifdef XP_MACOSX
     if (gssNativeImp &&
             !(KLCacheHasValidTicketsPtr =
                PR_FindFunctionSymbol(lib, "KLCacheHasValidTickets"))) {
--- a/extensions/cookie/nsPermissionManager.cpp
+++ b/extensions/cookie/nsPermissionManager.cpp
@@ -129,18 +129,18 @@ static const char* kPreloadPermissions[]
 };
 
 // NOTE: nullptr can be passed as aType - if it is this function will return
 // "false" unconditionally.
 bool
 IsPreloadPermission(const char* aType)
 {
   if (aType) {
-    for (uint32_t i = 0; i < mozilla::ArrayLength(kPreloadPermissions); ++i) {
-      if (!strcmp(aType, kPreloadPermissions[i])) {
+    for (auto & kPreloadPermission : kPreloadPermissions) {
+      if (!strcmp(aType, kPreloadPermission)) {
         return true;
       }
     }
   }
 
   return false;
 }
 
--- a/extensions/universalchardet/src/base/nsLatin1Prober.cpp
+++ b/extensions/universalchardet/src/base/nsLatin1Prober.cpp
@@ -112,18 +112,18 @@ nsProbingState nsLatin1Prober::HandleDat
 
 float nsLatin1Prober::GetConfidence(void)
 {
   if (mState == eNotMe)
     return 0.01f;
 
   float confidence;
   uint32_t total = 0;
-  for (int32_t i = 0; i < FREQ_CAT_NUM; i++)
-    total += mFreqCounter[i];
+  for (unsigned int freqCount : mFreqCounter)
+    total += freqCount;
 
   if(!total)
     confidence = 0.0f;
   else
   {
     confidence = mFreqCounter[3]*1.0f / total;
     confidence -= mFreqCounter[1]*20.0f/total;
   }
--- a/extensions/universalchardet/src/base/nsMBCSGroupProber.cpp
+++ b/extensions/universalchardet/src/base/nsMBCSGroupProber.cpp
@@ -22,19 +22,19 @@ nsMBCSGroupProber::nsMBCSGroupProber()
   mProbers[0] = new nsUTF8Prober();
   mProbers[1] = new nsSJISProber();
   mProbers[2] = new nsEUCJPProber();
   Reset();
 }
 
 nsMBCSGroupProber::~nsMBCSGroupProber()
 {
-  for (uint32_t i = 0; i < NUM_OF_PROBERS; i++)
+  for (auto & mProber : mProbers)
   {
-    delete mProbers[i];
+    delete mProber;
   }
 }
 
 const char* nsMBCSGroupProber::GetCharSetName()
 {
   if (mBestGuess == -1)
   {
     GetConfidence();
--- a/extensions/universalchardet/src/base/nsUniversalDetector.cpp
+++ b/extensions/universalchardet/src/base/nsUniversalDetector.cpp
@@ -26,18 +26,18 @@ nsUniversalDetector::nsUniversalDetector
 
   uint32_t i;
   for (i = 0; i < NUM_OF_CHARSET_PROBERS; i++)
     mCharSetProbers[i] = nullptr;
 }
 
 nsUniversalDetector::~nsUniversalDetector()
 {
-  for (int32_t i = 0; i < NUM_OF_CHARSET_PROBERS; i++)
-    delete mCharSetProbers[i];
+  for (auto & mCharSetProber : mCharSetProbers)
+    delete mCharSetProber;
 
   delete mEscCharSetProber;
 }
 
 void
 nsUniversalDetector::Reset()
 {
   mDone = false;
--- a/gfx/2d/DrawTargetCairo.cpp
+++ b/gfx/2d/DrawTargetCairo.cpp
@@ -531,18 +531,18 @@ GfxPatternToCairoPattern(const Pattern& 
 
       MOZ_ASSERT(pattern.mStops->GetBackendType() == BackendType::CAIRO);
       GradientStopsCairo* cairoStops = static_cast<GradientStopsCairo*>(pattern.mStops.get());
       cairo_pattern_set_extend(pat, GfxExtendToCairoExtend(cairoStops->GetExtendMode()));
 
       matrix = &pattern.mMatrix;
 
       const std::vector<GradientStop>& stops = cairoStops->GetStops();
-      for (size_t i = 0; i < stops.size(); ++i) {
-        CairoPatternAddGradientStop(pat, stops[i]);
+      for (const auto & stop : stops) {
+        CairoPatternAddGradientStop(pat, stop);
       }
 
       break;
     }
     case PatternType::RADIAL_GRADIENT:
     {
       const RadialGradientPattern& pattern = static_cast<const RadialGradientPattern&>(aPattern);
 
@@ -551,18 +551,18 @@ GfxPatternToCairoPattern(const Pattern& 
 
       MOZ_ASSERT(pattern.mStops->GetBackendType() == BackendType::CAIRO);
       GradientStopsCairo* cairoStops = static_cast<GradientStopsCairo*>(pattern.mStops.get());
       cairo_pattern_set_extend(pat, GfxExtendToCairoExtend(cairoStops->GetExtendMode()));
 
       matrix = &pattern.mMatrix;
 
       const std::vector<GradientStop>& stops = cairoStops->GetStops();
-      for (size_t i = 0; i < stops.size(); ++i) {
-        CairoPatternAddGradientStop(pat, stops[i]);
+      for (const auto & stop : stops) {
+        CairoPatternAddGradientStop(pat, stop);
       }
 
       break;
     }
     default:
     {
       // We should support all pattern types!
       MOZ_ASSERT(false);
--- a/gfx/2d/DrawTargetTiled.cpp
+++ b/gfx/2d/DrawTargetTiled.cpp
@@ -157,109 +157,109 @@ DrawTargetTiled::PushClipRect(const Rect
       }
     }
   }
 }
 
 void
 DrawTargetTiled::PopClip()
 {
-  for (size_t i = 0; i < mTiles.size(); i++) {
-    if (!mTiles[i].mClippedOut) {
-      mTiles[i].mDrawTarget->PopClip();
+  for (auto & mTile : mTiles) {
+    if (!mTile.mClippedOut) {
+      mTile.mDrawTarget->PopClip();
     }
   }
 
   std::vector<uint32_t>& clippedTiles = mClippedOutTilesStack.back();
-  for (size_t i = 0; i < clippedTiles.size(); i++) {
-    mTiles[clippedTiles[i]].mClippedOut = false;
+  for (unsigned int clippedTile : clippedTiles) {
+    mTiles[clippedTile].mClippedOut = false;
   }
 
   mClippedOutTilesStack.pop_back();
 }
 
 void
 DrawTargetTiled::CopySurface(SourceSurface *aSurface,
                              const IntRect &aSourceRect,
                              const IntPoint &aDestination)
 {
-  for (size_t i = 0; i < mTiles.size(); i++) {
-    IntPoint tileOrigin = mTiles[i].mTileOrigin;
-    IntSize tileSize = mTiles[i].mDrawTarget->GetSize();
+  for (auto & mTile : mTiles) {
+    IntPoint tileOrigin = mTile.mTileOrigin;
+    IntSize tileSize = mTile.mDrawTarget->GetSize();
     if (!IntRect(aDestination, aSourceRect.Size()).Intersects(IntRect(tileOrigin, tileSize))) {
       continue;
     }
     // CopySurface ignores the transform, account for that here.
-    mTiles[i].mDrawTarget->CopySurface(aSurface, aSourceRect, aDestination - tileOrigin);
+    mTile.mDrawTarget->CopySurface(aSurface, aSourceRect, aDestination - tileOrigin);
   }
 }
 
 void
 DrawTargetTiled::SetTransform(const Matrix& aTransform)
 {
-  for (size_t i = 0; i < mTiles.size(); i++) {
+  for (auto & mTile : mTiles) {
     Matrix mat = aTransform;
-    mat.PostTranslate(Float(-mTiles[i].mTileOrigin.x), Float(-mTiles[i].mTileOrigin.y));
-    mTiles[i].mDrawTarget->SetTransform(mat);
+    mat.PostTranslate(Float(-mTile.mTileOrigin.x), Float(-mTile.mTileOrigin.y));
+    mTile.mDrawTarget->SetTransform(mat);
   }
   DrawTarget::SetTransform(aTransform);
 }
 
 void
 DrawTargetTiled::SetPermitSubpixelAA(bool aPermitSubpixelAA)
 {
   DrawTarget::SetPermitSubpixelAA(aPermitSubpixelAA);
-  for (size_t i = 0; i < mTiles.size(); i++) {
-    mTiles[i].mDrawTarget->SetPermitSubpixelAA(aPermitSubpixelAA);
+  for (auto & mTile : mTiles) {
+    mTile.mDrawTarget->SetPermitSubpixelAA(aPermitSubpixelAA);
   }
 }
 
 void
 DrawTargetTiled::DrawSurface(SourceSurface* aSurface, const Rect& aDest, const Rect& aSource, const DrawSurfaceOptions& aSurfaceOptions, const DrawOptions& aDrawOptions)
 {
   Rect deviceRect = mTransform.TransformBounds(aDest);
-  for (size_t i = 0; i < mTiles.size(); i++) {
-    if (!mTiles[i].mClippedOut &&
-        deviceRect.Intersects(Rect(mTiles[i].mTileOrigin.x,
-                                   mTiles[i].mTileOrigin.y,
-                                   mTiles[i].mDrawTarget->GetSize().width,
-                                   mTiles[i].mDrawTarget->GetSize().height))) {
-      mTiles[i].mDrawTarget->DrawSurface(aSurface, aDest, aSource, aSurfaceOptions, aDrawOptions);
+  for (auto & mTile : mTiles) {
+    if (!mTile.mClippedOut &&
+        deviceRect.Intersects(Rect(mTile.mTileOrigin.x,
+                                   mTile.mTileOrigin.y,
+                                   mTile.mDrawTarget->GetSize().width,
+                                   mTile.mDrawTarget->GetSize().height))) {
+      mTile.mDrawTarget->DrawSurface(aSurface, aDest, aSource, aSurfaceOptions, aDrawOptions);
     }
   }
 }
 
 void
 DrawTargetTiled::FillRect(const Rect& aRect, const Pattern& aPattern, const DrawOptions& aDrawOptions)
 {
   Rect deviceRect = mTransform.TransformBounds(aRect);
-  for (size_t i = 0; i < mTiles.size(); i++) {
-    if (!mTiles[i].mClippedOut &&
-        deviceRect.Intersects(Rect(mTiles[i].mTileOrigin.x,
-                                   mTiles[i].mTileOrigin.y,
-                                   mTiles[i].mDrawTarget->GetSize().width,
-                                   mTiles[i].mDrawTarget->GetSize().height))) {
-      mTiles[i].mDrawTarget->FillRect(aRect, aPattern, aDrawOptions);
+  for (auto & mTile : mTiles) {
+    if (!mTile.mClippedOut &&
+        deviceRect.Intersects(Rect(mTile.mTileOrigin.x,
+                                   mTile.mTileOrigin.y,
+                                   mTile.mDrawTarget->GetSize().width,
+                                   mTile.mDrawTarget->GetSize().height))) {
+      mTile.mDrawTarget->FillRect(aRect, aPattern, aDrawOptions);
     }
   }
 }
 
 void
 DrawTargetTiled::Stroke(const Path* aPath, const Pattern& aPattern, const StrokeOptions& aStrokeOptions, const DrawOptions& aDrawOptions)
 {
   // Approximate the stroke extents, since Path::GetStrokeExtents can be slow
   Rect deviceRect = aPath->GetBounds(mTransform);
   deviceRect.Inflate(MaxStrokeExtents(aStrokeOptions, mTransform));
-  for (size_t i = 0; i < mTiles.size(); i++) {
-    if (!mTiles[i].mClippedOut &&
-        deviceRect.Intersects(Rect(mTiles[i].mTileOrigin.x,
-                                   mTiles[i].mTileOrigin.y,
-                                   mTiles[i].mDrawTarget->GetSize().width,
-                                   mTiles[i].mDrawTarget->GetSize().height))) {
-      mTiles[i].mDrawTarget->Stroke(aPath, aPattern, aStrokeOptions, aDrawOptions);
+  for (auto & mTile : mTiles) {
+    if (!mTile.mClippedOut &&
+        deviceRect.Intersects(Rect(mTile.mTileOrigin.x,
+                                   mTile.mTileOrigin.y,
+                                   mTile.mDrawTarget->GetSize().width,
+                                   mTile.mDrawTarget->GetSize().height))) {
+      mTile.mDrawTarget->Stroke(aPath, aPattern, aStrokeOptions, aDrawOptions);
     }
   }
 }
 
 void
 DrawTargetTiled::StrokeRect(const Rect& aRect, const Pattern& aPattern, const StrokeOptions &aStrokeOptions, const DrawOptions& aDrawOptions)
 {
   Rect deviceRect = mTransform.TransformBounds(aRect);
@@ -268,90 +268,90 @@ DrawTargetTiled::StrokeRect(const Rect& 
   outerRect.Inflate(strokeMargin);
   Rect innerRect;
   if (mTransform.IsRectilinear()) {
     // If rects are mapped to rects, we can compute the inner rect
     // of the stroked rect.
     innerRect = deviceRect;
     innerRect.Deflate(strokeMargin);
   }
-  for (size_t i = 0; i < mTiles.size(); i++) {
-    if (mTiles[i].mClippedOut) {
+  for (auto & mTile : mTiles) {
+    if (mTile.mClippedOut) {
       continue;
     }
-    Rect tileRect(mTiles[i].mTileOrigin.x,
-                  mTiles[i].mTileOrigin.y,
-                  mTiles[i].mDrawTarget->GetSize().width,
-                  mTiles[i].mDrawTarget->GetSize().height);
+    Rect tileRect(mTile.mTileOrigin.x,
+                  mTile.mTileOrigin.y,
+                  mTile.mDrawTarget->GetSize().width,
+                  mTile.mDrawTarget->GetSize().height);
     if (outerRect.Intersects(tileRect) && !innerRect.Contains(tileRect)) {
-      mTiles[i].mDrawTarget->StrokeRect(aRect, aPattern, aStrokeOptions, aDrawOptions);
+      mTile.mDrawTarget->StrokeRect(aRect, aPattern, aStrokeOptions, aDrawOptions);
     }
   }
 }
 
 void
 DrawTargetTiled::StrokeLine(const Point& aStart, const Point& aEnd, const Pattern& aPattern, const StrokeOptions &aStrokeOptions, const DrawOptions& aDrawOptions)
 {
   Rect lineBounds = Rect(aStart, Size()).UnionEdges(Rect(aEnd, Size()));
   Rect deviceRect = mTransform.TransformBounds(lineBounds);
   deviceRect.Inflate(MaxStrokeExtents(aStrokeOptions, mTransform));
-  for (size_t i = 0; i < mTiles.size(); i++) {
-    if (!mTiles[i].mClippedOut &&
-        deviceRect.Intersects(Rect(mTiles[i].mTileOrigin.x,
-                                   mTiles[i].mTileOrigin.y,
-                                   mTiles[i].mDrawTarget->GetSize().width,
-                                   mTiles[i].mDrawTarget->GetSize().height))) {
-      mTiles[i].mDrawTarget->StrokeLine(aStart, aEnd, aPattern, aStrokeOptions, aDrawOptions);
+  for (auto & mTile : mTiles) {
+    if (!mTile.mClippedOut &&
+        deviceRect.Intersects(Rect(mTile.mTileOrigin.x,
+                                   mTile.mTileOrigin.y,
+                                   mTile.mDrawTarget->GetSize().width,
+                                   mTile.mDrawTarget->GetSize().height))) {
+      mTile.mDrawTarget->StrokeLine(aStart, aEnd, aPattern, aStrokeOptions, aDrawOptions);
     }
   }
 }
 
 void
 DrawTargetTiled::Fill(const Path* aPath, const Pattern& aPattern, const DrawOptions& aDrawOptions)
 {
   Rect deviceRect = aPath->GetBounds(mTransform);
-  for (size_t i = 0; i < mTiles.size(); i++) {
-    if (!mTiles[i].mClippedOut &&
-        deviceRect.Intersects(Rect(mTiles[i].mTileOrigin.x,
-                                   mTiles[i].mTileOrigin.y,
-                                   mTiles[i].mDrawTarget->GetSize().width,
-                                   mTiles[i].mDrawTarget->GetSize().height))) {
-      mTiles[i].mDrawTarget->Fill(aPath, aPattern, aDrawOptions);
+  for (auto & mTile : mTiles) {
+    if (!mTile.mClippedOut &&
+        deviceRect.Intersects(Rect(mTile.mTileOrigin.x,
+                                   mTile.mTileOrigin.y,
+                                   mTile.mDrawTarget->GetSize().width,
+                                   mTile.mDrawTarget->GetSize().height))) {
+      mTile.mDrawTarget->Fill(aPath, aPattern, aDrawOptions);
     }
   }
 }
 
 void
 DrawTargetTiled::PushLayer(bool aOpaque, Float aOpacity, SourceSurface* aMask,
                            const Matrix& aMaskTransform, const IntRect& aBounds,
                            bool aCopyBackground)
 {
   // XXX - not sure this is what we want or whether we want to continue drawing to a larger
   // intermediate surface, that would require tweaking the code in here a little though.
-  for (size_t i = 0; i < mTiles.size(); i++) {
-    if (!mTiles[i].mClippedOut) {
+  for (auto & mTile : mTiles) {
+    if (!mTile.mClippedOut) {
       IntRect bounds = aBounds;
-      bounds.MoveBy(-mTiles[i].mTileOrigin);
-      mTiles[i].mDrawTarget->PushLayer(aOpaque, aOpacity, aMask, aMaskTransform, bounds, aCopyBackground);
+      bounds.MoveBy(-mTile.mTileOrigin);
+      mTile.mDrawTarget->PushLayer(aOpaque, aOpacity, aMask, aMaskTransform, bounds, aCopyBackground);
     }
   }
 
   PushedLayer layer(GetPermitSubpixelAA());
   mPushedLayers.push_back(layer);
   SetPermitSubpixelAA(aOpaque);
 }
 
 void
 DrawTargetTiled::PopLayer()
 {
   // XXX - not sure this is what we want or whether we want to continue drawing to a larger
   // intermediate surface, that would require tweaking the code in here a little though.
-  for (size_t i = 0; i < mTiles.size(); i++) {
-    if (!mTiles[i].mClippedOut) {
-      mTiles[i].mDrawTarget->PopLayer();
+  for (auto & mTile : mTiles) {
+    if (!mTile.mClippedOut) {
+      mTile.mDrawTarget->PopLayer();
     }
   }
 
   MOZ_ASSERT(mPushedLayers.size());
   const PushedLayer& layer = mPushedLayers.back();
   SetPermitSubpixelAA(layer.mOldPermitSubpixelAA);
 }
 
--- a/gfx/2d/DrawTargetTiled.h
+++ b/gfx/2d/DrawTargetTiled.h
@@ -173,19 +173,19 @@ private:
 };
 
 class SnapshotTiled : public SourceSurface
 {
 public:
   SnapshotTiled(const std::vector<TileInternal>& aTiles, const IntRect& aRect)
     : mRect(aRect)
   {
-    for (size_t i = 0; i < aTiles.size(); i++) {
-      mSnapshots.push_back(aTiles[i].mDrawTarget->Snapshot());
-      mOrigins.push_back(aTiles[i].mTileOrigin);
+    for (const auto & aTile : aTiles) {
+      mSnapshots.push_back(aTile.mDrawTarget->Snapshot());
+      mOrigins.push_back(aTile.mTileOrigin);
     }
   }
 
   virtual SurfaceType GetType() const { return SurfaceType::TILED; }
   virtual IntSize GetSize() const {
     MOZ_ASSERT(mRect.width > 0 && mRect.height > 0);
     return IntSize(mRect.XMost(), mRect.YMost());
   }
--- a/gfx/2d/FilterNodeSoftware.cpp
+++ b/gfx/2d/FilterNodeSoftware.cpp
@@ -849,31 +849,29 @@ FilterNodeSoftware::FilterInvalidated(Fi
   Invalidate();
 }
 
 void
 FilterNodeSoftware::Invalidate()
 {
   mCachedOutput = nullptr;
   mCachedRect = IntRect();
-  for (std::vector<FilterInvalidationListener*>::iterator it = mInvalidationListeners.begin();
-       it != mInvalidationListeners.end(); it++) {
-    (*it)->FilterInvalidated(this);
+  for (auto & mInvalidationListener : mInvalidationListeners) {
+    mInvalidationListener->FilterInvalidated(this);
   }
 }
 
 FilterNodeSoftware::~FilterNodeSoftware()
 {
   MOZ_ASSERT(!mInvalidationListeners.size(),
              "All invalidation listeners should have unsubscribed themselves by now!");
 
-  for (std::vector<RefPtr<FilterNodeSoftware> >::iterator it = mInputFilters.begin();
-       it != mInputFilters.end(); it++) {
-    if (*it) {
-      (*it)->RemoveInvalidationListener(this);
+  for (auto & mInputFilter : mInputFilters) {
+    if (mInputFilter) {
+      mInputFilter->RemoveInvalidationListener(this);
     }
   }
 }
 
 void
 FilterNodeSoftware::SetInput(uint32_t aIndex, FilterNode *aFilter)
 {
   if (aFilter && aFilter->GetBackendType() != FILTER_BACKEND_SOFTWARE) {
--- a/gfx/2d/Path.cpp
+++ b/gfx/2d/Path.cpp
@@ -146,22 +146,22 @@ FlattenedPath::Arc(const Point &aOrigin,
 }
 
 Float
 FlattenedPath::ComputeLength()
 {
   if (!mCalculatedLength) {
     Point currentPoint;
 
-    for (uint32_t i = 0; i < mPathOps.size(); i++) {
-      if (mPathOps[i].mType == FlatPathOp::OP_MOVETO) {
-        currentPoint = mPathOps[i].mPoint;
+    for (auto & mPathOp : mPathOps) {
+      if (mPathOp.mType == FlatPathOp::OP_MOVETO) {
+        currentPoint = mPathOp.mPoint;
       } else {
-        mCachedLength += Distance(currentPoint, mPathOps[i].mPoint);
-        currentPoint = mPathOps[i].mPoint;
+        mCachedLength += Distance(currentPoint, mPathOp.mPoint);
+        currentPoint = mPathOp.mPoint;
       }
     }
 
    mCalculatedLength = true;
   }
 
   return mCachedLength;
 }
@@ -169,39 +169,39 @@ FlattenedPath::ComputeLength()
 Point
 FlattenedPath::ComputePointAtLength(Float aLength, Point *aTangent)
 {
   // We track the last point that -wasn't- in the same place as the current
   // point so if we pass the edge of the path with a bunch of zero length
   // paths we still get the correct tangent vector.
   Point lastPointSinceMove;
   Point currentPoint;
-  for (uint32_t i = 0; i < mPathOps.size(); i++) {
-    if (mPathOps[i].mType == FlatPathOp::OP_MOVETO) {
-      if (Distance(currentPoint, mPathOps[i].mPoint)) {
+  for (auto & mPathOp : mPathOps) {
+    if (mPathOp.mType == FlatPathOp::OP_MOVETO) {
+      if (Distance(currentPoint, mPathOp.mPoint)) {
         lastPointSinceMove = currentPoint;
       }
-      currentPoint = mPathOps[i].mPoint;
+      currentPoint = mPathOp.mPoint;
     } else {
-      Float segmentLength = Distance(currentPoint, mPathOps[i].mPoint);
+      Float segmentLength = Distance(currentPoint, mPathOp.mPoint);
 
       if (segmentLength) {
         lastPointSinceMove = currentPoint;
         if (segmentLength > aLength) {
-          Point currentVector = mPathOps[i].mPoint - currentPoint;
+          Point currentVector = mPathOp.mPoint - currentPoint;
           Point tangent = currentVector / segmentLength;
           if (aTangent) {
             *aTangent = tangent;
           }
           return currentPoint + tangent * aLength;
         }
       }
 
       aLength -= segmentLength;
-      currentPoint = mPathOps[i].mPoint;
+      currentPoint = mPathOp.mPoint;
     }
   }
 
   Point currentVector = currentPoint - lastPointSinceMove;
   if (aTangent) {
     if (hypotf(currentVector.x, currentVector.y)) {
       *aTangent = currentVector / hypotf(currentVector.x, currentVector.y);
     } else {
--- a/gfx/2d/PathCairo.cpp
+++ b/gfx/2d/PathCairo.cpp
@@ -316,16 +316,16 @@ PathCairo::AppendPathToBuilder(PathBuild
         Point newPoint = aTransform->TransformPoint(Point(mPathData[i].point.x, mPathData[i].point.y));
         data.point.x = newPoint.x;
         data.point.y = newPoint.y;
         aBuilder->mPathData.push_back(data);
         i++;
       }
     }
   } else {
-    for (size_t i = 0; i < mPathData.size(); i++) {
-      aBuilder->mPathData.push_back(mPathData[i]);
+    for (const auto & pathData : mPathData) {
+      aBuilder->mPathData.push_back(pathData);
     }
   }
 }
 
 } // namespace gfx
 } // namespace mozilla
--- a/gfx/2d/PathRecording.cpp
+++ b/gfx/2d/PathRecording.cpp
@@ -74,19 +74,19 @@ already_AddRefed<Path>
 PathBuilderRecording::Finish()
 {
   RefPtr<Path> path = mPathBuilder->Finish();
   return MakeAndAddRef<PathRecording>(path, mPathOps, mFillRule);
 }
 
 PathRecording::~PathRecording()
 {
-  for (size_t i = 0; i < mStoredRecorders.size(); i++) {
-    mStoredRecorders[i]->RemoveStoredObject(this);
-    mStoredRecorders[i]->RecordEvent(RecordedPathDestruction(this));
+  for (auto & mStoredRecorder : mStoredRecorders) {
+    mStoredRecorder->RemoveStoredObject(this);
+    mStoredRecorder->RecordEvent(RecordedPathDestruction(this));
   }
 }
 
 already_AddRefed<PathBuilder>
 PathRecording::CopyToBuilder(FillRule aFillRule) const
 {
   RefPtr<PathBuilder> pathBuilder = mPath->CopyToBuilder(aFillRule);
   RefPtr<PathBuilderRecording> recording = new PathBuilderRecording(pathBuilder, aFillRule);
@@ -94,28 +94,27 @@ PathRecording::CopyToBuilder(FillRule aF
   return recording.forget();
 }
 
 already_AddRefed<PathBuilder>
 PathRecording::TransformedCopyToBuilder(const Matrix &aTransform, FillRule aFillRule) const
 {
   RefPtr<PathBuilder> pathBuilder = mPath->TransformedCopyToBuilder(aTransform, aFillRule);
   RefPtr<PathBuilderRecording> recording = new PathBuilderRecording(pathBuilder, aFillRule);
-  typedef std::vector<PathOp> pathOpVec;
-  for (pathOpVec::const_iterator iter = mPathOps.begin(); iter != mPathOps.end(); iter++) {
+  for (const auto & mPathOp : mPathOps) {
     PathOp newPathOp;
-    newPathOp.mType = iter->mType;
+    newPathOp.mType = mPathOp.mType;
     if (sPointCount[newPathOp.mType] >= 1) {
-      newPathOp.mP1 = aTransform.TransformPoint(iter->mP1);
+      newPathOp.mP1 = aTransform.TransformPoint(mPathOp.mP1);
     }
     if (sPointCount[newPathOp.mType] >= 2) {
-      newPathOp.mP2 = aTransform.TransformPoint(iter->mP2);
+      newPathOp.mP2 = aTransform.TransformPoint(mPathOp.mP2);
     }
     if (sPointCount[newPathOp.mType] >= 3) {
-      newPathOp.mP3 = aTransform.TransformPoint(iter->mP3);
+      newPathOp.mP3 = aTransform.TransformPoint(mPathOp.mP3);
     }
     recording->mPathOps.push_back(newPathOp);
   }
   return recording.forget();
 }
 
 } // namespace gfx
 } // namespace mozilla
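
The PathRecording, FilterNodeSoftware and RecordedEventImpl hunks replace explicit std::vector iterator loops rather than index loops; the range-based form removes both the iterator type spelling and the *iter / iter-> dereferences. A small sketch of that conversion with an invented element type:

#include <cstdio>
#include <vector>

struct PathOpSketch {
  int mType;
};

int main() {
  std::vector<PathOpSketch> ops = { { 1 }, { 2 }, { 3 } };

  // Old: std::vector<PathOpSketch>::const_iterator iter = ops.begin(); ...
  // New: the element reference replaces *iter / iter->.
  for (const auto& op : ops) {
    std::printf("op type %d\n", op.mType);
  }
  return 0;
}
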
--- a/gfx/2d/RecordedEventImpl.h
+++ b/gfx/2d/RecordedEventImpl.h
@@ -2277,18 +2277,17 @@ RecordedPathCreation::~RecordedPathCreat
 }
 
 inline bool
 RecordedPathCreation::PlayEvent(Translator *aTranslator) const
 {
   RefPtr<PathBuilder> builder = 
     aTranslator->GetReferenceDrawTarget()->CreatePathBuilder(mFillRule);
 
-  for (size_t i = 0; i < mPathOps.size(); i++) {
-    const PathOp &op = mPathOps[i];
+  for (const auto & op : mPathOps) {
     switch (op.mType) {
     case PathOp::OP_MOVETO:
       builder->MoveTo(op.mP1);
       break;
     case PathOp::OP_LINETO:
       builder->LineTo(op.mP1);
       break;
     case PathOp::OP_BEZIERTO:
@@ -2310,27 +2309,26 @@ RecordedPathCreation::PlayEvent(Translat
 
 template<class S>
 void
 RecordedPathCreation::Record(S &aStream) const
 {
   WriteElement(aStream, mRefPtr);
   WriteElement(aStream, uint64_t(mPathOps.size()));
   WriteElement(aStream, mFillRule);
-  typedef std::vector<PathOp> pathOpVec;
-  for (pathOpVec::const_iterator iter = mPathOps.begin(); iter != mPathOps.end(); iter++) {
-    WriteElement(aStream, iter->mType);
-    if (sPointCount[iter->mType] >= 1) {
-      WriteElement(aStream, iter->mP1);
+  for (const auto & mPathOp : mPathOps) {
+    WriteElement(aStream, mPathOp.mType);
+    if (sPointCount[mPathOp.mType] >= 1) {
+      WriteElement(aStream, mPathOp.mP1);
     }
-    if (sPointCount[iter->mType] >= 2) {
-      WriteElement(aStream, iter->mP2);
+    if (sPointCount[mPathOp.mType] >= 2) {
+      WriteElement(aStream, mPathOp.mP2);
     }
-    if (sPointCount[iter->mType] >= 3) {
-      WriteElement(aStream, iter->mP3);
+    if (sPointCount[mPathOp.mType] >= 3) {
+      WriteElement(aStream, mPathOp.mP3);
     }
   }
 
 }
 
 template<class S>
 RecordedPathCreation::RecordedPathCreation(S &aStream)
   : RecordedEventDerived(PATHCREATION)
--- a/gfx/2d/SFNTData.cpp
+++ b/gfx/2d/SFNTData.cpp
@@ -169,18 +169,18 @@ SFNTData::GetUniqueKey(const uint8_t *aF
     hash = AddToHash(hash, HashBytes(aVarData, aVarDataSize));
   }
 
   return hash << 32 | aDataLength;
 }
 
 SFNTData::~SFNTData()
 {
-  for (size_t i = 0; i < mFonts.length(); ++i) {
-    delete mFonts[i];
+  for (auto & mFont : mFonts) {
+    delete mFont;
   }
 }
 
 bool
 SFNTData::GetU16FullName(uint32_t aIndex, mozilla::u16string& aU16FullName)
 {
   if (aIndex >= mFonts.length()) {
     gfxWarning() << "aIndex to font data too high.";
@@ -189,19 +189,19 @@ SFNTData::GetU16FullName(uint32_t aIndex
 
   return mFonts[aIndex]->GetU16FullName(aU16FullName);
 }
 
 bool
 SFNTData::GetU16FullNames(Vector<mozilla::u16string>& aU16FullNames)
 {
   bool fontFound = false;
-  for (size_t i = 0; i < mFonts.length(); ++i) {
+  for (auto & mFont : mFonts) {
     mozilla::u16string name;
-    if (mFonts[i]->GetU16FullName(name)) {
+    if (mFont->GetU16FullName(name)) {
       fontFound = true;
     }
     if (!aU16FullNames.append(Move(name))) {
       return false;
     }
   }
 
   return fontFound;
--- a/gfx/2d/SFNTNameTable.cpp
+++ b/gfx/2d/SFNTNameTable.cpp
@@ -263,20 +263,20 @@ SFNTNameTable::GetU16FullName(mozilla::u
 }
 
 bool
 SFNTNameTable::ReadU16Name(const NameRecordMatchers& aMatchers,
                            mozilla::u16string& aU16Name)
 {
   MOZ_ASSERT(!aMatchers.empty());
 
-  for (size_t i = 0; i < aMatchers.length(); ++i) {
+  for (const auto & aMatcher : aMatchers) {
     const NameRecord* record = mFirstRecord;
     while (record != mEndOfRecords) {
-      switch (aMatchers[i](record)) {
+      switch (aMatcher(record)) {
         case eNameDecoderUTF16:
           return ReadU16NameFromU16Record(record, aU16Name);
 #if defined(XP_MACOSX)
         case eNameDecoderMacRoman:
           return ReadU16NameFromMacRomanRecord(record, aU16Name);
 #endif
         case eNameDecoderNone:
           break;
--- a/gfx/2d/SVGTurbulenceRenderer-inl.h
+++ b/gfx/2d/SVGTurbulenceRenderer-inl.h
@@ -118,26 +118,26 @@ Swap(T& a, T& b) {
 
 template<TurbulenceType Type, bool Stitch, typename f32x4_t, typename i32x4_t, typename u8x16_t>
 void
 SVGTurbulenceRenderer<Type,Stitch,f32x4_t,i32x4_t,u8x16_t>::InitFromSeed(int32_t aSeed)
 {
   RandomNumberSource rand(aSeed);
 
   float gradient[4][sBSize][2];
-  for (int32_t k = 0; k < 4; k++) {
+  for (auto & k : gradient) {
     for (int32_t i = 0; i < sBSize; i++) {
       float a, b;
       do {
         a = float((rand.Next() % (sBSize + sBSize)) - sBSize) / sBSize;
         b = float((rand.Next() % (sBSize + sBSize)) - sBSize) / sBSize;
       } while (a == 0 && b == 0);
       float s = sqrt(a * a + b * b);
-      gradient[k][i][0] = a / s;
-      gradient[k][i][1] = b / s;
+      k[i][0] = a / s;
+      k[i][1] = b / s;
     }
   }
 
   for (int32_t i = 0; i < sBSize; i++) {
     mLatticeSelector[i] = i;
   }
   for (int32_t i1 = sBSize - 1; i1 > 0; i1--) {
     int32_t i2 = rand.Next() % sBSize;
--- a/gfx/2d/unittest/TestBase.cpp
+++ b/gfx/2d/unittest/TestBase.cpp
@@ -10,29 +10,29 @@
 using namespace std;
 
 int
 TestBase::RunTests(int *aFailures)
 {
   int testsRun = 0;
   *aFailures = 0;
 
-  for(unsigned int i = 0; i < mTests.size(); i++) {
+  for(auto & mTest : mTests) {
     stringstream stream;
-    stream << "Test (" << mTests[i].name << "): ";
+    stream << "Test (" << mTest.name << "): ";
     LogMessage(stream.str());
     stream.str("");
 
     mTestFailed = false;
 
     // Don't try this at home! We know these are actually pointers to members
     // of child classes, so we reinterpret cast those child class pointers to
     // TestBase and then call the functions, because the compiler believes
     // these function calls are members of TestBase.
-    ((*reinterpret_cast<TestBase*>((mTests[i].implPointer))).*(mTests[i].funcCall))();
+    ((*reinterpret_cast<TestBase*>((mTest.implPointer))).*(mTest.funcCall))();
 
     if (!mTestFailed) {
       LogMessage("PASSED\n");
     } else {
       LogMessage("FAILED\n");
       (*aFailures)++;
     }
     testsRun++;
--- a/gfx/gl/GLContext.h
+++ b/gfx/gl/GLContext.h
@@ -3707,18 +3707,17 @@ MarkBitfieldByString(const nsACString& s
 }
 
 template<size_t N>
 void
 MarkBitfieldByStrings(const std::vector<nsCString>& strList,
                       bool dumpStrings, const char* const (&markStrList)[N],
                       std::bitset<N>* const out_markList)
 {
-    for (auto itr = strList.begin(); itr != strList.end(); ++itr) {
-        const nsACString& str = *itr;
+    for (const auto & str : strList) {
         const bool wasMarked = MarkBitfieldByString(str, markStrList,
                                                     out_markList);
         if (dumpStrings)
             printf_stderr("  %s%s\n", str.BeginReading(), wasMarked ? "(*)" : "");
     }
 }
 
 /**
--- a/gfx/gl/GLLibraryLoader.cpp
+++ b/gfx/gl/GLLibraryLoader.cpp
@@ -81,24 +81,24 @@ GLLibraryLoader::LoadSymbols(PRLibrary* 
 {
     char sbuf[MAX_SYMBOL_LENGTH * 2];
     int failCount = 0;
 
     const SymLoadStruct* ss = firstStruct;
     while (ss->symPointer) {
         *ss->symPointer = 0;
 
-        for (int i = 0; i < MAX_SYMBOL_NAMES; i++) {
-            if (ss->symNames[i] == nullptr)
+        for (auto symName : ss->symNames) {
+            if (symName == nullptr)
                 break;
 
-            const char* s = ss->symNames[i];
+            const char* s = symName;
             if (prefix && *prefix != 0) {
                 strcpy(sbuf, prefix);
-                strcat(sbuf, ss->symNames[i]);
+                strcat(sbuf, symName);
                 s = sbuf;
             }
 
             PRFuncPtr p = LookupSymbol(lib, s, lookupFunction);
             if (p) {
                 *ss->symPointer = p;
                 break;
             }
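
The symbol-name loop keeps its nullptr check even after conversion, because the fixed-size symNames array is only partially filled and nullptr acts as a sentinel. A hedged sketch of that shape with invented sizes and names:

#include <cstdio>

struct SymLoadSketch {
  const char* symNames[4]; // partially filled, nullptr-terminated
};

int main() {
  SymLoadSketch entry = { { "glFoo", "glFooEXT", nullptr, nullptr } };

  for (const char* name : entry.symNames) {
    if (name == nullptr) {
      break; // sentinel: remaining slots are unused
    }
    std::printf("trying %s\n", name);
  }
  return 0;
}
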
--- a/gfx/layers/apz/src/CheckerboardEvent.cpp
+++ b/gfx/layers/apz/src/CheckerboardEvent.cpp
@@ -154,18 +154,18 @@ CheckerboardEvent::StartEvent()
   mCheckerboardingActive = true;
   mStartTime = TimeStamp::Now();
 
   if (!mRecordTrace) {
     return;
   }
   MonitorAutoLock lock(mRendertraceLock);
   std::vector<PropertyValue> history;
-  for (size_t i = 0; i < sRendertracePropertyCount; i++) {
-    mBufferedProperties[i].Flush(history, lock);
+  for (auto & bufferedProperty : mBufferedProperties) {
+    bufferedProperty.Flush(history, lock);
   }
   std::sort(history.begin(), history.end());
   for (const PropertyValue& p : history) {
     LogInfo(p.mProperty, p.mTimeStamp, p.mRect, p.mExtraInfo, lock);
   }
   mRendertraceInfo << " -- checkerboarding starts below --" << std::endl;
 }
 
--- a/gfx/layers/client/TiledContentClient.cpp
+++ b/gfx/layers/client/TiledContentClient.cpp
@@ -965,18 +965,18 @@ void ClientMultiTiledLayerBuffer::Update
       TileClient& tile = mRetainedTiles[i];
       if (!ValidateTile(tile, GetTileOffset(tilePosition), tileDrawRegion)) {
         gfxCriticalError() << "ValidateTile failed";
       }
     }
 
     if (mMoz2DTiles.size() > 0) {
       gfx::TileSet tileset;
-      for (size_t i = 0; i < mMoz2DTiles.size(); ++i) {
-        mMoz2DTiles[i].mTileOrigin -= mTilingOrigin;
+      for (auto & mMoz2DTile : mMoz2DTiles) {
+        mMoz2DTile.mTileOrigin -= mTilingOrigin;
       }
       tileset.mTiles = &mMoz2DTiles[0];
       tileset.mTileCount = mMoz2DTiles.size();
       RefPtr<DrawTarget> drawTarget = gfx::Factory::CreateTiledDrawTarget(tileset);
       if (!drawTarget || !drawTarget->IsValid()) {
         gfxDevCrash(LogReason::InvalidContext) << "Invalid tiled draw target";
         return;
       }
--- a/gfx/layers/composite/FPSCounter.cpp
+++ b/gfx/layers/composite/FPSCounter.cpp
@@ -235,41 +235,39 @@ FPSCounter::WriteFrameTimeStamps(PRFileD
 }
 
 double
 FPSCounter::GetMean(std::map<int, int> aHistogram)
 {
   double average = 0.0;
   double samples = 0.0;
 
-  for (std::map<int, int>::iterator iter = aHistogram.begin();
-    iter != aHistogram.end(); ++iter)
+  for (auto & entry : aHistogram)
   {
-    int fps = iter->first;
-    int count = iter->second;
+    int fps = entry.first;
+    int count = entry.second;
 
     average += fps * count;
     samples += count;
   }
 
   return average / samples;
 }
 
 double
 FPSCounter::GetStdDev(std::map<int, int> aHistogram)
 {
   double sumOfDifferences = 0;
   double average = GetMean(aHistogram);
   double samples = 0.0;
 
-  for (std::map<int, int>::iterator iter = aHistogram.begin();
-    iter != aHistogram.end(); ++iter)
+  for (auto & entry : aHistogram)
   {
-    int fps = iter->first;
-    int count = iter->second;
+    int fps = entry.first;
+    int count = entry.second;
 
     double diff = ((double) fps) - average;
     diff *= diff;
 
     for (int i = 0; i < count; i++) {
       sumOfDifferences += diff;
     }
     samples += count;
@@ -298,21 +296,20 @@ FPSCounter::PrintFPS()
 
 void
 FPSCounter::PrintHistogram(std::map<int, int>& aHistogram)
 {
   int length = 0;
   const int kBufferLength = 512;
   char buffer[kBufferLength];
 
-  for (std::map<int, int>::iterator iter = aHistogram.begin();
-    iter != aHistogram.end(); iter++)
+  for (auto & entry : aHistogram)
   {
-    int fps = iter->first;
-    int count = iter->second;
+    int fps = entry.first;
+    int count = entry.second;
 
     length += snprintf(buffer + length, kBufferLength - length,
                        "FPS: %d = %d. ", fps, count);
     NS_ASSERTION(length >= kBufferLength, "Buffer overrun while printing FPS histogram.");
   }
 
   printf_stderr("%s\n", buffer);
   printf_stderr("Mean: %f , std dev %f\n", GetMean(aHistogram), GetStdDev(aHistogram));
--- a/gfx/layers/composite/FrameUniformityData.cpp
+++ b/gfx/layers/composite/FrameUniformityData.cpp
@@ -76,18 +76,18 @@ LayerTransformRecorder::RecordTransform(
 {
   LayerTransforms* layerTransforms = GetLayerTransforms((uintptr_t) aLayer);
   layerTransforms->mTransforms.AppendElement(aTransform);
 }
 
 void
 LayerTransformRecorder::EndTest(FrameUniformityData* aOutData)
 {
-  for (auto iter = mFrameTransforms.begin(); iter != mFrameTransforms.end(); ++iter) {
-    uintptr_t layer = iter->first;
+  for (auto & entry : mFrameTransforms) {
+    uintptr_t layer = entry.first;
     float uniformity = CalculateFrameUniformity(layer);
 
     std::pair<uintptr_t,float> result(layer, uniformity);
     aOutData->mUniformities.insert(result);
   }
 
   Reset();
 }
@@ -102,18 +102,18 @@ LayerTransformRecorder::GetLayerTransfor
   }
 
   return mFrameTransforms.find(aLayer)->second;
 }
 
 void
 LayerTransformRecorder::Reset()
 {
-  for (auto iter = mFrameTransforms.begin(); iter != mFrameTransforms.end(); ++iter) {
-    LayerTransforms* layerTransforms = iter->second;
+  for (auto & entry : mFrameTransforms) {
+    LayerTransforms* layerTransforms = entry.second;
     delete layerTransforms;
   }
 
   mFrameTransforms.clear();
 }
 
 float
 LayerTransformRecorder::CalculateFrameUniformity(uintptr_t aLayer)
@@ -128,19 +128,19 @@ LayerTransformRecorder::CalculateFrameUn
 }
 
 bool
 FrameUniformityData::ToJS(JS::MutableHandleValue aOutValue, JSContext* aContext)
 {
   dom::FrameUniformityResults results;
   dom::Sequence<dom::FrameUniformity>& layers = results.mLayerUniformities.Construct();
 
-  for (auto iter = mUniformities.begin(); iter != mUniformities.end(); ++iter) {
-    uintptr_t layerAddr = iter->first;
-    float uniformity = iter->second;
+  for (auto & item : mUniformities) {
+    uintptr_t layerAddr = item.first;
+    float uniformity = item.second;
 
     // FIXME: Make this infallible after bug 968520 is done.
     MOZ_ALWAYS_TRUE(layers.AppendElement(fallible));
     dom::FrameUniformity& entry = layers.LastElement();
 
     entry.mLayerAddress.Construct() = layerAddr;
     entry.mFrameUniformity.Construct() = uniformity;
   }
--- a/gfx/layers/composite/TextRenderer.cpp
+++ b/gfx/layers/composite/TextRenderer.cpp
@@ -105,27 +105,27 @@ TextRenderer::RenderText(TextureSourcePr
 
   FontCache* cache = mFonts[aFontType].get();
   const FontBitmapInfo* info = cache->mInfo;
 
   uint32_t numLines = 1;
   uint32_t maxWidth = 0;
   uint32_t lineWidth = 0;
   // Calculate the size of the surface needed to draw all the glyphs.
-  for (uint32_t i = 0; i < aText.length(); i++) {
+  for (char ch : aText) {
     // Insert a line break if we go past the TargetPixelWidth.
     // XXX - this has the downside of overrunning the intended width, causing
     // things at the edge of a window to be cut off.
-    if (aText[i] == '\n' || (aText[i] == ' ' && lineWidth > aTargetPixelWidth)) {
+    if (ch == '\n' || (ch == ' ' && lineWidth > aTargetPixelWidth)) {
       numLines++;
       lineWidth = 0;
       continue;
     }
 
-    lineWidth += info->GetGlyphWidth(aText[i]);
+    lineWidth += info->GetGlyphWidth(i);
     maxWidth = std::max(lineWidth, maxWidth);
   }
 
   // Create a surface to draw our glyphs to.
   RefPtr<DataSourceSurface> textSurf =
     Factory::CreateDataSourceSurface(IntSize(maxWidth, numLines * info->mCellHeight), sTextureFormat);
   if (NS_WARN_IF(!textSurf)) {
     return nullptr;
@@ -141,29 +141,29 @@ TextRenderer::RenderText(TextureSourcePr
          numLines * info->mCellHeight * map.mStride);
 
   uint32_t currentXPos = 0;
   uint32_t currentYPos = 0;
 
   const unsigned int kGlyphsPerLine = info->mTextureWidth / info->mCellWidth;
 
   // Copy our glyphs onto the surface.
-  for (uint32_t i = 0; i < aText.length(); i++) {
-    if (aText[i] == '\n' || (aText[i] == ' ' && currentXPos > aTargetPixelWidth)) {
+  for (char ch : aText) {
+    if (ch == '\n' || (ch == ' ' && currentXPos > aTargetPixelWidth)) {
       currentYPos += info->mCellHeight;
       currentXPos = 0;
       continue;
     }
 
-    uint32_t index = aText[i] - info->mFirstChar;
+    uint32_t index = ch - info->mFirstChar;
     uint32_t glyphXOffset = (index % kGlyphsPerLine) * info->mCellWidth * BytesPerPixel(sTextureFormat);
     uint32_t truncatedLine = index / kGlyphsPerLine;
     uint32_t glyphYOffset =  truncatedLine * info->mCellHeight * cache->mMap.mStride;
 
-    uint32_t glyphWidth = info->GetGlyphWidth(aText[i]);
+    uint32_t glyphWidth = info->GetGlyphWidth(ch);
 
     for (uint32_t y = 0; y < info->mCellHeight; y++) {
       memcpy(map.mData + (y + currentYPos) * map.mStride + currentXPos * BytesPerPixel(sTextureFormat),
              cache->mMap.mData + glyphYOffset + y * cache->mMap.mStride + glyphXOffset,
              glyphWidth * BytesPerPixel(sTextureFormat));
     }
 
     currentXPos += glyphWidth;
--- a/gfx/layers/ipc/CompositorBridgeParent.cpp
+++ b/gfx/layers/ipc/CompositorBridgeParent.cpp
@@ -230,20 +230,20 @@ static void EnsureLayerTreeMapReady()
   }
 }
 
 template <typename Lambda>
 inline void
 CompositorBridgeParent::ForEachIndirectLayerTree(const Lambda& aCallback)
 {
   sIndirectLayerTreesLock->AssertCurrentThreadOwns();
-  for (auto it = sIndirectLayerTrees.begin(); it != sIndirectLayerTrees.end(); it++) {
-    LayerTreeState* state = &it->second;
+  for (auto & entry : sIndirectLayerTrees) {
+    LayerTreeState* state = &entry.second;
     if (state->mParent == this) {
-      aCallback(state, it->first);
+      aCallback(state, entry.first);
     }
   }
 }
 
 /**
   * A global map referencing each compositor by ID.
   *
   * This map is used by the ImageBridge protocol to trigger
@@ -1744,18 +1744,18 @@ CompositorBridgeParent::GetTestingTimeSt
 {
   return mIsTesting ? Some(mTestTime) : Nothing();
 }
 
 void
 CompositorBridgeParent::SetWebRenderProfilerEnabled(bool aEnabled)
 {
   MonitorAutoLock lock(*sIndirectLayerTreesLock);
-  for (auto it = sIndirectLayerTrees.begin(); it != sIndirectLayerTrees.end(); it++) {
-    LayerTreeState* state = &it->second;
+  for (auto & entry : sIndirectLayerTrees) {
+    LayerTreeState* state = &entry.second;
     if (state->mWrBridge) {
       state->mWrBridge->SetWebRenderProfilerEnabled(aEnabled);
     }
   }
 }
 
 void
 EraseLayerState(uint64_t aId)
--- a/gfx/layers/ipc/CrossProcessCompositorBridgeParent.cpp
+++ b/gfx/layers/ipc/CrossProcessCompositorBridgeParent.cpp
@@ -252,19 +252,18 @@ CrossProcessCompositorBridgeParent::Deal
   return true;
 }
 
 mozilla::ipc::IPCResult
 CrossProcessCompositorBridgeParent::RecvNotifyChildCreated(const uint64_t& child,
                                                            CompositorOptions* aOptions)
 {
   MonitorAutoLock lock(*sIndirectLayerTreesLock);
-  for (LayerTreeMap::iterator it = sIndirectLayerTrees.begin();
-       it != sIndirectLayerTrees.end(); it++) {
-    CompositorBridgeParent::LayerTreeState* lts = &it->second;
+  for (auto & entry : sIndirectLayerTrees) {
+    CompositorBridgeParent::LayerTreeState* lts = &entry.second;
     if (lts->mParent && lts->mCrossProcessParent == this) {
       lts->mParent->NotifyChildCreated(child);
       *aOptions = lts->mParent->GetOptions();
       return IPC_OK();
     }
   }
   return IPC_FAIL_NO_REASON(this);
 }
--- a/gfx/layers/ipc/ISurfaceAllocator.cpp
+++ b/gfx/layers/ipc/ISurfaceAllocator.cpp
@@ -51,18 +51,18 @@ HostIPCAllocator::SendPendingAsyncMessag
   static const uint32_t kMaxMessageNumber = FileDescriptorSet::MAX_DESCRIPTORS_PER_MESSAGE;
 #else
   // default number that works everywhere else
   static const uint32_t kMaxMessageNumber = 250;
 #endif
 
   InfallibleTArray<AsyncParentMessageData> messages;
   messages.SetCapacity(mPendingAsyncMessage.size());
-  for (size_t i = 0; i < mPendingAsyncMessage.size(); i++) {
-    messages.AppendElement(mPendingAsyncMessage[i]);
+  for (auto & msg : mPendingAsyncMessage) {
+    messages.AppendElement(msg);
     // Limit maximum number of messages.
     if (messages.Length() >= kMaxMessageNumber) {
       SendAsyncMessage(messages);
       // Initialize Messages.
       messages.Clear();
     }
   }
 
@@ -107,20 +107,20 @@ FixedSizeSmallShmemSectionAllocator::All
 
   if (!IPCOpen()) {
     gfxCriticalError() << "Attempt to allocate a ShmemSection after shutdown.";
     return false;
   }
 
   uint32_t allocationSize = (aSize + sizeof(ShmemSectionHeapAllocation));
 
-  for (size_t i = 0; i < mUsedShmems.size(); i++) {
-    ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>();
+  for (auto & usedShmem : mUsedShmems) {
+    ShmemSectionHeapHeader* header = usedShmem.get<ShmemSectionHeapHeader>();
     if ((header->mAllocatedBlocks + 1) * allocationSize + sizeof(ShmemSectionHeapHeader) < sShmemPageSize) {
-      aShmemSection->shmem() = mUsedShmems[i];
+      aShmemSection->shmem() = usedShmem;
-      MOZ_ASSERT(mUsedShmems[i].IsWritable());
+      MOZ_ASSERT(usedShmem.IsWritable());
       break;
     }
   }
 
   if (!aShmemSection->shmem().IsWritable()) {
     ipc::Shmem tmp;
     if (!mShmProvider->AllocUnsafeShmem(sShmemPageSize, OptimalShmemType(), &tmp)) {
--- a/gfx/layers/mlgpu/MaskOperation.cpp
+++ b/gfx/layers/mlgpu/MaskOperation.cpp
@@ -103,24 +103,24 @@ MaskCombineOperation::Init(const MaskTex
   // All masks for a single layer exist in the same coordinate space. Find the
   // area that covers all rects.
   Rect area = aTextures[0].mRect;
   for (size_t i = 1; i < aTextures.size(); i++) {
     area = area.Intersect(aTextures[i].mRect);
   }
 
   // Go through and decide which areas of the textures are relevant.
-  for (size_t i = 0; i < aTextures.size(); i++) {
-    Rect rect = aTextures[i].mRect.Intersect(area);
+  for (const auto & texture : aTextures) {
+    Rect rect = texture.mRect.Intersect(area);
     if (rect.IsEmpty()) {
       continue;
     }
 
-    rect -= aTextures[i].mRect.TopLeft();
-    mTextures.push_back(MaskTexture(rect, aTextures[i].mSource));
+    rect -= texture.mRect.TopLeft();
+    mTextures.push_back(MaskTexture(rect, texture.mSource));
   }
 
   IntRect size;
   Rect bounds = area;
   bounds.RoundOut();
   bounds.ToIntRect(&size);
 
   if (size.IsEmpty()) {
--- a/gfx/layers/mlgpu/RenderViewMLGPU.cpp
+++ b/gfx/layers/mlgpu/RenderViewMLGPU.cpp
@@ -294,18 +294,18 @@ RenderViewMLGPU::AddItemBackToFront(Laye
   // buffers in reverse, as well as execute batches in reverse, to ensure the
   // correct ordering.
   //
   // Note: We limit the number of batches we search through, since it's better
   // to add new draw calls than spend too much time finding compatible
   // batches further down.
   static const size_t kMaxSearch = 10;
   size_t iterations = 0;
-  for (auto iter = mBackToFront.begin(); iter != mBackToFront.end(); iter++) {
-    RenderPassMLGPU* pass = (*iter);
+  for (auto & iter : mBackToFront) {
+    RenderPassMLGPU* pass = iter;
     if (pass->IsCompatible(aItem) && pass->AcceptItem(aItem)) {
       AL_LOG("RenderView %p added layer %p to pass %p (%d)\n",
         this, aLayer->GetLayer(), pass, int(pass->GetType()));
       return;
     }
     if (pass->Intersects(aItem)) {
       break;
     }
@@ -397,33 +397,33 @@ RenderViewMLGPU::ExecuteRendering()
   if (mUseDepthBuffer) {
     if (mDepthBufferNeedsClear) {
       mDevice->ClearDepthBuffer(mTarget);
     }
     mDevice->SetDepthTestMode(MLGDepthTestMode::Write);
   }
 
   // Opaque items, rendered front-to-back.
-  for (auto iter = mFrontToBack.begin(); iter != mFrontToBack.end(); iter++) {
-    ExecutePass(*iter);
+  for (auto & iter : mFrontToBack) {
+    ExecutePass(iter);
   }
 
   if (mUseDepthBuffer) {
     // From now on we might be rendering transparent pixels, so we disable
     // writing to the z-buffer.
     mDevice->SetDepthTestMode(MLGDepthTestMode::ReadOnly);
   }
 
   // Clear any pixels that are not occluded, and therefore might require
   // blending.
   mDevice->DrawClearRegion(mPreClear);
 
   // Render back-to-front passes.
-  for (auto iter = mBackToFront.begin(); iter != mBackToFront.end(); iter++) {
-    ExecutePass(*iter);
+  for (auto & iter : mBackToFront) {
+    ExecutePass(iter);
   }
 
   // Make sure the post-clear area has no pixels.
   if (!mPostClearRegion.IsEmpty()) {
     mDevice->DrawClearRegion(mPostClear);
   }
 
   // We repaint the entire invalid region, even if it is partially occluded.
--- a/gfx/layers/opengl/CompositorOGL.cpp
+++ b/gfx/layers/opengl/CompositorOGL.cpp
@@ -182,20 +182,18 @@ CompositorOGL::CleanupResources()
     // Leak resources!
     mQuadVBO = 0;
     mTriangleVBO = 0;
     mGLContext = nullptr;
     mPrograms.clear();
     return;
   }
 
-  for (std::map<ShaderConfigOGL, ShaderProgramOGL *>::iterator iter = mPrograms.begin();
-       iter != mPrograms.end();
-       iter++) {
-    delete iter->second;
+  for (auto & entry : mPrograms) {
+    delete entry.second;
   }
   mPrograms.clear();
 
   ctx->fBindFramebuffer(LOCAL_GL_FRAMEBUFFER, 0);
 
   if (mQuadVBO) {
     ctx->fDeleteBuffers(1, &mQuadVBO);
     mQuadVBO = 0;
@@ -279,18 +277,17 @@ CompositorOGL::Initialize(nsCString* con
     }
 
     mFBOTextureTarget = LOCAL_GL_NONE;
 
     GLuint testFBO = 0;
     mGLContext->fGenFramebuffers(1, &testFBO);
     GLuint testTexture = 0;
 
-    for (uint32_t i = 0; i < ArrayLength(textureTargets); i++) {
-      GLenum target = textureTargets[i];
+    for (unsigned int target : textureTargets) {
       if (!target)
           continue;
 
       mGLContext->fGenTextures(1, &testTexture);
       mGLContext->fBindTexture(target, testTexture);
       mGLContext->fTexParameteri(target,
                                 LOCAL_GL_TEXTURE_MIN_FILTER,
                                 LOCAL_GL_NEAREST);
--- a/gfx/layers/opengl/OGLShaderProgram.cpp
+++ b/gfx/layers/opengl/OGLShaderProgram.cpp
@@ -818,19 +818,19 @@ ShaderProgramOGL::Initialize()
 
   if (!CreateProgram(vs.str().c_str(), fs.str().c_str())) {
     mProgramState = STATE_ERROR;
     return false;
   }
 
   mProgramState = STATE_OK;
 
-  for (uint32_t i = 0; i < KnownUniform::KnownUniformCount; ++i) {
-    mProfile.mUniforms[i].mLocation =
-      mGL->fGetUniformLocation(mProgram, mProfile.mUniforms[i].mNameString);
+  for (auto & uniform : mProfile.mUniforms) {
+    uniform.mLocation =
+      mGL->fGetUniformLocation(mProgram, uniform.mNameString);
   }
 
   return true;
 }
 
 GLint
 ShaderProgramOGL::CreateShader(GLenum aShaderType, const char *aShaderSource)
 {
--- a/gfx/layers/wr/WebRenderBridgeParent.cpp
+++ b/gfx/layers/wr/WebRenderBridgeParent.cpp
@@ -847,18 +847,18 @@ WebRenderBridgeParent::RecvClearCachedRe
 
   // Clear resources
   ++mWrEpoch; // Update webrender epoch
   mApi->ClearRootDisplayList(wr::NewEpoch(mWrEpoch), mPipelineId);
   // Schedule composition to clean up Pipeline
   mCompositorScheduler->ScheduleComposition();
   DeleteOldImages();
   // Remove animations.
-  for (std::unordered_set<uint64_t>::iterator iter = mActiveAnimations.begin(); iter != mActiveAnimations.end(); iter++) {
-    mAnimStorage->ClearById(*iter);
+  for (uint64_t animationId : mActiveAnimations) {
+    mAnimStorage->ClearById(animationId);
   }
   mActiveAnimations.clear();
   return IPC_OK();
 }
 
 void
 WebRenderBridgeParent::UpdateWebRender(CompositorVsyncScheduler* aScheduler,
                                        wr::WebRenderAPI* aApi,
@@ -1269,22 +1269,22 @@ WebRenderBridgeParent::ClearResources()
   }
 
   ++mWrEpoch; // Update webrender epoch
   mApi->ClearRootDisplayList(wr::NewEpoch(mWrEpoch), mPipelineId);
   // Schedule composition to clean up Pipeline
   mCompositorScheduler->ScheduleComposition();
   // XXX webrender does not hava a way to delete a group of resources/keys,
   // then delete keys one by one.
-  for (std::unordered_set<uint64_t>::iterator iter = mFontKeys.begin(); iter != mFontKeys.end(); iter++) {
-    mApi->DeleteFont(wr::AsFontKey(*iter));
+  for (uint64_t fontKeyId : mFontKeys) {
+    mApi->DeleteFont(wr::AsFontKey(fontKeyId));
   }
   mFontKeys.clear();
-  for (std::unordered_set<uint64_t>::iterator iter = mActiveImageKeys.begin(); iter != mActiveImageKeys.end(); iter++) {
-    mKeysToDelete.push_back(wr::AsImageKey(*iter));
+  for (uint64_t imageKeyId : mActiveImageKeys) {
+    mKeysToDelete.push_back(wr::AsImageKey(imageKeyId));
   }
   mActiveImageKeys.clear();
   DeleteOldImages();
   for (auto iter = mExternalImageIds.Iter(); !iter.Done(); iter.Next()) {
     iter.Data()->ClearWrBridge();
   }
   mExternalImageIds.Clear();
   for (auto iter = mAsyncCompositables.Iter(); !iter.Done(); iter.Next()) {
@@ -1293,18 +1293,18 @@ WebRenderBridgeParent::ClearResources()
     MOZ_ASSERT(host->GetAsyncRef());
     host->ClearWrBridge();
     mCompositableHolder->RemoveAsyncImagePipeline(mApi, pipelineId);
   }
   mAsyncCompositables.Clear();
 
   mCompositableHolder->RemovePipeline(mPipelineId, wr::NewEpoch(mWrEpoch));
 
-  for (std::unordered_set<uint64_t>::iterator iter = mActiveAnimations.begin(); iter != mActiveAnimations.end(); iter++) {
-    mAnimStorage->ClearById(*iter);
+  for (uint64_t animationId : mActiveAnimations) {
+    mAnimStorage->ClearById(animationId);
   }
   mActiveAnimations.clear();
 
   if (mWidget) {
     mCompositorScheduler->Destroy();
   }
   mAnimStorage = nullptr;
   mCompositorScheduler = nullptr;
--- a/gfx/src/FilterSupport.cpp
+++ b/gfx/src/FilterSupport.cpp
@@ -846,20 +846,20 @@ FilterNodeFromPrimitiveDescription(const
         AttributeMap functionAttributes =
           atts.GetAttributeMap(componentFunctionNames[i]);
         ConvertComponentTransferFunctionToFilter(functionAttributes, i, aDT,
           filters[0], filters[1], filters[2], filters[3]);
       }
 
       // Connect all used filters nodes.
       RefPtr<FilterNode> lastFilter = aSources[0];
-      for (int32_t i = 0; i < 4; i++) {
-        if (filters[i]) {
-          filters[i]->SetInput(0, lastFilter);
-          lastFilter = filters[i];
+      for (auto & filter : filters) {
+        if (filter) {
+          filter->SetInput(0, lastFilter);
+          lastFilter = filter;
         }
       }
 
       return lastFilter.forget();
     }
 
     case PrimitiveType::ConvolveMatrix:
     {
--- a/gfx/tests/gtest/TestBufferRotation.cpp
+++ b/gfx/tests/gtest/TestBufferRotation.cpp
@@ -47,18 +47,17 @@ static bool CheckBuffer(unsigned char* b
 TEST(Gfx, BufferUnrotateHorizontal) {
   const int NUM_OF_TESTS = 8;
   int bytesPerPixelList[2] = {2,4};
   int width[NUM_OF_TESTS] = {100, 100, 99, 99, 100, 100, 99, 99};
   int height[NUM_OF_TESTS] = {100, 99, 100, 99, 100, 99, 100, 99};
   int xBoundary[NUM_OF_TESTS] = {30, 30, 30, 30, 31, 31, 31, 31};
   int yBoundary[NUM_OF_TESTS] = {0, 0, 0, 0};
 
-  for (int bytesPerId = 0; bytesPerId < 2; bytesPerId++) {
-    int bytesPerPixel = bytesPerPixelList[bytesPerId];
+  for (int bytesPerPixel : bytesPerPixelList) {
     int stride = 256 * bytesPerPixel;
     for (int testId = 0; testId < NUM_OF_TESTS; testId++) {
       unsigned char* buffer = GenerateBuffer(bytesPerPixel,
           width[testId], height[testId], stride,
           xBoundary[testId], yBoundary[testId]);
       BufferUnrotate(buffer,
           width[testId] * bytesPerPixel, height[testId], stride,
           xBoundary[testId] * bytesPerPixel, yBoundary[testId]);
@@ -73,18 +72,17 @@ TEST(Gfx, BufferUnrotateHorizontal) {
 TEST(Gfx, BufferUnrotateVertical) {
   const int NUM_OF_TESTS = 8;
   int bytesPerPixelList[2] = {2,4};
   int width[NUM_OF_TESTS] = {100, 100, 99, 99, 100, 100, 99, 99};
   int height[NUM_OF_TESTS] = {100, 99, 100, 99, 100, 99, 100, 99};
   int xBoundary[NUM_OF_TESTS] = {0, 0, 0, 0};
   int yBoundary[NUM_OF_TESTS] = {30, 30, 30, 30, 31, 31, 31, 31};
 
-  for (int bytesPerId = 0; bytesPerId < 2; bytesPerId++) {
-    int bytesPerPixel = bytesPerPixelList[bytesPerId];
+  for (int bytesPerPixel : bytesPerPixelList) {
     int stride = 256 * bytesPerPixel;
     for (int testId = 0; testId < NUM_OF_TESTS; testId++) {
       unsigned char* buffer = GenerateBuffer(bytesPerPixel,
           width[testId], height[testId], stride,
           xBoundary[testId], yBoundary[testId]);
       BufferUnrotate(buffer, width[testId] * bytesPerPixel,
           height[testId], stride,
           xBoundary[testId] * bytesPerPixel, yBoundary[testId]);
@@ -100,18 +98,17 @@ TEST(Gfx, BufferUnrotateVertical) {
 TEST(Gfx, BufferUnrotateBoth) {
   const int NUM_OF_TESTS = 16;
   int bytesPerPixelList[2] = {2,4};
   int width[NUM_OF_TESTS] = {100, 100, 99, 99, 100, 100, 99, 99, 100, 100, 99, 99, 100, 100, 99, 99};
   int height[NUM_OF_TESTS] = {100, 99, 100, 99, 100, 99, 100, 99, 100, 99, 100, 99, 100, 99, 100, 99};
   int xBoundary[NUM_OF_TESTS] = {30, 30, 30, 30, 31, 31, 31, 31, 30, 30, 30, 30, 31, 31, 31, 31};
   int yBoundary[NUM_OF_TESTS] = {30, 30, 30, 30, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31};
 
-  for (int bytesPerId = 0; bytesPerId < 2; bytesPerId++) {
-    int bytesPerPixel = bytesPerPixelList[bytesPerId];
+  for (int bytesPerPixel : bytesPerPixelList) {
     int stride = 256 * bytesPerPixel;
     for (int testId = 0; testId < NUM_OF_TESTS; testId++) {
       unsigned char* buffer = GenerateBuffer(bytesPerPixel,
           width[testId], height[testId], stride,
           xBoundary[testId], yBoundary[testId]);
       BufferUnrotate(buffer,
           width[testId] * bytesPerPixel, height[testId], stride,
           xBoundary[testId] * bytesPerPixel, yBoundary[testId]);
@@ -126,18 +123,17 @@ TEST(Gfx, BufferUnrotateBoth) {
 TEST(Gfx, BufferUnrotateUneven) {
   const int NUM_OF_TESTS = 16;
   int bytesPerPixelList[2] = {2,4};
   int width[NUM_OF_TESTS] = {10, 100, 99, 39, 100, 40, 99, 39, 100, 50, 39, 99, 74, 60, 99, 39};
   int height[NUM_OF_TESTS] = {100, 39, 10, 99, 10, 99, 40, 99, 73, 39, 100, 39, 67, 99, 84, 99};
   int xBoundary[NUM_OF_TESTS] = {0, 0, 30, 30, 99, 31, 0, 31, 30, 30, 30, 30, 31, 31, 31, 38};
   int yBoundary[NUM_OF_TESTS] = {30, 30, 0, 30, 0, 30, 0, 30, 31, 31, 31, 31, 31, 31, 31, 98};
 
-  for (int bytesPerId = 0; bytesPerId < 2; bytesPerId++) {
-    int bytesPerPixel = bytesPerPixelList[bytesPerId];
+  for (int bytesPerPixel : bytesPerPixelList) {
     int stride = 256 * bytesPerPixel;
     for (int testId = 0; testId < NUM_OF_TESTS; testId++) {
       unsigned char* buffer = GenerateBuffer(bytesPerPixel,
           width[testId], height[testId], stride,
           xBoundary[testId], yBoundary[testId]);
       BufferUnrotate(buffer,
           width[testId]*bytesPerPixel, height[testId], stride,
           xBoundary[testId]*bytesPerPixel, yBoundary[testId]);
--- a/gfx/tests/gtest/TestColorNames.cpp
+++ b/gfx/tests/gtest/TestColorNames.cpp
@@ -79,20 +79,20 @@ void RunColorTests() {
     ASSERT_TRUE(hexrgb == rgb);
   }
 }
 
 static
 void RunJunkColorTests() {
   nscolor rgb;
   // Now make sure we don't find some garbage
-  for (uint32_t i = 0; i < ArrayLength(kJunkNames); i++) {
-    nsCString tag(kJunkNames[i]);
+  for (auto & junkName : kJunkNames) {
+    nsCString tag(junkName);
     ASSERT_FALSE(NS_ColorNameToRGB(NS_ConvertASCIItoUTF16(tag), &rgb)) <<
-      "Failed at junk color " << kJunkNames[i];
+      "Failed at junk color " << kJunkName;
   }
 }
 
 TEST(Gfx, ColorNames) {
   RunColorTests();
 }
 
 TEST(Gfx, JunkColorNames) {
--- a/gfx/tests/gtest/TestCompositor.cpp
+++ b/gfx/tests/gtest/TestCompositor.cpp
@@ -75,19 +75,17 @@ static already_AddRefed<Compositor> Crea
 
 /**
  * Get a list of layers managers for the platform to run the test on.
  */
 static std::vector<LayerManagerData> GetLayerManagers(std::vector<LayersBackend> aBackends)
 {
   std::vector<LayerManagerData> managers;
 
-  for (size_t i = 0; i < aBackends.size(); i++) {
-    auto backend = aBackends[i];
-
+  for (auto backend : aBackends) {
     RefPtr<MockWidget> widget = new MockWidget(gCompWidth, gCompHeight);
     CompositorOptions options;
     RefPtr<widget::CompositorWidget> proxy = new widget::InProcessCompositorWidget(options, widget);
     RefPtr<Compositor> compositor = CreateTestCompositor(backend, proxy);
 
     RefPtr<LayerManagerComposite> layerManager = new LayerManagerComposite(compositor);
 
     managers.push_back(LayerManagerData(compositor, widget, proxy, layerManager));
@@ -164,18 +162,18 @@ static bool CompositeAndCompare(RefPtr<L
 TEST(Gfx, CompositorConstruct)
 {
   auto layerManagers = GetLayerManagers(GetPlatformBackends());
 }
 
 TEST(Gfx, CompositorSimpleTree)
 {
   auto layerManagers = GetLayerManagers(GetPlatformBackends());
-  for (size_t i = 0; i < layerManagers.size(); i++) {
-    RefPtr<LayerManagerComposite> layerManager = layerManagers[i].mLayerManager;
+  for (auto & data : layerManagers) {
+    RefPtr<LayerManagerComposite> layerManager = data.mLayerManager;
     RefPtr<LayerManager> lmBase = layerManager.get();
     nsTArray<RefPtr<Layer>> layers;
     nsIntRegion layerVisibleRegion[] = {
       nsIntRegion(IntRect(0, 0, gCompWidth, gCompHeight)),
       nsIntRegion(IntRect(0, 0, gCompWidth, gCompHeight)),
       nsIntRegion(IntRect(0, 0, 100, 100)),
       nsIntRegion(IntRect(0, 50, 100, 100)),
     };
--- a/gfx/tests/gtest/TestGfxPrefs.cpp
+++ b/gfx/tests/gtest/TestGfxPrefs.cpp
@@ -87,19 +87,19 @@ TEST(GfxPrefs, StringUtility)
 {
   char testBuffer[64];
   double testVal[] = {13.4,
                       3324243.42,
                       0.332424342,
                       864.0,
                       86400 * 100000000.0 * 10000000000.0 * 10000000000.0 * 100.0,
                       86400.0 * 366.0 * 100.0 + 14243.44332};
-  for (size_t i=0; i<mozilla::ArrayLength(testVal); i++) {
-    ASSERT_TRUE(SimpleNoCLibDtoA(testVal[i], testBuffer, sizeof(testBuffer)));
-    ASSERT_TRUE(fabs(1.0 - atof(testBuffer)/testVal[i]) < 0.0001);
+  for (double val : testVal) {
+    ASSERT_TRUE(SimpleNoCLibDtoA(val, testBuffer, sizeof(testBuffer)));
+    ASSERT_TRUE(fabs(1.0 - atof(testBuffer)/val) < 0.0001);
   }
 
   // We do not like negative numbers (random limitation)
   ASSERT_FALSE(SimpleNoCLibDtoA(-864.0, testBuffer, sizeof(testBuffer)));
 
   // It won't fit into 32:
   ASSERT_FALSE(SimpleNoCLibDtoA(testVal[4], testBuffer, sizeof(testBuffer)/2));
 }
--- a/gfx/tests/gtest/TestTextures.cpp
+++ b/gfx/tests/gtest/TestTextures.cpp
@@ -216,18 +216,18 @@ void TestTextureClientYCbCr(TextureClien
 TEST(Layers, TextureSerialization) {
   // the test is run on all the following image formats
   gfxImageFormat formats[3] = {
     SurfaceFormat::A8R8G8B8_UINT32,
     SurfaceFormat::X8R8G8B8_UINT32,
     SurfaceFormat::A8,
   };
 
-  for (int f = 0; f < 3; ++f) {
-    RefPtr<gfxImageSurface> surface = new gfxImageSurface(IntSize(400,300), formats[f]);
+  for (auto & format : formats) {
+    RefPtr<gfxImageSurface> surface = new gfxImageSurface(IntSize(400,300), format);
     SetupSurface(surface.get());
     AssertSurfacesEqual(surface, surface);
 
     auto texData = BufferTextureData::Create(surface->GetSize(),
       gfx::ImageFormatToSurfaceFormat(surface->Format()),
       gfx::BackendType::CAIRO, LayersBackend::LAYERS_NONE,
       TextureFlags::DEALLOCATE_CLIENT, ALLOC_DEFAULT, nullptr
     );
--- a/gfx/thebes/gfxPlatformFontList.cpp
+++ b/gfx/thebes/gfxPlatformFontList.cpp
@@ -1380,19 +1380,19 @@ gfxPlatformFontList::GetSampleLangForGro
     if (!aLanguage) {
         return;
     }
 
     // set up lang string
     const MozLangGroupData *mozLangGroup = nullptr;
 
     // -- look it up in the list of moz lang groups
-    for (unsigned int i = 0; i < ArrayLength(MozLangGroups); ++i) {
-        if (aLanguage == MozLangGroups[i].mozLangGroup) {
-            mozLangGroup = &MozLangGroups[i];
+    for (const auto & langGroup : MozLangGroups) {
+        if (aLanguage == langGroup.mozLangGroup) {
+            mozLangGroup = &langGroup;
             break;
         }
     }
 
     // -- not a mozilla lang group? Just return the BCP47 string
     //    representation of the lang group
     if (!mozLangGroup) {
         // Not a special mozilla language group.
--- a/gfx/vr/ipc/VRMessageUtils.h
+++ b/gfx/vr/ipc/VRMessageUtils.h
@@ -44,18 +44,18 @@ struct ParamTraits<mozilla::gfx::VRDispl
     WriteParam(aMsg, aParam.mGroupMask);
     WriteParam(aMsg, aParam.mStageSize);
     WriteParam(aMsg, aParam.mSittingToStandingTransform);
     WriteParam(aMsg, aParam.mFrameId);
     for (int i = 0; i < mozilla::gfx::VRDisplayInfo::NumEyes; i++) {
       WriteParam(aMsg, aParam.mEyeFOV[i]);
       WriteParam(aMsg, aParam.mEyeTranslation[i]);
     }
-    for (int i = 0; i < mozilla::gfx::kVRMaxLatencyFrames; i++) {
-      WriteParam(aMsg, aParam.mLastSensorState[i]);
+    for (const auto & state : aParam.mLastSensorState) {
+      WriteParam(aMsg, state);
     }
   }
 
   static bool Read(const Message* aMsg, PickleIterator* aIter, paramType* aResult)
   {
     if (!ReadParam(aMsg, aIter, &(aResult->mType)) ||
         !ReadParam(aMsg, aIter, &(aResult->mDisplayID)) ||
         !ReadParam(aMsg, aIter, &(aResult->mDisplayName)) ||
@@ -71,18 +71,18 @@ struct ParamTraits<mozilla::gfx::VRDispl
       return false;
     }
     for (int i = 0; i < mozilla::gfx::VRDisplayInfo::NumEyes; i++) {
       if (!ReadParam(aMsg, aIter, &(aResult->mEyeFOV[i])) ||
           !ReadParam(aMsg, aIter, &(aResult->mEyeTranslation[i]))) {
         return false;
       }
     }
-    for (int i = 0; i < mozilla::gfx::kVRMaxLatencyFrames; i++) {
-      if (!ReadParam(aMsg, aIter, &(aResult->mLastSensorState[i]))) {
+    for (auto & state : aResult->mLastSensorState) {
+      if (!ReadParam(aMsg, aIter, &state)) {
         return false;
       }
     }
 
     return true;
   }
 };
 
--- a/intl/locale/nsLocaleService.cpp
+++ b/intl/locale/nsLocaleService.cpp
@@ -204,18 +204,18 @@ nsLocaleService::NewLocale(const nsAStri
 {
     nsresult result;
 
     *_retval = nullptr;
 
     RefPtr<nsLocale> resultLocale(new nsLocale());
     if (!resultLocale) return NS_ERROR_OUT_OF_MEMORY;
 
-    for (int32_t i = 0; i < LocaleListLength; i++) {
-      NS_ConvertASCIItoUTF16 category(LocaleList[i]);
+    for (auto & localeCategory : LocaleList) {
+      NS_ConvertASCIItoUTF16 category(localeCategory);
       result = resultLocale->AddCategory(category, aLocale);
       if (NS_FAILED(result)) return result;
 #if defined(XP_UNIX) && !defined(XP_MACOSX)
       category.AppendLiteral("##PLATFORM");
       result = resultLocale->AddCategory(category, aLocale);
       if (NS_FAILED(result)) return result;
 #endif
     }
--- a/intl/locale/tests/gtest/TestOSPreferences.cpp
+++ b/intl/locale/tests/gtest/TestOSPreferences.cpp
@@ -69,18 +69,17 @@ TEST(Intl_Locale_OSPreferences, GetDateT
     { 0, 4, "ru" },
 
     { 4, 1, "" },
     { 3, 2, "cs" },
     { 2, 3, "" },
     { 1, 4, "ja" }
   };
 
-  for (unsigned i = 0; i < mozilla::ArrayLength(tests); i++) {
-    const Test& t = tests[i];
+  for (const auto & t : tests) {
     nsAutoString pattern;
     if (NS_SUCCEEDED(osprefs->GetDateTimePattern(t.dateStyle, t.timeStyle,
                                                  nsDependentCString(t.locale),
                                                  pattern))) {
       ASSERT_TRUE((t.dateStyle == 0 && t.timeStyle == 0) || !pattern.IsEmpty());
     }
   }
 
--- a/ipc/glue/MessageChannel.cpp
+++ b/ipc/glue/MessageChannel.cpp
@@ -1338,18 +1338,18 @@ MessageChannel::ProcessPendingRequests(A
 
         if (toProcess.empty()) {
             break;
         }
 
         // Processing these messages could result in more messages, so we
         // loop around to check for more afterwards.
 
-        for (auto it = toProcess.begin(); it != toProcess.end(); it++) {
-            ProcessPendingRequest(Move(*it));
+        for (auto & pending : toProcess) {
+            ProcessPendingRequest(Move(pending));
         }
     }
 
     AssertMaybeDeferredCountCorrect();
 }
 
 bool
 MessageChannel::Send(Message* aMsg, Message* aReply)
--- a/ipc/glue/ProtocolUtils.cpp
+++ b/ipc/glue/ProtocolUtils.cpp
@@ -773,18 +773,18 @@ IToplevelProtocol::DestroySharedMemory(S
   }
 
   return descriptor && GetIPCChannel()->Send(descriptor);
 }
 
 void
 IToplevelProtocol::DeallocShmems()
 {
-  for (IDMap<SharedMemory*>::const_iterator cit = mShmemMap.begin(); cit != mShmemMap.end(); ++cit) {
-    Shmem::Dealloc(Shmem::IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead(), cit->second);
+  for (auto cit : mShmemMap) {
+    Shmem::Dealloc(Shmem::IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead(), cit.second);
   }
   mShmemMap.Clear();
 }
 
 bool
 IToplevelProtocol::ShmemCreated(const Message& aMsg)
 {
   Shmem::id_t id;
--- a/js/src/gc/RootMarking.cpp
+++ b/js/src/gc/RootMarking.cpp
@@ -374,18 +374,17 @@ js::gc::GCRuntime::traceRuntimeCommon(JS
 
         /*
          * The embedding can register additional roots here.
          *
          * We don't need to trace these in a minor GC because all pointers into
          * the nursery should be in the store buffer, and we want to avoid the
          * time taken to trace all these roots.
          */
-        for (size_t i = 0; i < blackRootTracers.ref().length(); i++) {
-            const Callback<JSTraceDataOp>& e = blackRootTracers.ref()[i];
+        for (const auto & e : blackRootTracers.ref()) {
             (*e.op)(trc, e.data);
         }
 
         /* During GC, we don't trace gray roots at this stage. */
         if (JSTraceDataOp op = grayRootTracer.op) {
             if (traceOrMark == TraceRuntime)
                 (*op)(trc, grayRootTracer.data);
         }
--- a/js/src/gc/Statistics.cpp
+++ b/js/src/gc/Statistics.cpp
@@ -200,19 +200,19 @@ Statistics::gcDuration(TimeDuration* tot
     if (*maxPause > maxPauseInInterval)
         maxPauseInInterval = *maxPause;
 }
 
 void
 Statistics::sccDurations(TimeDuration* total, TimeDuration* maxPause) const
 {
     *total = *maxPause = 0;
-    for (size_t i = 0; i < sccTimes.length(); i++) {
-        *total += sccTimes[i];
-        *maxPause = Max(*maxPause, sccTimes[i]);
+    for (auto sccTime : sccTimes) {
+        *total += sccTime;
+        *maxPause = Max(*maxPause, sccTime);
     }
 }
 
 typedef Vector<UniqueChars, 8, SystemAllocPolicy> FragmentVector;
 
 static UniqueChars
 Join(const FragmentVector& fragments, const char* separator = "")
 {
--- a/js/src/gc/Tracer.cpp
+++ b/js/src/gc/Tracer.cpp
@@ -240,18 +240,18 @@ ObjectGroupCycleCollectorTracer::onChild
         return;
     }
 
     if (thing.is<ObjectGroup>()) {
         // If this group is required to be in an ObjectGroup chain, trace it
         // via the provided worklist rather than continuing to recurse.
         ObjectGroup& group = thing.as<ObjectGroup>();
         if (group.maybeUnboxedLayout()) {
-            for (size_t i = 0; i < seen.length(); i++) {
-                if (seen[i] == &group)
+            for (auto & g : seen) {
+                if (g == &group)
                     return;
             }
             if (seen.append(&group) && worklist.append(&group)) {
                 return;
             } else {
                 // If append fails, keep tracing normally. The worst that will
                 // happen is we end up overrecursing.
             }
--- a/js/src/irregexp/NativeRegExpMacroAssembler.cpp
+++ b/js/src/irregexp/NativeRegExpMacroAssembler.cpp
@@ -500,18 +500,17 @@ NativeRegExpMacroAssembler::GenerateCode
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "RegExp");
 #endif
 
 #ifdef MOZ_VTUNE
     vtune::MarkRegExp(code, match_only);
 #endif
 
-    for (size_t i = 0; i < labelPatches.length(); i++) {
-        LabelPatch& v = labelPatches[i];
+    for (auto & v : labelPatches) {
         MOZ_ASSERT(!v.label);
         Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, v.patchOffset),
                                            ImmPtr(code->raw() + v.labelOffset),
                                            ImmPtr(0));
     }
 
     JitSpew(JitSpew_Codegen, "Created RegExp (raw %p length %d)",
             (void*) code->raw(), (int) masm.bytesNeeded());
@@ -1067,18 +1066,17 @@ NativeRegExpMacroAssembler::PushBacktrac
 
 void
 NativeRegExpMacroAssembler::BindBacktrack(Label* label)
 {
     JitSpew(SPEW_PREFIX "BindBacktrack");
 
     Bind(label);
 
-    for (size_t i = 0; i < labelPatches.length(); i++) {
-        LabelPatch& v = labelPatches[i];
+    for (auto & v : labelPatches) {
         if (v.label == label) {
             v.labelOffset = label->offset();
             v.label = nullptr;
             break;
         }
     }
 }
 
--- a/js/src/irregexp/RegExpEngine.cpp
+++ b/js/src/irregexp/RegExpEngine.cpp
@@ -2386,17 +2386,17 @@ BoyerMooreLookahead::FindBestInterval(in
 {
     int biggest_points = old_biggest_points;
     static const int kSize = RegExpMacroAssembler::kTableSize;
     for (int i = 0; i < length_; ) {
         while (i < length_ && Count(i) > max_number_of_chars) i++;
         if (i == length_) break;
         int remembered_from = i;
         bool union_map[kSize];
-        for (int j = 0; j < kSize; j++) union_map[j] = false;
+        for (bool & j : union_map) j = false;
         while (i < length_ && Count(i) <= max_number_of_chars) {
             BoyerMoorePositionInfo* map = bitmaps_[i];
             for (int j = 0; j < kSize; j++) union_map[j] |= map->at(j);
             i++;
         }
         int frequency = 0;
         for (int j = 0; j < kSize; j++) {
             if (union_map[j]) {
--- a/js/src/jit/AliasAnalysis.cpp
+++ b/js/src/jit/AliasAnalysis.cpp
@@ -226,18 +226,17 @@ AliasAnalysis::analyze()
             MOZ_ASSERT(loop_->loopHeader() == block->loopHeaderOfBackedge());
             JitSpew(JitSpew_Alias, "Processing loop backedge %d (header %d)", block->id(),
                     loop_->loopHeader()->id());
             LoopAliasInfo* outerLoop = loop_->outer();
             MInstruction* firstLoopIns = *loop_->loopHeader()->begin();
 
             const MInstructionVector& invariant = loop_->invariantLoads();
 
-            for (unsigned i = 0; i < invariant.length(); i++) {
-                MInstruction* ins = invariant[i];
+            for (auto ins : invariant) {
                 AliasSet set = ins->getAliasSet();
                 MOZ_ASSERT(set.isLoad());
 
                 bool hasAlias = false;
                 for (AliasSetIterator iter(set); iter; iter++) {
                     MInstructionVector& aliasedStores = stores[*iter];
                     for (int i = aliasedStores.length() - 1;; i--) {
                         MInstruction* store = aliasedStores[i];
--- a/js/src/jit/BacktrackingAllocator.cpp
+++ b/js/src/jit/BacktrackingAllocator.cpp
@@ -383,32 +383,32 @@ BacktrackingAllocator::init()
         new(&vregs[i]) VirtualRegister();
 
     // Build virtual register objects.
     for (size_t i = 0; i < graph.numBlocks(); i++) {
         if (mir->shouldCancel("Create data structures (main loop)"))
             return false;
 
         LBlock* block = graph.getBlock(i);
-        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
+        for (auto ins : *block) {
             if (mir->shouldCancel("Create data structures (inner loop 1)"))
                 return false;
 
             for (size_t j = 0; j < ins->numDefs(); j++) {
                 LDefinition* def = ins->getDef(j);
                 if (def->isBogusTemp())
                     continue;
-                vreg(def).init(*ins, def, /* isTemp = */ false);
+                vreg(def).init(ins, def, /* isTemp = */ false);
             }
 
             for (size_t j = 0; j < ins->numTemps(); j++) {
                 LDefinition* def = ins->getTemp(j);
                 if (def->isBogusTemp())
                     continue;
-                vreg(def).init(*ins, def, /* isTemp = */ true);
+                vreg(def).init(ins, def, /* isTemp = */ true);
             }
         }
         for (size_t j = 0; j < block->numPhis(); j++) {
             LPhi* phi = block->getPhi(j);
             LDefinition* def = phi->getDef(0);
             vreg(def).init(phi, def, /* isTemp = */ false);
         }
     }
@@ -1090,17 +1090,17 @@ BacktrackingAllocator::mergeAndQueueRegi
             bundle->addRange(range);
         }
     }
 
     // If there is an OSR block, merge parameters in that block with the
     // corresponding parameters in the initial block.
     if (MBasicBlock* osr = graph.mir().osrBlock()) {
         size_t original = 1;
-        for (LInstructionIterator iter = osr->lir()->begin(); iter != osr->lir()->end(); iter++) {
+        for (auto iter : *osr->lir()) {
             if (iter->isParameter()) {
                 for (size_t i = 0; i < iter->numDefs(); i++) {
                     DebugOnly<bool> found = false;
                     VirtualRegister &paramVreg = vreg(iter->getDef(i));
                     for (; original < paramVreg.vreg(); original++) {
                         VirtualRegister &originalVreg = vregs[original];
                         if (*originalVreg.def()->output() == *iter->getDef(i)->output()) {
                             MOZ_ASSERT(originalVreg.ins()->isParameter());
@@ -1300,18 +1300,18 @@ BacktrackingAllocator::processBundle(MIR
 
             // If that didn't work, but we have one or more non-fixed bundles
             // known to be conflicting, maybe we can evict them and try again.
             if ((attempt < MAX_ATTEMPTS || minimalBundle(bundle)) &&
                 !fixed &&
                 !conflicting.empty() &&
                 maximumSpillWeight(conflicting) < computeSpillWeight(bundle))
                 {
-                    for (size_t i = 0; i < conflicting.length(); i++) {
-                        if (!evictBundle(conflicting[i]))
+                    for (auto & conflict : conflicting) {
+                        if (!evictBundle(conflict))
                             return false;
                     }
                     continue;
                 }
         }
 
         // A minimal bundle cannot be split any further. If we try to split it
         // it at this point we will just end up with the same bundle and will
@@ -1404,18 +1404,18 @@ BacktrackingAllocator::tryAllocateRegist
         for (size_t a = 0; a < r.reg.numAliased(); a++) {
             PhysicalRegister& rAlias = registers[r.reg.aliased(a).code()];
             LiveRange* existing;
             if (!rAlias.allocations.contains(range, &existing))
                 continue;
             if (existing->hasVreg()) {
                 MOZ_ASSERT(existing->bundle()->allocation().toRegister() == rAlias.reg);
                 bool duplicate = false;
-                for (size_t i = 0; i < aliasedConflicting.length(); i++) {
-                    if (aliasedConflicting[i] == existing->bundle()) {
+                for (auto & conflict : aliasedConflicting) {
+                    if (conflict == existing->bundle()) {
                         duplicate = true;
                         break;
                     }
                 }
                 if (!duplicate && !aliasedConflicting.append(existing->bundle()))
                     return false;
             } else {
                 JitSpew(JitSpew_RegAlloc, "  %s collides with fixed use %s",
@@ -1513,27 +1513,25 @@ BacktrackingAllocator::splitAndRequeueBu
 
     // Remove all ranges in the old bundle from their register's list.
     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
         LiveRange* range = LiveRange::get(*iter);
         vregs[range->vreg()].removeRange(range);
     }
 
     // Add all ranges in the new bundles to their register's list.
-    for (size_t i = 0; i < newBundles.length(); i++) {
-        LiveBundle* newBundle = newBundles[i];
+    for (auto newBundle : newBundles) {
         for (LiveRange::BundleLinkIterator iter = newBundle->rangesBegin(); iter; iter++) {
             LiveRange* range = LiveRange::get(*iter);
             vregs[range->vreg()].addRange(range);
         }
     }
 
     // Queue the new bundles for register assignment.
-    for (size_t i = 0; i < newBundles.length(); i++) {
-        LiveBundle* newBundle = newBundles[i];
+    for (auto newBundle : newBundles) {
         size_t priority = computePriority(newBundle);
         if (!allocationQueue.insert(QueueItem(newBundle, priority)))
             return false;
     }
 
     return true;
 }
 
@@ -1558,18 +1556,17 @@ BacktrackingAllocator::spill(LiveBundle*
     }
 
     return bundle->spillSet()->addSpilledBundle(bundle);
 }
 
 bool
 BacktrackingAllocator::tryAllocatingRegistersForSpillBundles()
 {
-    for (auto it = spilledBundles.begin(); it != spilledBundles.end(); it++) {
-        LiveBundle* bundle = *it;
+    for (auto bundle : spilledBundles) {
         LiveBundleVector conflicting;
         bool fixed = false;
         bool success = false;
 
         if (mir->shouldCancel("Backtracking Try Allocating Spilled Bundles"))
             return false;
 
         if (JitSpewEnabled(JitSpew_RegAlloc))
@@ -2579,18 +2576,18 @@ BacktrackingAllocator::computeSpillWeigh
     size_t lifetimeTotal = computePriority(bundle);
     return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
 }
 
 size_t
 BacktrackingAllocator::maximumSpillWeight(const LiveBundleVector& bundles)
 {
     size_t maxWeight = 0;
-    for (size_t i = 0; i < bundles.length(); i++)
-        maxWeight = Max(maxWeight, computeSpillWeight(bundles[i]));
+    for (auto bundle : bundles)
+        maxWeight = Max(maxWeight, computeSpillWeight(bundle));
     return maxWeight;
 }
 
 bool
 BacktrackingAllocator::trySplitAcrossHotcode(LiveBundle* bundle, bool* success)
 {
     // If this bundle has portions that are hot and portions that are cold,
     // split it at the boundaries between hot and cold code.
@@ -3019,19 +3016,17 @@ BacktrackingAllocator::splitAt(LiveBundl
             }
         }
     }
 
     LiveBundleVector filteredBundles;
 
     // Trim the ends of ranges in each new bundle when there are no other
     // earlier or later ranges in the same bundle with the same vreg.
-    for (size_t i = 0; i < newBundles.length(); i++) {
-        LiveBundle* bundle = newBundles[i];
-
+    for (auto bundle : newBundles) {
         for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; ) {
             LiveRange* range = LiveRange::get(*iter);
 
             if (!range->hasDefinition()) {
                 if (!HasPrecedingRangeSharingVreg(bundle, range)) {
                     if (range->hasUses()) {
                         UsePosition* use = *range->usesBegin();
                         range->setFrom(inputOf(insData[use->pos]));
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -159,19 +159,17 @@ BaselineCompiler::compile()
     }
 
     // Encode the pc mapping table. See PCMappingIndexEntry for
     // more information.
     Vector<PCMappingIndexEntry> pcMappingIndexEntries(cx);
     CompactBufferWriter pcEntries;
     uint32_t previousOffset = 0;
 
-    for (size_t i = 0; i < pcMappingEntries_.length(); i++) {
-        PCMappingEntry& entry = pcMappingEntries_[i];
-
+    for (auto & entry : pcMappingEntries_) {
         if (entry.addIndexEntry) {
             PCMappingIndexEntry indexEntry;
             indexEntry.pcOffset = entry.pcOffset;
             indexEntry.nativeOffset = entry.nativeOffset;
             indexEntry.bufferOffset = pcEntries.length();
             if (!pcMappingIndexEntries.append(indexEntry)) {
                 ReportOutOfMemory(cx);
                 return Method_Error;
@@ -240,19 +238,19 @@ BaselineCompiler::compile()
     // Adopt fallback stubs from the compiler into the baseline script.
     baselineScript->adoptFallbackStubs(&stubSpace_);
 
     // If profiler instrumentation is enabled, toggle instrumentation on.
     if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
         baselineScript->toggleProfilerInstrumentation(true);
 
     // Patch IC loads using IC entries.
-    for (size_t i = 0; i < icLoadLabels_.length(); i++) {
-        CodeOffset label = icLoadLabels_[i].label;
-        size_t icEntry = icLoadLabels_[i].icEntry;
+    for (auto & icLoadLabel : icLoadLabels_) {
+        CodeOffset label = icLoadLabel.label;
+        size_t icEntry = icLoadLabel.icEntry;
         BaselineICEntry* entryAddr = &(baselineScript->icEntry(icEntry));
         Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label),
                                            ImmPtr(entryAddr),
                                            ImmPtr((void*)-1));
     }
 
     if (modifiesArguments_)
         baselineScript->setModifiesArguments();
--- a/js/src/jit/BytecodeAnalysis.cpp
+++ b/js/src/jit/BytecodeAnalysis.cpp
@@ -149,18 +149,18 @@ BytecodeAnalysis::init(TempAllocator& al
 
             CatchFinallyRange range(script_->pcToOffset(endOfTry), script_->pcToOffset(afterTry));
             if (!catchFinallyRanges.append(range))
                 return false;
             break;
           }
 
           case JSOP_LOOPENTRY:
-            for (size_t i = 0; i < catchFinallyRanges.length(); i++) {
-                if (catchFinallyRanges[i].contains(offset))
+            for (auto & catchFinallyRange : catchFinallyRanges) {
+                if (catchFinallyRange.contains(offset))
                     infos_[offset].loopEntryInCatchOrFinally = true;
             }
             break;
 
           case JSOP_GETNAME:
           case JSOP_BINDNAME:
           case JSOP_BINDVAR:
           case JSOP_SETNAME:
--- a/js/src/jit/CacheIRCompiler.cpp
+++ b/js/src/jit/CacheIRCompiler.cpp
@@ -289,18 +289,18 @@ CacheRegisterAllocator::freeDeadOperandL
 }
 
 void
 CacheRegisterAllocator::discardStack(MacroAssembler& masm)
 {
     // This should only be called when we are no longer using the operands,
     // as we're discarding everything from the native stack. Set all operand
     // locations to Uninitialized to catch bugs.
-    for (size_t i = 0; i < operandLocations_.length(); i++)
-        operandLocations_[i].setUninitialized();
+    for (auto & operandLocation : operandLocations_)
+        operandLocation.setUninitialized();
 
     if (stackPushed_ > 0) {
         masm.addToStackPtr(Imm32(stackPushed_));
         stackPushed_ = 0;
     }
     freePayloadSlots_.clear();
     freeValueSlots_.clear();
 }
@@ -309,18 +309,17 @@ Register
 CacheRegisterAllocator::allocateRegister(MacroAssembler& masm)
 {
     if (availableRegs_.empty())
         freeDeadOperandLocations(masm);
 
     if (availableRegs_.empty()) {
         // Still no registers available, try to spill unused operands to
         // the stack.
-        for (size_t i = 0; i < operandLocations_.length(); i++) {
-            OperandLocation& loc = operandLocations_[i];
+        for (auto & loc : operandLocations_) {
             if (loc.kind() == OperandLocation::PayloadReg) {
                 Register reg = loc.payloadReg();
                 if (currentOpRegs_.has(reg))
                     continue;
 
                 spillOperandToStack(masm, &loc);
                 availableRegs_.add(reg);
                 break; // We got a register, so break out of the loop.
@@ -366,18 +365,17 @@ CacheRegisterAllocator::allocateFixedReg
 
     if (availableRegs_.has(reg)) {
         availableRegs_.take(reg);
         currentOpRegs_.add(reg);
         return;
     }
 
     // The register must be used by some operand. Spill it to the stack.
-    for (size_t i = 0; i < operandLocations_.length(); i++) {
-        OperandLocation& loc = operandLocations_[i];
+    for (auto & loc : operandLocations_) {
         if (loc.kind() == OperandLocation::PayloadReg) {
             if (loc.payloadReg() != reg)
                 continue;
 
             spillOperandToStackOrRegister(masm, &loc);
             currentOpRegs_.add(reg);
             return;
         }
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -5398,29 +5398,29 @@ CodeGenerator::generateBody()
                 return false;
         }
         TrackedOptimizations* last = nullptr;
 
 #if defined(JS_ION_PERF)
         perfSpewer->startBasicBlock(current->mir(), masm);
 #endif
 
-        for (LInstructionIterator iter = current->begin(); iter != current->end(); iter++) {
+        for (auto iter : *current) {
             if (!alloc().ensureBallast())
                 return false;
 
 #ifdef JS_JITSPEW
             JitSpewStart(JitSpew_Codegen, "instruction %s", iter->opName());
             if (const char* extra = iter->extraName())
                 JitSpewCont(JitSpew_Codegen, ":%s", extra);
             JitSpewFin(JitSpew_Codegen);
 #endif
 
             if (counts)
-                blockCounts->visitInstruction(*iter);
+                blockCounts->visitInstruction(iter);
 
 #ifdef CHECK_OSIPOINT_REGISTERS
             if (iter->safepoint())
                 resetOsiPointRegs(iter->safepoint());
 #endif
 
             if (iter->mirRaw()) {
                 // Only add instructions that have a tracked inline script tree.
@@ -9680,20 +9680,20 @@ CodeGenerator::generate()
     dumpNativeToBytecodeEntries();
 
     return !masm.oom();
 }
 
 bool
 CodeGenerator::linkSharedStubs(JSContext* cx)
 {
-    for (uint32_t i = 0; i < sharedStubs_.length(); i++) {
+    for (auto & sharedStub : sharedStubs_) {
         ICStub *stub = nullptr;
 
-        switch (sharedStubs_[i].kind) {
+        switch (sharedStub.kind) {
           case ICStub::Kind::BinaryArith_Fallback: {
             ICBinaryArith_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonSharedIC);
             stub = stubCompiler.getStub(&stubSpace_);
             break;
           }
           case ICStub::Kind::UnaryArith_Fallback: {
             ICUnaryArith_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonSharedIC);
             stub = stubCompiler.getStub(&stubSpace_);
@@ -9705,18 +9705,18 @@ CodeGenerator::linkSharedStubs(JSContext
             break;
           }
           case ICStub::Kind::GetProp_Fallback: {
             ICGetProp_Fallback::Compiler stubCompiler(cx, ICStubCompiler::Engine::IonSharedIC);
             stub = stubCompiler.getStub(&stubSpace_);
             break;
           }
           case ICStub::Kind::NewArray_Fallback: {
-            JSScript* script = sharedStubs_[i].entry.script();
-            jsbytecode* pc = sharedStubs_[i].entry.pc(script);
+            JSScript* script = sharedStub.entry.script();
+            jsbytecode* pc = sharedStub.entry.pc(script);
             ObjectGroup* group = ObjectGroup::allocationSiteGroup(cx, script, pc, JSProto_Array);
             if (!group)
                 return false;
 
             ICNewArray_Fallback::Compiler stubCompiler(cx, group, ICStubCompiler::Engine::IonSharedIC);
             stub = stubCompiler.getStub(&stubSpace_);
             break;
           }
@@ -9727,17 +9727,17 @@ CodeGenerator::linkSharedStubs(JSContext
           }
           default:
             MOZ_CRASH("Unsupported shared stub.");
         }
 
         if (!stub)
             return false;
 
-        sharedStubs_[i].entry.setFirstStub(stub);
+        sharedStub.entry.setFirstStub(stub);
     }
     return true;
 }
 
 bool
 CodeGenerator::link(JSContext* cx, CompilerConstraintList* constraints)
 {
     RootedScript script(cx, gen->info().script());
@@ -9909,18 +9909,18 @@ CodeGenerator::link(JSContext* cx, Compi
 
     // Adopt fallback shared stubs from the compiler into the ion script.
     ionScript->adoptFallbackStubs(&stubSpace_);
 
     Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, invalidateEpilogueData_),
                                        ImmPtr(ionScript),
                                        ImmPtr((void*)-1));
 
-    for (size_t i = 0; i < ionScriptLabels_.length(); i++) {
-        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, ionScriptLabels_[i]),
+    for (auto ionScriptLabel : ionScriptLabels_) {
+        Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, ionScriptLabel),
                                            ImmPtr(ionScript),
                                            ImmPtr((void*)-1));
     }
 
 #ifdef JS_TRACE_LOGGING
     bool TLFailed = false;
 
     for (uint32_t i = 0; i < patchableTLEvents_.length(); i++) {
--- a/js/src/jit/ExecutableAllocator.cpp
+++ b/js/src/jit/ExecutableAllocator.cpp
@@ -119,34 +119,33 @@ ExecutablePool::available() const
 ExecutableAllocator::ExecutableAllocator(JSRuntime* rt)
   : rt_(rt)
 {
     MOZ_ASSERT(m_smallPools.empty());
 }
 
 ExecutableAllocator::~ExecutableAllocator()
 {
-    for (size_t i = 0; i < m_smallPools.length(); i++)
-        m_smallPools[i]->release(/* willDestroy = */true);
+    for (auto & pool : m_smallPools)
+        pool->release(/* willDestroy = */true);
 
     // If this asserts we have a pool leak.
     MOZ_ASSERT_IF(m_pools.initialized(), m_pools.empty());
 }
 
 ExecutablePool*
 ExecutableAllocator::poolForSize(size_t n)
 {
     // Try to fit in an existing small allocator.  Use the pool with the
     // least available space that is big enough (best-fit).  This is the
     // best strategy because (a) it maximizes the chance of the next
     // allocation fitting in a small pool, and (b) it minimizes the
     // potential waste when a small pool is next abandoned.
     ExecutablePool* minPool = nullptr;
-    for (size_t i = 0; i < m_smallPools.length(); i++) {
-        ExecutablePool* pool = m_smallPools[i];
+    for (auto pool : m_smallPools) {
         if (n <= pool->available() && (!minPool || pool->available() < minPool->available()))
             minPool = pool;
     }
     if (minPool) {
         minPool->addRef();
         return minPool;
     }
 
@@ -278,18 +277,18 @@ ExecutableAllocator::releasePoolPages(Ex
 }
 
 void
 ExecutableAllocator::purge()
 {
     // Don't race with reprotectAll called from the signal handler.
     JitRuntime::AutoPreventBackedgePatching apbp(rt_);
 
-    for (size_t i = 0; i < m_smallPools.length(); i++)
-        m_smallPools[i]->release();
+    for (auto & pool : m_smallPools)
+        pool->release();
     m_smallPools.clear();
 }
 
 void
 ExecutableAllocator::addSizeOfCode(JS::CodeSizes* sizes) const
 {
     if (m_pools.initialized()) {
         for (ExecPoolHashSet::Range r = m_pools.all(); !r.empty(); r.popFront()) {
@@ -337,39 +336,39 @@ ExecutableAllocator::poisonCode(JSRuntim
     JitRuntime::AutoPreventBackedgePatching apbp(rt);
 
 #ifdef DEBUG
     // Make sure no pools have the mark bit set.
     for (size_t i = 0; i < ranges.length(); i++)
         MOZ_ASSERT(!ranges[i].pool->isMarked());
 #endif
 
-    for (size_t i = 0; i < ranges.length(); i++) {
-        ExecutablePool* pool = ranges[i].pool;
+    for (auto & range : ranges) {
+        ExecutablePool* pool = range.pool;
         if (pool->m_refCount == 1) {
             // This is the last reference so the release() call below will
             // unmap the memory. Don't bother poisoning it.
             continue;
         }
 
         MOZ_ASSERT(pool->m_refCount > 1);
 
         // Use the pool's mark bit to indicate we made the pool writable.
         // This avoids reprotecting a pool multiple times.
         if (!pool->isMarked()) {
             reprotectPool(rt, pool, ProtectionSetting::Writable);
             pool->mark();
         }
 
-        memset(ranges[i].start, JS_SWEPT_CODE_PATTERN, ranges[i].size);
+        memset(range.start, JS_SWEPT_CODE_PATTERN, range.size);
     }
 
     // Make the pools executable again and drop references.
-    for (size_t i = 0; i < ranges.length(); i++) {
-        ExecutablePool* pool = ranges[i].pool;
+    for (auto & range : ranges) {
+        ExecutablePool* pool = range.pool;
         if (pool->isMarked()) {
             reprotectPool(rt, pool, ProtectionSetting::Executable);
             pool->unmark();
         }
         pool->release();
     }
 }
 
--- a/js/src/jit/FlowAliasAnalysis.cpp
+++ b/js/src/jit/FlowAliasAnalysis.cpp
@@ -185,18 +185,18 @@ AppendToWorklist(MDefinitionVector& work
         stores[j]->setInWorklist();
     }
     return true;
 }
 
 static void
 SetNotInWorkList(MDefinitionVector& worklist)
 {
-    for (size_t item = 0; item < worklist.length(); item++)
-        worklist[item]->setNotInWorklistUnchecked();
+    for (auto & item : worklist)
+        item->setNotInWorklistUnchecked();
 }
 
 static bool
 LoadAliasesStore(MDefinition* load, MDefinition* store)
 {
     // Always alias first instruction of graph.
     if (store->id() == 0)
         return true;
@@ -670,36 +670,36 @@ FlowAliasAnalysis::improveNonAliasedStor
                                            MDefinitionVector& outputStores, bool* improved,
                                            bool onlyControlInstructions)
 {
     MOZ_ASSERT(worklist_.length() == 0);
     if (!AppendToWorklist(worklist_, inputStores))
         return false;
     outputStores.clear();
 
-    for (size_t i = 0; i < worklist_.length(); i++) {
+    for (auto & work : worklist_) {
-        MOZ_ASSERT(worklist_[i]);
+        MOZ_ASSERT(work);
 
-        if (!LoadAliasesStore(load, worklist_[i])) {
-            StoreDependency* dep = worklist_[i]->storeDependency();
+        if (!LoadAliasesStore(load, work)) {
+            StoreDependency* dep = work->storeDependency();
             MOZ_ASSERT(dep);
             MOZ_ASSERT(dep->get().length() > 0);
 
             if (!AppendToWorklist(worklist_, dep->get()))
                 return false;
 
             *improved = true;
             continue;
         }
 
-        if (onlyControlInstructions && !worklist_[i]->isControlInstruction()) {
+        if (onlyControlInstructions && !work->isControlInstruction()) {
             outputStores.clear();
             break;
         }
-        if (!outputStores.append(worklist_[i]))
+        if (!outputStores.append(work))
             return false;
     }
 
     SetNotInWorkList(worklist_);
     worklist_.clear();
     return true;
 }
 
@@ -796,18 +796,17 @@ void
 FlowAliasAnalysis::saveLoadDependency(MDefinition* load, MDefinitionVector& dependencies)
 {
     MOZ_ASSERT(dependencies.length() > 0);
 
     // For now we only save the last store before the load for other passes.
     // That means the store with the maximum id.
     MDefinition* max = dependencies[0];
     MDefinition* maxNonControl = nullptr;
-    for (size_t i = 0; i < dependencies.length(); i++) {
-        MDefinition* ins = dependencies[i];
+    for (auto ins : dependencies) {
         if (max->id() < ins->id())
             max = ins;
         if (!ins->isControlInstruction()) {
             if (!maxNonControl || maxNonControl->id() < ins->id())
                 maxNonControl = ins;
         }
     }
 
@@ -880,26 +879,26 @@ FlowAliasAnalysis::isLoopInvariant(MDefi
     load->setDependency(store);
     if (!improveLoopDependency(load, stores_->get(backedge), output))
         return false;
     load->setDependency(olddep);
 
     if (output.length() == 0)
         return true;
 
-    for (size_t i = 0; i < output.length(); i++) {
-        if (output[i]->storeDependency())
+    for (auto & out : output) {
+        if (out->storeDependency())
             return true;
 
-        if (!output[i]->isControlInstruction())
+        if (!out->isControlInstruction())
             return true;
-        if (!output[i]->block()->isLoopHeader())
+        if (!out->block()->isLoopHeader())
             return true;
 
-        if (output[i] == store)
+        if (out == store)
             continue;
 
         return true;
     }
 
     *loopinvariant = true;
     return true;
 }
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -398,21 +398,18 @@ JitZoneGroup::patchIonBackedges(JSContex
         return;
 
     backedgeTarget_ = target;
 
     cx->runtime()->jitRuntime()->backedgeExecAlloc().makeAllWritable();
 
     // Patch all loop backedges in Ion code so that they either jump to the
     // normal loop header or to an interrupt handler each time they run.
-    for (InlineListIterator<PatchableBackedge> iter(backedgeList().begin());
-         iter != backedgeList().end();
-         iter++)
+    for (auto patchableBackedge : backedgeList())
     {
-        PatchableBackedge* patchableBackedge = *iter;
         if (target == BackedgeLoopHeader)
             PatchBackedge(patchableBackedge->backedge, patchableBackedge->loopHeader, target);
         else
             PatchBackedge(patchableBackedge->backedge, patchableBackedge->interruptCheck, target);
     }
 
     cx->runtime()->jitRuntime()->backedgeExecAlloc().makeAllExecutable();
 }
@@ -2187,18 +2184,17 @@ IonCompile(JSContext* cx, JSScript* scri
 
     if (buildResult.isErr()) {
         AbortReason reason = buildResult.unwrapErr();
         builder->graphSpewer().endFunction();
         if (reason == AbortReason::PreliminaryObjects) {
             // Some group was accessed which has associated preliminary objects
             // to analyze. Do this now and we will try to build again shortly.
             const MIRGenerator::ObjectGroupVector& groups = builder->abortedPreliminaryGroups();
-            for (size_t i = 0; i < groups.length(); i++) {
-                ObjectGroup* group = groups[i];
+            for (auto group : groups) {
                 if (group->newScript()) {
                     if (!group->newScript()->maybeAnalyze(cx, group, nullptr, /* force = */ true))
                         return AbortReason::Alloc;
                 } else if (group->maybePreliminaryObjects()) {
                     group->maybePreliminaryObjects()->maybeAnalyze(cx, group, /* force = */ true);
                 } else {
                     MOZ_CRASH("Unexpected aborted preliminary group");
                 }
@@ -3139,18 +3135,18 @@ jit::Invalidate(TypeZone& types, FreeOp*
                 const RecompileInfoVector& invalid, bool resetUses,
                 bool cancelOffThread)
 {
     JitSpew(JitSpew_IonInvalidate, "Start invalidation.");
 
     // Add an invalidation reference to all invalidated IonScripts to indicate
     // to the traversal which frames have been invalidated.
     size_t numInvalidations = 0;
-    for (size_t i = 0; i < invalid.length(); i++) {
-        const CompilerOutput* co = invalid[i].compilerOutput(types);
+    for (auto inv : invalid) {
+        const CompilerOutput* co = inv.compilerOutput(types);
         if (!co)
             continue;
         MOZ_ASSERT(co->isValid());
 
         if (cancelOffThread)
             CancelOffThreadIonCompile(co->script());
 
         if (!co->ion())
@@ -3180,18 +3176,18 @@ jit::Invalidate(TypeZone& types, FreeOp*
 
     JSContext* cx = TlsContext.get();
     for (JitActivationIterator iter(cx, types.zone()->group()->ownerContext()); !iter.done(); ++iter)
         InvalidateActivation(fop, iter, false);
 
     // Drop the references added above. If a script was never active, its
     // IonScript will be immediately destroyed. Otherwise, it will be held live
     // until its last invalidated frame is destroyed.
-    for (size_t i = 0; i < invalid.length(); i++) {
-        CompilerOutput* co = invalid[i].compilerOutput(types);
+    for (auto inv : invalid) {
+        CompilerOutput* co = inv.compilerOutput(types);
         if (!co)
             continue;
         MOZ_ASSERT(co->isValid());
 
         JSScript* script = co->script();
         IonScript* ionScript = co->ion();
         if (!ionScript)
             continue;
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -593,18 +593,18 @@ BlockComputesConstant(MBasicBlock* block
     // consume the constant has already been removed.
     if (value->hasUses())
         return false;
 
     if (!value->isConstant() || value->block() != block)
         return false;
     if (!block->phisEmpty())
         return false;
-    for (MInstructionIterator iter = block->begin(); iter != block->end(); ++iter) {
-        if (*iter != value || !iter->isGoto())
+    for (auto iter : *block) {
+        if (iter != value || !iter->isGoto())
             return false;
     }
     return value->toConstant()->valueToBoolean(constBool);
 }
 
 // Find phis that are redudant:
 //
 // 1) phi(a, a)
@@ -2125,18 +2125,18 @@ CanCompareRegExp(MCompare* compare, MDef
     }
 
     return true;
 }
 
 static inline void
 SetNotInWorklist(MDefinitionVector& worklist)
 {
-    for (size_t i = 0; i < worklist.length(); i++)
-        worklist[i]->setNotInWorklist();
+    for (auto & work : worklist)
+        work->setNotInWorklist();
 }
 
 static bool
 IsRegExpHoistable(MIRGenerator* mir, MDefinition* regexp, MDefinitionVector& worklist,
                   bool* hoistable)
 {
     MOZ_ASSERT(worklist.length() == 0);
 
@@ -2402,17 +2402,17 @@ IntersectDominators(MBasicBlock* block1,
         }
     }
     return finger1;
 }
 
 void
 jit::ClearDominatorTree(MIRGraph& graph)
 {
-    for (MBasicBlockIterator iter = graph.begin(); iter != graph.end(); iter++)
+    for (auto iter : graph)
         iter->clearDominatorInfo();
 }
 
 static void
 ComputeImmediateDominators(MIRGraph& graph)
 {
     // The default start block is a root and therefore only self-dominates.
     MBasicBlock* startBlock = graph.entryBlock();
@@ -2551,17 +2551,17 @@ jit::BuildPhiReverseMapping(MIRGraph& gr
     //             continue, as well as a final predecessor for the actual
     //             loop continuation. The continue itself has exactly one
     //             successor.
     //   * An if. Each branch as exactly one predecessor.
     //   * A switch. Each branch has exactly one predecessor.
     //   * Loop tail. A new block is always created for the exit, and if a
     //             break statement is present, the exit block will forward
     //             directly to the break block.
-    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
+    for (auto block : graph) {
         if (block->phisEmpty())
             continue;
 
         // Assert on the above.
         for (size_t j = 0; j < block->numPredecessors(); j++) {
             MBasicBlock* pred = block->getPredecessor(j);
 
 #ifdef DEBUG
@@ -2569,17 +2569,17 @@ jit::BuildPhiReverseMapping(MIRGraph& gr
             for (size_t k = 0; k < pred->numSuccessors(); k++) {
                 MBasicBlock* successor = pred->getSuccessor(k);
                 if (!successor->phisEmpty())
                     numSuccessorsWithPhis++;
             }
             MOZ_ASSERT(numSuccessorsWithPhis <= 1);
 #endif
 
-            pred->setSuccessorWithPhis(*block, j);
+            pred->setSuccessorWithPhis(block, j);
         }
     }
 
     return true;
 }
 
 #ifdef DEBUG
 static bool
@@ -3513,18 +3513,17 @@ TryOptimizeLoadObjectOrNull(MDefinition*
     // Fixup the result type of MTypeBarrier uses.
     for (MUseDefIterator iter(def); iter; ++iter) {
         MDefinition* ndef = iter.def();
         if (ndef->isTypeBarrier())
             ndef->setResultType(MIRType::ObjectOrNull);
     }
 
     // Eliminate MToObjectOrNull instruction uses.
-    for (size_t i = 0; i < eliminateList.length(); i++) {
-        MDefinition* ndef = eliminateList[i];
+    for (auto ndef : eliminateList) {
         ndef->replaceAllUsesWith(def);
         if (!peliminateList->append(ndef))
             return false;
     }
 
     return true;
 }
 
@@ -3564,18 +3563,17 @@ jit::EliminateRedundantChecks(MIRGraph& 
     // Stack for pre-order CFG traversal.
     Vector<MBasicBlock*, 1, JitAllocPolicy> worklist(graph.alloc());
 
     // The index of the current block in the CFG traversal.
     size_t index = 0;
 
     // Add all self-dominating blocks to the worklist.
     // This includes all roots. Order does not matter.
-    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
-        MBasicBlock* block = *i;
+    for (auto block : graph) {
         if (block->immediateDominator() == block) {
             if (!worklist.append(block))
                 return false;
         }
     }
 
     MDefinitionVector eliminateList(graph.alloc());
 
@@ -3621,18 +3619,17 @@ jit::EliminateRedundantChecks(MIRGraph& 
             if (eliminated)
                 block->discardDef(def);
         }
         index++;
     }
 
     MOZ_ASSERT(index == graph.numBlocks());
 
-    for (size_t i = 0; i < eliminateList.length(); i++) {
-        MDefinition* def = eliminateList[i];
+    for (auto def : eliminateList) {
         def->block()->discardDef(def);
     }
 
     return true;
 }
 
 static bool
 NeedsKeepAlive(MInstruction* slotsOrElements, MInstruction* use)
@@ -3677,18 +3674,17 @@ NeedsKeepAlive(MInstruction* slotsOrElem
 }
 
 bool
 jit::AddKeepAliveInstructions(MIRGraph& graph)
 {
     for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
         MBasicBlock* block = *i;
 
-        for (MInstructionIterator insIter(block->begin()); insIter != block->end(); insIter++) {
-            MInstruction* ins = *insIter;
+        for (auto ins : *block) {
             if (ins->type() != MIRType::Elements && ins->type() != MIRType::Slots)
                 continue;
 
             MDefinition* ownerObject;
             switch (ins->op()) {
               case MDefinition::Op_ConstantElements:
                 continue;
               case MDefinition::Op_ConvertElementsToDoubles:
@@ -3754,50 +3750,50 @@ jit::AddKeepAliveInstructions(MIRGraph& 
     }
 
     return true;
 }
 
 bool
 LinearSum::multiply(int32_t scale)
 {
-    for (size_t i = 0; i < terms_.length(); i++) {
-        if (!SafeMul(scale, terms_[i].scale, &terms_[i].scale))
+    for (auto & term : terms_) {
+        if (!SafeMul(scale, term.scale, &term.scale))
             return false;
     }
     return SafeMul(scale, constant_, &constant_);
 }
 
 bool
 LinearSum::divide(uint32_t scale)
 {
     MOZ_ASSERT(scale > 0);
 
-    for (size_t i = 0; i < terms_.length(); i++) {
-        if (terms_[i].scale % scale != 0)
+    for (auto & term : terms_) {
+        if (term.scale % scale != 0)
             return false;
     }
     if (constant_ % scale != 0)
         return false;
 
-    for (size_t i = 0; i < terms_.length(); i++)
-        terms_[i].scale /= scale;
+    for (auto & term : terms_)
+        term.scale /= scale;
     constant_ /= scale;
 
     return true;
 }
 
 bool
 LinearSum::add(const LinearSum& other, int32_t scale /* = 1 */)
 {
-    for (size_t i = 0; i < other.terms_.length(); i++) {
+    for (auto term : other.terms_) {
         int32_t newScale = scale;
-        if (!SafeMul(scale, other.terms_[i].scale, &newScale))
+        if (!SafeMul(scale, term.scale, &newScale))
             return false;
-        if (!add(other.terms_[i].term, newScale))
+        if (!add(term.term, newScale))
             return false;
     }
     int32_t newConstant = scale;
     if (!SafeMul(scale, other.constant_, &newConstant))
         return false;
     return add(newConstant);
 }
 
@@ -4053,18 +4049,18 @@ AnalyzePoppedThis(JSContext* cx, ObjectG
         // Ignore assignments to properties that were already written to.
         if (baseobj->lookup(cx, NameToId(setprop->name()))) {
             *phandled = true;
             return true;
         }
 
         // Don't add definite properties for properties that were already
         // read in the constructor.
-        for (size_t i = 0; i < accessedProperties->length(); i++) {
-            if ((*accessedProperties)[i] == setprop->name())
+        for (auto & accessedProperty : *accessedProperties) {
+            if (accessedProperty == setprop->name())
                 return true;
         }
 
         // Assignments to new properties must always execute.
         if (!definitelyExecuted)
             return true;
 
         RootedId id(cx, NameToId(setprop->name()));
@@ -4277,32 +4273,30 @@ jit::AnalyzeNewScriptDefiniteProperties(
     }
 
     // Sort the instructions to visit in increasing order.
     qsort(instructions.begin(), instructions.length(),
           sizeof(MInstruction*), CmpInstructions);
 
     // Find all exit blocks in the graph.
     Vector<MBasicBlock*> exitBlocks(cx);
-    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
-        if (!block->numSuccessors() && !exitBlocks.append(*block))
+    for (auto block : graph) {
+        if (!block->numSuccessors() && !exitBlocks.append(block))
             return false;
     }
 
     // id of the last block which added a new property.
     size_t lastAddedBlock = 0;
 
-    for (size_t i = 0; i < instructions.length(); i++) {
-        MInstruction* ins = instructions[i];
-
+    for (auto ins : instructions) {
         // Track whether the use of |this| is in unconditional code, i.e.
         // the block dominates all graph exits.
         bool definitelyExecuted = true;
-        for (size_t i = 0; i < exitBlocks.length(); i++) {
-            for (MBasicBlock* exit = exitBlocks[i];
+        for (auto exit : exitBlocks) {
+            for (;
                  exit != ins->block();
                  exit = exit->immediateDominator())
             {
                 if (exit == exit->immediateDominator()) {
                     definitelyExecuted = false;
                     break;
                 }
             }
@@ -4332,17 +4326,17 @@ jit::AnalyzeNewScriptDefiniteProperties(
     }
 
     if (baseobj->slotSpan() != 0) {
         // We found some definite properties, but their correctness is still
         // contingent on the correct frames being inlined. Add constraints to
         // invalidate the definite properties if additional functions could be
         // called at the inline frame sites.
         Vector<MBasicBlock*> exitBlocks(cx);
-        for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
+        for (auto block : graph) {
             // Inlining decisions made after the last new property was added to
             // the object don't need to be frozen.
             if (block->id() > lastAddedBlock)
                 break;
             if (MResumePoint* rp = block->callerResumePoint()) {
                 if (block->numPredecessors() == 1 && block->getPredecessor(0) == rp->block()) {
                     JSScript* script = rp->block()->info().script();
                     if (!AddClearDefiniteFunctionUsesInScript(cx, group, script, block->info().script()))
--- a/js/src/jit/IonBuilder.cpp
+++ b/js/src/jit/IonBuilder.cpp
@@ -525,19 +525,19 @@ IonBuilder::analyzeNewLoopTypes(const CF
     // Over-approximating the types may lead to inefficient generated code, and
     // under-approximating the types will cause the loop body to be analyzed
     // multiple times as the correct types are deduced (see finishLoop).
 
     // If we restarted processing of an outer loop then get loop header types
     // directly from the last time we have previously processed this loop. This
     // both avoids repeated work from the bytecode traverse below, and will
     // also pick up types discovered while previously building the loop body.
-    for (size_t i = 0; i < loopHeaders_.length(); i++) {
-        if (loopHeaders_[i].pc == cfgBlock->startPc()) {
-            MBasicBlock* oldEntry = loopHeaders_[i].header;
+    for (auto & loopHeader : loopHeaders_) {
+        if (loopHeader.pc == cfgBlock->startPc()) {
+            MBasicBlock* oldEntry = loopHeader.header;
 
             // If this block has been discarded, its resume points will have
             // already discarded their operands.
             if (!oldEntry->isDead()) {
                 MResumePoint* oldEntryRp = oldEntry->entryResumePoint();
                 size_t stackDepth = oldEntryRp->stackDepth();
                 for (size_t slot = 0; slot < stackDepth; slot++) {
                     MDefinition* oldDef = oldEntryRp->getOperand(slot);
@@ -551,17 +551,17 @@ IonBuilder::analyzeNewLoopTypes(const CF
                     if (!newPhi->addBackedgeType(alloc(), oldPhi->type(), oldPhi->resultTypeSet()))
                         return abort(AbortReason::Alloc);
                 }
             }
 
             // Update the most recent header for this loop encountered, in case
             // new types flow to the phis and the loop is processed at least
             // three times.
-            loopHeaders_[i].header = entry;
+            loopHeader.header = entry;
             return Ok();
         }
     }
     if (!loopHeaders_.append(LoopHeader(cfgBlock->startPc(), entry)))
         return abort(AbortReason::Alloc);
 
     // Get the start and end pc of this loop.
     jsbytecode* start = loopEntryBlock->stopPc();
@@ -859,18 +859,17 @@ IonBuilder::build()
 
 AbortReasonOr<Ok>
 IonBuilder::processIterators()
 {
     // Find and mark phis that must transitively hold an iterator live.
 
     Vector<MDefinition*, 8, SystemAllocPolicy> worklist;
 
-    for (size_t i = 0; i < iterators_.length(); i++) {
-        MDefinition* iter = iterators_[i];
+    for (auto iter : iterators_) {
         if (!iter->isInWorklist()) {
             if (!worklist.append(iter))
                 return abort(AbortReason::Alloc);
             iter->setInWorklist();
         }
     }
 
     while (!worklist.empty()) {
@@ -3796,18 +3795,18 @@ IonBuilder::inlineScriptedCall(CallInfo&
                     return abort(AbortReason::Alloc);
                 return InliningStatus_NotInlined;
             }
             return abort(AbortReason::Inlining);
 
           case AbortReason::PreliminaryObjects: {
             const ObjectGroupVector& groups = inlineBuilder.abortedPreliminaryGroups();
             MOZ_ASSERT(!groups.empty());
-            for (size_t i = 0; i < groups.length(); i++)
-                addAbortedPreliminaryGroup(groups[i]);
+            for (auto group : groups)
+                addAbortedPreliminaryGroup(group);
             return Err(result.unwrapErr());
           }
 
           case AbortReason::Alloc:
           case AbortReason::Inlining:
           case AbortReason::Error:
             return Err(result.unwrapErr());
 
@@ -3946,18 +3945,18 @@ IonBuilder::patchInlinedReturns(CallInfo
     if (returns.length() == 1)
         return patchInlinedReturn(callInfo, returns[0], bottom);
 
     // Accumulate multiple returns with a phi.
     MPhi* phi = MPhi::New(alloc());
     if (!phi->reserveLength(returns.length()))
         return nullptr;
 
-    for (size_t i = 0; i < returns.length(); i++) {
-        MDefinition* rdef = patchInlinedReturn(callInfo, returns[i], bottom);
+    for (auto & ret : returns) {
+        MDefinition* rdef = patchInlinedReturn(callInfo, ret, bottom);
         if (!rdef)
             return nullptr;
         phi->addInput(rdef);
     }
 
     bottom->addPhi(phi);
     return phi;
 }
@@ -4107,18 +4106,18 @@ IonBuilder::selectInliningTargets(const 
     if (!choiceSet.reserve(targets.length()))
         return abort(AbortReason::Alloc);
 
     // Don't inline polymorphic sites during the definite properties analysis.
     // AddClearDefiniteFunctionUsesInScript depends on this for correctness.
     if (info().analysisMode() == Analysis_DefiniteProperties && targets.length() > 1)
         return Ok();
 
-    for (size_t i = 0; i < targets.length(); i++) {
-        JSObject* target = targets[i].target;
+    for (auto entry : targets) {
+        JSObject* target = entry.target;
 
         trackOptimizationAttempt(TrackedStrategy::Call_Inline);
         trackTypeInfo(TrackedTypeSite::Call_Target, target);
 
         bool inlineable;
         InliningDecision decision = makeInliningDecision(target, callInfo);
         switch (decision) {
           case InliningDecision_Error:
@@ -8561,18 +8560,18 @@ IonBuilder::computeHeapType(const Tempor
 
         properties.infallibleAppend(property);
         acc = TypeSet::unionSets(acc, currentSet, lifoAlloc);
         if (!acc)
             return nullptr;
     }
 
     // Freeze all the properties associated with the refined type set.
-    for (HeapTypeSetKey* i = properties.begin(); i != properties.end(); i++)
-        i->freeze(constraints());
+    for (auto & property : properties)
+        property.freeze(constraints());
 
     return acc;
 }
 
 AbortReasonOr<Ok>
 IonBuilder::jsop_getelem_dense(MDefinition* obj, MDefinition* index, JSValueType unboxedType)
 {
     TemporaryTypeSet* types = bytecodeTypes(pc);
@@ -10760,18 +10759,17 @@ IonBuilder::convertUnboxedObjects(MDefin
 
     return convertUnboxedObjects(obj, list);
 }
 
 MDefinition*
 IonBuilder::convertUnboxedObjects(MDefinition* obj,
                                   const BaselineInspector::ObjectGroupVector& list)
 {
-    for (size_t i = 0; i < list.length(); i++) {
-        ObjectGroup* group = list[i];
+    for (auto group : list) {
         if (TemporaryTypeSet* types = obj->resultTypeSet()) {
             if (!types->hasType(TypeSet::ObjectType(group)))
                 continue;
         }
         obj = MConvertUnboxedObjectToNative::New(alloc(), obj, group);
         current->add(obj->toInstruction());
     }
     return obj;
@@ -11149,22 +11147,22 @@ IonBuilder::getPropTryCommonGetter(bool*
 bool
 IonBuilder::canInlinePropertyOpShapes(const BaselineInspector::ReceiverVector& receivers)
 {
     if (receivers.empty()) {
         trackOptimizationOutcome(TrackedOutcome::NoShapeInfo);
         return false;
     }
 
-    for (size_t i = 0; i < receivers.length(); i++) {
+    for (auto receiver : receivers) {
         // We inline the property access as long as the shape is not in
         // dictionary mode. We cannot be sure that the shape is still a
         // lastProperty, and calling Shape::search() on dictionary mode
         // shapes that aren't lastProperty is invalid.
-        if (receivers[i].shape && receivers[i].shape->inDictionary()) {
+        if (receiver.shape && receiver.shape->inDictionary()) {
             trackOptimizationOutcome(TrackedOutcome::InDictionaryMode);
             return false;
         }
     }
 
     return true;
 }
 
@@ -11282,23 +11280,23 @@ IonBuilder::getPropTryInlineAccess(bool*
         *emitted = true;
         return Ok();
     }
 
     MGetPropertyPolymorphic* load = MGetPropertyPolymorphic::New(alloc(), obj, name);
     current->add(load);
     current->push(load);
 
-    for (size_t i = 0; i < receivers.length(); i++) {
+    for (auto & receiver : receivers) {
         Shape* propShape = nullptr;
-        if (receivers[i].shape) {
-            propShape = receivers[i].shape->searchLinear(NameToId(name));
+        if (receiver.shape) {
+            propShape = receiver.shape->searchLinear(NameToId(name));
             MOZ_ASSERT(propShape);
         }
-        if (!load->addReceiver(receivers[i], propShape))
+        if (!load->addReceiver(receiver, propShape))
             return abort(AbortReason::Alloc);
     }
 
     if (failedShapeGuard_)
         load->setNotMovable();
 
     load->setResultType(rvalType);
     MOZ_TRY(pushTypeBarrier(load, types, barrier));
@@ -12053,23 +12051,23 @@ IonBuilder::setPropTryInlineAccess(bool*
 
     if (NeedsPostBarrier(value))
         current->add(MPostWriteBarrier::New(alloc(), obj, value));
 
     MSetPropertyPolymorphic* ins = MSetPropertyPolymorphic::New(alloc(), obj, value, name);
     current->add(ins);
     current->push(value);
 
-    for (size_t i = 0; i < receivers.length(); i++) {
+    for (auto & receiver : receivers) {
         Shape* propShape = nullptr;
-        if (receivers[i].shape) {
-            propShape = receivers[i].shape->searchLinear(NameToId(name));
+        if (receiver.shape) {
+            propShape = receiver.shape->searchLinear(NameToId(name));
             MOZ_ASSERT(propShape);
         }
-        if (!ins->addReceiver(receivers[i], propShape))
+        if (!ins->addReceiver(receiver, propShape))
             return abort(AbortReason::Alloc);
     }
 
     if (objTypes->propertyNeedsBarrier(constraints(), NameToId(name)))
         ins->setNeedsBarrier();
 
     MOZ_TRY(resumeAfter(ins));
 
@@ -13205,18 +13203,18 @@ IonBuilder::addGuardReceiverPolymorphic(
     }
 
     MGuardReceiverPolymorphic* guard = MGuardReceiverPolymorphic::New(alloc(), obj);
     current->add(guard);
 
     if (failedShapeGuard_)
         guard->setNotMovable();
 
-    for (size_t i = 0; i < receivers.length(); i++) {
-        if (!guard->addReceiver(receivers[i]))
+    for (auto receiver : receivers) {
+        if (!guard->addReceiver(receiver))
             return nullptr;
     }
 
     return guard;
 }
 
 MInstruction*
 IonBuilder::addSharedTypedArrayGuard(MDefinition* obj)
--- a/js/src/jit/IonControlFlow.cpp
+++ b/js/src/jit/IonControlFlow.cpp
@@ -37,40 +37,40 @@ void
 ControlFlowGraph::dump(GenericPrinter& print, JSScript* script)
 {
     if (blocks_.length() == 0) {
         print.printf("Didn't run yet.\n");
         return;
     }
 
     fprintf(stderr, "Dumping cfg:\n\n");
-    for (size_t i = 0; i < blocks_.length(); i++) {
+    for (auto & block : blocks_) {
         print.printf(" Block %" PRIuSIZE ", %" PRIuSIZE ":%" PRIuSIZE "\n",
-                     blocks_[i].id(),
-                     script->pcToOffset(blocks_[i].startPc()),
-                     script->pcToOffset(blocks_[i].stopPc()));
+                     block.id(),
+                     script->pcToOffset(block.startPc()),
+                     script->pcToOffset(block.stopPc()));
 
-        jsbytecode* pc = blocks_[i].startPc();
-        for ( ; pc < blocks_[i].stopPc(); pc += CodeSpec[JSOp(*pc)].length) {
+        jsbytecode* pc = block.startPc();
+        for ( ; pc < block.stopPc(); pc += CodeSpec[JSOp(*pc)].length) {
             MOZ_ASSERT(pc < script->codeEnd());
             print.printf("  %" PRIuSIZE ": %s\n", script->pcToOffset(pc),
                                                   CodeName[JSOp(*pc)]);
         }
 
-        if (blocks_[i].stopIns()->isGoto()) {
+        if (block.stopIns()->isGoto()) {
             print.printf("  %s (popping:%" PRIuSIZE ") [",
-                         blocks_[i].stopIns()->Name(),
-                         blocks_[i].stopIns()->toGoto()->popAmount());
+                         block.stopIns()->Name(),
+                         block.stopIns()->toGoto()->popAmount());
         } else {
-            print.printf("  %s [", blocks_[i].stopIns()->Name());
+            print.printf("  %s [", block.stopIns()->Name());
         }
-        for (size_t j=0; j<blocks_[i].stopIns()->numSuccessors(); j++) {
+        for (size_t j=0; j<block.stopIns()->numSuccessors(); j++) {
             if (j!=0)
                 print.printf(", ");
-            print.printf("%" PRIuSIZE, blocks_[i].stopIns()->getSuccessor(j)->id());
+            print.printf("%" PRIuSIZE, block.stopIns()->getSuccessor(j)->id());
         }
         print.printf("]\n\n");
     }
 }
 
 bool
 ControlFlowGraph::init(TempAllocator& alloc, const CFGBlockVector& blocks)
 {
--- a/js/src/jit/JitcodeMap.cpp
+++ b/js/src/jit/JitcodeMap.cpp
@@ -909,29 +909,28 @@ JitcodeGlobalEntry::IonEntry::trace(JSTr
                                        "jitcodeglobaltable-ionentry-script");
             tracedAny = true;
         }
     }
 
     if (!optsAllTypes_)
         return tracedAny;
 
-    for (IonTrackedTypeWithAddendum* iter = optsAllTypes_->begin();
-         iter != optsAllTypes_->end(); iter++)
+    for (auto & optsAllType : *optsAllTypes_)
     {
-        if (ShouldTraceProvider::ShouldTrace(rt, &iter->type)) {
-            iter->type.trace(trc);
+        if (ShouldTraceProvider::ShouldTrace(rt, &optsAllType.type)) {
+            optsAllType.type.trace(trc);
             tracedAny = true;
         }
-        if (iter->hasAllocationSite() && ShouldTraceProvider::ShouldTrace(rt, &iter->script)) {
-            TraceManuallyBarrieredEdge(trc, &iter->script,
+        if (optsAllType.hasAllocationSite() && ShouldTraceProvider::ShouldTrace(rt, &optsAllType.script)) {
+            TraceManuallyBarrieredEdge(trc, &optsAllType.script,
                                        "jitcodeglobaltable-ionentry-type-addendum-script");
             tracedAny = true;
-        } else if (iter->hasConstructor() && ShouldTraceProvider::ShouldTrace(rt, &iter->constructor)) {
-            TraceManuallyBarrieredEdge(trc, &iter->constructor,
+        } else if (optsAllType.hasConstructor() && ShouldTraceProvider::ShouldTrace(rt, &optsAllType.constructor)) {
+            TraceManuallyBarrieredEdge(trc, &optsAllType.constructor,
                                        "jitcodeglobaltable-ionentry-type-addendum-constructor");
             tracedAny = true;
         }
     }
 
     return tracedAny;
 }
 
@@ -963,20 +962,19 @@ JitcodeGlobalEntry::IonEntry::isMarkedFr
     for (unsigned i = 0; i < numScripts(); i++) {
         if (!IsMarkedUnbarriered(rt, &sizedScriptList()->pairs[i].script))
             return false;
     }
 
     if (!optsAllTypes_)
         return true;
 
-    for (IonTrackedTypeWithAddendum* iter = optsAllTypes_->begin();
-         iter != optsAllTypes_->end(); iter++)
+    for (auto & optsAllType : *optsAllTypes_)
     {
-        if (!TypeSet::IsTypeMarked(rt, &iter->type))
+        if (!TypeSet::IsTypeMarked(rt, &optsAllType.type))
             return false;
     }
 
     return true;
 }
 
 template <class ShouldTraceProvider>
 bool
@@ -1437,18 +1435,18 @@ struct AutoFreeProfilingStrings {
           keep_(false)
     {}
 
     void keepStrings() { keep_ = true; }
 
     ~AutoFreeProfilingStrings() {
         if (keep_)
             return;
-        for (size_t i = 0; i < profilingStrings_.length(); i++)
-            js_free(profilingStrings_[i]);
+        for (auto & profilingString : profilingStrings_)
+            js_free(profilingString);
     }
 };
 
 bool
 JitcodeIonTable::makeIonEntry(JSContext* cx, JitCode* code,
                               uint32_t numScripts, JSScript** scripts,
                               JitcodeGlobalEntry::IonEntry& out)
 {
@@ -1604,20 +1602,20 @@ JitcodeIonTable::WriteIonTable(CompactBu
 
     // Write out numRegions
     JitSpew(JitSpew_Profiling, "  Writing numRuns=%d", int(runOffsets.length()));
     writer.writeNativeEndianUint32_t(runOffsets.length());
 
     // Write out region offset table.  The offsets in |runOffsets| are currently forward
     // offsets from the beginning of the buffer.  We convert them to backwards offsets
     // from the start of the table before writing them into their table entries.
-    for (uint32_t i = 0; i < runOffsets.length(); i++) {
+    for (auto & runOffset : runOffsets) {
         JitSpew(JitSpew_Profiling, "  Run %d offset=%d backOffset=%d @%d",
-                int(i), int(runOffsets[i]), int(tableOffset - runOffsets[i]), int(writer.length()));
-        writer.writeNativeEndianUint32_t(tableOffset - runOffsets[i]);
+                int(&runOffset - runOffsets.begin()), int(runOffset), int(tableOffset - runOffset), int(writer.length()));
+        writer.writeNativeEndianUint32_t(tableOffset - runOffset);
     }
 
     if (writer.oom())
         return false;
 
     *tableOffsetOut = tableOffset;
     *numRegionsOut = runOffsets.length();
     return true;
--- a/js/src/jit/JitcodeMap.h
+++ b/js/src/jit/JitcodeMap.h
@@ -1026,20 +1026,20 @@ class JitcodeGlobalTable
     JitcodeGlobalEntry* startTower_[JitcodeSkiplistTower::MAX_HEIGHT];
     JitcodeSkiplistTower* freeTowers_[JitcodeSkiplistTower::MAX_HEIGHT];
 
   public:
     JitcodeGlobalTable()
       : alloc_(LIFO_CHUNK_SIZE), freeEntries_(nullptr), rand_(0), skiplistSize_(0),
         nurseryEntries_(nullptr)
     {
-        for (unsigned i = 0; i < JitcodeSkiplistTower::MAX_HEIGHT; i++)
-            startTower_[i] = nullptr;
-        for (unsigned i = 0; i < JitcodeSkiplistTower::MAX_HEIGHT; i++)
-            freeTowers_[i] = nullptr;
+        for (auto & tower : startTower_)
+            tower = nullptr;
+        for (auto & freeTower : freeTowers_)
+            freeTower = nullptr;
     }
     ~JitcodeGlobalTable() {}
 
     bool empty() const {
         return skiplistSize_ == 0;
     }
 
     const JitcodeGlobalEntry* lookup(void* ptr) {
--- a/js/src/jit/LICM.cpp
+++ b/js/src/jit/LICM.cpp
@@ -19,18 +19,17 @@ static bool
 LoopContainsPossibleCall(MIRGraph& graph, MBasicBlock* header, MBasicBlock* backedge)
 {
     for (auto i(graph.rpoBegin(header)); ; ++i) {
         MOZ_ASSERT(i != graph.rpoEnd(), "Reached end of graph searching for blocks in loop");
         MBasicBlock* block = *i;
         if (!block->isMarked())
             continue;
 
-        for (auto insIter(block->begin()), insEnd(block->end()); insIter != insEnd; ++insIter) {
-            MInstruction* ins = *insIter;
+        for (auto ins : *block) {
             if (ins->possiblyCalls()) {
 #ifdef JS_JITSPEW
                 JitSpew(JitSpew_LICM, "    Possile call found at %s%u", ins->opName(), ins->id());
 #endif
                 return true;
             }
         }
 
--- a/js/src/jit/LIR.cpp
+++ b/js/src/jit/LIR.cpp
@@ -126,19 +126,19 @@ LBlock::init(TempAllocator& alloc)
         }
     }
     return true;
 }
 
 const LInstruction*
 LBlock::firstInstructionWithId() const
 {
-    for (LInstructionIterator i(instructions_.begin()); i != instructions_.end(); ++i) {
-        if (i->id())
-            return *i;
+    for (auto instruction : instructions_) {
+        if (instruction->id())
+            return instruction;
     }
     return 0;
 }
 
 LMoveGroup*
 LBlock::getEntryMoveGroup(TempAllocator& alloc)
 {
     if (entryMoveGroup_)
@@ -161,17 +161,17 @@ LBlock::getExitMoveGroup(TempAllocator& 
 void
 LBlock::dump(GenericPrinter& out)
 {
     out.printf("block%u:\n", mir()->id());
     for (size_t i = 0; i < numPhis(); ++i) {
         getPhi(i)->dump(out);
         out.printf("\n");
     }
-    for (LInstructionIterator iter = begin(); iter != end(); iter++) {
+    for (auto iter : *this) {
         iter->dump(out);
         out.printf("\n");
     }
 }
 
 void
 LBlock::dump()
 {
@@ -263,21 +263,21 @@ LRecoverInfo::init(MResumePoint* rp)
     // Sort operations in the order in which we need to restore the stack. This
     // implies that outer frames, as well as operations needed to recover the
     // current frame, are located before the current frame. The inner-most
     // resume point should be the last element in the list.
     if (!appendResumePoint(rp))
         return false;
 
     // Remove temporary flags from all definitions.
-    for (MNode** it = begin(); it != end(); it++) {
-        if (!(*it)->isDefinition())
+    for (auto & it : *this) {
+        if (!it->isDefinition())
             continue;
 
-        (*it)->toDefinition()->setNotInWorklist();
+        it->toDefinition()->setNotInWorklist();
     }
 
     MOZ_ASSERT(mir() == rp);
     return true;
 }
 
 LSnapshot::LSnapshot(LRecoverInfo* recoverInfo, BailoutKind kind)
   : numSlots_(TotalOperandCount(recoverInfo) * BOX_PIECES),
@@ -580,29 +580,29 @@ LMoveGroup::add(LAllocation from, LAlloc
 
 bool
 LMoveGroup::addAfter(LAllocation from, LAllocation to, LDefinition::Type type)
 {
     // Transform the operands to this move so that performing the result
     // simultaneously with existing moves in the group will have the same
     // effect as if the original move took place after the existing moves.
 
-    for (size_t i = 0; i < moves_.length(); i++) {
-        if (moves_[i].to() == from) {
-            from = moves_[i].from();
+    for (auto & move : moves_) {
+        if (move.to() == from) {
+            from = move.from();
             break;
         }
     }
 
     if (from == to)
         return true;
 
-    for (size_t i = 0; i < moves_.length(); i++) {
-        if (to == moves_[i].to()) {
-            moves_[i] = LMove(from, to, type);
+    for (auto & move : moves_) {
+        if (to == move.to()) {
+            move = LMove(from, to, type);
             return true;
         }
     }
 
     return add(from, to, type);
 }
 
 void
--- a/js/src/jit/LIR.h
+++ b/js/src/jit/LIR.h
@@ -1541,18 +1541,17 @@ class LSafepoint : public TempObject
         MOZ_ASSERT(alloc.isRegister());
         addSlotsOrElementsRegister(alloc.toRegister().gpr());
         assertInvariants();
         return true;
     }
     bool hasSlotsOrElementsPointer(LAllocation alloc) const {
         if (alloc.isRegister())
             return slotsOrElementsRegs().has(alloc.toRegister().gpr());
-        for (size_t i = 0; i < slotsOrElementsSlots_.length(); i++) {
-            const SlotEntry& entry = slotsOrElementsSlots_[i];
+        for (auto entry : slotsOrElementsSlots_) {
             if (entry.stack == alloc.isStackSlot() && entry.slot == alloc.memorySlot())
                 return true;
         }
         return false;
     }
 
     MOZ_MUST_USE bool addGcPointer(LAllocation alloc) {
         if (alloc.isMemory())
@@ -1562,36 +1561,36 @@ class LSafepoint : public TempObject
         assertInvariants();
         return true;
     }
 
     bool hasGcPointer(LAllocation alloc) const {
         if (alloc.isRegister())
             return gcRegs().has(alloc.toRegister().gpr());
         MOZ_ASSERT(alloc.isMemory());
-        for (size_t i = 0; i < gcSlots_.length(); i++) {
-            if (gcSlots_[i].stack == alloc.isStackSlot() && gcSlots_[i].slot == alloc.memorySlot())
+        for (auto gcSlot : gcSlots_) {
+            if (gcSlot.stack == alloc.isStackSlot() && gcSlot.slot == alloc.memorySlot())
                 return true;
         }
         return false;
     }
 
     MOZ_MUST_USE bool addValueSlot(bool stack, uint32_t slot) {
         bool result = valueSlots_.append(SlotEntry(stack, slot));
         if (result)
             assertInvariants();
         return result;
     }
     SlotList& valueSlots() {
         return valueSlots_;
     }
 
     bool hasValueSlot(bool stack, uint32_t slot) const {
-        for (size_t i = 0; i < valueSlots_.length(); i++) {
-            if (valueSlots_[i].stack == stack && valueSlots_[i].slot == slot)
+        for (auto valueSlot : valueSlots_) {
+            if (valueSlot.stack == stack && valueSlot.slot == slot)
                 return true;
         }
         return false;
     }
 
 #ifdef JS_NUNBOX32
 
     MOZ_MUST_USE bool addNunboxParts(uint32_t typeVreg, LAllocation type, LAllocation payload) {
--- a/js/src/jit/LoopUnroller.cpp
+++ b/js/src/jit/LoopUnroller.cpp
@@ -160,20 +160,18 @@ LoopUnroller::go(LoopIterationBound* bou
     }
     if (backedge->numPredecessors() != 1 || backedge->numSuccessors() != 1)
         return true;
     MOZ_ASSERT(backedge->phisEmpty());
 
     MBasicBlock* bodyBlocks[] = { header, backedge };
 
     // All instructions in the header and body must be clonable.
-    for (size_t i = 0; i < ArrayLength(bodyBlocks); i++) {
-        MBasicBlock* block = bodyBlocks[i];
-        for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
-            MInstruction* ins = *iter;
+    for (auto block : bodyBlocks) {
+        for (auto ins : *block) {
             if (ins->canClone())
                 continue;
             if (ins->isTest() || ins->isGoto() || ins->isInterruptCheck())
                 continue;
 #ifdef JS_JITSPEW
             JitSpew(JitSpew_Unrolling, "Aborting: can't clone instruction %s", ins->opName());
 #endif
             return true;
@@ -320,22 +318,21 @@ LoopUnroller::go(LoopIterationBound* bou
         newPreheader->setEntryResumePoint(rp);
     }
 
     // Generate the unrolled code.
     MOZ_ASSERT(UnrollCount > 1);
     size_t unrollIndex = 0;
     while (true) {
         // Clone the contents of the original loop into the unrolled loop body.
-        for (size_t i = 0; i < ArrayLength(bodyBlocks); i++) {
-            MBasicBlock* block = bodyBlocks[i];
-            for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
-                MInstruction* ins = *iter;
+        for (auto block : bodyBlocks) {
+            for (auto iter : *block) {
+                MInstruction* ins = iter;
                 if (ins->canClone()) {
-                    if (!makeReplacementInstruction(*iter))
+                    if (!makeReplacementInstruction(iter))
                         return false;
                 } else {
                     // Control instructions are handled separately.
                     MOZ_ASSERT(ins->isTest() || ins->isGoto() || ins->isInterruptCheck());
                 }
             }
         }
 
@@ -398,19 +395,19 @@ LoopUnroller::go(LoopIterationBound* bou
 }
 
 bool
 jit::UnrollLoops(MIRGraph& graph, const LoopIterationBoundVector& bounds)
 {
     if (bounds.empty())
         return true;
 
-    for (size_t i = 0; i < bounds.length(); i++) {
+    for (auto bound : bounds) {
         LoopUnroller unroller(graph);
-        if (!unroller.go(bounds[i]))
+        if (!unroller.go(bound))
             return false;
     }
 
     // The MIR graph is valid, but now has several new blocks. Update or
     // recompute previous analysis information for the remaining optimization
     // passes.
     ClearDominatorTree(graph);
     if (!BuildDominatorTree(graph))
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -102,17 +102,17 @@ TryToUseImplicitInterruptCheck(MIRGraph&
     // if the loop body can not trigger GC or affect GC state like the store
     // buffer. We do this by checking there are no safepoints attached to LIR
     // instructions inside the loop.
 
     MBasicBlockIterator block = graph.begin(backedge->loopHeaderOfBackedge());
     LInterruptCheck* check = nullptr;
     while (true) {
         LBlock* lir = block->lir();
-        for (LInstructionIterator iter = lir->begin(); iter != lir->end(); iter++) {
+        for (auto iter : *lir) {
             if (iter->isInterruptCheck()) {
                 if (!check) {
                     MOZ_ASSERT(*block == backedge->loopHeaderOfBackedge());
                     check = iter->toInterruptCheck();
                 }
                 continue;
             }
 
--- a/js/src/jit/MCallOptimize.cpp
+++ b/js/src/jit/MCallOptimize.cpp
@@ -2321,18 +2321,18 @@ IonBuilder::inlineHasClass(CallInfo& cal
         MHasClass* hasClass1 = MHasClass::New(alloc(), callInfo.getArg(0), clasp1);
         current->add(hasClass1);
 
         if (!clasp2 && !clasp3 && !clasp4) {
             current->push(hasClass1);
         } else {
             const Class* remaining[] = { clasp2, clasp3, clasp4 };
             MDefinition* last = hasClass1;
-            for (size_t i = 0; i < ArrayLength(remaining); i++) {
-                MHasClass* hasClass = MHasClass::New(alloc(), callInfo.getArg(0), remaining[i]);
+            for (auto & clasp : remaining) {
+                MHasClass* hasClass = MHasClass::New(alloc(), callInfo.getArg(0), clasp);
                 current->add(hasClass);
                 MBitOr* either = MBitOr::New(alloc(), last, hasClass);
                 either->infer(inspector, pc);
                 current->add(either);
                 last = either;
             }
 
             MDefinition* result = convertToBoolean(last);
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -654,53 +654,53 @@ MDefinition::hasOneUse() const
     i++;
     return i == uses_.end();
 }
 
 bool
 MDefinition::hasOneDefUse() const
 {
     bool hasOneDefUse = false;
-    for (MUseIterator i(uses_.begin()); i != uses_.end(); i++) {
-        if (!(*i)->consumer()->isDefinition())
+    for (auto use : uses_) {
+        if (!use->consumer()->isDefinition())
             continue;
 
         // We already have a definition use. So 1+
         if (hasOneDefUse)
             return false;
 
         // We saw one definition. Loop to test if there is another.
         hasOneDefUse = true;
     }
 
     return hasOneDefUse;
 }
 
 bool
 MDefinition::hasDefUses() const
 {
-    for (MUseIterator i(uses_.begin()); i != uses_.end(); i++) {
-        if ((*i)->consumer()->isDefinition())
+    for (auto use : uses_) {
+        if (use->consumer()->isDefinition())
             return true;
     }
 
     return false;
 }
 
 bool
 MDefinition::hasLiveDefUses() const
 {
-    for (MUseIterator i(uses_.begin()); i != uses_.end(); i++) {
-        MNode* ins = (*i)->consumer();
+    for (auto use : uses_) {
+        MNode* ins = use->consumer();
         if (ins->isDefinition()) {
             if (!ins->toDefinition()->isRecoveredOnBailout())
                 return true;
         } else {
             MOZ_ASSERT(ins->isResumePoint());
-            if (!ins->toResumePoint()->isRecoverableOperand(*i))
+            if (!ins->toResumePoint()->isRecoverableOperand(use))
                 return true;
         }
     }
 
     return false;
 }
 
 void
@@ -5495,18 +5495,18 @@ void
 InlinePropertyTable::trimToTargets(const InliningTargets& targets)
 {
     JitSpew(JitSpew_Inlining, "Got inlineable property cache with %d cases",
             (int)numEntries());
 
     size_t i = 0;
     while (i < numEntries()) {
         bool foundFunc = false;
-        for (size_t j = 0; j < targets.length(); j++) {
-            if (entries_[i]->func == targets[j].target) {
+        for (auto target : targets) {
+            if (entries_[i]->func == target.target) {
                 foundFunc = true;
                 break;
             }
         }
         if (!foundFunc)
             entries_.erase(&(entries_[i]));
         else
             i++;
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -117,18 +117,18 @@ MIRGenerator::abort(AbortReason r, const
     auto forward = abortFmt(r, message, ap);
     va_end(ap);
     return forward;
 }
 
 void
 MIRGenerator::addAbortedPreliminaryGroup(ObjectGroup* group)
 {
-    for (size_t i = 0; i < abortedPreliminaryGroups_.length(); i++) {
-        if (group == abortedPreliminaryGroups_[i])
+    for (auto & abortedPreliminaryGroup : abortedPreliminaryGroups_) {
+        if (group == abortedPreliminaryGroup)
             return;
     }
     AutoEnterOOMUnsafeRegion oomUnsafe;
     if (!abortedPreliminaryGroups_.append(group))
         oomUnsafe.crash("addAbortedPreliminaryGroup");
 }
 
 void
@@ -200,18 +200,17 @@ MIRGraph::removeSuccessorBlocks(MBasicBl
 
     if (osrBlock()) {
         if (osrBlock()->getSuccessor(0)->isMarked())
             osrBlock()->mark();
     }
 
     // Remove blocks.
     // If they don't have any predecessor
-    for (size_t i = 0; i < blocks.length(); i++) {
-        MBasicBlock* block = blocks[i];
+    for (auto block : blocks) {
         bool allMarked = true;
         for (size_t i = 0; i < block->numPredecessors(); i++) {
             if (block->getPredecessor(i)->isMarked())
                 continue;
             allMarked = false;
             break;
         }
         if (allMarked) {
@@ -230,18 +229,18 @@ MIRGraph::removeSuccessorBlocks(MBasicBl
         }
     }
 
     if (osrBlock()) {
         if (osrBlock()->getSuccessor(0)->isDead())
             removeBlock(osrBlock());
     }
 
-    for (size_t i = 0; i < blocks.length(); i++)
-        blocks[i]->unmark();
+    for (auto & block : blocks)
+        block->unmark();
     start->unmark();
 
     return true;
 }
 
 void
 MIRGraph::removeBlock(MBasicBlock* block)
 {
@@ -276,18 +275,18 @@ MIRGraph::removeBlockIncludingPhis(MBasi
     // we want to totally clear everything.
     removeBlock(block);
     block->discardAllPhis();
 }
 
 void
 MIRGraph::unmarkBlocks()
 {
-    for (MBasicBlockIterator i(blocks_.begin()); i != blocks_.end(); i++)
-        i->unmark();
+    for (auto block : blocks_)
+        block->unmark();
 }
 
 MBasicBlock*
 MBasicBlock::New(MIRGraph& graph, size_t stackDepth, const CompileInfo& info,
                  MBasicBlock* maybePred, BytecodeSite* site, Kind kind)
 {
     MOZ_ASSERT(site->pc() != nullptr);
 
@@ -1058,18 +1057,18 @@ MBasicBlock::discardAllInstructionsStart
 }
 
 void
 MBasicBlock::discardAllPhiOperands()
 {
     for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++)
         iter->removeAllOperands();
 
-    for (MBasicBlock** pred = predecessors_.begin(); pred != predecessors_.end(); pred++)
-        (*pred)->clearSuccessorWithPhis();
+    for (auto & predecessor : predecessors_)
+        predecessor->clearSuccessorWithPhis();
 }
 
 void
 MBasicBlock::discardAllPhis()
 {
     discardAllPhiOperands();
     phis_.clear();
 }
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -63,23 +63,23 @@ MacroAssembler::guardTypeSet(const Sourc
         MOZ_ASSERT(types->hasType(TypeSet::Int32Type()));
         tests[0] = TypeSet::DoubleType();
     }
 
     Register tag = extractTag(address, scratch);
 
     // Emit all typed tests.
     BranchType lastBranch;
-    for (size_t i = 0; i < mozilla::ArrayLength(tests); i++) {
-        if (!types->hasType(tests[i]))
+    for (auto test : tests) {
+        if (!types->hasType(test))
             continue;
 
         if (lastBranch.isInitialized())
             lastBranch.emit(*this);
-        lastBranch = BranchType(Equal, tag, tests[i], &matched);
+        lastBranch = BranchType(Equal, tag, test, &matched);
     }
 
     // If this is the last check, invert the last branch.
     if (types->hasType(TypeSet::AnyObjectType()) || !types->getObjectCount()) {
         if (!lastBranch.isInitialized()) {
             jump(miss);
             return;
         }
@@ -2379,18 +2379,17 @@ MacroAssembler::AutoProfilerCallInstrume
 
     masm.pop(reg2);
     masm.pop(reg);
 }
 
 void
 MacroAssembler::linkProfilerCallSites(JitCode* code)
 {
-    for (size_t i = 0; i < profilerCallSites_.length(); i++) {
-        CodeOffset offset = profilerCallSites_[i];
+    for (auto offset : profilerCallSites_) {
         CodeLocationLabel location(code, offset);
         PatchDataWithValueCheck(location, ImmPtr(location.raw()), ImmPtr((void*)-1));
     }
 }
 
 void
 MacroAssembler::alignJitStackBasedOnNArgs(Register nargs)
 {
--- a/js/src/jit/MoveResolver.cpp
+++ b/js/src/jit/MoveResolver.cpp
@@ -66,19 +66,17 @@ MoveResolver::addMove(const MoveOperand&
     return true;
 }
 
 // Given move (A -> B), this function attempts to find any move (B -> *) in the
 // pending move list, and returns the first one.
 MoveResolver::PendingMove*
 MoveResolver::findBlockingMove(const PendingMove* last)
 {
-    for (PendingMoveIterator iter = pending_.begin(); iter != pending_.end(); iter++) {
-        PendingMove* other = *iter;
-
+    for (auto other : pending_) {
         if (other->from().aliases(last->to())) {
             // We now have pairs in the form (A -> X) (X -> y). The second pair
             // blocks the move in the first pair, so return it.
             return other;
         }
     }
 
     // No blocking moves found.
--- a/js/src/jit/OptimizationTracking.cpp
+++ b/js/src/jit/OptimizationTracking.cpp
@@ -217,18 +217,18 @@ HashType(TypeSet::Type ty)
         return PointerHasher<TypeSet::ObjectKey*>::hash(ty.objectKey());
     return mozilla::HashGeneric(ty.raw());
 }
 
 static HashNumber
 HashTypeList(const TempTypeList& types)
 {
     HashNumber h = 0;
-    for (uint32_t i = 0; i < types.length(); i++)
-        h = CombineHash(h, HashType(types[i]));
+    for (auto type : types)
+        h = CombineHash(h, HashType(type));
     return h;
 }
 
 HashNumber
 OptimizationTypeInfo::hash() const
 {
     return ((HashNumber(site_) << 24) + (HashNumber(mirType_) << 16)) ^ HashTypeList(types_);
 }
@@ -315,28 +315,28 @@ UniqueTrackedOptimizations::sortByFreque
     Vector<SortEntry> scratch(cx);
     if (!scratch.resize(entries.length()))
         return false;
 
     FrequencyComparator comparator;
     MOZ_ALWAYS_TRUE(MergeSort(entries.begin(), entries.length(), scratch.begin(), comparator));
 
     // Update map entries' indices.
-    for (size_t i = 0; i < entries.length(); i++) {
+    for (auto & entry : entries) {
         Key key;
-        key.types = entries[i].types;
-        key.attempts = entries[i].attempts;
+        key.types = entry.types;
+        key.attempts = entry.attempts;
         AttemptsMap::Ptr p = map_.lookup(key);
         MOZ_ASSERT(p);
         p->value().index = sorted_.length();
 
         JitSpew(JitSpew_OptimizationTrackingExtended, "   Entry %" PRIuSIZE " has frequency %" PRIu32,
                 sorted_.length(), p->value().frequency);
 
-        if (!sorted_.append(entries[i]))
+        if (!sorted_.append(entry))
             return false;
     }
 
     return true;
 }
 
 uint8_t
 UniqueTrackedOptimizations::indexOf(const TrackedOptimizations* optimizations) const
@@ -614,19 +614,19 @@ OptimizationAttempt::writeCompact(Compac
 
 bool
 OptimizationTypeInfo::writeCompact(JSContext* cx, CompactBufferWriter& writer,
                                    UniqueTrackedTypes& uniqueTypes) const
 {
     writer.writeUnsigned((uint32_t) site_);
     writer.writeUnsigned((uint32_t) mirType_);
     writer.writeUnsigned(types_.length());
-    for (uint32_t i = 0; i < types_.length(); i++) {
+    for (auto type : types_) {
         uint8_t index;
-        if (!uniqueTypes.getIndexOf(cx, types_[i], &index))
+        if (!uniqueTypes.getIndexOf(cx, type, &index))
             return false;
         writer.writeByte(index);
     }
     return true;
 }
 
 /* static */ void
 IonTrackedOptimizationsRegion::ReadDelta(CompactBufferReader& reader,
@@ -821,20 +821,20 @@ WriteOffsetsTable(CompactBufferWriter& w
     // Record the start of the table to compute reverse offsets for entries.
     uint32_t tableOffset = writer.length();
 
     // Write how many bytes were padded and numEntries.
     writer.writeNativeEndianUint32_t(padding);
     writer.writeNativeEndianUint32_t(offsets.length());
 
     // Write entry offset table.
-    for (size_t i = 0; i < offsets.length(); i++) {
+    for (const uint32_t& offset : offsets) {
         JitSpew(JitSpew_OptimizationTrackingExtended, "   Entry %" PRIuSIZE " reverse offset %u",
-                i, tableOffset - padding - offsets[i]);
-        writer.writeNativeEndianUint32_t(tableOffset - padding - offsets[i]);
+                size_t(&offset - offsets.begin()), tableOffset - padding - offset);
+        writer.writeNativeEndianUint32_t(tableOffset - padding - offset);
     }
 
     if (writer.oom())
         return false;
 
     *tableOffsetp = tableOffset;
     return true;
 }
@@ -966,42 +966,41 @@ jit::WriteIonTrackedOptimizationsTable(J
     JitSpew(JitSpew_OptimizationTrackingExtended, "=> Writing unique optimizations table with %" PRIuSIZE " entr%s",
             vec.length(), vec.length() == 1 ? "y" : "ies");
 
     // Write out type info payloads.
     UniqueTrackedTypes uniqueTypes(cx);
     if (!uniqueTypes.init())
         return false;
 
-    for (const UniqueTrackedOptimizations::SortEntry* p = vec.begin(); p != vec.end(); p++) {
-        const TempOptimizationTypeInfoVector* v = p->types;
+    for (const auto & p : vec) {
+        const TempOptimizationTypeInfoVector* v = p.types;
         JitSpew(JitSpew_OptimizationTrackingExtended,
                 "   Type info entry %" PRIuSIZE " of length %" PRIuSIZE ", offset %" PRIuSIZE,
-                size_t(p - vec.begin()), v->length(), writer.length());
+                size_t(&p - vec.begin()), v->length(), writer.length());
         SpewTempOptimizationTypeInfoVector(JitSpew_OptimizationTrackingExtended, v, "  ");
 
         if (!offsets.append(writer.length()))
             return false;
 
-        for (const OptimizationTypeInfo* t = v->begin(); t != v->end(); t++) {
-            if (!t->writeCompact(cx, writer, uniqueTypes))
+        for (const auto & t : *v) {
+            if (!t.writeCompact(cx, writer, uniqueTypes))
                 return false;
         }
     }
 
     // Enumerate the unique types, and pull out any 'new' script constructor
     // functions and allocation site information. We do this during linking
     // instead of during profiling to avoid touching compartment tables during
     // profiling. Additionally, TypeNewScript is subject to GC in the
     // meantime.
     TypeSet::TypeList uniqueTypeList;
     if (!uniqueTypes.enumerate(&uniqueTypeList))
         return false;
-    for (uint32_t i = 0; i < uniqueTypeList.length(); i++) {
-        TypeSet::Type ty = uniqueTypeList[i];
+    for (auto ty : uniqueTypeList) {
         if (JSFunction* constructor = MaybeConstructorFromType(ty)) {
             if (!allTypes->append(IonTrackedTypeWithAddendum(ty, constructor)))
                 return false;
             SpewConstructor(ty, constructor);
         } else {
             JSScript* script;
             uint32_t offset;
             if (!ty.isUnknown() && !ty.isAnyObject() && ty.isGroup() &&
@@ -1017,30 +1016,30 @@ jit::WriteIonTrackedOptimizationsTable(J
         }
     }
 
     if (!WriteOffsetsTable(writer, offsets, typesTableOffsetp))
         return false;
     offsets.clear();
 
     // Write out attempts payloads.
-    for (const UniqueTrackedOptimizations::SortEntry* p = vec.begin(); p != vec.end(); p++) {
-        const TempOptimizationAttemptsVector* v = p->attempts;
+    for (const auto & p : vec) {
+        const TempOptimizationAttemptsVector* v = p.attempts;
         if (JitSpewEnabled(JitSpew_OptimizationTrackingExtended)) {
             JitSpew(JitSpew_OptimizationTrackingExtended,
                     "   Attempts entry %" PRIuSIZE " of length %" PRIuSIZE ", offset %" PRIuSIZE,
-                    size_t(p - vec.begin()), v->length(), writer.length());
+                    size_t(&p - vec.begin()), v->length(), writer.length());
             SpewTempOptimizationAttemptsVector(JitSpew_OptimizationTrackingExtended, v, "  ");
         }
 
         if (!offsets.append(writer.length()))
             return false;
 
-        for (const OptimizationAttempt* a = v->begin(); a != v->end(); a++)
-            a->writeCompact(writer);
+        for (auto a : *v)
+            a.writeCompact(writer);
     }
 
     return WriteOffsetsTable(writer, offsets, optimizationTableOffsetp);
 }
 
 
 BytecodeSite*
 IonBuilder::maybeTrackedOptimizationSite(jsbytecode* pc)
--- a/js/src/jit/RangeAnalysis.cpp
+++ b/js/src/jit/RangeAnalysis.cpp
@@ -2001,18 +2001,17 @@ RangeAnalysis::analyzeLoop(MBasicBlock* 
             }
         }
 
         // Note: replace all uses of the original bounds check with the
         // actual index. This is usually done during bounds check elimination,
         // but in this case it's safe to do it here since the load/store is
         // definitely not loop-invariant, so we will never move it before
         // one of the bounds checks we just added.
-        for (size_t i = 0; i < hoistedChecks.length(); i++) {
-            MBoundsCheck* ins = hoistedChecks[i];
+        for (auto ins : hoistedChecks) {
             ins->replaceAllUsesWith(ins->index());
             ins->block()->discard(ins);
         }
     }
 
     UnmarkLoopBlocks(graph_, header);
     return true;
 }
@@ -2349,17 +2348,17 @@ RangeAnalysis::analyze()
 
         if (block->isLoopHeader()) {
             if (!analyzeLoop(block))
                 return false;
         }
 
         // First pass at collecting range info - while the beta nodes are still
         // around and before truncation.
-        for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++)
+        for (auto iter : *block)
             iter->collectRangeInfoPreTrunc();
     }
 
     return true;
 }
 
 bool
 RangeAnalysis::addRangeAssertions()
@@ -3217,18 +3216,17 @@ RangeAnalysis::removeUnnecessaryBitops()
     // uniquely works with Int32, Recover Instructions added by the Sink phase
     // expects the MIR Graph to still have a valid flow as-if they were double
     // operations instead of Int32 operations. Thus, this phase should be
     // executed after the Sink phase, and before DCE.
 
     // Fold any unnecessary bitops in the graph, such as (x | 0) on an integer
     // input. This is done after range analysis rather than during GVN as the
     // presence of the bitop can change which instructions are truncated.
-    for (size_t i = 0; i < bitops.length(); i++) {
-        MBinaryBitwiseInstruction* ins = bitops[i];
+    for (auto ins : bitops) {
         if (ins->isRecoveredOnBailout())
             continue;
 
         MDefinition* folded = ins->foldUnnecessaryBitop();
         if (folded != ins) {
             ins->replaceAllLiveUsesWith(folded);
             ins->setRecoveredOnBailout();
         }
@@ -3619,15 +3617,14 @@ bool RangeAnalysis::tryRemovingGuards()
 
             operand->setInWorklist();
             operand->setGuardRangeBailouts();
             if (!guards.append(operand))
                 return false;
         }
     }
 
-    for (size_t i = 0; i < guards.length(); i++) {
-        MDefinition* guard = guards[i];
+    for (auto guard : guards) {
         guard->setNotInWorklist();
     }
 
     return true;
 }
--- a/js/src/jit/Recover.cpp
+++ b/js/src/jit/Recover.cpp
@@ -1613,25 +1613,25 @@ RObjectState::recover(JSContext* cx, Sna
     RootedValue val(cx);
 
     if (object->is<UnboxedPlainObject>()) {
         const UnboxedLayout& layout = object->as<UnboxedPlainObject>().layout();
 
         RootedId id(cx);
         RootedValue receiver(cx, ObjectValue(*object));
         const UnboxedLayout::PropertyVector& properties = layout.properties();
-        for (size_t i = 0; i < properties.length(); i++) {
+        for (auto property : properties) {
             val = iter.read();
 
             // This is the default placeholder value of MObjectState, when no
             // properties are defined yet.
             if (val.isUndefined())
                 continue;
 
-            id = NameToId(properties[i].name);
+            id = NameToId(property.name);
             ObjectOpResult result;
 
             // SetProperty can only fail due to OOM.
             if (!SetProperty(cx, object, id, val, receiver, result))
                 return false;
             if (!result)
                 return result.reportError(cx, object, id);
         }
--- a/js/src/jit/RegisterAllocator.cpp
+++ b/js/src/jit/RegisterAllocator.cpp
@@ -43,18 +43,17 @@ AllocationIntegrityState::record()
             if (!info.outputs.append(*phi->getDef(0)))
                 return false;
             for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
                 if (!info.inputs.append(*phi->getOperand(k)))
                     return false;
             }
         }
 
-        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
-            LInstruction* ins = *iter;
+        for (auto ins : *block) {
             InstructionInfo& info = instructions[ins->id()];
 
             for (size_t k = 0; k < ins->numTemps(); k++) {
                 if (!ins->getTemp(k)->isBogusTemp()) {
                     uint32_t vreg = ins->getTemp(k)->virtualRegister();
                     virtualRegisters[vreg] = ins->getTemp(k);
                 }
                 if (!info.temps.append(*ins->getTemp(k)))
@@ -483,18 +482,18 @@ RegisterAllocator::init()
     if (!insData.init(mir, graph.numInstructions()))
         return false;
 
     if (!entryPositions.reserve(graph.numBlocks()) || !exitPositions.reserve(graph.numBlocks()))
         return false;
 
     for (size_t i = 0; i < graph.numBlocks(); i++) {
         LBlock* block = graph.getBlock(i);
-        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++)
-            insData[ins->id()] = *ins;
+        for (auto ins : *block)
+            insData[ins->id()] = ins;
         for (size_t j = 0; j < block->numPhis(); j++) {
             LPhi* phi = block->getPhi(j);
             insData[phi->id()] = phi;
         }
 
         CodePosition entry = block->numPhis() != 0
                              ? CodePosition(block->getPhi(0)->id(), CodePosition::INPUT)
                              : inputOf(block->firstInstructionWithId());
--- a/js/src/jit/RematerializedFrame.cpp
+++ b/js/src/jit/RematerializedFrame.cpp
@@ -99,18 +99,17 @@ RematerializedFrame::RematerializeInline
 
     frames = Move(tempFrames.get());
     return true;
 }
 
 /* static */ void
 RematerializedFrame::FreeInVector(GCVector<RematerializedFrame*>& frames)
 {
-    for (size_t i = 0; i < frames.length(); i++) {
-        RematerializedFrame* f = frames[i];
+    for (auto f : frames) {
         MOZ_ASSERT(!Debugger::inFrameMaps(f));
         f->RematerializedFrame::~RematerializedFrame();
         js_free(f);
     }
     frames.clear();
 }
 
 CallObject&
--- a/js/src/jit/Safepoints.cpp
+++ b/js/src/jit/Safepoints.cpp
@@ -141,23 +141,23 @@ WriteBitset(const BitSet& set, CompactBu
 
 static void
 MapSlotsToBitset(BitSet& stackSet, BitSet& argumentSet,
                  CompactBufferWriter& stream, const LSafepoint::SlotList& slots)
 {
     stackSet.clear();
     argumentSet.clear();
 
-    for (uint32_t i = 0; i < slots.length(); i++) {
+    for (auto slot : slots) {
         // Slots are represented at a distance from |fp|. We divide by the
         // pointer size, since we only care about pointer-sized/aligned slots
         // here.
-        MOZ_ASSERT(slots[i].slot % sizeof(intptr_t) == 0);
-        size_t index = slots[i].slot / sizeof(intptr_t);
-        (slots[i].stack ? stackSet : argumentSet).insert(index);
+        MOZ_ASSERT(slot.slot % sizeof(intptr_t) == 0);
+        size_t index = slot.slot / sizeof(intptr_t);
+        (slot.stack ? stackSet : argumentSet).insert(index);
     }
 
     WriteBitset(stackSet, stream);
     WriteBitset(argumentSet, stream);
 }
 
 void
 SafepointWriter::writeGcSlots(LSafepoint* safepoint)
@@ -174,23 +174,23 @@ SafepointWriter::writeGcSlots(LSafepoint
 
 void
 SafepointWriter::writeSlotsOrElementsSlots(LSafepoint* safepoint)
 {
     LSafepoint::SlotList& slots = safepoint->slotsOrElementsSlots();
 
     stream_.writeUnsigned(slots.length());
 
-    for (uint32_t i = 0; i < slots.length(); i++) {
-        if (!slots[i].stack)
+    for (auto & slot : slots) {
+        if (!slot.stack)
             MOZ_CRASH();
 #ifdef JS_JITSPEW
-        JitSpew(JitSpew_Safepoints, "    slots/elements slot: %d", slots[i].slot);
+        JitSpew(JitSpew_Safepoints, "    slots/elements slot: %d", slot.slot);
 #endif
-        stream_.writeUnsigned(slots[i].slot);
+        stream_.writeUnsigned(slot.slot);
     }
 }
 
 void
 SafepointWriter::writeValueSlots(LSafepoint* safepoint)
 {
     LSafepoint::SlotList& slots = safepoint->valueSlots();
 
--- a/js/src/jit/StupidAllocator.cpp
+++ b/js/src/jit/StupidAllocator.cpp
@@ -46,17 +46,17 @@ StupidAllocator::init()
     if (!RegisterAllocator::init())
         return false;
 
     if (!virtualRegisters.appendN((LDefinition*)nullptr, graph.numVirtualRegisters()))
         return false;
 
     for (size_t i = 0; i < graph.numBlocks(); i++) {
         LBlock* block = graph.getBlock(i);
-        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
+        for (auto ins : *block) {
             for (size_t j = 0; j < ins->numDefs(); j++) {
                 LDefinition* def = ins->getDef(j);
                 virtualRegisters[def->virtualRegister()] = def;
             }
 
             for (size_t j = 0; j < ins->numTemps(); j++) {
                 LDefinition* def = ins->getTemp(j);
                 if (def->isBogusTemp())
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -158,34 +158,34 @@ CodeGeneratorShared::generateEpilogue()
     // On systems that use a constant pool, this is a good time to emit.
     masm.flushBuffer();
     return true;
 }
 
 bool
 CodeGeneratorShared::generateOutOfLineCode()
 {
-    for (size_t i = 0; i < outOfLineCode_.length(); i++) {
+    for (auto & ool : outOfLineCode_) {
         // Add native => bytecode mapping entries for OOL sites.
         // Not enabled on wasm yet since it doesn't contain bytecode mappings.
         if (!gen->compilingWasm()) {
-            if (!addNativeToBytecodeEntry(outOfLineCode_[i]->bytecodeSite()))
+            if (!addNativeToBytecodeEntry(ool->bytecodeSite()))
                 return false;
         }
 
         if (!gen->alloc().ensureBallast())
             return false;
 
         JitSpew(JitSpew_Codegen, "# Emitting out of line code");
 
-        masm.setFramePushed(outOfLineCode_[i]->framePushed());
-        lastPC_ = outOfLineCode_[i]->pc();
-        outOfLineCode_[i]->bind(&masm);
+        masm.setFramePushed(ool->framePushed());
+        lastPC_ = ool->pc();
+        ool->bind(&masm);
 
-        outOfLineCode_[i]->generate(this);
+        ool->generate(this);
     }
 
     return !masm.oom();
 }
 
 void
 CodeGeneratorShared::addOutOfLineCode(OutOfLineCode* code, const MInstruction* mir)
 {
@@ -668,18 +668,18 @@ CodeGeneratorShared::encodeSafepoints()
 bool
 CodeGeneratorShared::createNativeToBytecodeScriptList(JSContext* cx)
 {
     js::Vector<JSScript*, 0, SystemAllocPolicy> scriptList;
     InlineScriptTree* tree = gen->info().inlineScriptTree();
     for (;;) {
         // Add script from current tree.
         bool found = false;
-        for (uint32_t i = 0; i < scriptList.length(); i++) {
-            if (scriptList[i] == tree->script()) {
+        for (auto & script : scriptList) {
+            if (script == tree->script()) {
                 found = true;
                 break;
             }
         }
         if (!found) {
             if (!scriptList.append(tree->script()))
                 return false;
         }
@@ -873,18 +873,17 @@ CodeGeneratorShared::generateCompactTrac
     if (trackedOptimizations_.empty())
         return true;
 
     UniqueTrackedOptimizations unique(cx);
     if (!unique.init())
         return false;
 
     // Iterate through all entries to deduplicate their optimization attempts.
-    for (size_t i = 0; i < trackedOptimizations_.length(); i++) {
-        NativeToTrackedOptimizations& entry = trackedOptimizations_[i];
+    for (auto & entry : trackedOptimizations_) {
         if (!unique.add(entry.optimizations))
             return false;
     }
 
     // Sort the unique optimization attempts by frequency to stabilize the
     // attempts' indices in the compact table we will write later.
     if (!unique.sortByFrequency(cx))
         return false;
@@ -1675,17 +1674,17 @@ Label*
 CodeGeneratorShared::labelForBackedgeWithImplicitCheck(MBasicBlock* mir)
 {
     // If this is a loop backedge to a loop header with an implicit interrupt
     // check, use a patchable jump. Skip this search if compiling without a
     // script for wasm, as there will be no interrupt check instruction.
     // Due to critical edge unsplitting there may no longer be unique loop
     // backedges, so just look for any edge going to an earlier block in RPO.
     if (!gen->compilingWasm() && mir->isLoopHeader() && mir->id() <= current->mir()->id()) {
-        for (LInstructionIterator iter = mir->lir()->begin(); iter != mir->lir()->end(); iter++) {
+        for (auto iter : *mir->lir()) {
             if (iter->isMoveGroup()) {
                 // Continue searching for an interrupt check.
             } else {
                 // The interrupt check should be the first instruction in the
                 // loop header other than move groups.
                 MOZ_ASSERT(iter->isInterruptCheck());
                 if (iter->toInterruptCheck()->implicit())
                     return iter->toInterruptCheck()->oolEntry();
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -508,18 +508,18 @@ MacroAssembler::callWithABIPost(uint32_t
     MOZ_ASSERT(inCall_);
     inCall_ = false;
 #endif
 }
 
 static bool
 IsIntArgReg(Register reg)
 {
-    for (uint32_t i = 0; i < NumIntArgRegs; i++) {
-        if (IntArgRegs[i] == reg)
+    for (auto intArgReg : IntArgRegs) {
+        if (intArgReg == reg)
             return true;
     }
 
     return false;
 }
 
 void
 MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
--- a/js/src/jit/x86-shared/Assembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.cpp
@@ -75,18 +75,17 @@ void
 AssemblerX86Shared::TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
 {
     ::TraceDataRelocations(trc, code->raw(), reader);
 }
 
 void
 AssemblerX86Shared::trace(JSTracer* trc)
 {
-    for (size_t i = 0; i < jumps_.length(); i++) {
-        RelativePatch& rp = jumps_[i];
+    for (auto & rp : jumps_) {
         if (rp.kind == Relocation::JITCODE) {
             JitCode* code = JitCode::FromExecutable((uint8_t*)rp.target);
             TraceManuallyBarrieredEdge(trc, &code, "masmrel32");
             MOZ_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target));
         }
     }
     if (dataRelocations_.length()) {
         CompactBufferReader reader(dataRelocations_);
@@ -130,18 +129,17 @@ AssemblerX86Shared::executableCopy(void*
         blackbox[4] = uintptr_t(0xFFFF8888);
         MOZ_CRASH("Corrupt code buffer");
     }
 }
 
 void
 AssemblerX86Shared::processCodeLabels(uint8_t* rawCode)
 {
-    for (size_t i = 0; i < codeLabels_.length(); i++) {
-        CodeLabel label = codeLabels_[i];
+    for (auto label : codeLabels_) {
         Bind(rawCode, label.patchAt(), rawCode + label.target()->offset());
     }
 }
 
 AssemblerX86Shared::Condition
 AssemblerX86Shared::InvertCondition(Condition cond)
 {
     switch (cond) {
--- a/js/src/jsapi-tests/testGCAllocator.cpp
+++ b/js/src/jsapi-tests/testGCAllocator.cpp
@@ -85,18 +85,18 @@ addressesGrowUp(bool* resultOut)
 
     for (unsigned i = 0; i < ChunksToTest - 1; i++) {
         if (chunks[i] < chunks[i + 1])
             upCount++;
         else
             downCount++;
     }
 
-    for (unsigned i = 0; i < ChunksToTest; i++)
-        unmapPages(chunks[i], 2 * Chunk);
+    for (auto & chunk : chunks)
+        unmapPages(chunk, 2 * Chunk);
 
     /* Check results were mostly consistent. */
     CHECK(abs(upCount - downCount) >= ThresholdCount);
 
     *resultOut = upCount > downCount;
 
     return true;
 }
--- a/js/src/jsapi-tests/testJitRangeAnalysis.cpp
+++ b/js/src/jsapi-tests/testJitRangeAnalysis.cpp
@@ -254,18 +254,18 @@ BEGIN_TEST(testJitRangeAnalysis_StrictCo
     // We can't do beta node insertion with STRICTEQ and a non-numeric
     // comparison though.
     MCompare::CompareType nonNumerics[] = {
         MCompare::Compare_Unknown,
         MCompare::Compare_Object,
         MCompare::Compare_Bitwise,
         MCompare::Compare_String
     };
-    for (size_t i = 0; i < mozilla::ArrayLength(nonNumerics); ++i) {
-        cmp->setCompareType(nonNumerics[i]);
+    for (auto & nonNumeric : nonNumerics) {
+        cmp->setCompareType(nonNumeric);
         if (!func.runRangeAnalysis())
             return false;
         CHECK(!thenAdd->range() || thenAdd->range()->isUnknown());
         ClearDominatorTree(func.graph);
     }
 
     // We can do it with a numeric comparison.
     cmp->setCompareType(MCompare::Compare_Double);
--- a/js/src/jscompartment.cpp
+++ b/js/src/jscompartment.cpp
@@ -1247,18 +1247,18 @@ JSCompartment::updateDebuggerObservesFla
                flag == DebuggerObservesCoverage ||
                flag == DebuggerObservesAsmJS ||
                flag == DebuggerObservesBinarySource);
 
     GlobalObject* global = zone()->runtimeFromActiveCooperatingThread()->gc.isForegroundSweeping()
                            ? unsafeUnbarrieredMaybeGlobal()
                            : maybeGlobal();
     const GlobalObject::DebuggerVector* v = global->getDebuggers();
-    for (auto p = v->begin(); p != v->end(); p++) {
-        Debugger* dbg = *p;
+    for (const auto & p : *v) {
+        Debugger* dbg = p;
         if (flag == DebuggerObservesAllExecution ? dbg->observesAllExecution() :
             flag == DebuggerObservesCoverage ? dbg->observesCoverage() :
             flag == DebuggerObservesAsmJS ? dbg->observesAsmJS() :
             dbg->observesBinarySource())
         {
             debugModeBits |= flag;
             return;
         }
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -3222,19 +3222,19 @@ GCRuntime::sweepBackgroundThings(ZoneLis
     freeBlocks.freeAll();
 
     if (zones.isEmpty())
         return;
 
     // We must finalize thing kinds in the order specified by BackgroundFinalizePhases.
     Arena* emptyArenas = nullptr;
     FreeOp fop(nullptr);
-    for (unsigned phase = 0 ; phase < ArrayLength(BackgroundFinalizePhases) ; ++phase) {
+    for (const auto & phase : BackgroundFinalizePhases) {
         for (Zone* zone = zones.front(); zone; zone = zone->nextZone()) {
-            for (auto kind : BackgroundFinalizePhases[phase].kinds) {
+            for (auto kind : phase.kinds) {
                 Arena* arenas = zone->arenas.arenaListsToSweep(kind);
                 MOZ_RELEASE_ASSERT(uintptr_t(arenas) != uintptr_t(-1));
                 if (arenas)
                     ArenaLists::backgroundFinalize(&fop, arenas, &emptyArenas);
             }
         }
     }
 
@@ -5370,21 +5370,21 @@ GCRuntime::beginSweepingSweepGroup()
         startSweepingAtomsTable();
 
     // Queue all GC things in all zones for sweeping, either on the foreground
     // or on the background thread.
 
     for (GCSweepGroupIter zone(rt); !zone.done(); zone.next()) {
 
         zone->arenas.queueForForegroundSweep(&fop, ForegroundObjectFinalizePhase);
-        for (unsigned i = 0; i < ArrayLength(IncrementalFinalizePhases); ++i)
-            zone->arenas.queueForForegroundSweep(&fop, IncrementalFinalizePhases[i]);
-
-        for (unsigned i = 0; i < ArrayLength(BackgroundFinalizePhases); ++i)
-            zone->arenas.queueForBackgroundSweep(&fop, BackgroundFinalizePhases[i]);
+        for (const auto & phase : IncrementalFinalizePhases)
+            zone->arenas.queueForForegroundSweep(&fop, phase);
+
+        for (const auto & phase : BackgroundFinalizePhases)
+            zone->arenas.queueForBackgroundSweep(&fop, phase);
 
         zone->arenas.queueForegroundThingsForSweep(&fop);
     }
 
     sweepActionList = PerSweepGroupActionList;
     sweepActionIndex = 0;
     sweepPhaseIndex = 0;
     sweepZone = nullptr;
--- a/js/src/jsscript.cpp
+++ b/js/src/jsscript.cpp
@@ -1094,18 +1094,18 @@ JSScript::initScriptCounts(JSContext* cx
 
     // Initialize all PCCounts counters to 0.
     ScriptCounts::PCCountsVector base;
     if (!base.reserve(jumpTargets.length())) {
         ReportOutOfMemory(cx);
         return false;
     }
 
-    for (size_t i = 0; i < jumpTargets.length(); i++)
-        base.infallibleEmplaceBack(pcToOffset(jumpTargets[i]));
+    for (auto & jumpTarget : jumpTargets)
+        base.infallibleEmplaceBack(pcToOffset(jumpTarget));
 
     // Create compartment's scriptCountsMap if necessary.
     ScriptCountsMap* map = compartment()->scriptCountsMap;
     if (!map) {
         map = cx->new_<ScriptCountsMap>();
         if (!map) {
             ReportOutOfMemory(cx);
             return false;
--- a/js/src/jsstr.cpp
+++ b/js/src/jsstr.cpp
@@ -1654,18 +1654,18 @@ static const int      sBMHBadPattern  = 
 
 template <typename TextChar, typename PatChar>
 static int
 BoyerMooreHorspool(const TextChar* text, uint32_t textLen, const PatChar* pat, uint32_t patLen)
 {
     MOZ_ASSERT(0 < patLen && patLen <= sBMHPatLenMax);
 
     uint8_t skip[sBMHCharSetSize];
-    for (uint32_t i = 0; i < sBMHCharSetSize; i++)
-        skip[i] = uint8_t(patLen);
+    for (uint8_t & entry : skip)
+        entry = uint8_t(patLen);
 
     uint32_t patLast = patLen - 1;
     for (uint32_t i = 0; i < patLast; i++) {
         char16_t c = pat[i];
         if (c >= sBMHCharSetSize)
             return sBMHBadPattern;
         skip[c] = uint8_t(patLast - i);
     }
--- a/js/src/vm/ArrayBufferObject.cpp
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -351,18 +351,18 @@ ArrayBufferObject::detach(JSContext* cx,
         cx->compartment()->detachedTypedObjects = 1;
     }
 
     // Update all views of the buffer to account for the buffer having been
     // detached, and clear the buffer's data and list of views.
 
     auto& innerViews = cx->compartment()->innerViews.get();
     if (InnerViewTable::ViewVector* views = innerViews.maybeViewsUnbarriered(buffer)) {
-        for (size_t i = 0; i < views->length(); i++)
-            NoteViewBufferWasDetached((*views)[i], newContents, cx);
+        for (auto & view : *views)
+            NoteViewBufferWasDetached(view, newContents, cx);
         innerViews.removeViews(buffer);
     }
     if (buffer->firstView()) {
         if (buffer->forInlineTypedObject()) {
             // The buffer points to inline data in its first view, so to keep
             // this pointer alive we don't clear out the first view.
             MOZ_ASSERT(buffer->firstView()->is<InlineTransparentTypedObject>());
         } else {
@@ -426,18 +426,18 @@ ArrayBufferObject::changeContents(JSCont
 
     // Change buffer contents.
     uint8_t* oldDataPointer = dataPointer();
     setNewData(cx->runtime()->defaultFreeOp(), newContents, ownsState);
 
     // Update all views.
     auto& innerViews = cx->compartment()->innerViews.get();
     if (InnerViewTable::ViewVector* views = innerViews.maybeViewsUnbarriered(this)) {
-        for (size_t i = 0; i < views->length(); i++)
-            changeViewContents(cx, (*views)[i], oldDataPointer, newContents);
+        for (auto & view : *views)
+            changeViewContents(cx, view, oldDataPointer, newContents);
     }
     if (firstView())
         changeViewContents(cx, firstView(), oldDataPointer, newContents);
 }
 
 /*
  * Wasm Raw Buf Linear Memory Structure
  *
@@ -1330,18 +1330,18 @@ InnerViewTable::addView(JSContext* cx, A
 
         if (addToNursery) {
             // Only add the entry to |nurseryKeys| if it isn't already there.
             if (views.length() >= VIEW_LIST_MAX_LENGTH) {
                 // To avoid quadratic blowup, skip the loop below if we end up
                 // adding enormous numbers of views for the same object.
                 nurseryKeysValid = false;
             } else {
-                for (size_t i = 0; i < views.length(); i++) {
-                    if (gc::IsInsideNursery(views[i])) {
+                for (auto & view : views) {
+                    if (gc::IsInsideNursery(view)) {
                         addToNursery = false;
                         break;
                     }
                 }
             }
         }
 
         if (!views.append(view)) {
@@ -1410,18 +1410,18 @@ InnerViewTable::sweep()
 }
 
 void
 InnerViewTable::sweepAfterMinorGC()
 {
     MOZ_ASSERT(needsSweepAfterMinorGC());
 
     if (nurseryKeysValid) {
-        for (size_t i = 0; i < nurseryKeys.length(); i++) {
-            JSObject* buffer = MaybeForwarded(nurseryKeys[i]);
+        for (auto & nurseryKey : nurseryKeys) {
+            JSObject* buffer = MaybeForwarded(nurseryKey);
             Map::Ptr p = map.lookup(buffer);
             if (!p)
                 continue;
 
             if (sweepEntry(&p->mutableKey(), p->value()))
                 map.remove(buffer);
         }
         nurseryKeys.clear();
--- a/js/src/vm/Debugger.cpp
+++ b/js/src/vm/Debugger.cpp
@@ -804,18 +804,18 @@ Debugger::getScriptFrameWithIter(JSConte
     result.set(&p->value()->as<DebuggerFrame>());
     return true;
 }
 
 /* static */ bool
 Debugger::hasLiveHook(GlobalObject* global, Hook which)
 {
     if (GlobalObject::DebuggerVector* debuggers = global->getDebuggers()) {
-        for (auto p = debuggers->begin(); p != debuggers->end(); p++) {
-            Debugger* dbg = *p;
+        for (auto & debugger : *debuggers) {
+            Debugger* dbg = debugger;
             if (dbg->enabled && dbg->getHook(which))
                 return true;
         }
     }
     return false;
 }
 
 JSObject*
@@ -1870,31 +1870,31 @@ Debugger::dispatchHook(JSContext* cx, Ho
      * calling into arbitrary JS.
      *
      * Note: In the general case, 'triggered' contains references to objects in
      * different compartments--every compartment *except* this one.
      */
     AutoValueVector triggered(cx);
     Handle<GlobalObject*> global = cx->global();
     if (GlobalObject::DebuggerVector* debuggers = global->getDebuggers()) {
-        for (auto p = debuggers->begin(); p != debuggers->end(); p++) {
-            Debugger* dbg = *p;
+        for (auto & debugger : *debuggers) {
+            Debugger* dbg = debugger;
             if (dbg->enabled && hookIsEnabled(dbg)) {
                 if (!triggered.append(ObjectValue(*dbg->toJSObject())))
                     return JSTRAP_ERROR;
             }
         }
     }
 
     /*
      * Deliver the event to each debugger, checking again to make sure it
      * should still be delivered.
      */
-    for (Value* p = triggered.begin(); p != triggered.end(); p++) {
-        Debugger* dbg = Debugger::fromJSObject(&p->toObject());
+    for (auto & p : triggered) {
+        Debugger* dbg = Debugger::fromJSObject(&p.toObject());
         EnterDebuggeeNoExecute nx(cx, *dbg);
         if (dbg->debuggees.has(global) && dbg->enabled && hookIsEnabled(dbg)) {
             JSTrapStatus st = fireHook(dbg);
             if (st != JSTRAP_CONTINUE)
                 return st;
         }
     }
     return JSTRAP_CONTINUE;
@@ -1981,19 +1981,17 @@ Debugger::onTrap(JSContext* cx, MutableH
         // Skip a breakpoint that is not set for the current wasm::Instance --
         // single wasm::Code can handle breakpoints for mutiple instances.
         if (!isJS && &bp->asWasm()->wasmInstance->instance() != iter.wasmInstance())
             continue;
         if (!triggered.append(bp))
             return JSTRAP_ERROR;
     }
 
-    for (Breakpoint** p = triggered.begin(); p != triggered.end(); p++) {
-        Breakpoint* bp = *p;
-
+    for (auto bp : triggered) {
         /* Handlers can clear breakpoints. Check that bp still exists. */
         if (!site || !site->hasBreakpoint(bp))
             continue;
 
         /*
          * There are two reasons we have to check whether dbg is enabled and
          * debugging global.
          *
@@ -2612,19 +2610,19 @@ UpdateExecutionObservabilityOfScriptsInZ
               default:;
             }
         }
     }
 
     // Iterate through the scripts again and finish discarding
     // BaselineScripts. This must be done as a separate phase as we can only
     // discard the BaselineScript on scripts that have no IonScript.
-    for (size_t i = 0; i < scripts.length(); i++) {
+    for (auto & script : scripts) {
-        MOZ_ASSERT_IF(scripts[i]->isDebuggee(), observing);
+        MOZ_ASSERT_IF(script->isDebuggee(), observing);
-        FinishDiscardBaselineScript(fop, scripts[i]);
+        FinishDiscardBaselineScript(fop, script);
     }
 
     // Iterate through all wasm instances to find ones that need to be updated.
     for (JSCompartment* c : zone->compartments()) {
         for (wasm::Instance* instance : c->wasm.instances()) {
             if (!instance->debugEnabled())
                 continue;
 
@@ -2653,18 +2651,18 @@ Debugger::updateExecutionObservabilityOf
 }
 
 template <typename FrameFn>
 /* static */ void
 Debugger::forEachDebuggerFrame(AbstractFramePtr frame, FrameFn fn)
 {
     GlobalObject* global = frame.global();
     if (GlobalObject::DebuggerVector* debuggers = global->getDebuggers()) {
-        for (auto p = debuggers->begin(); p != debuggers->end(); p++) {
-            Debugger* dbg = *p;
+        for (auto & debugger : *debuggers) {
+            Debugger* dbg = debugger;
             if (FrameMap::Ptr entry = dbg->frames.lookup(frame))
                 fn(entry->value());
         }
     }
 }
 
 /* static */ bool
 Debugger::getDebuggerFrames(AbstractFramePtr frame, MutableHandle<DebuggerFrameVector> frames)
@@ -2889,18 +2887,18 @@ Debugger::cannotTrackAllocations(const G
     auto existingCallback = global.compartment()->getAllocationMetadataBuilder();
     return existingCallback && existingCallback != &SavedStacks::metadataBuilder;
 }
 
 /* static */ bool
 Debugger::isObservedByDebuggerTrackingAllocations(const GlobalObject& debuggee)
 {
     if (auto* v = debuggee.getDebuggers()) {
-        for (auto p = v->begin(); p != v->end(); p++) {
-            if ((*p)->trackingAllocationSites && (*p)->enabled) {
+        for (auto & p : *v) {
+            if (p->trackingAllocationSites && p->enabled) {
                 return true;
             }
         }
     }
 
     return false;
 }
 
@@ -3056,18 +3054,18 @@ Debugger::markIteratively(GCMarker* mark
                 continue;
 
             /*
              * Every debuggee has at least one debugger, so in this case
              * getDebuggers can't return nullptr.
              */
             const GlobalObject::DebuggerVector* debuggers = global->getDebuggers();
             MOZ_ASSERT(debuggers);
-            for (auto p = debuggers->begin(); p != debuggers->end(); p++) {
-                Debugger* dbg = *p;
+            for (const auto & debugger : *debuggers) {
+                Debugger* dbg = debugger;
 
                 /*
                  * dbg is a Debugger with at least one debuggee. Check three things:
                  *   - dbg is actually in a compartment that is being marked
                  *   - it isn't already marked
                  *   - it actually has hooks that might be called
                  */
                 GCPtrNativeObject& dbgobj = dbg->toJSObjectRef();
@@ -3991,18 +3989,18 @@ Debugger::addDebuggeeGlobal(JSContext* c
         }
 
         /*
          * Find all compartments containing debuggers debugging c's global
          * object. Add those compartments to visited.
          */
         if (c->isDebuggee()) {
             GlobalObject::DebuggerVector* v = c->maybeGlobal()->getDebuggers();
-            for (auto p = v->begin(); p != v->end(); p++) {
-                JSCompartment* next = (*p)->object->compartment();
+            for (auto & p : *v) {
+                JSCompartment* next = p->object->compartment();
                 if (Find(visited, next) == visited.end() && !visited.append(next))
                     return false;
             }
         }
     }
 
     /*
      * For global to become this js::Debugger's debuggee:
@@ -4420,18 +4418,18 @@ class MOZ_STACK_CLASS Debugger::ScriptQu
         oom = false;
         IterateScripts(cx, singletonComp, this, considerScript);
         if (oom) {
             ReportOutOfMemory(cx);
             return false;
         }
 
         /* We cannot touch the gray bits while isHeapBusy, so do this now. */
-        for (JSScript** i = vector.begin(); i != vector.end(); ++i)
-            JS::ExposeScriptToActiveJS(*i);
+        for (auto & script : vector)
+            JS::ExposeScriptToActiveJS(script);
 
         /*
          * For most queries, we just accumulate results in 'vector' as we find
          * them. But if this is an 'innermost' query, then we've accumulated the
          * results in the 'innermostForCompartment' map. In that case, we now need to
          * walk that map and populate 'vector'.
          */
         if (innermost) {
@@ -6427,20 +6425,20 @@ class DebuggerScriptGetAllColumnOffsetsM
         Vector<wasm::ExprLoc> offsets(cx_);
         if (!instance->instance().debug().getAllColumnOffsets(cx_, &offsets))
             return false;
 
         result_.set(NewDenseEmptyArray(cx_));
         if (!result_)
             return false;
 
-        for (uint32_t i = 0; i < offsets.length(); i++) {
-            size_t lineno = offsets[i].lineno;
-            size_t column = offsets[i].column;
-            size_t offset = offsets[i].offset;
+        for (auto & loc : offsets) {
+            size_t lineno = loc.lineno;
+            size_t column = loc.column;
+            size_t offset = loc.offset;
             if (!appendColumnOffsetEntry(lineno, column, offset))
                 return false;
         }
         return true;
     }
 };
 
 static bool
@@ -6504,18 +6502,18 @@ class DebuggerScriptGetLineOffsetsMatche
         Vector<uint32_t> offsets(cx_);
         if (!instance->instance().debug().getLineOffsets(cx_, lineno_, &offsets))
             return false;
 
         result_.set(NewDenseEmptyArray(cx_));
         if (!result_)
             return false;
 
-        for (uint32_t i = 0; i < offsets.length(); i++) {
-            if (!NewbornArrayPush(cx_, result_, NumberValue(offsets[i])))
+        for (unsigned int offset : offsets) {
+            if (!NewbornArrayPush(cx_, result_, NumberValue(offset)))
                 return false;
         }
         return true;
     }
 };
 
 static bool
 DebuggerScript_getLineOffsets(JSContext* cx, unsigned argc, Value* vp)
--- a/js/src/vm/HelperThreads.cpp
+++ b/js/src/vm/HelperThreads.cpp
@@ -378,18 +378,18 @@ ParseTask::finish(JSContext* cx)
             return false;
     }
 
     return true;
 }
 
 ParseTask::~ParseTask()
 {
-    for (size_t i = 0; i < errors.length(); i++)
-        js_delete(errors[i]);
+    for (auto & error : errors)
+        js_delete(error);
 }
 
 void
 ParseTask::trace(JSTracer* trc)
 {
     if (parseGlobal->runtimeFromAnyThread() != trc->runtime())
         return;
     Zone* zone = MaybeForwarded(parseGlobal)->zoneFromAnyThread();
@@ -529,18 +529,17 @@ js::CancelOffThreadParses(JSRuntime* rt)
 #endif
 
     // Instead of forcibly canceling pending parse tasks, just wait for all scheduled
     // and in progress ones to complete. Otherwise the final GC may not collect
     // everything due to zones being used off thread.
     while (true) {
         bool pending = false;
         GlobalHelperThreadState::ParseTaskVector& worklist = HelperThreadState().parseWorklist(lock);
-        for (size_t i = 0; i < worklist.length(); i++) {
-            ParseTask* task = worklist[i];
+        for (auto task : worklist) {
             if (task->runtimeMatches(rt))
                 pending = true;
         }
         if (!pending) {
             bool inProgress = false;
             for (auto& thread : *HelperThreadState().threads) {
                 ParseTask* task = thread.parseTask();
                 if (task && task->runtimeMatches(rt))
@@ -551,18 +550,17 @@ js::CancelOffThreadParses(JSRuntime* rt)
         }
         HelperThreadState().wait(lock, GlobalHelperThreadState::CONSUMER);
     }
 
     // Clean up any parse tasks which haven't been finished by the active thread.
     GlobalHelperThreadState::ParseTaskVector& finished = HelperThreadState().parseFinishedList(lock);
     while (true) {
         bool found = false;
-        for (size_t i = 0; i < finished.length(); i++) {
-            ParseTask* task = finished[i];
+        for (auto task : finished) {
             if (task->runtimeMatches(rt)) {
                 found = true;
                 AutoUnlockHelperThreadState unlock(lock);
                 HelperThreadState().cancelParseTask(rt, task->kind, task);
             }
         }
         if (!found)
             break;
@@ -778,18 +776,18 @@ js::EnqueuePendingParseTasksAfterGC(JSRu
     }
 
     if (newTasks.empty())
         return;
 
     // This logic should mirror the contents of the !activeGCInAtomsZone()
     // branch in StartOffThreadParseScript:
 
-    for (size_t i = 0; i < newTasks.length(); i++)
-        newTasks[i]->activate(rt);
+    for (auto & newTask : newTasks)
+        newTask->activate(rt);
 
     AutoLockHelperThreadState lock;
 
     {
         AutoEnterOOMUnsafeRegion oomUnsafe;
         if (!HelperThreadState().parseWorklist(lock).appendAll(newTasks))
             oomUnsafe.crash("EnqueuePendingParseTasksAfterGC");
     }
@@ -1446,18 +1444,18 @@ GlobalHelperThreadState::finishParseTask
     // Report out of memory errors eagerly, or errors could be malformed.
     if (parseTask->outOfMemory) {
         ReportOutOfMemory(cx);
         return false;
     }
 
     // Report any error or warnings generated during the parse, and inform the
     // debugger about the compiled scripts.
-    for (size_t i = 0; i < parseTask->errors.length(); i++)
-        parseTask->errors[i]->throwError(cx);
+    for (auto & error : parseTask->errors)
+        error->throwError(cx);
     if (parseTask->overRecursed)
         ReportOverRecursed(cx);
     if (cx->isExceptionPending())
         return false;
 
     return true;
 }
 
--- a/js/src/vm/JSONParser.cpp
+++ b/js/src/vm/JSONParser.cpp
@@ -22,43 +22,43 @@
 #include "vm/NativeObject-inl.h"
 
 using namespace js;
 
 using mozilla::RangedPtr;
 
 JSONParserBase::~JSONParserBase()
 {
-    for (size_t i = 0; i < stack.length(); i++) {
-        if (stack[i].state == FinishArrayElement)
-            js_delete(&stack[i].elements());
+    for (auto & entry : stack) {
+        if (entry.state == FinishArrayElement)
+            js_delete(&entry.elements());
         else
-            js_delete(&stack[i].properties());
+            js_delete(&entry.properties());
     }
 
-    for (size_t i = 0; i < freeElements.length(); i++)
-        js_delete(freeElements[i]);
+    for (auto & freeElement : freeElements)
+        js_delete(freeElement);
 
-    for (size_t i = 0; i < freeProperties.length(); i++)
-        js_delete(freeProperties[i]);
+    for (auto & freeProperty : freeProperties)
+        js_delete(freeProperty);
 }
 
 void
 JSONParserBase::trace(JSTracer* trc)
 {
-    for (size_t i = 0; i < stack.length(); i++) {
-        if (stack[i].state == FinishArrayElement) {
-            ElementVector& elements = stack[i].elements();
-            for (size_t j = 0; j < elements.length(); j++)
-                TraceRoot(trc, &elements[j], "JSONParser element");
+    for (auto & stk : stack) {
+        if (stk.state == FinishArrayElement) {
+            ElementVector& elements = stk.elements();
+            for (auto & element : elements)
+                TraceRoot(trc, &element, "JSONParser element");
         } else {
-            PropertyVector& properties = stack[i].properties();
-            for (size_t j = 0; j < properties.length(); j++) {
-                TraceRoot(trc, &properties[j].value, "JSONParser property value");
-                TraceRoot(trc, &properties[j].id, "JSONParser property id");
+            PropertyVector& properties = stk.properties();
+            for (auto & property : properties) {
+                TraceRoot(trc, &property.value, "JSONParser property value");
+                TraceRoot(trc, &property.id, "JSONParser property id");
             }
         }
     }
 }
 
 template <typename CharT>
 void
 JSONParser<CharT>::getTextPosition(uint32_t* column, uint32_t* line)
--- a/js/src/vm/MemoryMetrics.cpp
+++ b/js/src/vm/MemoryMetrics.cpp
@@ -787,35 +787,35 @@ CollectRuntimeStatsHelper(JSContext* cx,
         return false;
 
     JS::ZoneStatsVector& zs = rtStats->zoneStatsVector;
     ZoneStats& zTotals = rtStats->zTotals;
 
     // We don't look for notable strings for zTotals. So we first sum all the
     // zones' measurements to get the totals. Then we find the notable strings
     // within each zone.
-    for (size_t i = 0; i < zs.length(); i++)
-        zTotals.addSizes(zs[i]);
+    for (const auto & z : zs)
+        zTotals.addSizes(z);
 
-    for (size_t i = 0; i < zs.length(); i++)
-        if (!FindNotableStrings(zs[i]))
+    for (auto & z : zs)
+        if (!FindNotableStrings(z))
             return false;
 
     MOZ_ASSERT(!zTotals.allStrings);
 
     JS::CompartmentStatsVector& cs = rtStats->compartmentStatsVector;
     CompartmentStats& cTotals = rtStats->cTotals;
 
     // As with the zones, we sum all compartments first, and then get the
     // notable classes within each zone.
-    for (size_t i = 0; i < cs.length(); i++)
-        cTotals.addSizes(cs[i]);
+    for (const auto & c : cs)
+        cTotals.addSizes(c);
 
-    for (size_t i = 0; i < cs.length(); i++) {
-        if (!FindNotableClasses(cs[i]))
+    for (auto & c : cs) {
+        if (!FindNotableClasses(c))
             return false;
     }
 
     MOZ_ASSERT(!cTotals.allClasses);
 
     rtStats->gcHeapGCThings = rtStats->zTotals.sizeOfLiveGCThings() +
                               rtStats->cTotals.sizeOfLiveGCThings();
 
--- a/js/src/vm/RegExpObject.cpp
+++ b/js/src/vm/RegExpObject.cpp
@@ -1022,18 +1022,18 @@ RegExpShared::compile(JSContext* cx, Mut
     MOZ_ASSERT_IF(force == ForceByteCode, code.byteCode);
 
     RegExpCompilation& compilation = re->compilation(mode, input->hasLatin1Chars());
     if (code.jitCode) {
         // First copy the tables. GC can purge the tables if the RegExpShared
         // has no JIT code, so it's important to do this right before setting
         // compilation.jitCode (to ensure no purging happens between adding the
         // tables and setting the JIT code).
-        for (size_t i = 0; i < tables.length(); i++) {
-            if (!re->addTable(Move(tables[i])))
+        for (auto & table : tables) {
+            if (!re->addTable(Move(table)))
                 return false;
         }
         compilation.jitCode = code.jitCode;
     } else if (code.byteCode) {
         MOZ_ASSERT(tables.empty(), "RegExpInterpreter does not use data tables");
         compilation.byteCode = code.byteCode;
     }
 
@@ -1185,18 +1185,18 @@ RegExpShared::sizeOfExcludingThis(mozill
 
     for (size_t i = 0; i < ArrayLength(compilationArray); i++) {
         const RegExpCompilation& compilation = compilationArray[i];
         if (compilation.byteCode)
             n += mallocSizeOf(compilation.byteCode);
     }
 
     n += tables.sizeOfExcludingThis(mallocSizeOf);
-    for (size_t i = 0; i < tables.length(); i++)
-        n += mallocSizeOf(tables[i].get());
+    for (auto & table : tables)
+        n += mallocSizeOf(table.get());
 
     return n;
 }
 
 /* RegExpCompartment */
 
 RegExpCompartment::RegExpCompartment(Zone* zone)
   : matchResultTemplateObject_(nullptr),
--- a/js/src/vm/SavedStacks.cpp
+++ b/js/src/vm/SavedStacks.cpp
@@ -218,18 +218,18 @@ class MOZ_STACK_CLASS SavedFrame::AutoLo
     typedef Vector<Lookup, ASYNC_STACK_MAX_FRAME_COUNT> LookupVector;
     inline LookupVector* operator->() { return &lookups; }
     inline HandleLookup operator[](size_t i) { return HandleLookup(lookups[i]); }
 
   private:
     LookupVector lookups;
 
     virtual void trace(JSTracer* trc) {
-        for (size_t i = 0; i < lookups.length(); i++)
-            lookups[i].trace(trc);
+        for (auto & lookup : lookups)
+            lookup.trace(trc);
     }
 };
 
 /* static */ bool
 SavedFrame::HashPolicy::hasHash(const Lookup& l)
 {
     return SavedFramePtrHasher::hasHash(l.parent);
 }
--- a/js/src/vm/Stack.cpp
+++ b/js/src/vm/Stack.cpp
@@ -1569,18 +1569,18 @@ void
 jit::JitActivation::removeRematerializedFramesFromDebugger(JSContext* cx, uint8_t* top)
 {
     // Ion bailout can fail due to overrecursion and OOM. In such cases we
     // cannot honor any further Debugger hooks on the frame, and need to
     // ensure that its Debugger.Frame entry is cleaned up.
     if (!cx->compartment()->isDebuggee() || !rematerializedFrames_)
         return;
     if (RematerializedFrameTable::Ptr p = rematerializedFrames_->lookup(top)) {
-        for (uint32_t i = 0; i < p->value().length(); i++)
-            Debugger::handleUnrecoverableIonBailoutError(cx, p->value()[i]);
+        for (auto & frame : p->value())
+            Debugger::handleUnrecoverableIonBailoutError(cx, frame);
     }
 }
 
 void
 jit::JitActivation::traceRematerializedFrames(JSTracer* trc)
 {
     if (!rematerializedFrames_)
         return;
@@ -1618,18 +1618,18 @@ jit::JitActivation::removeIonFrameRecove
         return;
 
     ionRecovery_.erase(elem);
 }
 
 void
 jit::JitActivation::traceIonRecovery(JSTracer* trc)
 {
-    for (RInstructionResults* it = ionRecovery_.begin(); it != ionRecovery_.end(); it++)
-        it->trace(trc);
+    for (auto & it : ionRecovery_)
+        it.trace(trc);
 }
 
 WasmActivation::WasmActivation(JSContext* cx)
   : Activation(cx, Wasm),
     exitFP_(nullptr)
 {
     // Now that the WasmActivation is fully initialized, make it visible to
     // asynchronous profiling.
--- a/js/src/vm/String.cpp
+++ b/js/src/vm/String.cpp
@@ -875,25 +875,25 @@ StaticStrings::init(JSContext* cx)
     return true;
 }
 
 void
 StaticStrings::trace(JSTracer* trc)
 {
     /* These strings never change, so barriers are not needed. */
 
-    for (uint32_t i = 0; i < UNIT_STATIC_LIMIT; i++)
-        TraceProcessGlobalRoot(trc, unitStaticTable[i], "unit-static-string");
+    for (auto & str : unitStaticTable)
+        TraceProcessGlobalRoot(trc, str, "unit-static-string");
 
-    for (uint32_t i = 0; i < NUM_SMALL_CHARS * NUM_SMALL_CHARS; i++)
-        TraceProcessGlobalRoot(trc, length2StaticTable[i], "length2-static-string");
+    for (auto & str : length2StaticTable)
+        TraceProcessGlobalRoot(trc, str, "length2-static-string");
 
     /* This may mark some strings more than once, but so be it. */
-    for (uint32_t i = 0; i < INT_STATIC_LIMIT; i++)
-        TraceProcessGlobalRoot(trc, intStaticTable[i], "int-static-string");
+    for (auto & str : intStaticTable)
+        TraceProcessGlobalRoot(trc, str, "int-static-string");
 }
 
 template <typename CharT>
 /* static */ bool
 StaticStrings::isStatic(const CharT* chars, size_t length)
 {
     switch (length) {
       case 1: {
--- a/js/src/vm/TypeInference.cpp
+++ b/js/src/vm/TypeInference.cpp
@@ -486,18 +486,18 @@ TypeSet::addTypesToConstraint(JSContext*
     /*
      * Build all types in the set into a vector before triggering the
      * constraint, as doing so may modify this type set.
      */
     TypeList types;
     if (!enumerateTypes(&types))
         return false;
 
-    for (unsigned i = 0; i < types.length(); i++)
-        constraint->newType(cx, this, types[i]);
+    for (auto type : types)
+        constraint->newType(cx, this, type);
 
     return true;
 }
 
 #ifdef DEBUG
 static inline bool
 CompartmentsMatch(JSCompartment* a, JSCompartment* b)
 {
@@ -1358,18 +1358,18 @@ CheckFrozenTypeSet(JSContext* cx, Tempor
 
     if (!actual->isSubset(frozen))
         return false;
 
     if (!frozen->isSubset(actual)) {
         TypeSet::TypeList list;
         frozen->enumerateTypes(&list);
 
-        for (size_t i = 0; i < list.length(); i++)
-            actual->addType(cx, list[i]);
+        for (auto type : list)
+            actual->addType(cx, type);
     }
 
     return true;
 }
 
 namespace {
 
 /*
@@ -1532,18 +1532,18 @@ CheckDefinitePropertiesTypeSet(JSContext
     // types can have been added to actual. The analysis may have updated the
     // contents of |frozen| though with new speculative types, and these need
     // to be reflected in |actual| for AddClearDefiniteFunctionUsesInScript
     // to work.
     if (!frozen->isSubset(actual)) {
         TypeSet::TypeList list;
         frozen->enumerateTypes(&list);
 
-        for (size_t i = 0; i < list.length(); i++)
-            actual->addType(cx, list[i]);
+        for (auto type : list)
+            actual->addType(cx, type);
     }
 }
 
 void
 js::FinishDefinitePropertiesAnalysis(JSContext* cx, CompilerConstraintList* constraints)
 {
 #ifdef DEBUG
     // Assert no new types have been added to the StackTypeSets. Do this before
@@ -2534,19 +2534,19 @@ TypeZone::processPendingRecompiles(FreeO
 {
     MOZ_ASSERT(!recompiles.empty());
 
     /*
      * Steal the list of scripts to recompile, to make sure we don't try to
      * recursively recompile them.
      */
     RecompileInfoVector pending;
-    for (size_t i = 0; i < recompiles.length(); i++) {
+    for (auto & recompile : recompiles) {
         AutoEnterOOMUnsafeRegion oomUnsafe;
-        if (!pending.append(recompiles[i]))
+        if (!pending.append(recompile))
             oomUnsafe.crash("processPendingRecompiles");
     }
     recompiles.clear();
 
     jit::Invalidate(*this, fop, pending);
 
     MOZ_ASSERT(recompiles.empty());
 }
@@ -3453,66 +3453,66 @@ JSFunction::setTypeForScriptedFunction(J
 void
 PreliminaryObjectArray::registerNewObject(JSObject* res)
 {
     // The preliminary object pointers are weak, and won't be swept properly
     // during nursery collections, so the preliminary objects need to be
     // initially tenured.
     MOZ_ASSERT(!IsInsideNursery(res));
 
-    for (size_t i = 0; i < COUNT; i++) {
-        if (!objects[i]) {
-            objects[i] = res;
+    for (auto & object : objects) {
+        if (!object) {
+            object = res;
             return;
         }
     }
 
     MOZ_CRASH("There should be room for registering the new object");
 }
 
 void
 PreliminaryObjectArray::unregisterObject(JSObject* obj)
 {
-    for (size_t i = 0; i < COUNT; i++) {
-        if (objects[i] == obj) {
-            objects[i] = nullptr;
+    for (auto & object : objects) {
+        if (object == obj) {
+            object = nullptr;
             return;
         }
     }
 
     MOZ_CRASH("The object should be in the array");
 }
 
 bool
 PreliminaryObjectArray::full() const
 {
-    for (size_t i = 0; i < COUNT; i++) {
-        if (!objects[i])
+    for (auto object : objects) {
+        if (!object)
             return false;
     }
     return true;
 }
 
 bool
 PreliminaryObjectArray::empty() const
 {
-    for (size_t i = 0; i < COUNT; i++) {
-        if (objects[i])
+    for (auto object : objects) {
+        if (object)
             return false;
     }
     return true;
 }
 
 void
 PreliminaryObjectArray::sweep()
 {
     // All objects in the array are weak, so clear any that are about to be
     // destroyed.
-    for (size_t i = 0; i < COUNT; i++) {
-        JSObject** ptr = &objects[i];
+    for (auto & object : objects) {
+        JSObject** ptr = &object;
         if (*ptr && IsAboutToBeFinalizedUnbarriered(ptr)) {
             // Before we clear this reference, change the object's group to the
             // Object.prototype group. This is done to ensure JSObject::finalize
             // sees a NativeObject Class even if we change the current group's
             // Class to one of the unboxed object classes in the meantime. If
             // the compartment's global is dead, we don't do anything as the
             // group's Class is not going to change in that case.
             JSObject* obj = *ptr;
--- a/js/src/vm/UnboxedObject.cpp
+++ b/js/src/vm/UnboxedObject.cpp
@@ -25,18 +25,18 @@ using namespace js;
 
 /////////////////////////////////////////////////////////////////////
 // UnboxedLayout
 /////////////////////////////////////////////////////////////////////
 
 void
 UnboxedLayout::trace(JSTracer* trc)
 {
-    for (size_t i = 0; i < properties_.length(); i++)
-        TraceManuallyBarrieredEdge(trc, &properties_[i].name, "unboxed_layout_name");
+    for (auto& property : properties_)
+        TraceManuallyBarrieredEdge(trc, &property.name, "unboxed_layout_name");
 
     if (newScript())
         newScript()->trace(trc);
 
     TraceNullableEdge(trc, &nativeGroup_, "unboxed_layout_nativeGroup");
     TraceNullableEdge(trc, &nativeShape_, "unboxed_layout_nativeShape");
     TraceNullableEdge(trc, &allocationScript_, "unboxed_layout_allocationScript");
     TraceNullableEdge(trc, &replacementGroup_, "unboxed_layout_replacementGroup");
@@ -376,18 +376,18 @@ static bool
 PropagatePropertyTypes(JSContext* cx, jsid id, ObjectGroup* oldGroup, ObjectGroup* newGroup)
 {
     HeapTypeSet* typeProperty = oldGroup->maybeGetProperty(id);
     TypeSet::TypeList types;
     if (!typeProperty->enumerateTypes(&types)) {
         ReportOutOfMemory(cx);
         return false;
     }
-    for (size_t j = 0; j < types.length(); j++)
-        AddTypePropertyId(cx, newGroup, nullptr, id, types[j]);
+    for (auto type : types)
+        AddTypePropertyId(cx, newGroup, nullptr, id, type);
     return true;
 }
 
 static PlainObject*
 MakeReplacementTemplateObject(JSContext* cx, HandleObjectGroup group, const UnboxedLayout &layout)
 {
     Rooted<PlainObject*> obj(cx, NewObjectWithGroup<PlainObject>(cx, group, layout.getAllocKind(),
                                                                  TenuredObject));
@@ -559,21 +559,21 @@ UnboxedPlainObject::convertToNative(JSCo
             return false;
 
         // makeNativeGroup can reentrantly invoke this method.
         if (obj->is<PlainObject>())
             return true;
     }
 
     AutoValueVector values(cx);
-    for (size_t i = 0; i < layout.properties().length(); i++) {
+    for (const auto& prop : layout.properties()) {
         // We might be reading properties off the object which have not been
         // initialized yet. Make sure any double values we read here are
         // canonicalized.
-        if (!values.append(obj->as<UnboxedPlainObject>().getValue(layout.properties()[i], true)))
+        if (!values.append(obj->as<UnboxedPlainObject>().getValue(prop, true)))
             return false;
     }
 
     // We are eliminating the expando edge with the conversion, so trigger a
     // pre barrier.
     JSObject::writeBarrierPre(expando);
 
     // Additionally trigger a post barrier on the expando itself. Whole cell
@@ -608,18 +608,18 @@ UnboxedPlainObject::convertToNative(JSCo
             }
         }
         ::Reverse(ids.begin(), ids.end());
 
         RootedPlainObject nobj(cx, &obj->as<PlainObject>());
         Rooted<UnboxedExpandoObject*> nexpando(cx, expando);
         RootedId id(cx);
         Rooted<PropertyDescriptor> desc(cx);
-        for (size_t i = 0; i < ids.length(); i++) {
-            id = ids[i];
+        for (auto rawId : ids) {
+            id = rawId;
             if (!GetOwnPropertyDescriptor(cx, nexpando, id, &desc))
                 return false;
             ObjectOpResult result;
             if (!DefineProperty(cx, nobj, id, desc, result))
                 return false;
             MOZ_ASSERT(result.ok());
         }
     }
@@ -927,18 +927,18 @@ UnboxedPlainObject::obj_watch(JSContext*
 /* static */ bool
 UnboxedPlainObject::newEnumerate(JSContext* cx, HandleObject obj, AutoIdVector& properties,
                                  bool enumerableOnly)
 {
     // Ignore expando properties here, they are special-cased by the property
     // enumeration code.
 
     const UnboxedLayout::PropertyVector& unboxed = obj->as<UnboxedPlainObject>().layout().properties();
-    for (size_t i = 0; i < unboxed.length(); i++) {
-        if (!properties.append(NameToId(unboxed[i].name)))
+    for (const auto& prop : unboxed) {
+        if (!properties.append(NameToId(prop.name)))
             return false;
     }
 
     return true;
 }
 
 const Class UnboxedExpandoObject::class_ = {
     "UnboxedExpandoObject",
@@ -1715,22 +1715,21 @@ CombineUnboxedTypes(const Value& value, 
     return false;
 }
 
 // Return whether the property names and types in layout are a subset of the
 // specified vector.
 static bool
 PropertiesAreSuperset(const UnboxedLayout::PropertyVector& properties, UnboxedLayout* layout)
 {
-    for (size_t i = 0; i < layout->properties().length(); i++) {
-        const UnboxedLayout::Property& layoutProperty = layout->properties()[i];
+    for (const auto& layoutProperty : layout->properties()) {
         bool found = false;
-        for (size_t j = 0; j < properties.length(); j++) {
-            if (layoutProperty.name == properties[j].name) {
-                found = (layoutProperty.type == properties[j].type);
+        for (const auto& property : properties) {
+            if (layoutProperty.name == property.name) {
+                found = (layoutProperty.type == property.type);
                 break;
             }
         }
         if (!found)
             return false;
     }
     return true;
 }
@@ -1815,34 +1814,32 @@ ComputePlainObjectLayout(JSContext* cx, 
             if (!bestExisting ||
                 existing->properties().length() > bestExisting->properties().length())
             {
                 bestExisting = existing;
             }
         }
     }
     if (bestExisting) {
-        for (size_t i = 0; i < bestExisting->properties().length(); i++) {
-            const UnboxedLayout::Property& existingProperty = bestExisting->properties()[i];
+        for (const auto& existingProperty : bestExisting->properties()) {
             for (size_t j = 0; j < templateShape->slotSpan(); j++) {
                 if (existingProperty.name == properties[j].name) {
                     MOZ_ASSERT(existingProperty.type == properties[j].type);
                     properties[j].offset = existingProperty.offset;
                 }
             }
         }
         offset = bestExisting->size();
     }
 
     // Order remaining properties from the largest down for the best space
     // utilization.
     static const size_t typeSizes[] = { 8, 4, 1 };
 
-    for (size_t i = 0; i < ArrayLength(typeSizes); i++) {
-        size_t size = typeSizes[i];
+    for (size_t size : typeSizes) {
         for (size_t j = 0; j < templateShape->slotSpan(); j++) {
             if (properties[j].offset != UINT32_MAX)
                 continue;
             JSValueType type = properties[j].type;
             if (UnboxedTypeSize(type) == size) {
                 offset = JS_ROUNDUP(offset, size);
                 properties[j].offset = offset;
                 offset += size;
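Note: the descending pass above walks the sizes 8, 4, then 1 and rounds each offset up to the field's own size, so every remaining property stays naturally aligned while larger fields are placed first for the best space utilization. A small standalone sketch of that packing rule, with hypothetical names:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Round |value| up to a multiple of |align| (align is a power of two here).
    static size_t RoundUp(size_t value, size_t align) {
        return (value + align - 1) & ~(align - 1);
    }

    // Assign offsets largest-first, mirroring the {8, 4, 1} pass above.
    size_t AssignOffsets(const std::vector<size_t>& fieldSizes,
                         std::vector<size_t>& offsets)
    {
        static const size_t kSizes[] = { 8, 4, 1 };
        size_t offset = 0;
        offsets.assign(fieldSizes.size(), SIZE_MAX);
        for (size_t size : kSizes) {
            for (size_t i = 0; i < fieldSizes.size(); i++) {
                if (offsets[i] != SIZE_MAX || fieldSizes[i] != size)
                    continue;
                offset = RoundUp(offset, size);
                offsets[i] = offset;
                offset += size;
            }
        }
        return offset;  // total size of the packed layout
    }
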
@@ -1854,18 +1851,17 @@ ComputePlainObjectLayout(JSContext* cx, 
     return offset;
 }
 
 static bool
 SetLayoutTraceList(JSContext* cx, UnboxedLayout* layout)
 {
     // Figure out the offsets of any objects or string properties.
     Vector<int32_t, 8, SystemAllocPolicy> objectOffsets, stringOffsets;
-    for (size_t i = 0; i < layout->properties().length(); i++) {
-        const UnboxedLayout::Property& property = layout->properties()[i];
+    for (const auto& property : layout->properties()) {
         MOZ_ASSERT(property.offset != UINT32_MAX);
         if (property.type == JSVAL_TYPE_OBJECT) {
             if (!objectOffsets.append(property.offset))
                 return false;
         } else if (property.type == JSVAL_TYPE_STRING) {
             if (!stringOffsets.append(property.offset))
                 return false;
         }
@@ -1948,18 +1944,18 @@ GetValuesFromPreliminaryPlainObject(Plai
 }
 
 void
 UnboxedPlainObject::fillAfterConvert(JSContext* cx,
                                      Handle<GCVector<Value>> values, size_t* valueCursor)
 {
     initExpando();
     memset(data(), 0, layout().size());
-    for (size_t i = 0; i < layout().properties().length(); i++)
-        JS_ALWAYS_TRUE(setValue(cx, layout().properties()[i], NextValue(values, valueCursor)));
+    for (const auto& prop : layout().properties())
+        JS_ALWAYS_TRUE(setValue(cx, prop, NextValue(values, valueCursor)));
 }
 
 bool
 js::TryConvertToUnboxedLayout(JSContext* cx, AutoEnterAnalysis& enter, Shape* templateShape,
                               ObjectGroup* group, PreliminaryObjectArray* objects)
 {
     bool isArray = !templateShape;
 
--- a/js/src/vm/UnboxedObject.h
+++ b/js/src/vm/UnboxedObject.h
@@ -168,19 +168,19 @@ class UnboxedLayout : public mozilla::Li
         return traceList_;
     }
 
     void setTraceList(int32_t* traceList) {
         traceList_ = traceList;
     }
 
     const Property* lookup(JSAtom* atom) const {
-        for (size_t i = 0; i < properties_.length(); i++) {
-            if (properties_[i].name == atom)
-                return &properties_[i];
+        for (const auto & property : properties_) {
+            if (property.name == atom)
+                return &property;
         }
         return nullptr;
     }
 
     const Property* lookup(jsid id) const {
         if (JSID_IS_STRING(id))
             return lookup(JSID_TO_ATOM(id));
         return nullptr;
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -680,18 +680,17 @@ class BaseCompiler
     MOZ_MUST_USE OutOfLineCode* addOutOfLineCode(OutOfLineCode* ool) {
         if (!ool || !outOfLine_.append(ool))
             return nullptr;
         ool->setFramePushed(masm.framePushed());
         return ool;
     }
 
     MOZ_MUST_USE bool generateOutOfLineCode() {
-        for (uint32_t i = 0; i < outOfLine_.length(); i++) {
-            OutOfLineCode* ool = outOfLine_[i];
+        for (auto ool : outOfLine_) {
             ool->bind(masm);
             ool->generate(masm);
         }
 
         return !masm.oom();
     }
 
     ////////////////////////////////////////////////////////////
@@ -2673,20 +2672,20 @@ class BaseCompiler
     void jumpTable(LabelVector& labels, Label* theTable) {
         // Flush constant pools to ensure that the table is never interrupted by
         // constant pool entries.
         masm.flush();
 
         masm.bind(theTable);
 
 #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
-        for (uint32_t i = 0; i < labels.length(); i++) {
+        for (auto & label : labels) {
             CodeLabel cl;
             masm.writeCodePointer(cl.patchAt());
-            cl.target()->bind(labels[i].offset());
+            cl.target()->bind(label.offset());
             masm.addCodeLabel(cl);
         }
 #else
         MOZ_CRASH("BaseCompiler platform hook: jumpTable");
 #endif
     }
 
     void tableSwitch(Label* theTable, RegI32 switchValue, Label* dispatchCode) {
--- a/js/src/wasm/WasmBinaryToText.cpp
+++ b/js/src/wasm/WasmBinaryToText.cpp
@@ -283,18 +283,18 @@ RenderUnreachable(WasmRenderContext& c, 
         return false;
     MAP_AST_EXPR(c, unreachable);
     return c.buffer.append("unreachable");
 }
 
 static bool
 RenderCallArgs(WasmRenderContext& c, const AstExprVector& args)
 {
-    for (uint32_t i = 0; i < args.length(); i++) {
-        if (!RenderExpr(c, *args[i]))
+    for (auto arg : args) {
+        if (!RenderExpr(c, *arg))
             return false;
     }
 
     return true;
 }
 
 static bool
 RenderCall(WasmRenderContext& c, AstCall& call)
--- a/js/src/wasm/WasmDebug.cpp
+++ b/js/src/wasm/WasmDebug.cpp
@@ -568,18 +568,18 @@ DebugState::debugDisplayURL(JSContext* c
                 return nullptr;
             cx->clearPendingException(); // ignore invalid URI
         } else if (!result.append(filenamePrefix.finishString()) || !result.append(":")) {
             return nullptr;
         }
     }
 
     const ModuleHash& hash = metadata().hash;
-    for (size_t i = 0; i < sizeof(ModuleHash); i++) {
-        char digit1 = hash[i] / 16, digit2 = hash[i] % 16;
+    for (uint8_t byte : hash) {
+        char digit1 = byte / 16, digit2 = byte % 16;
         if (!result.append((char)(digit1 < 10 ? digit1 + '0' : digit1 + 'a' - 10)))
             return nullptr;
         if (!result.append((char)(digit2 < 10 ? digit2 + '0' : digit2 + 'a' - 10)))
             return nullptr;
     }
     return result.finishString();
 }
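Note: the hash loop above splits each byte into its high and low nibble and maps each nibble to a lowercase hex digit. The same mapping as a tiny standalone helper (hypothetical name, not part of the wasm code):

    #include <cstdint>
    #include <string>

    // High nibble first: digit1 = byte / 16, digit2 = byte % 16, as above.
    std::string ByteToHex(uint8_t byte) {
        auto toChar = [](uint8_t d) -> char {
            return d < 10 ? char('0' + d) : char('a' + (d - 10));
        };
        return { toChar(byte / 16), toChar(byte % 16) };
    }
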
 
--- a/js/src/wasm/WasmInstance.cpp
+++ b/js/src/wasm/WasmInstance.cpp
@@ -382,18 +382,17 @@ Instance::Instance(JSContext* cx,
         const TableDesc& td = metadata().tables[i];
         TableTls& table = tableTls(td);
         table.length = tables_[i]->length();
         table.base = tables_[i]->base();
     }
 
     uint8_t* globalData = globals_->globalData();
 
-    for (size_t i = 0; i < metadata().globals.length(); i++) {
-        const GlobalDesc& global = metadata().globals[i];
+    for (const auto & global : metadata().globals) {
         if (global.isConstant())
             continue;
 
         uint8_t* globalAddr = globalData + global.offset();
         switch (global.kind()) {
           case GlobalKind::Import: {
             globalImports[global.importIndex()].writePayload(globalAddr);
             break;
--- a/js/src/wasm/WasmTextToBinary.cpp
+++ b/js/src/wasm/WasmTextToBinary.cpp
@@ -3482,18 +3482,18 @@ class Resolver
 } // end anonymous namespace
 
 static bool
 ResolveExpr(Resolver& r, AstExpr& expr);
 
 static bool
 ResolveExprList(Resolver& r, const AstExprVector& v)
 {
-    for (size_t i = 0; i < v.length(); i++) {
-        if (!ResolveExpr(r, *v[i]))
+    for (auto expr : v) {
+        if (!ResolveExpr(r, *expr))
             return false;
     }
     return true;
 }
 
 static bool
 ResolveBlock(Resolver& r, AstBlock& b)
 {
@@ -3921,18 +3921,18 @@ ResolveModule(LifoAlloc& lifo, AstModule
 // wasm function body serialization
 
 static bool
 EncodeExpr(Encoder& e, AstExpr& expr);
 
 static bool
 EncodeExprList(Encoder& e, const AstExprVector& v)
 {
-    for (size_t i = 0; i < v.length(); i++) {
-        if (!EncodeExpr(e, *v[i]))
+    for (auto expr : v) {
+        if (!EncodeExpr(e, *expr))
             return false;
     }
     return true;
 }
 
 static bool
 EncodeBlock(Encoder& e, AstBlock& b)
 {
--- a/js/xpconnect/src/XPCJSRuntime.cpp
+++ b/js/xpconnect/src/XPCJSRuntime.cpp
@@ -1413,19 +1413,17 @@ ReportZoneStats(const JS::ZoneStats& zSt
     size_t stringsNotableAboutMemoryGCHeap = 0;
     size_t stringsNotableAboutMemoryMallocHeap = 0;
 
     #define MAYBE_INLINE \
         "The characters may be inline or on the malloc heap."
     #define MAYBE_OVERALLOCATED \
         "Sometimes over-allocated to simplify string concatenation."
 
-    for (size_t i = 0; i < zStats.notableStrings.length(); i++) {
-        const JS::NotableStringInfo& info = zStats.notableStrings[i];
-
+    for (const auto & info : zStats.notableStrings) {
         MOZ_ASSERT(!zStats.isTotals);
 
         // We don't do notable string detection when anonymizing, because
         // there's a good chance its for crash submission, and the memory
         // required for notable string detection is high.
         MOZ_ASSERT(!anonymize);
 
         nsDependentCString notableString(info.buffer);
@@ -1712,20 +1710,18 @@ ReportCompartmentStats(const JS::Compart
     nsCString nonNotablePath = cJSPathPrefix;
     nonNotablePath += cStats.isTotals
                     ? NS_LITERAL_CSTRING("classes/")
                     : NS_LITERAL_CSTRING("classes/class(<non-notable classes>)/");
 
     ReportClassStats(cStats.classInfo, nonNotablePath, handleReport, data,
                      gcTotal);
 
-    for (size_t i = 0; i < cStats.notableClasses.length(); i++) {
+    for (const auto & classInfo : cStats.notableClasses) {
         MOZ_ASSERT(!cStats.isTotals);
-        const JS::NotableClassInfo& classInfo = cStats.notableClasses[i];
-
         nsCString classPath = cJSPathPrefix +
             nsPrintfCString("classes/class(%s)/", classInfo.className_);
 
         ReportClassStats(classInfo, classPath, handleReport, data, gcTotal);
     }
 
     // Note that we use cDOMPathPrefix here.  This is because we measure orphan
     // DOM nodes in the JS reporter, but we want to report them in a "dom"
@@ -1856,26 +1852,24 @@ ReportJSRuntimeExplicitTreeStats(const J
                                  amIAddonManager* addonManager,
                                  nsIHandleReportCallback* handleReport,
                                  nsISupports* data,
                                  bool anonymize,
                                  size_t* rtTotalOut)
 {
     size_t gcTotal = 0;
 
-    for (size_t i = 0; i < rtStats.zoneStatsVector.length(); i++) {
-        const JS::ZoneStats& zStats = rtStats.zoneStatsVector[i];
+    for (const auto & zStats : rtStats.zoneStatsVector) {
         const xpc::ZoneStatsExtras* extras =
           static_cast<const xpc::ZoneStatsExtras*>(zStats.extra);
         ReportZoneStats(zStats, *extras, handleReport, data, anonymize,
                         &gcTotal);
     }
 
-    for (size_t i = 0; i < rtStats.compartmentStatsVector.length(); i++) {
-        const JS::CompartmentStats& cStats = rtStats.compartmentStatsVector[i];
+    for (const auto & cStats : rtStats.compartmentStatsVector) {
         const xpc::CompartmentStatsExtras* extras =
             static_cast<const xpc::CompartmentStatsExtras*>(cStats.extra);
 
         ReportCompartmentStats(cStats, *extras, addonManager, handleReport,
                                data, &gcTotal);
     }
 
     // Report the rtStats.runtime numbers under "runtime/", and compute their
@@ -2099,18 +2093,18 @@ class JSMainRuntimeCompartmentsReporter 
         // |handleReport| from within CompartmentCallback() leads to all manner
         // of assertions.
 
         Data d;
         d.anonymizeID = anonymize ? 1 : 0;
         JS_IterateCompartments(XPCJSContext::Get()->Context(),
                                &d, CompartmentCallback);
 
-        for (size_t i = 0; i < d.paths.length(); i++)
-            REPORT(nsCString(d.paths[i]), KIND_OTHER, UNITS_COUNT, 1,
+        for (const auto & path : d.paths)
+            REPORT(nsCString(path), KIND_OTHER, UNITS_COUNT, 1,
                 "A live compartment in the main JSRuntime.");
 
         return NS_OK;
     }
 };
 
 NS_IMPL_ISUPPORTS(JSMainRuntimeCompartmentsReporter, nsIMemoryReporter)
 
--- a/js/xpconnect/src/XPCWrappedNativeInfo.cpp
+++ b/js/xpconnect/src/XPCWrappedNativeInfo.cpp
@@ -699,18 +699,18 @@ XPCNativeSet::NewInstance(nsTArray<RefPt
     // We impose the invariant:
     // "All sets have exactly one nsISupports interface and it comes first."
     // This is the place where we impose that rule - even if given inputs
     // that don't exactly follow the rule.
 
     RefPtr<XPCNativeInterface> isup = XPCNativeInterface::GetISupports();
     uint16_t slots = array.Length() + 1;
 
-    for (auto key = array.begin(); key != array.end(); key++) {
-        if (*key == isup)
+    for (auto & key : array) {
+        if (key == isup)
             slots--;
     }
 
     // Use placement new to create an object with the right amount of space
     // to hold the members array
     int size = sizeof(XPCNativeSet);
     if (slots > 1)
         size += (slots - 1) * sizeof(XPCNativeInterface*);
@@ -718,18 +718,18 @@ XPCNativeSet::NewInstance(nsTArray<RefPt
     RefPtr<XPCNativeSet> obj = new(place) XPCNativeSet();
 
     // Stick the nsISupports in front and skip additional nsISupport(s)
     XPCNativeInterface** outp = (XPCNativeInterface**) &obj->mInterfaces;
     uint16_t memberCount = 1;   // for the one member in nsISupports
 
     NS_ADDREF(*(outp++) = isup);
 
-    for (auto key = array.begin(); key != array.end(); key++) {
-        RefPtr<XPCNativeInterface> cur = key->forget();
+    for (auto & key : array) {
+        RefPtr<XPCNativeInterface> cur = key.forget();
         if (isup == cur)
             continue;
         memberCount += cur->GetMemberCount();
         *(outp++) = cur.forget().take();
     }
     obj->mMemberCount = memberCount;
     obj->mInterfaceCount = slots;
 
--- a/layout/base/GeometryUtils.cpp
+++ b/layout/base/GeometryUtils.cpp
@@ -198,18 +198,18 @@ public:
       points[i] = CSSPoint(nsPresContext::AppUnitsToFloatCSSPixels(appUnits[i].x),
                            nsPresContext::AppUnitsToFloatCSSPixels(appUnits[i].y));
     }
     nsLayoutUtils::TransformResult rv =
       nsLayoutUtils::TransformPoints(f, mRelativeToFrame, 4, points);
     if (rv == nsLayoutUtils::TRANSFORM_SUCCEEDED) {
       CSSPoint delta(nsPresContext::AppUnitsToFloatCSSPixels(mRelativeToBoxTopLeft.x),
                      nsPresContext::AppUnitsToFloatCSSPixels(mRelativeToBoxTopLeft.y));
-      for (uint32_t i = 0; i < 4; ++i) {
-        points[i] -= delta;
+      for (auto & point : points) {
+        point -= delta;
       }
     } else {
       PodArrayZero(points);
     }
     mResult.AppendElement(new DOMQuad(mParentObject, points));
   }
 
   nsISupports* mParentObject;
--- a/layout/base/nsLayoutUtils.cpp
+++ b/layout/base/nsLayoutUtils.cpp
@@ -7646,18 +7646,18 @@ GetFontFacesForFramesInner(nsIFrame* aFr
       nsLayoutUtils::GetFontFacesForText(aFrame, 0, INT32_MAX, true,
                                          aFontFaceList);
     }
     return;
   }
 
   nsIFrame::ChildListID childLists[] = { nsIFrame::kPrincipalList,
                                          nsIFrame::kPopupList };
-  for (size_t i = 0; i < ArrayLength(childLists); ++i) {
-    nsFrameList children(aFrame->GetChildList(childLists[i]));
+  for (auto & childList : childLists) {
+    nsFrameList children(aFrame->GetChildList(childList));
     for (nsFrameList::Enumerator e(children); !e.AtEnd(); e.Next()) {
       nsIFrame* child = e.get();
       child = nsPlaceholderFrame::GetRealFrameFor(child);
       GetFontFacesForFramesInner(child, aFontFaceList);
     }
   }
 }
 
--- a/layout/generic/TextOverflow.cpp
+++ b/layout/generic/TextOverflow.cpp
@@ -697,18 +697,18 @@ TextOverflow::ProcessLine(const nsDispla
     LogicalRect alignmentRect(mBlockWM, alignmentEdges.mIStart,
                               insideMarkersArea.BStart(mBlockWM),
                               alignmentEdges.ISize(), 1);
     insideMarkersArea.IntersectRect(insideMarkersArea, alignmentRect);
   }
 
   // Clip and remove display items as needed at the final marker edges.
   nsDisplayList* lists[] = { aLists.Content(), aLists.PositionedDescendants() };
-  for (uint32_t i = 0; i < ArrayLength(lists); ++i) {
-    PruneDisplayListContents(lists[i], framesToHide, insideMarkersArea);
+  for (auto & list : lists) {
+    PruneDisplayListContents(list, framesToHide, insideMarkersArea);
   }
   CreateMarkers(aLine, needIStart, needIEnd, insideMarkersArea, contentArea);
 }
 
 void
 TextOverflow::PruneDisplayListContents(nsDisplayList* aList,
                                        const FrameHashtable& aFramesToHide,
                                        const LogicalRect& aInsideMarkersArea)
--- a/layout/generic/nsColumnSetFrame.cpp
+++ b/layout/generic/nsColumnSetFrame.cpp
@@ -59,18 +59,18 @@ private:
 
 void
 nsDisplayColumnRule::Paint(nsDisplayListBuilder* aBuilder,
                            gfxContext* aCtx)
 {
   static_cast<nsColumnSetFrame*>(mFrame)->
     CreateBorderRenderers(mBorderRenderers, aCtx, mVisibleRect, ToReferenceFrame());
 
-  for (auto iter = mBorderRenderers.begin(); iter != mBorderRenderers.end(); iter++) {
-    iter->DrawBorders();
+  for (auto& borderRenderer : mBorderRenderers) {
+    borderRenderer.DrawBorders();
   }
 }
 LayerState
 nsDisplayColumnRule::GetLayerState(nsDisplayListBuilder* aBuilder,
                                    LayerManager* aManager,
                                    const ContainerLayerParameters& aParameters)
 {
   if (!gfxPrefs::LayersAllowColumnRuleLayers()) {
@@ -82,18 +82,18 @@ nsDisplayColumnRule::GetLayerState(nsDis
 
   static_cast<nsColumnSetFrame*>(mFrame)->
     CreateBorderRenderers(mBorderRenderers, screenRefCtx, mVisibleRect, ToReferenceFrame());
 
   if (mBorderRenderers.IsEmpty()) {
     return LAYER_NONE;
   }
 
-  for (auto iter = mBorderRenderers.begin(); iter != mBorderRenderers.end(); iter++) {
-    if (!iter->CanCreateWebRenderCommands()) {
+  for (auto& borderRenderer : mBorderRenderers) {
+    if (!borderRenderer.CanCreateWebRenderCommands()) {
       return LAYER_NONE;
     }
   }
 
   return LAYER_ACTIVE;
 }
 
 already_AddRefed<Layer>
@@ -117,25 +117,25 @@ nsDisplayColumnRule::CreateWebRenderComm
 
     static_cast<nsColumnSetFrame*>(mFrame)->
       CreateBorderRenderers(mBorderRenderers, screenRefCtx, mVisibleRect, ToReferenceFrame());
 
     if (mBorderRenderers.IsEmpty()) {
       return false;
     }
 
-    for (auto iter = mBorderRenderers.begin(); iter != mBorderRenderers.end(); iter++) {
-      if (!iter->CanCreateWebRenderCommands()) {
+    for (auto& borderRenderer : mBorderRenderers) {
+      if (!borderRenderer.CanCreateWebRenderCommands()) {
         return false;
       }
     }
   }
 
-  for (auto iter = mBorderRenderers.begin(); iter != mBorderRenderers.end(); iter++) {
-    iter->CreateWebRenderCommands(aBuilder, aSc);
+  for (auto& borderRenderer : mBorderRenderers) {
+    borderRenderer.CreateWebRenderCommands(aBuilder, aSc);
   }
 
   return true;
 }
 
 /**
  * Tracking issues:
  *
--- a/layout/generic/nsFrameSelection.cpp
+++ b/layout/generic/nsFrameSelection.cpp
@@ -376,18 +376,18 @@ nsFrameSelection::nsFrameSelection()
 
 nsFrameSelection::~nsFrameSelection()
 {
 }
 
 NS_IMPL_CYCLE_COLLECTION_CLASS(nsFrameSelection)
 
 NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(nsFrameSelection)
-  for (size_t i = 0; i < kPresentSelectionTypeCount; ++i) {
-    tmp->mDomSelections[i] = nullptr;
+  for (auto& domSelection : tmp->mDomSelections) {
+    domSelection = nullptr;
   }
 
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mCellParent)
   tmp->mSelectingTableCellMode = 0;
   tmp->mDragSelectingCells = false;
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mStartSelectedCell)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mEndSelectedCell)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mAppendStartSelectedCell)
@@ -2951,18 +2951,18 @@ nsFrameSelection::DisconnectFromPresShel
 {
   RefPtr<AccessibleCaretEventHub> eventHub = mShell->GetAccessibleCaretEventHub();
   if (eventHub) {
     int8_t index = GetIndexFromSelectionType(SelectionType::eNormal);
     mDomSelections[index]->RemoveSelectionListener(eventHub);
   }
 
   StopAutoScrollTimer();
-  for (size_t i = 0; i < kPresentSelectionTypeCount; i++) {
-    mDomSelections[i]->Clear(nullptr);
+  for (auto& domSelection : mDomSelections) {
+    domSelection->Clear(nullptr);
   }
   mShell = nullptr;
 }
 
 /**
  * See Bug 1288453.
  *
  * Update the selection cache on repaint to handle when a pre-existing
--- a/layout/mathml/nsMathMLChar.cpp
+++ b/layout/mathml/nsMathMLChar.cpp
@@ -1285,27 +1285,27 @@ nsMathMLChar::StretchEnumContext::TryPar
       sizedata[i] = i == 3 ? mTargetSize : 0;
     }
   }
 
   // For the Unicode table, we check that all the glyphs are actually found and
   // come from the same font.
   if (aGlyphTable == &gGlyphTableList->mUnicodeTable) {
     gfxFont* unicodeFont = nullptr;
-    for (int32_t i = 0; i < 4; i++) {
-      if (!textRun[i]) {
+    for (auto& run : textRun) {
+      if (!run) {
         continue;
       }
-      if (textRun[i]->GetLength() != 1 ||
-          textRun[i]->GetCharacterGlyphs()[0].IsMissing()) {
+      if (run->GetLength() != 1 ||
+          run->GetCharacterGlyphs()[0].IsMissing()) {
         return false;
       }
       uint32_t numGlyphRuns;
       const gfxTextRun::GlyphRun* glyphRuns =
-        textRun[i]->GetGlyphRuns(&numGlyphRuns);
+        run->GetGlyphRuns(&numGlyphRuns);
       if (numGlyphRuns != 1) {
         return false;
       }
       if (!unicodeFont) {
         unicodeFont = glyphRuns[0].mFont;
       } else if (unicodeFont != glyphRuns[0].mFont) {
         return false;
       }
--- a/layout/painting/nsCSSRenderingGradients.cpp
+++ b/layout/painting/nsCSSRenderingGradients.cpp
@@ -363,31 +363,31 @@ static void ResolveMidpoints(nsTArray<Co
       newStops[1].mPosition = offset1 + (offset - offset1) * 2 / 3;
 
       for (size_t y = 0; y < 7; y++) {
         newStops[y+2].mPosition = offset + (offset2 - offset) * y / 13;
       }
     }
     // calculate colors
 
-    for (size_t y = 0; y < 9; y++) {
+    for (auto & newStop : newStops) {
       // Calculate the intermediate color stops per the formula of the CSS images
       // spec. http://dev.w3.org/csswg/css-images/#color-stop-syntax
       // 9 points were chosen since it is the minimum number of stops that always
       // give the smoothest appearace regardless of midpoint position and difference
       // in luminance of the end points.
-      float relativeOffset = (newStops[y].mPosition - offset1) / (offset2 - offset1);
+      float relativeOffset = (newStop.mPosition - offset1) / (offset2 - offset1);
       float multiplier = powf(relativeOffset, logf(.5f) / logf(midpoint));
 
       gfx::Float red = color1.r + multiplier * (color2.r - color1.r);
       gfx::Float green = color1.g + multiplier * (color2.g - color1.g);
       gfx::Float blue = color1.b + multiplier * (color2.b - color1.b);
       gfx::Float alpha = color1.a + multiplier * (color2.a - color1.a);
 
-      newStops[y].mColor = Color(red, green, blue, alpha);
+      newStop.mColor = Color(red, green, blue, alpha);
     }
 
     stops.ReplaceElementsAt(x, 1, newStops, 9);
     x += 9;
   }
 }
 
 static Color
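Note: the midpoint easing above is the CSS images rule multiplier = relativeOffset ^ (log 0.5 / log midpoint); when relativeOffset equals the midpoint the exponent collapses and the multiplier is exactly 0.5, so the midpoint stop sits halfway between the two end colors. A minimal sketch of the per-channel blend, with hypothetical names:

    #include <cmath>

    // CSS gradient midpoint easing for a single color channel:
    // returns the 50/50 blend of c1 and c2 when relativeOffset == midpoint.
    float BlendChannel(float c1, float c2, float relativeOffset, float midpoint)
    {
        float multiplier = std::pow(relativeOffset, std::log(0.5f) / std::log(midpoint));
        return c1 + multiplier * (c2 - c1);
    }
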
--- a/layout/painting/nsDisplayList.h
+++ b/layout/painting/nsDisplayList.h
@@ -2635,18 +2635,18 @@ struct nsDisplayListCollection : public 
   explicit nsDisplayListCollection(nsDisplayList* aBorderBackground) :
     nsDisplayListSet(aBorderBackground, &mLists[1], &mLists[2], &mLists[3], &mLists[4],
                      &mLists[5]) {}
 
   /**
    * Sort all lists by content order.
    */
   void SortAllByContentOrder(nsIContent* aCommonAncestor) {
-    for (int32_t i = 0; i < 6; ++i) {
-      mLists[i].SortByContentOrder(aCommonAncestor);
+    for (auto& list : mLists) {
+      list.SortByContentOrder(aCommonAncestor);
     }
   }
 
 private:
   // This class is only used on stack, so we don't have to worry about leaking
   // it.  Don't let us be heap-allocated!
   void* operator new(size_t sz) CPP_THROW_NEW;
 
--- a/layout/printing/nsPrintPreviewListener.cpp
+++ b/layout/printing/nsPrintPreviewListener.cpp
@@ -143,18 +143,18 @@ GetActionForEvent(nsIDOMEvent* aEvent)
   if (keyEvent->mCharCode == ' ' || keyEvent->mKeyCode == NS_VK_SPACE) {
     return eEventAction_Propagate;
   }
 
   if (keyEvent->IsShift()) {
     return eEventAction_Suppress;
   }
 
-  for (uint32_t i = 0; i < ArrayLength(kOKKeyCodes); ++i) {
-    if (keyEvent->mKeyCode == kOKKeyCodes[i]) {
+  for (uint32_t keyCode : kOKKeyCodes) {
+    if (keyEvent->mKeyCode == keyCode) {
       return eEventAction_Propagate;
     }
   }
 
   return eEventAction_Suppress;
 }
 
 NS_IMETHODIMP
--- a/layout/style/Declaration.cpp
+++ b/layout/style/Declaration.cpp
@@ -1177,18 +1177,18 @@ Declaration::GetPropertyValueInternal(
           }
           aValue.Append(char16_t(' '));
         }
         if (done) {
           break;
         }
         aValue.AppendLiteral(", ");
       }
-      for (uint32_t i = 0; i < numProps; ++i) {
-        if (lists[i]) {
+      for (auto & list : lists) {
+        if (list) {
           // Lists not all the same length, can't use shorthand.
           aValue.Truncate();
           break;
         }
       }
       break;
     }
     case eCSSProperty_marker: {
--- a/layout/style/StyleAnimationValue.cpp
+++ b/layout/style/StyleAnimationValue.cpp
@@ -1707,18 +1707,17 @@ StyleAnimationValue::ComputeDistance(nsC
           rect1->mRight.GetUnit() != rect2->mRight.GetUnit() ||
           rect1->mBottom.GetUnit() != rect2->mBottom.GetUnit() ||
           rect1->mLeft.GetUnit() != rect2->mLeft.GetUnit()) {
         // At least until we have calc()
         return false;
       }
 
       double squareDistance = 0.0;
-      for (uint32_t i = 0; i < ArrayLength(nsCSSRect::sides); ++i) {
-        nsCSSValue nsCSSRect::*member = nsCSSRect::sides[i];
+      for (auto member : nsCSSRect::sides) {
         MOZ_ASSERT((rect1->*member).GetUnit() == (rect2->*member).GetUnit(),
                    "should have returned above");
         double diff;
         switch ((rect1->*member).GetUnit()) {
           case eCSSUnit_Pixel:
             diff = (rect1->*member).GetFloatValue() -
                    (rect2->*member).GetFloatValue();
             break;
@@ -1883,19 +1882,19 @@ StyleAnimationValue::ComputeDistance(nsC
       const nsCSSValuePairList *list1 = aStartValue.GetCSSValuePairListValue();
       const nsCSSValuePairList *list2 = aEndValue.GetCSSValuePairListValue();
       double squareDistance = 0.0;
       do {
         static nsCSSValue nsCSSValuePairList::* const pairListValues[] = {
           &nsCSSValuePairList::mXValue,
           &nsCSSValuePairList::mYValue,
         };
-        for (uint32_t i = 0; i < ArrayLength(pairListValues); ++i) {
-          const nsCSSValue &v1 = list1->*(pairListValues[i]);
-          const nsCSSValue &v2 = list2->*(pairListValues[i]);
+        for (auto pairListValue : pairListValues) {
+          const nsCSSValue &v1 = list1->*pairListValue;
+          const nsCSSValue &v2 = list2->*pairListValue;
           nsCSSUnit unit =
             GetCommonUnit(aProperty, v1.GetUnit(), v2.GetUnit());
           if (unit == eCSSUnit_Null) {
             return false;
           }
           double diffsquared = 0.0;
           switch (unit) {
             case eCSSUnit_Number:
@@ -2383,21 +2382,21 @@ AddCSSValuePairList(nsCSSPropertyID aPro
   nsCSSValuePairList* resultPtr = result.get();
 
   do {
     static nsCSSValue nsCSSValuePairList::* const pairListValues[] = {
       &nsCSSValuePairList::mXValue,
       &nsCSSValuePairList::mYValue,
     };
     uint32_t restrictions = nsCSSProps::ValueRestrictions(aProperty);
-    for (uint32_t i = 0; i < ArrayLength(pairListValues); ++i) {
-      const nsCSSValue& v1 = aList1->*(pairListValues[i]);
-      const nsCSSValue& v2 = aList2->*(pairListValues[i]);
-
-      nsCSSValue& vr = resultPtr->*(pairListValues[i]);
+    for (auto pairListValue : pairListValues) {
+      const nsCSSValue& v1 = aList1->*pairListValue;
+      const nsCSSValue& v2 = aList2->*pairListValue;
+
+      nsCSSValue& vr = resultPtr->*pairListValue;
       nsCSSUnit unit =
         GetCommonUnit(aProperty, v1.GetUnit(), v2.GetUnit());
       if (unit == eCSSUnit_Null) {
         return nullptr;
       }
       if (unit == eCSSUnit_Number) {
         AddCSSValueNumber(aCoeff1, v1,
                           aCoeff2, v2,
@@ -3105,18 +3104,17 @@ StyleAnimationValue::AddWeighted(nsCSSPr
           rect1->mRight.GetUnit() != rect2->mRight.GetUnit() ||
           rect1->mBottom.GetUnit() != rect2->mBottom.GetUnit() ||
           rect1->mLeft.GetUnit() != rect2->mLeft.GetUnit()) {
         // At least until we have calc()
         return false;
       }
 
       nsAutoPtr<nsCSSRect> result(new nsCSSRect);
-      for (uint32_t i = 0; i < ArrayLength(nsCSSRect::sides); ++i) {
-        nsCSSValue nsCSSRect::*member = nsCSSRect::sides[i];
+      for (auto member : nsCSSRect::sides) {
         MOZ_ASSERT((rect1->*member).GetUnit() == (rect2->*member).GetUnit(),
                    "should have returned above");
         switch ((rect1->*member).GetUnit()) {
           case eCSSUnit_Pixel:
             AddCSSValuePixel(aCoeff1, rect1->*member, aCoeff2, rect2->*member,
                              result->*member);
             break;
           case eCSSUnit_Auto:
--- a/layout/style/nsRuleData.cpp
+++ b/layout/style/nsRuleData.cpp
@@ -33,18 +33,18 @@ nsRuleData::nsRuleData(uint32_t aSIDs,
                        nsPresContext* aContext,
                        nsStyleContext* aStyleContext)
   : GenericSpecifiedValues(StyleBackendType::Gecko, aContext, aSIDs)
   , mStyleContext(aStyleContext)
   , mValueStorage(aValueStorage)
 {
 #ifndef MOZ_VALGRIND
   size_t framePoisonOffset = GetPoisonOffset();
-  for (size_t i = 0; i < nsStyleStructID_Length; ++i) {
-    mValueOffsets[i] = framePoisonOffset;
+  for (size_t& valueOffset : mValueOffsets) {
+    valueOffset = framePoisonOffset;
   }
 #endif
 }
 
 void
 nsRuleData::SetFontFamily(const nsString& aValue)
 {
   nsCSSValue* family = ValueForFontFamily();
--- a/layout/style/nsStyleSet.cpp
+++ b/layout/style/nsStyleSet.cpp
@@ -2222,21 +2222,21 @@ nsStyleSet::ResolveXULTreePseudoStyle(El
 
 bool
 nsStyleSet::AppendFontFaceRules(nsTArray<nsFontFaceRuleContainer>& aArray)
 {
   NS_ENSURE_FALSE(mInShutdown, false);
   NS_ASSERTION(mBatching == 0, "rule processors out of date");
 
   nsPresContext* presContext = PresContext();
-  for (uint32_t i = 0; i < ArrayLength(gCSSSheetTypes); ++i) {
-    if (gCSSSheetTypes[i] == SheetType::ScopedDoc)
+  for (auto sheetType : gCSSSheetTypes) {
+    if (sheetType == SheetType::ScopedDoc)
       continue;
     nsCSSRuleProcessor *ruleProc = static_cast<nsCSSRuleProcessor*>
-                                    (mRuleProcessors[gCSSSheetTypes[i]].get());
+                                    (mRuleProcessors[sheetType].get());
     if (ruleProc && !ruleProc->AppendFontFaceRules(presContext, aArray))
       return false;
   }
   return true;
 }
 
 nsCSSKeyframesRule*
 nsStyleSet::KeyframesRuleForName(const nsString& aName)
@@ -2285,19 +2285,19 @@ nsStyleSet::CounterStyleRuleForName(nsIA
 bool
 nsStyleSet::AppendFontFeatureValuesRules(
                                  nsTArray<nsCSSFontFeatureValuesRule*>& aArray)
 {
   NS_ENSURE_FALSE(mInShutdown, false);
   NS_ASSERTION(mBatching == 0, "rule processors out of date");
 
   nsPresContext* presContext = PresContext();
-  for (uint32_t i = 0; i < ArrayLength(gCSSSheetTypes); ++i) {
+  for (auto sheetType : gCSSSheetTypes) {
     nsCSSRuleProcessor *ruleProc = static_cast<nsCSSRuleProcessor*>
-                                    (mRuleProcessors[gCSSSheetTypes[i]].get());
+                                    (mRuleProcessors[sheetType].get());
     if (ruleProc &&
         !ruleProc->AppendFontFeatureValuesRules(presContext, aArray))
     {
       return false;
     }
   }
   return true;
 }
@@ -2338,21 +2338,21 @@ nsStyleSet::GetFontFeatureValuesLookup()
 
 bool
 nsStyleSet::AppendPageRules(nsTArray<nsCSSPageRule*>& aArray)
 {
   NS_ENSURE_FALSE(mInShutdown, false);
   NS_ASSERTION(mBatching == 0, "rule processors out of date");
 
   nsPresContext* presContext = PresContext();
-  for (uint32_t i = 0; i < ArrayLength(gCSSSheetTypes); ++i) {
-    if (gCSSSheetTypes[i] == SheetType::ScopedDoc)
+  for (auto sheetType : gCSSSheetTypes) {
+    if (sheetType == SheetType::ScopedDoc)
       continue;
     nsCSSRuleProcessor* ruleProc = static_cast<nsCSSRuleProcessor*>
-                                    (mRuleProcessors[gCSSSheetTypes[i]].get());
+                                    (mRuleProcessors[sheetType].get());
     if (ruleProc && !ruleProc->AppendPageRules(presContext, aArray))
       return false;
   }
   return true;
 }
 
 void
 nsStyleSet::BeginShutdown()
--- a/layout/tables/nsTableFrame.cpp
+++ b/layout/tables/nsTableFrame.cpp
@@ -2605,23 +2605,23 @@ nsTableFrame::InsertFrames(ChildListID  
         insertions[1].mList.AppendFrames(nullptr, head);
       }
       if (!next) {
         break;
       }
       display = next->StyleDisplay();
     }
   }
-  for (uint32_t i = 0; i < ArrayLength(insertions); ++i) {
+  for (auto & insertion : insertions) {
     // We pass aPrevFrame for both ColGroup and other frames since
     // HomogenousInsertFrames will only use it if it's a suitable
     // prev-sibling for the frames in the frame list.
-    if (!insertions[i].mList.IsEmpty()) {
-      HomogenousInsertFrames(insertions[i].mID, aPrevFrame,
-                             insertions[i].mList);
+    if (!insertion.mList.IsEmpty()) {
+      HomogenousInsertFrames(insertion.mID, aPrevFrame,
+                             insertion.mList);
     }
   }
 }
 
 void
 nsTableFrame::HomogenousInsertFrames(ChildListID     aListID,
                                      nsIFrame*       aPrevFrame,
                                      nsFrameList&    aFrameList)
@@ -6009,18 +6009,18 @@ nsTableFrame::CalcBCBorders()
 
   // calculate an expanded damage area
   TableArea damageArea(propData->mDamageArea);
   ExpandBCDamageArea(damageArea);
 
   // segments that are on the table border edges need
   // to be initialized only once
   bool tableBorderReset[4];
-  for (uint32_t sideX = 0; sideX < ArrayLength(tableBorderReset); sideX++) {
-    tableBorderReset[sideX] = false;
+  for (bool & sideX : tableBorderReset) {
+    sideX = false;
   }
 
   // block-dir borders indexed in inline-direction (cols)
   BCCellBorders lastBlockDirBorders(damageArea.ColCount() + 1,
                                     damageArea.StartCol());
   if (!lastBlockDirBorders.borders) ABORT0();
   BCCellBorder  lastBStartBorder, lastBEndBorder;
   // inline-dir borders indexed in inline-direction (cols)
--- a/media/gmp-clearkey/0.1/ClearKeyBase64.cpp
+++ b/media/gmp-clearkey/0.1/ClearKeyBase64.cpp
@@ -74,26 +74,26 @@ DecodeBase64(const string& aEncoded, vec
     return false;
   }
 
   // The number of bytes we haven't yet filled in the current byte, mod 8.
   int shift = 0;
 
   aOutDecoded.resize((encoded.size() * 3) / 4);
   vector<uint8_t>::iterator out = aOutDecoded.begin();
-  for (size_t i = 0; i < encoded.length(); i++) {
+  for (char c : encoded) {
     if (!shift) {
-      *out = encoded[i] << 2;
+      *out = c << 2;
     }
     else {
-      *out |= encoded[i] >> (6 - shift);
+      *out |= c >> (6 - shift);
       out++;
       if (out == aOutDecoded.end()) {
         // Hit last 6bit octed in encoded, which is padding and can be ignored.
         break;
       }
-      *out = encoded[i] << (shift + 2);
+      *out = c << (shift + 2);
     }
     shift = (shift + 2) % 8;
   }
 
   return true;
 }
\ No newline at end of file
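Note: the decode loop above consumes one 6-bit value per character: with shift == 0 the value fills the top six bits of the output byte, and on later characters its high bits finish the current byte while its low bits start the next, with shift cycling through 0, 2, 4, 6. A worked example, assuming the characters have already been mapped to their standard base64 indices (that mapping happens before this hunk and is not shown here):

    'T' -> 19 = 0b010011, 'Q' -> 16 = 0b010000
    byte = (19 << 2) | (16 >> 4) = 0b01001100 | 0b00000001 = 0x4D = 'M'

so "TQ==" decodes to the single byte 0x4D, and the trailing '=' padding contributes no output.
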
--- a/media/gmp-clearkey/0.1/ClearKeyDecryptionManager.cpp
+++ b/media/gmp-clearkey/0.1/ClearKeyDecryptionManager.cpp
@@ -70,18 +70,18 @@ ClearKeyDecryptionManager::ClearKeyDecry
 }
 
 ClearKeyDecryptionManager::~ClearKeyDecryptionManager()
 {
   CK_LOGD("ClearKeyDecryptionManager::~ClearKeyDecryptionManager");
 
   sInstance = nullptr;
 
-  for (auto it = mDecryptors.begin(); it != mDecryptors.end(); it++) {
-    it->second->Release();
+  for (auto& decryptor : mDecryptors) {
+    decryptor.second->Release();
   }
   mDecryptors.clear();
 }
 
 bool
 ClearKeyDecryptionManager::HasSeenKeyId(const KeyId& aKeyId) const
 {
   CK_LOGD("ClearKeyDecryptionManager::SeenKeyId %s",
--- a/media/gmp-clearkey/0.1/ClearKeySessionManager.cpp
+++ b/media/gmp-clearkey/0.1/ClearKeySessionManager.cpp
@@ -146,23 +146,23 @@ ClearKeySessionManager::CreateSession(ui
     return;
   }
 
   mSessions[sessionId] = session;
 
   const vector<KeyId>& sessionKeys = session->GetKeyIds();
   vector<KeyId> neededKeys;
 
-  for (auto it = sessionKeys.begin(); it != sessionKeys.end(); it++) {
+  for (const auto & sessionKey : sessionKeys) {
     // Need to request this key ID from the client. We always send a key
     // request, whether or not another session has sent a request with the same
     // key ID. Otherwise a script can end up waiting for another script to
     // respond to the request (which may not necessarily happen).
-    neededKeys.push_back(*it);
-    mDecryptionManager->ExpectKeyId(*it);
+    neededKeys.push_back(sessionKey);
+    mDecryptionManager->ExpectKeyId(sessionKey);
   }
 
   if (neededKeys.empty()) {
     CK_LOGD("No keys needed from client.");
     return;
   }
 
   // Send a request for needed key data.
@@ -392,18 +392,17 @@ ClearKeySessionManager::UpdateSession(ui
                            0,
                            nullptr,
                            0);
 
     return;
   }
 
   vector<KeyInformation> keyInfos;
-  for (size_t i = 0; i < keyPairs.size(); i++) {
-    KeyIdPair& keyPair = keyPairs[i];
+  for (auto & keyPair : keyPairs) {
     mDecryptionManager->InitKey(keyPair.mKeyId, keyPair.mKey);
     mKeyIds.insert(keyPair.mKeyId);
 
     KeyInformation keyInfo = KeyInformation();
     keyInfo.key_id = &keyPair.mKeyId[0];
     keyInfo.key_id_size = keyPair.mKeyId.size();
     keyInfo.status = KeyStatus::kUsable;
 
@@ -451,18 +450,17 @@ ClearKeySessionManager::UpdateSession(ui
   WriteData(mHost, sessionId, keydata, move(resolve), move(reject));
 }
 
 void
 ClearKeySessionManager::Serialize(const ClearKeySession* aSession,
                                   std::vector<uint8_t>& aOutKeyData)
 {
   const std::vector<KeyId>& keyIds = aSession->GetKeyIds();
-  for (size_t i = 0; i < keyIds.size(); i++) {
-    const KeyId& keyId = keyIds[i];
+  for (const auto & keyId : keyIds) {
     if (!mDecryptionManager->HasKeyForKeyId(keyId)) {
       continue;
     }
     assert(keyId.size() == CENC_KEY_LEN);
     aOutKeyData.insert(aOutKeyData.end(), keyId.begin(), keyId.end());
     const Key& key = mDecryptionManager->GetDecryptionKey(keyId);
     assert(key.size() == CENC_KEY_LEN);
     aOutKeyData.insert(aOutKeyData.end(), key.begin(), key.end());
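Note: Serialize above writes, for every key the decryption manager actually holds, the key ID immediately followed by its key, each CENC_KEY_LEN bytes, so the session key data is a flat sequence of fixed-size (id, key) records. A minimal sketch of appending one such record, assuming a 16-byte CENC key length as the asserts suggest:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    static const size_t kCencKeyLen = 16;  // assumed value of CENC_KEY_LEN

    // Append one fixed-size (keyId, key) record to the flat key data blob.
    void AppendKeyRecord(std::vector<uint8_t>& aOutKeyData,
                         const std::vector<uint8_t>& aKeyId,
                         const std::vector<uint8_t>& aKey)
    {
        assert(aKeyId.size() == kCencKeyLen && aKey.size() == kCencKeyLen);
        aOutKeyData.insert(aOutKeyData.end(), aKeyId.begin(), aKeyId.end());
        aOutKeyData.insert(aOutKeyData.end(), aKey.begin(), aKey.end());
    }
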
@@ -649,18 +647,18 @@ ClearKeySessionManager::Decrypt(const In
   return status;
 }
 
 void
 ClearKeySessionManager::DecryptingComplete()
 {
   CK_LOGD("ClearKeySessionManager::DecryptingComplete %p", this);
 
-  for (auto it = mSessions.begin(); it != mSessions.end(); it++) {
-    delete it->second;
+  for (auto& session : mSessions) {
+    delete session.second;
   }
   mSessions.clear();
 
   mDecryptionManager = nullptr;
   mHost = nullptr;
 
   Release();
 }
--- a/media/mtransport/rlogconnector.cpp
+++ b/media/mtransport/rlogconnector.cpp
@@ -157,18 +157,18 @@ void RLogConnector::Filter(const std::st
                             std::deque<std::string>* matching_logs) {
   std::vector<std::string> substrings;
   substrings.push_back(substring);
   FilterAny(substrings, limit, matching_logs);
 }
 
 inline bool AnySubstringMatches(const std::vector<std::string>& substrings,
                                 const std::string& string) {
-  for (auto sub = substrings.begin(); sub != substrings.end(); ++sub) {
-    if (string.find(*sub) != std::string::npos) {
+  for (const auto & substring : substrings) {
+    if (string.find(substring) != std::string::npos) {
       return true;
     }
   }
   return false;
 }
 
 void RLogConnector::FilterAny(const std::vector<std::string>& substrings,
                                uint32_t limit,
--- a/media/mtransport/test/gtest_ringbuffer_dumper.h
+++ b/media/mtransport/test/gtest_ringbuffer_dumper.h
@@ -38,18 +38,18 @@ class RingbufferDumper : public ::testin
     void DestroyRingBuffer_s() {
       RLogConnector::DestroyInstance();
     }
 
     void DumpRingBuffer_s() {
       std::deque<std::string> logs;
       // Get an unlimited number