Bug 1452982 part 14 - Rename 'active thread' to 'main thread'. r=jonco
author Jan de Mooij <jdemooij@mozilla.com>
Thu, 19 Apr 2018 13:04:46 +0200
changeset 468069 2f7d0134b22176dd649418217cb98d09ce102c83
parent 468068 ff7588bba148f3f6f0bed0a88e5c94dcb102f3db
child 468070 292f8e5c6336f089843d98b661771bc90db69adb
push id 9165
push user asasaki@mozilla.com
push date Thu, 26 Apr 2018 21:04:54 +0000
treeherder mozilla-beta@064c3804de2e
reviewers jonco
bugs 1452982
milestone 61.0a1
Bug 1452982 part 14 - Rename 'active thread' to 'main thread'. r=jonco
js/src/frontend/BytecodeEmitter.cpp
js/src/gc/Allocator.cpp
js/src/gc/Cell.h
js/src/gc/FreeOp.h
js/src/gc/GC.cpp
js/src/gc/GCHelperState.h
js/src/gc/GCRuntime.h
js/src/gc/ObjectKind-inl.h
js/src/gc/Zone.cpp
js/src/jit/CompileWrappers.h
js/src/jit/Ion.cpp
js/src/jit/Ion.h
js/src/jit/IonBuilder.cpp
js/src/jit/IonOptimizationLevels.cpp
js/src/jit/IonOptimizationLevels.h
js/src/jit/JitCompartment.h
js/src/jit/JitOptions.h
js/src/jit/MIR.cpp
js/src/jit/MIR.h
js/src/jit/MIRGenerator.h
js/src/jsapi.cpp
js/src/jsapi.h
js/src/vm/ErrorReporting.cpp
js/src/vm/HelperThreads.cpp
js/src/vm/JSContext.h
js/src/vm/JSObject-inl.h
js/src/vm/JSObject.cpp
js/src/vm/JSScript.h
js/src/vm/ObjectGroup-inl.h
js/src/vm/Runtime.cpp
js/src/vm/Runtime.h
js/src/vm/SymbolType.cpp
js/src/vm/TypeInference.cpp
js/src/vm/TypeInference.h
js/src/wasm/WasmSignalHandlers.cpp
--- a/js/src/frontend/BytecodeEmitter.cpp
+++ b/js/src/frontend/BytecodeEmitter.cpp
@@ -3563,17 +3563,17 @@ BytecodeEmitter::maybeSetSourceMap()
 
     return true;
 }
 
 void
 BytecodeEmitter::tellDebuggerAboutCompiledScript(JSContext* cx)
 {
     // Note: when parsing off thread the resulting scripts need to be handed to
-    // the debugger after rejoining to the active thread.
+    // the debugger after rejoining to the main thread.
     if (cx->helperThread())
         return;
 
     // Lazy scripts are never top level (despite always being invoked with a
     // nullptr parent), and so the hook should never be fired.
     if (emitterMode != LazyFunction && !parent)
         Debugger::onNewScript(cx, script);
 }
--- a/js/src/gc/Allocator.cpp
+++ b/js/src/gc/Allocator.cpp
@@ -370,28 +370,28 @@ GCRuntime::refillFreeListFromAnyThread(J
         return refillFreeListFromMainThread(cx, thingKind);
 
     return refillFreeListFromHelperThread(cx, thingKind);
 }
 
 /* static */ TenuredCell*
 GCRuntime::refillFreeListFromMainThread(JSContext* cx, AllocKind thingKind)
 {
-    // It should not be possible to allocate on the active thread while we are
+    // It should not be possible to allocate on the main thread while we are
     // inside a GC.
     Zone *zone = cx->zone();
     MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy(), "allocating while under GC");
 
     return cx->arenas()->allocateFromArena(zone, thingKind, ShouldCheckThresholds::CheckThresholds);
 }
 
 /* static */ TenuredCell*
 GCRuntime::refillFreeListFromHelperThread(JSContext* cx, AllocKind thingKind)
 {
-    // A GC may be happening on the active thread, but zones used by off thread
+    // A GC may be happening on the main thread, but zones used by off thread
     // tasks are never collected.
     Zone* zone = cx->zone();
     MOZ_ASSERT(!zone->wasGCStarted());
 
     return cx->arenas()->allocateFromArena(zone, thingKind, ShouldCheckThresholds::CheckThresholds);
 }
 
 /* static */ TenuredCell*
--- a/js/src/gc/Cell.h
+++ b/js/src/gc/Cell.h
@@ -378,25 +378,25 @@ TenuredCell::readBarrier(TenuredCell* th
     // at the moment this can happen e.g. when rekeying tables containing
     // read-barriered GC things after a moving GC.
     //
     // TODO: Fix this and assert we're not collecting if we're on the active
     // thread.
 
     JS::shadow::Zone* shadowZone = thing->shadowZoneFromAnyThread();
     if (shadowZone->needsIncrementalBarrier()) {
-        // Barriers are only enabled on the active thread and are disabled while collecting.
+        // Barriers are only enabled on the main thread and are disabled while collecting.
         MOZ_ASSERT(!RuntimeFromMainThreadIsHeapMajorCollecting(shadowZone));
         Cell* tmp = thing;
         TraceManuallyBarrieredGenericPointerEdge(shadowZone->barrierTracer(), &tmp, "read barrier");
         MOZ_ASSERT(tmp == thing);
     }
 
     if (thing->isMarkedGray()) {
-        // There shouldn't be anything marked grey unless we're on the active thread.
+        // There shouldn't be anything marked grey unless we're on the main thread.
         MOZ_ASSERT(CurrentThreadCanAccessRuntime(thing->runtimeFromAnyThread()));
         if (!RuntimeFromMainThreadIsHeapMajorCollecting(shadowZone))
             JS::UnmarkGrayGCThingRecursively(JS::GCCellPtr(thing, thing->getTraceKind()));
     }
 }
 
 void
 AssertSafeToSkipBarrier(TenuredCell* thing);
--- a/js/src/gc/FreeOp.h
+++ b/js/src/gc/FreeOp.h
@@ -40,17 +40,17 @@ class FreeOp : public JSFreeOp
     explicit FreeOp(JSRuntime* maybeRuntime);
     ~FreeOp();
 
     bool onMainThread() const {
         return runtime_ != nullptr;
     }
 
     bool maybeOnHelperThread() const {
-        // Sometimes background finalization happens on the active thread so
+        // Sometimes background finalization happens on the main thread so
         // runtime_ being null doesn't always mean we are off thread.
         return !runtime_;
     }
 
     bool isDefaultFreeOp() const;
 
     void free_(void* p) {
         js_free(p);
--- a/js/src/gc/GC.cpp
+++ b/js/src/gc/GC.cpp
@@ -425,31 +425,31 @@ FOR_EACH_ALLOCKIND(EXPAND_THINGS_PER_ARE
 
 struct js::gc::FinalizePhase
 {
     gcstats::PhaseKind statsPhase;
     AllocKinds kinds;
 };
 
 /*
- * Finalization order for objects swept incrementally on the active thread.
+ * Finalization order for objects swept incrementally on the main thread.
  */
 static const FinalizePhase ForegroundObjectFinalizePhase = {
     gcstats::PhaseKind::SWEEP_OBJECT, {
         AllocKind::OBJECT0,
         AllocKind::OBJECT2,
         AllocKind::OBJECT4,
         AllocKind::OBJECT8,
         AllocKind::OBJECT12,
         AllocKind::OBJECT16
     }
 };
 
 /*
- * Finalization order for GC things swept incrementally on the active thread.
+ * Finalization order for GC things swept incrementally on the main thread.
  */
 static const FinalizePhase ForegroundNonObjectFinalizePhase = {
     gcstats::PhaseKind::SWEEP_SCRIPT, {
         AllocKind::SCRIPT,
         AllocKind::JITCODE
     }
 };
 
@@ -2435,17 +2435,17 @@ ShouldRelocateZone(size_t arenaCount, si
 
     return (relocCount * 100.0) / arenaCount >= MIN_ZONE_RECLAIM_PERCENT;
 }
 
 bool
 ArenaLists::relocateArenas(Zone* zone, Arena*& relocatedListOut, JS::gcreason::Reason reason,
                            SliceBudget& sliceBudget, gcstats::Statistics& stats)
 {
-    // This is only called from the active thread while we are doing a GC, so
+    // This is only called from the main thread while we are doing a GC, so
     // there is no need to lock.
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
     MOZ_ASSERT(runtime_->gc.isHeapCompacting());
     MOZ_ASSERT(!runtime_->gc.isBackgroundSweeping());
 
     // Clear all the free lists.
     clearFreeLists();
 
@@ -3300,17 +3300,17 @@ Nursery::requestMinorGC(JS::gcreason::Re
     // See comment in requestMajorGC.
     runtime()->mainContextFromOwnThread()->requestInterrupt(JSContext::RequestInterruptCanWait);
 }
 
 bool
 GCRuntime::triggerGC(JS::gcreason::Reason reason)
 {
     /*
-     * Don't trigger GCs if this is being called off the active thread from
+     * Don't trigger GCs if this is being called off the main thread from
      * onTooMuchMalloc().
      */
     if (!CurrentThreadCanAccessRuntime(rt))
         return false;
 
     /* GC is already running. */
     if (JS::CurrentThreadIsHeapCollecting())
         return false;
@@ -3555,17 +3555,17 @@ GCRuntime::sweepBackgroundThings(ZoneLis
                 if (arenas)
                     ArenaLists::backgroundFinalize(&fop, arenas, &emptyArenas);
             }
         }
 
         AutoLockGC lock(rt);
 
         // Release any arenas that are now empty, dropping and reaquiring the GC
-        // lock every so often to avoid blocking the active thread from
+        // lock every so often to avoid blocking the main thread from
         // allocating chunks.
         static const size_t LockReleasePeriod = 32;
         size_t releaseCount = 0;
         Arena* next;
         for (Arena* arena = emptyArenas; arena; arena = next) {
             next = arena->next;
             rt->gc.releaseArena(arena, lock);
             releaseCount++;
@@ -3725,17 +3725,17 @@ GCHelperState::waitBackgroundSweepEnd()
         waitForBackgroundThread(lock);
     if (!rt->gc.isIncrementalGCInProgress())
         rt->gc.assertBackgroundSweepingFinished();
 }
 
 void
 GCHelperState::doSweep(AutoLockGC& lock)
 {
-    // The active thread may call queueZonesForBackgroundSweep() while this is
+    // The main thread may call queueZonesForBackgroundSweep() while this is
     // running so we must check there is no more work to do before exiting.
 
     do {
         while (!rt->gc.backgroundSweepZones.ref().isEmpty()) {
             AutoSetThreadIsSweeping threadIsSweeping;
 
             ZoneList zones;
             zones.transferFrom(rt->gc.backgroundSweepZones.ref());
@@ -4199,17 +4199,17 @@ ShouldCollectZone(Zone* zone, JS::gcreas
     // thread. In either case we don't have information about which atoms are
     // roots, so we must skip collecting atoms.
     //
     // Note that only affects the first slice of an incremental GC since root
     // marking is completed before we return to the mutator.
     //
     // Off-thread parsing is inhibited after the start of GC which prevents
     // races between creating atoms during parsing and sweeping atoms on the
-    // active thread.
+    // main thread.
     //
     // Otherwise, we always schedule a GC in the atoms zone so that atoms which
     // the other collected zones are using are marked, and we can update the
     // set of atoms in use by the other collected zones at the end of the GC.
     if (zone->isAtomsZone())
         return TlsContext.get()->canCollectAtoms();
 
     return zone->canCollect();
@@ -7432,17 +7432,17 @@ GCRuntime::gcCycle(bool nonincrementalBy
     AutoTraceSession session(rt, JS::HeapState::MajorCollecting);
 
     majorGCTriggerReason = JS::gcreason::NO_REASON;
 
     number++;
     if (!isIncrementalGCInProgress())
         incMajorGcNumber();
 
-    // It's ok if threads other than the active thread have suppressGC set, as
+    // It's ok if threads other than the main thread have suppressGC set, as
     // they are operating on zones which will not be collected from here.
     MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);
 
     // Assert if this is a GC unsafe region.
     rt->mainContextFromOwnThread()->verifyIsSafeToGC();
 
     {
         gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::WAIT_BACKGROUND_THREAD);
@@ -9050,17 +9050,17 @@ js::gc::detail::CellIsMarkedGrayIfKnown(
     // following cases:
     //
     // 1) When OOM has caused us to clear the gcGrayBitsValid_ flag.
     //
     // 2) When we are in an incremental GC and examine a cell that is in a zone
     // that is not being collected. Gray targets of CCWs that are marked black
     // by a barrier will eventually be marked black in the next GC slice.
     //
-    // 3) When we are not on the runtime's active thread. Helper threads might
+    // 3) When we are not on the runtime's main thread. Helper threads might
     // call this while parsing, and they are not allowed to inspect the
     // runtime's incremental state. The objects being operated on are not able
     // to be collected and will not be marked any color.
 
     if (!CanCheckGrayBits(cell))
         return false;
 
     auto tc = &cell->asTenured();
--- a/js/src/gc/GCHelperState.h
+++ b/js/src/gc/GCHelperState.h
@@ -18,30 +18,30 @@ namespace gc {
 class ArenaLists;
 } /* namespace gc */
 
 /*
  * Helper state for use when JS helper threads sweep and allocate GC thing kinds
  * that can be swept and allocated off thread.
  *
  * In non-threadsafe builds, all actual sweeping and allocation is performed
- * on the active thread, but GCHelperState encapsulates this from clients as
+ * on the main thread, but GCHelperState encapsulates this from clients as
  * much as possible.
  */
 class GCHelperState
 {
     enum State {
         IDLE,
         SWEEPING
     };
 
     // Associated runtime.
     JSRuntime* const rt;
 
-    // Condvar for notifying the active thread when work has finished. This is
+    // Condvar for notifying the main thread when work has finished. This is
     // associated with the runtime's GC lock --- the worker thread state
     // condvars can't be used here due to lock ordering issues.
     ConditionVariable done;
 
     // Activity for the helper to do, protected by the GC lock.
     MainThreadOrGCTaskData<State> state_;
 
     // Whether work is being performed on some thread.
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -100,17 +100,17 @@ class ChunkPool
         operator Chunk*() const { return get(); }
         Chunk* operator->() const { return get(); }
       private:
         Chunk* current_;
     };
 };
 
 // Performs extra allocation off thread so that when memory is required on the
-// active thread it will already be available and waiting.
+// main thread it will already be available and waiting.
 class BackgroundAllocTask : public GCParallelTask
 {
     // Guarded by the GC lock.
     GCLockData<ChunkPool&> chunkPool_;
 
     const bool enabled_;
 
   public:
@@ -931,17 +931,17 @@ class GCRuntime
 
     /* Always preserve JIT code during GCs, for testing. */
     MainThreadData<bool> alwaysPreserveCode;
 
 #ifdef DEBUG
     MainThreadData<bool> arenasEmptyAtShutdown;
 #endif
 
-    /* Synchronize GC heap access among GC helper threads and active threads. */
+    /* Synchronize GC heap access among GC helper threads and the main thread. */
     friend class js::AutoLockGC;
     friend class js::AutoLockGCBgAlloc;
     js::Mutex lock;
 
     BackgroundAllocTask allocTask;
     BackgroundDecommitTask decommitTask;
 
     js::GCHelperState helperState;
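
A shape-only sketch of the background allocation pattern described by the BackgroundAllocTask comment above: a helper thread keeps a pool of chunks topped up so the main thread finds memory already waiting. All names below (ChunkPoolSketch and the two helpers) are invented for illustration and are not the GCRuntime API.

    #include <cstddef>
    #include <cstdlib>
    #include <mutex>
    #include <vector>

    // Invented stand-ins for the GC lock and chunk pool.
    struct ChunkPoolSketch {
        std::mutex lock;                // plays the role of the GC lock
        std::vector<void*> freeChunks;  // chunks ready for the main thread
    };

    // Helper-thread side: top the pool up while the main thread runs.
    static void BackgroundAllocStep(ChunkPoolSketch& pool, size_t chunkSize, size_t target) {
        void* chunk = std::malloc(chunkSize);
        if (!chunk)
            return;
        std::lock_guard<std::mutex> guard(pool.lock);
        if (pool.freeChunks.size() < target)
            pool.freeChunks.push_back(chunk);
        else
            std::free(chunk);  // enough in reserve already
    }

    // Main-thread side: take a ready chunk if one is waiting; otherwise the
    // caller falls back to allocating synchronously.
    static void* TakePreallocatedChunk(ChunkPoolSketch& pool) {
        std::lock_guard<std::mutex> guard(pool.lock);
        if (pool.freeChunks.empty())
            return nullptr;
        void* chunk = pool.freeChunks.back();
        pool.freeChunks.pop_back();
        return chunk;
    }
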
--- a/js/src/gc/ObjectKind-inl.h
+++ b/js/src/gc/ObjectKind-inl.h
@@ -18,17 +18,17 @@ namespace js {
 namespace gc {
 
 static inline bool
 CanBeFinalizedInBackground(AllocKind kind, const Class* clasp)
 {
     MOZ_ASSERT(IsObjectAllocKind(kind));
     /* If the class has no finalizer or a finalizer that is safe to call on
      * a different thread, we change the alloc kind. For example,
-     * AllocKind::OBJECT0 calls the finalizer on the active thread,
+     * AllocKind::OBJECT0 calls the finalizer on the main thread,
      * AllocKind::OBJECT0_BACKGROUND calls the finalizer on the gcHelperThread.
      * IsBackgroundFinalized is called to prevent recursively incrementing
      * the alloc kind; kind may already be a background finalize kind.
      */
     return (!IsBackgroundFinalized(kind) &&
             (!clasp->hasFinalize() || (clasp->flags & JSCLASS_BACKGROUND_FINALIZE)));
 }
 
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -247,18 +247,18 @@ Zone::discardJitCode(FreeOp* fop, bool d
      */
     if (discardBaselineCode) {
         jitZone()->optimizedStubSpace()->freeAllAfterMinorGC(this);
         jitZone()->purgeIonCacheIRStubInfo();
     }
 
     /*
      * Free all control flow graphs that are cached on BaselineScripts.
-     * Assuming this happens on the active thread and all control flow
-     * graph reads happen on the active thread, this is safe.
+     * Assuming this happens on the main thread and all control flow
+     * graph reads happen on the main thread, this is safe.
      */
     jitZone()->cfgSpace()->lifoAlloc().freeAll();
 }
 
 #ifdef JSGC_HASH_TABLE_CHECKS
 void
 JS::Zone::checkUniqueIdTableAfterMovingGC()
 {
--- a/js/src/jit/CompileWrappers.h
+++ b/js/src/jit/CompileWrappers.h
@@ -11,17 +11,17 @@
 
 namespace js {
 namespace jit {
 
 class JitRuntime;
 
 // During Ion compilation we need access to various bits of the current
 // compartment, runtime and so forth. However, since compilation can run off
-// thread while the active thread is mutating the VM, this access needs
+// thread while the main thread is mutating the VM, this access needs
 // to be restricted. The classes below give the compiler an interface to access
 // all necessary information in a threadsafe fashion.
 
 class CompileRuntime
 {
     JSRuntime* runtime();
 
   public:
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -381,39 +381,39 @@ JitRuntime::debugTrapHandler(JSContext* 
     }
     return debugTrapHandler_;
 }
 
 JitRuntime::IonBuilderList&
 JitRuntime::ionLazyLinkList(JSRuntime* rt)
 {
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt),
-               "Should only be mutated by the active thread.");
+               "Should only be mutated by the main thread.");
     return ionLazyLinkList_.ref();
 }
 
 void
 JitRuntime::ionLazyLinkListRemove(JSRuntime* rt, jit::IonBuilder* builder)
 {
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt),
-               "Should only be mutated by the active thread.");
+               "Should only be mutated by the main thread.");
     MOZ_ASSERT(rt == builder->script()->runtimeFromMainThread());
     MOZ_ASSERT(ionLazyLinkListSize_ > 0);
 
     builder->removeFrom(ionLazyLinkList(rt));
     ionLazyLinkListSize_--;
 
     MOZ_ASSERT(ionLazyLinkList(rt).isEmpty() == (ionLazyLinkListSize_ == 0));
 }
 
 void
 JitRuntime::ionLazyLinkListAdd(JSRuntime* rt, jit::IonBuilder* builder)
 {
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt),
-               "Should only be mutated by the active thread.");
+               "Should only be mutated by the main thread.");
     MOZ_ASSERT(rt == builder->script()->runtimeFromMainThread());
     ionLazyLinkList(rt).insertFront(builder);
     ionLazyLinkListSize_++;
 }
 
 uint8_t*
 JSContext::allocateOsrTempData(size_t size)
 {
@@ -2442,17 +2442,17 @@ Compile(JSContext* cx, HandleScript scri
 
 } // namespace jit
 } // namespace js
 
 bool
 jit::OffThreadCompilationAvailable(JSContext* cx)
 {
     // Even if off thread compilation is enabled, compilation must still occur
-    // on the active thread in some cases.
+    // on the main thread in some cases.
     //
     // Require cpuCount > 1 so that Ion compilation jobs and active-thread
     // execution are not competing for the same resources.
     return cx->runtime()->canUseOffthreadIonCompilation()
         && HelperThreadState().cpuCount > 1
         && CanUseExtraThreads();
 }
 
--- a/js/src/jit/Ion.h
+++ b/js/src/jit/Ion.h
@@ -58,17 +58,17 @@ class JitContext
     JitContext(JSContext* cx, TempAllocator* temp);
     JitContext(CompileRuntime* rt, CompileCompartment* comp, TempAllocator* temp);
     JitContext(CompileRuntime* rt, TempAllocator* temp);
     explicit JitContext(CompileRuntime* rt);
     explicit JitContext(TempAllocator* temp);
     JitContext();
     ~JitContext();
 
-    // Running context when executing on the active thread. Not available during
+    // Running context when executing on the main thread. Not available during
     // compilation.
     JSContext* cx;
 
     // Allocator for temporary memory during compilation.
     TempAllocator* temp;
 
     // Wrappers with information about the current runtime/compartment for use
     // during compilation.
--- a/js/src/jit/IonBuilder.cpp
+++ b/js/src/jit/IonBuilder.cpp
@@ -7319,17 +7319,17 @@ IonBuilder::ensureDefiniteType(MDefiniti
     current->add(replace);
     return replace;
 }
 
 static size_t
 NumFixedSlots(JSObject* object)
 {
     // Note: we can't use object->numFixedSlots() here, as this will read the
-    // shape and can race with the active thread if we are building off thread.
+    // shape and can race with the main thread if we are building off thread.
     // The allocation kind and object class (which goes through the type) can
     // be read freely, however.
     gc::AllocKind kind = object->asTenured().getAllocKind();
     return gc::GetGCKindSlots(kind, object->getClass());
 }
 
 static bool
 IsUninitializedGlobalLexicalSlot(JSObject* obj, PropertyName* name)
@@ -13539,17 +13539,17 @@ IonBuilder::setPropTryReferenceTypedObje
     *emitted = true;
     return resumeAfter(store);
 }
 
 JSObject*
 IonBuilder::checkNurseryObject(JSObject* obj)
 {
     // If we try to use any nursery pointers during compilation, make sure that
-    // the active thread will cancel this compilation before performing a minor
+    // the main thread will cancel this compilation before performing a minor
     // GC. All constants used during compilation should either go through this
     // function or should come from a type set (which has a similar barrier).
     if (obj && IsInsideNursery(obj)) {
         compartment->zone()->setMinorGCShouldCancelIonCompilations();
         IonBuilder* builder = this;
         while (builder) {
             builder->setNotSafeForMinorGC();
             builder = builder->callerBuilder_;
--- a/js/src/jit/IonOptimizationLevels.cpp
+++ b/js/src/jit/IonOptimizationLevels.cpp
@@ -88,17 +88,17 @@ OptimizationInfo::compilerWarmUpThreshol
     uint32_t warmUpThreshold = JitOptions.forcedDefaultIonWarmUpThreshold
         .valueOr(compilerWarmUpThreshold_);
 
     if (JitOptions.isSmallFunction(script)) {
         warmUpThreshold = JitOptions.forcedDefaultIonSmallFunctionWarmUpThreshold
             .valueOr(compilerSmallFunctionWarmUpThreshold_);
     }
 
-    // If the script is too large to compile on the active thread, we can still
+    // If the script is too large to compile on the main thread, we can still
     // compile it off thread. In these cases, increase the warm-up counter
     // threshold to improve the compilation's type information and hopefully
     // avoid later recompilation.
 
     if (script->length() > MAX_ACTIVE_THREAD_SCRIPT_SIZE)
         warmUpThreshold *= (script->length() / (double) MAX_ACTIVE_THREAD_SCRIPT_SIZE);
 
     uint32_t numLocalsAndArgs = NumLocalsAndArgs(script);
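
A minimal sketch of the warm-up scaling described in the hunk above, assuming the MAX_ACTIVE_THREAD_SCRIPT_SIZE value of 2 * 1000 from JitOptions.h; the helper and the example numbers are illustrative, not SpiderMonkey code.

    #include <cstddef>
    #include <cstdint>

    // Mirrors the scaling in OptimizationInfo::compilerWarmUpThreshold():
    // scripts too large for a main-thread compile get a proportionally higher
    // warm-up threshold, so the eventual off-thread compile sees richer type
    // information and is less likely to need recompilation.
    static uint32_t ScaledWarmUpThreshold(uint32_t base, size_t scriptLength) {
        const size_t kMaxMainThreadScriptSize = 2 * 1000;  // assumed constant
        if (scriptLength > kMaxMainThreadScriptSize)
            base = uint32_t(base * (scriptLength / double(kMaxMainThreadScriptSize)));
        return base;
    }

    // Example: a base threshold of 1000 and a 10000-byte script yield 5000.
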
--- a/js/src/jit/IonOptimizationLevels.h
+++ b/js/src/jit/IonOptimizationLevels.h
@@ -92,17 +92,17 @@ class OptimizationInfo
     // Toggles whether sink is used.
     bool sink_;
 
     // Describes which register allocator to use.
     IonRegisterAllocator registerAllocator_;
 
     // The maximum total bytecode size of an inline call site. We use a lower
     // value if off-thread compilation is not available, to avoid stalling the
-    // active thread.
+    // main thread.
     uint32_t inlineMaxBytecodePerCallSiteHelperThread_;
     uint32_t inlineMaxBytecodePerCallSiteMainThread_;
 
     // The maximum value we allow for baselineScript->inlinedBytecodeLength_
     // when inlining.
     uint16_t inlineMaxCalleeInlinedBytecodeLength_;
 
     // The maximum bytecode length we'll inline in a single compilation.
--- a/js/src/jit/JitCompartment.h
+++ b/js/src/jit/JitCompartment.h
@@ -605,17 +605,17 @@ class JitCompartment
         if (stubs_[RegExpTester])
             return true;
         stubs_[RegExpTester] = generateRegExpTesterStub(cx);
         return stubs_[RegExpTester];
     }
 
     // Perform the necessary read barriers on stubs and SIMD template object
     // described by the bitmasks passed in. This function can only be called
-    // from the active thread.
+    // from the main thread.
     //
     // The stub and template object pointers must still be valid by the time
     // these methods are called. This is arranged by cancelling off-thread Ion
     // compilation at the start of GC and at the start of sweeping.
     void performStubReadBarriers(uint32_t stubsToBarrier) const;
     void performSIMDTemplateReadBarriers(uint32_t simdTemplatesToBarrier) const;
 
     size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
--- a/js/src/jit/JitOptions.h
+++ b/js/src/jit/JitOptions.h
@@ -11,17 +11,17 @@
 
 #include "jit/IonTypes.h"
 #include "js/TypeDecls.h"
 
 namespace js {
 namespace jit {
 
 // Longer scripts can only be compiled off thread, as these compilations
-// can be expensive and stall the active thread for too long.
+// can be expensive and stall the main thread for too long.
 static const uint32_t MAX_ACTIVE_THREAD_SCRIPT_SIZE = 2 * 1000;
 static const uint32_t MAX_ACTIVE_THREAD_LOCALS_AND_ARGS = 256;
 
 // Possible register allocators which may be used.
 enum IonRegisterAllocator {
     RegisterAllocator_Backtracking,
     RegisterAllocator_Testbed,
     RegisterAllocator_Stupid
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -949,22 +949,22 @@ MakeUnknownTypeSet(TempAllocator& tempAl
 }
 
 #ifdef DEBUG
 
 bool
 jit::IonCompilationCanUseNurseryPointers()
 {
     // If we are doing backend compilation, which could occur on a helper
-    // thread but might actually be on the active thread, check the flag set on
+    // thread but might actually be on the main thread, check the flag set on
     // the JSContext by AutoEnterIonCompilation.
     if (CurrentThreadIsIonCompiling())
         return !CurrentThreadIsIonCompilingSafeForMinorGC();
 
-    // Otherwise, we must be on the active thread during MIR construction. The
+    // Otherwise, we must be on the main thread during MIR construction. The
     // store buffer must have been notified that minor GCs must cancel pending
     // or in progress Ion compilations.
     JSRuntime* rt = TlsContext.get()->zone()->runtimeFromMainThread();
     return rt->gc.storeBuffer().cancelIonCompilations();
 }
 
 #endif // DEBUG
 
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -4180,17 +4180,17 @@ class MInitElemGetterSetter
 
   public:
     INSTRUCTION_HEADER(InitElemGetterSetter)
     TRIVIAL_NEW_WRAPPERS
     NAMED_OPERANDS((0, object), (1, idValue), (2, value))
 };
 
 // WrappedFunction wraps a JSFunction so it can safely be used off-thread.
-// In particular, a function's flags can be modified on the active thread as
+// In particular, a function's flags can be modified on the main thread as
 // functions are relazified and delazified, so we must be careful not to access
 // these flags off-thread.
 class WrappedFunction : public TempObject
 {
     CompilerFunction fun_;
     uint16_t nargs_;
     bool isNative_ : 1;
     bool isNativeWithJitEntry_ : 1;
@@ -8867,17 +8867,17 @@ class MClassConstructor : public MNullar
         return AliasSet::None();
     }
 };
 
 struct LambdaFunctionInfo
 {
     // The functions used in lambdas are the canonical original function in
     // the script, and are immutable except for delazification. Record this
-    // information while still on the active thread to avoid races.
+    // information while still on the main thread to avoid races.
   private:
     CompilerFunction fun_;
 
   public:
     uint16_t flags;
     uint16_t nargs;
     gc::Cell* scriptOrLazyScript;
     bool singletonType;
--- a/js/src/jit/MIRGenerator.h
+++ b/js/src/jit/MIRGenerator.h
@@ -114,17 +114,17 @@ class MIRGenerator
 
     bool safeForMinorGC() const {
         return safeForMinorGC_;
     }
     void setNotSafeForMinorGC() {
         safeForMinorGC_ = false;
     }
 
-    // Whether the active thread is trying to cancel this build.
+    // Whether the main thread is trying to cancel this build.
     bool shouldCancel(const char* why) {
         return cancelBuild_;
     }
     void cancel() {
         cancelBuild_ = true;
     }
 
     bool compilingWasm() const {
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -4226,23 +4226,23 @@ CanDoOffThread(JSContext* cx, const Read
 {
     static const size_t TINY_LENGTH = 5 * 1000;
     static const size_t HUGE_SRC_LENGTH = 100 * 1000;
     static const size_t HUGE_BC_LENGTH = 367 * 1000;
 
     // These are heuristics which the caller may choose to ignore (e.g., for
     // testing purposes).
     if (!options.forceAsync) {
-        // Compiling off the active thread inolves creating a new Zone and other
+        // Compiling off the main thread involves creating a new Zone and other
         // significant overheads.  Don't bother if the script is tiny.
         if (length < TINY_LENGTH)
             return false;
 
         // If the parsing task would have to wait for GC to complete, it'll probably
-        // be faster to just start it synchronously on the active thread unless the
+        // be faster to just start it synchronously on the main thread unless the
         // script is huge.
         if (OffThreadParsingMustWaitForGC(cx->runtime())) {
             if (what == OffThread::Compile && length < HUGE_SRC_LENGTH)
                 return false;
             if (what == OffThread::Decode && length < HUGE_BC_LENGTH)
                 return false;
         }
     }
--- a/js/src/jsapi.h
+++ b/js/src/jsapi.h
@@ -3913,17 +3913,17 @@ CanDecodeOffThread(JSContext* cx, const 
 
 /*
  * Off thread compilation control flow.
  *
  * After successfully triggering an off thread compile of a script, the
  * callback will eventually be invoked with the specified data and a token
  * for the compilation. The callback will be invoked while off thread,
  * so must ensure that its operations are thread safe. Afterwards, one of the
- * following functions must be invoked on the runtime's active thread:
+ * following functions must be invoked on the runtime's main thread:
  *
  * - FinishOffThreadScript, to get the result script (or nullptr on failure).
  * - CancelOffThreadScript, to free the resources without creating a script.
  *
  * The characters passed in to CompileOffThread must remain live until the
  * callback is invoked, and the resulting script will be rooted until the call
  * to FinishOffThreadScript.
  */
@@ -6040,17 +6040,17 @@ extern JS_PUBLIC_API(TranscodeResult)
 DecodeScript(JSContext* cx, const TranscodeRange& range, JS::MutableHandleScript scriptp);
 
 extern JS_PUBLIC_API(TranscodeResult)
 DecodeInterpretedFunction(JSContext* cx, TranscodeBuffer& buffer, JS::MutableHandleFunction funp,
                           size_t cursorIndex = 0);
 
 // Register an encoder on the given script source, such that all functions can
 // be encoded as they are parsed. This strategy is used to avoid blocking the
-// active thread in a non-interruptible way.
+// main thread in a non-interruptible way.
 //
 // The |script| argument of |StartIncrementalEncoding| and
 // |FinishIncrementalEncoding| should be the top-level script returned either as
 // an out-param of any of the |Compile| functions, or the result of
 // |FinishOffThreadScript|.
 //
 // The |buffer| argument of |FinishIncrementalEncoding| is used for appending
 // the encoded bytecode into the buffer. If any of these functions failed, the
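
A minimal embedder-side sketch of the off-thread compilation flow described in the jsapi.h comment above, assuming the JS::CanCompileOffThread, JS::CompileOffThread and JS::FinishOffThreadScript entry points of this era; signatures are simplified and the StoreTokenForMainThread helper is hypothetical.

    #include "jsapi.h"

    // Hypothetical: hand the token back to the main thread, e.g. by posting
    // it to the embedding's event loop.
    static void StoreTokenForMainThread(void* token, void* callbackData);

    // The callback runs on a helper thread; the token must be consumed on the
    // runtime's main thread afterwards.
    static void OnCompileDone(void* token, void* callbackData) {
        StoreTokenForMainThread(token, callbackData);
    }

    static bool StartOffThreadCompile(JSContext* cx,
                                      const JS::ReadOnlyCompileOptions& options,
                                      const char16_t* chars, size_t length,
                                      void* data) {
        if (!JS::CanCompileOffThread(cx, options, length))
            return false;  // too small or GC pending: compile on the main thread instead
        return JS::CompileOffThread(cx, options, chars, length, OnCompileDone, data);
    }

    // Later, on the main thread, once the token has been delivered:
    //     JS::RootedScript script(cx, JS::FinishOffThreadScript(cx, token));
    //     if (!script) { /* compilation failed; handle the pending exception */ }
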
--- a/js/src/vm/ErrorReporting.cpp
+++ b/js/src/vm/ErrorReporting.cpp
@@ -54,17 +54,17 @@ js::CompileError::throwError(JSContext* 
     // uncaught exception report.
     ErrorToException(cx, this, nullptr, nullptr);
 }
 
 bool
 js::ReportCompileWarning(JSContext* cx, ErrorMetadata&& metadata, UniquePtr<JSErrorNotes> notes,
                          unsigned flags, unsigned errorNumber, va_list args)
 {
-    // On the active thread, report the error immediately. When compiling off
+    // On the main thread, report the error immediately. When compiling off
     // thread, save the error so that the thread finishing the parse can report
     // it later.
     CompileError tempErr;
     CompileError* err = &tempErr;
     if (cx->helperThread() && !cx->addPendingCompileError(&err))
         return false;
 
     err->notes = Move(notes);
@@ -90,17 +90,17 @@ js::ReportCompileWarning(JSContext* cx, 
 
     return true;
 }
 
 void
 js::ReportCompileError(JSContext* cx, ErrorMetadata&& metadata, UniquePtr<JSErrorNotes> notes,
                        unsigned flags, unsigned errorNumber, va_list args)
 {
-    // On the active thread, report the error immediately. When compiling off
+    // On the main thread, report the error immediately. When compiling off
     // thread, save the error so that the thread finishing the parse can report
     // it later.
     CompileError tempErr;
     CompileError* err = &tempErr;
     if (cx->helperThread() && !cx->addPendingCompileError(&err))
         return;
 
     err->notes = Move(notes);
--- a/js/src/vm/HelperThreads.cpp
+++ b/js/src/vm/HelperThreads.cpp
@@ -594,17 +594,17 @@ js::CancelOffThreadParses(JSRuntime* rt)
                     inProgress = true;
             }
             if (!inProgress)
                 break;
         }
         HelperThreadState().wait(lock, GlobalHelperThreadState::CONSUMER);
     }
 
-    // Clean up any parse tasks which haven't been finished by the active thread.
+    // Clean up any parse tasks which haven't been finished by the main thread.
     GlobalHelperThreadState::ParseTaskVector& finished = HelperThreadState().parseFinishedList(lock);
     while (true) {
         bool found = false;
         for (size_t i = 0; i < finished.length(); i++) {
             ParseTask* task = finished[i];
             if (task->runtimeMatches(rt)) {
                 found = true;
                 AutoUnlockHelperThreadState unlock(lock);
@@ -1755,17 +1755,17 @@ HelperThread::handleWasmWorkload(AutoLoc
     currentTask.emplace(HelperThreadState().wasmWorklist(locked, mode).popCopyFront());
 
     wasm::CompileTask* task = wasmTask();
     {
         AutoUnlockHelperThreadState unlock(locked);
         wasm::ExecuteCompileTaskFromHelperThread(task);
     }
 
-    // No active thread should be waiting on the CONSUMER mutex.
+    // No main thread should be waiting on the CONSUMER mutex.
     currentTask.reset();
 }
 
 void
 HelperThread::handleWasmTier2GeneratorWorkload(AutoLockHelperThreadState& locked)
 {
     MOZ_ASSERT(HelperThreadState().canStartWasmTier2Generator(locked));
     MOZ_ASSERT(idle());
@@ -1798,17 +1798,17 @@ HelperThread::handlePromiseHelperTaskWor
     currentTask.emplace(task);
 
     {
         AutoUnlockHelperThreadState unlock(locked);
         task->execute();
         task->dispatchResolveAndDestroy();
     }
 
-    // No active thread should be waiting on the CONSUMER mutex.
+    // No main thread should be waiting on the CONSUMER mutex.
     currentTask.reset();
 }
 
 void
 HelperThread::handleIonWorkload(AutoLockHelperThreadState& locked)
 {
     MOZ_ASSERT(HelperThreadState().canStartIonCompile(locked));
     MOZ_ASSERT(idle());
@@ -1846,17 +1846,17 @@ HelperThread::handleIonWorkload(AutoLock
     // This must happen before the current task is reset. DestroyContext
     // cancels in progress Ion compilations before destroying its target
     // context, and after we reset the current task we are no longer considered
     // to be Ion compiling.
     rt->mainContextFromAnyThread()->requestInterrupt(JSContext::RequestInterruptCanWait);
 
     currentTask.reset();
 
-    // Notify the active thread in case it is waiting for the compilation to finish.
+    // Notify the main thread in case it is waiting for the compilation to finish.
     HelperThreadState().notifyAll(GlobalHelperThreadState::CONSUMER, locked);
 }
 
 void
 HelperThread::handleIonFreeWorkload(AutoLockHelperThreadState& locked)
 {
     MOZ_ASSERT(idle());
     MOZ_ASSERT(HelperThreadState().canStartIonFreeTask(locked));
@@ -1948,17 +1948,17 @@ HelperThread::handleParseWorkload(AutoLo
     {
         AutoEnterOOMUnsafeRegion oomUnsafe;
         if (!HelperThreadState().parseFinishedList(locked).append(task))
             oomUnsafe.crash("handleParseWorkload");
     }
 
     currentTask.reset();
 
-    // Notify the active thread in case it is waiting for the parse/emit to finish.
+    // Notify the main thread in case it is waiting for the parse/emit to finish.
     HelperThreadState().notifyAll(GlobalHelperThreadState::CONSUMER, locked);
 }
 
 void
 HelperThread::handleCompressionWorkload(AutoLockHelperThreadState& locked)
 {
     MOZ_ASSERT(HelperThreadState().canStartCompressionTask(locked));
     MOZ_ASSERT(idle());
@@ -1983,17 +1983,17 @@ HelperThread::handleCompressionWorkload(
     {
         AutoEnterOOMUnsafeRegion oomUnsafe;
         if (!HelperThreadState().compressionFinishedList(locked).append(Move(task)))
             oomUnsafe.crash("handleCompressionWorkload");
     }
 
     currentTask.reset();
 
-    // Notify the active thread in case it is waiting for the compression to finish.
+    // Notify the main thread in case it is waiting for the compression to finish.
     HelperThreadState().notifyAll(GlobalHelperThreadState::CONSUMER, locked);
 }
 
 bool
 js::EnqueueOffThreadCompression(JSContext* cx, UniquePtr<SourceCompressionTask> task)
 {
     AutoLockHelperThreadState lock;
 
--- a/js/src/vm/JSContext.h
+++ b/js/src/vm/JSContext.h
@@ -459,22 +459,22 @@ struct JSContext : public JS::RootingCon
     js::ThreadData<bool> ionCompiling;
 
     // Whether this thread is actively Ion compiling in a context where a minor
     // GC could happen simultaneously. If this is true, this thread cannot use
     // any pointers into the nursery.
     js::ThreadData<bool> ionCompilingSafeForMinorGC;
 
     // Whether this thread is currently performing GC.  This thread could be the
-    // active thread or a helper thread while the active thread is running the
+    // main thread or a helper thread while the main thread is running the
     // collector.
     js::ThreadData<bool> performingGC;
 
     // Whether this thread is currently sweeping GC things.  This thread could
-    // be the active thread or a helper thread while the active thread is running
+    // be the main thread or a helper thread while the main thread is running
     // the mutator.  This is used to assert that destruction of GCPtr only
     // happens when we are sweeping.
     js::ThreadData<bool> gcSweeping;
 
     // Whether this thread is performing work in the background for a runtime's
     // GCHelperState.
     js::ThreadData<bool> gcHelperStateThread;
 
@@ -536,17 +536,17 @@ struct JSContext : public JS::RootingCon
     js::ThreadData<unsigned> generationalDisabled;
 
     // Some code cannot tolerate compacting GC so it can be disabled temporarily
     // with AutoDisableCompactingGC which uses this counter.
     js::ThreadData<unsigned> compactingDisabledCount;
 
     // Count of AutoKeepAtoms instances on the current thread's stack. When any
     // instances exist, atoms in the runtime will not be collected. Threads
-    // parsing off the active thread do not increment this value, but the presence
+    // parsing off the main thread do not increment this value, but the presence
     // of any such threads also inhibits collection of atoms. We don't scan the
     // stacks of exclusive threads, so we need to avoid collecting their
     // objects in another way. The only GC thing pointers they have are to
     // their exclusive compartment (which is not collected) or to the atoms
     // compartment. Therefore, we avoid collecting the atoms compartment when
     // exclusive threads are running.
     js::ThreadData<unsigned> keepAtoms;
 
--- a/js/src/vm/JSObject-inl.h
+++ b/js/src/vm/JSObject-inl.h
@@ -90,17 +90,17 @@ JSObject::ensureShape(JSContext* cx)
 inline void
 JSObject::finalize(js::FreeOp* fop)
 {
     js::probes::FinalizeObject(this);
 
 #ifdef DEBUG
     MOZ_ASSERT(isTenured());
     if (!IsBackgroundFinalized(asTenured().getAllocKind())) {
-        /* Assert we're on the active thread. */
+        /* Assert we're on the main thread. */
         MOZ_ASSERT(CurrentThreadCanAccessZone(zone()));
     }
 #endif
 
     const js::Class* clasp = getClass();
     js::NativeObject* nobj = nullptr;
     if (clasp->isNative())
         nobj = &as<js::NativeObject>();
--- a/js/src/vm/JSObject.cpp
+++ b/js/src/vm/JSObject.cpp
@@ -4111,17 +4111,17 @@ JSObject::debugCheckNewObject(ObjectGrou
         MOZ_ASSERT(clasp == &UnboxedPlainObject::class_);
 
     if (!ClassCanHaveFixedData(clasp)) {
         MOZ_ASSERT(shape);
         MOZ_ASSERT(gc::GetGCKindSlots(allocKind, clasp) == shape->numFixedSlots());
     }
 
     // Classes with a finalizer must specify whether instances will be finalized
-    // on the active thread or in the background, except proxies whose behaviour
+    // on the main thread or in the background, except proxies whose behaviour
     // depends on the target object.
     static const uint32_t FinalizeMask = JSCLASS_FOREGROUND_FINALIZE | JSCLASS_BACKGROUND_FINALIZE;
     uint32_t flags = clasp->flags;
     uint32_t finalizeFlags = flags & FinalizeMask;
     if (clasp->hasFinalize() && !clasp->isProxy()) {
         MOZ_ASSERT(finalizeFlags == JSCLASS_FOREGROUND_FINALIZE ||
                    finalizeFlags == JSCLASS_BACKGROUND_FINALIZE);
         MOZ_ASSERT((finalizeFlags == JSCLASS_BACKGROUND_FINALIZE) == IsBackgroundFinalized(allocKind));
--- a/js/src/vm/JSScript.h
+++ b/js/src/vm/JSScript.h
@@ -388,18 +388,18 @@ class ScriptSource
             return chars_;
         }
     };
 
   private:
     uint32_t refs;
 
     // Note: while ScriptSources may be compressed off thread, they are only
-    // modified by the active thread, and all members are always safe to access
-    // on the active thread.
+    // modified by the main thread, and all members are always safe to access
+    // on the main thread.
 
     // Indicate which field in the |data| union is active.
 
     struct Missing { };
 
     struct Uncompressed
     {
         SharedImmutableTwoByteString string;
--- a/js/src/vm/ObjectGroup-inl.h
+++ b/js/src/vm/ObjectGroup-inl.h
@@ -10,17 +10,17 @@
 #include "vm/ObjectGroup.h"
 
 namespace js {
 
 inline bool
 ObjectGroup::needsSweep()
 {
     // Note: this can be called off thread during compacting GCs, in which case
-    // nothing will be running on the active thread.
+    // nothing will be running on the main thread.
     MOZ_ASSERT(!TlsContext.get()->inUnsafeCallWithABI);
     return generation() != zoneFromAnyThread()->types.generation;
 }
 
 inline void
 ObjectGroup::maybeSweep(AutoClearTypeInferenceStateOnOOM* oom)
 {
     if (needsSweep())
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -286,17 +286,17 @@ JSRuntime::destroyRuntime()
             FinishGC(cx);
 
         /* Free source hook early, as its destructor may want to delete roots. */
         sourceHook = nullptr;
 
         /*
          * Cancel any pending, in progress or completed Ion compilations and
          * parse tasks. Waiting for wasm and compression tasks is done
-         * synchronously (on the active thread or during parse tasks), so no
+         * synchronously (on the main thread or during parse tasks), so no
          * explicit canceling is needed for these.
          */
         CancelOffThreadIonCompile(this);
         CancelOffThreadParses(this);
         CancelOffThreadCompressions(this);
 
         /* Remove persistent GC roots. */
         gc.finishRoots();
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -457,29 +457,29 @@ struct JSRuntime : public js::MallocProv
     mozilla::LinkedList<js::Debugger>& debuggerList() { return debuggerList_.ref(); }
 
   private:
     /*
      * Lock taken when using per-runtime or per-zone data that could otherwise
      * be accessed simultaneously by multiple threads.
      *
      * Locking this only occurs if there is actually a thread other than the
-     * active thread which could access such data.
+     * main thread which could access such data.
      */
     js::Mutex exclusiveAccessLock;
 #ifdef DEBUG
     bool activeThreadHasExclusiveAccess;
 #endif
 
     /*
      * Lock used to protect the script data table, which can be used by
      * off-thread parsing.
      *
      * Locking this only occurs if there is actually a thread other than the
-     * active thread which could access this.
+     * main thread which could access this.
      */
     js::Mutex scriptDataLock;
 #ifdef DEBUG
     bool activeThreadHasScriptDataAccess;
 #endif
 
     // Number of zones which may be operated on by helper threads.
     mozilla::Atomic<size_t> numActiveHelperThreadZones;
--- a/js/src/vm/SymbolType.cpp
+++ b/js/src/vm/SymbolType.cpp
@@ -39,17 +39,17 @@ Symbol::new_(JSContext* cx, JS::SymbolCo
     JSAtom* atom = nullptr;
     if (description) {
         atom = AtomizeString(cx, description);
         if (!atom)
             return nullptr;
     }
 
     // Lock to allocate. If symbol allocation becomes a bottleneck, this can
-    // probably be replaced with an assertion that we're on the active thread.
+    // probably be replaced with an assertion that we're on the main thread.
     AutoLockForExclusiveAccess lock(cx);
     Symbol* sym;
     {
         AutoAtomsCompartment ac(cx, lock);
         sym = newInternal(cx, code, cx->compartment()->randomHashCode(), atom, lock);
     }
     if (sym)
         cx->markAtom(sym);
--- a/js/src/vm/TypeInference.cpp
+++ b/js/src/vm/TypeInference.cpp
@@ -1016,40 +1016,40 @@ TypeSet::intersectSets(TemporaryTypeSet*
 // Compiler constraints
 /////////////////////////////////////////////////////////////////////
 
 // Compiler constraints overview
 //
 // Constraints generated during Ion compilation capture assumptions made about
 // heap properties that will trigger invalidation of the resulting Ion code if
 // the constraint is violated. Constraints can only be attached to type sets on
-// the active thread, so to allow compilation to occur almost entirely off thread
+// the main thread, so to allow compilation to occur almost entirely off thread
 // the generation is split into two phases.
 //
 // During compilation, CompilerConstraint values are constructed in a list,
 // recording the heap property type set which was read from and its expected
 // contents, along with the assumption made about those contents.
 //
-// At the end of compilation, when linking the result on the active thread, the
+// At the end of compilation, when linking the result on the main thread, the
 // list of compiler constraints are read and converted to type constraints and
 // attached to the type sets. If the property type sets have changed so that the
 // assumptions no longer hold then the compilation is aborted and its result
 // discarded.
 
 // Superclass of all constraints generated during Ion compilation. These may
 // be allocated off thread, using the current JIT context's allocator.
 class CompilerConstraint
 {
   public:
     // Property being queried by the compiler.
     HeapTypeSetKey property;
 
     // Contents of the property at the point when the query was performed. This
     // may differ from the actual property types later in compilation as the
-    // active thread performs side effects.
+    // main thread performs side effects.
     TemporaryTypeSet* expected;
 
     CompilerConstraint(LifoAlloc* alloc, const HeapTypeSetKey& property)
       : property(property),
         expected(property.maybeTypes() ? property.maybeTypes()->clone(alloc) : nullptr)
     {}
 
     // Generate the type constraint recording the assumption made by this
@@ -1309,17 +1309,17 @@ TypeSet::ObjectKey::property(jsid id)
     return property;
 }
 
 void
 TypeSet::ObjectKey::ensureTrackedProperty(JSContext* cx, jsid id)
 {
     // If we are accessing a lazily defined property which actually exists in
     // the VM and has not been instantiated yet, instantiate it now if we are
-    // on the active thread and able to do so.
+    // on the main thread and able to do so.
     if (!JSID_IS_VOID(id) && !JSID_IS_EMPTY(id)) {
         MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
         if (isSingleton()) {
             JSObject* obj = singleton();
             if (obj->isNative() && obj->as<NativeObject>().containsPure(id))
                 EnsureTrackPropertyTypes(cx, obj, id);
         }
     }
@@ -1507,17 +1507,17 @@ js::FinishCompilation(JSContext* cx, Han
 
     *isValidOut = true;
     return true;
 }
 
 static void
 CheckDefinitePropertiesTypeSet(JSContext* cx, TemporaryTypeSet* frozen, StackTypeSet* actual)
 {
-    // The definite properties analysis happens on the active thread, so no new
+    // The definite properties analysis happens on the main thread, so no new
     // types can have been added to actual. The analysis may have updated the
     // contents of |frozen| though with new speculative types, and these need
     // to be reflected in |actual| for AddClearDefiniteFunctionUsesInScript
     // to work.
     if (!frozen->isSubset(actual)) {
         TypeSet::TypeList list;
         frozen->enumerateTypes(&list);
 
@@ -4333,17 +4333,17 @@ ObjectGroup::sweep(AutoClearTypeInferenc
 
     Maybe<AutoClearTypeInferenceStateOnOOM> fallbackOOM;
     EnsureHasAutoClearTypeInferenceStateOnOOM(oom, zone(), fallbackOOM);
 
     AutoTouchingGrayThings tgt;
 
     if (maybeUnboxedLayout()) {
         // Remove unboxed layouts that are about to be finalized from the
-        // compartment wide list while we are still on the active thread.
+        // compartment wide list while we are still on the main thread.
         ObjectGroup* group = this;
         if (IsAboutToBeFinalizedUnbarriered(&group))
             unboxedLayout().detachFromCompartment();
 
         if (unboxedLayout().newScript())
             unboxedLayout().newScript()->sweep();
 
         // Discard constructor code to avoid holding onto ExecutablePools.
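
The compiler-constraints overview in the hunk above describes a two-phase scheme: record assumptions about heap type sets while compiling off thread, then re-check and attach them on the main thread at link time. The sketch below shows only the shape of that scheme with invented types; it is not SpiderMonkey's CompilerConstraint API.

    #include <cstdint>
    #include <vector>

    struct RecordedAssumption {
        const void* typeSet;     // heap type set the compiler read off thread
        uint64_t observedState;  // snapshot of its contents at read time
    };

    // Phase 1 (helper thread): append each heap property the compiler relied on.
    static void RecordAssumption(std::vector<RecordedAssumption>& list,
                                 const void* typeSet, uint64_t state) {
        list.push_back({typeSet, state});
    }

    // Phase 2 (main thread, at link time): re-check every assumption; if any
    // type set changed while the main thread ran side effects, the compiled
    // code is discarded instead of being linked.
    static bool LinkIfStillValid(const std::vector<RecordedAssumption>& list,
                                 uint64_t (*currentState)(const void*)) {
        for (const RecordedAssumption& a : list) {
            if (currentState(a.typeSet) != a.observedState)
                return false;
        }
        return true;
    }
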
--- a/js/src/vm/TypeInference.h
+++ b/js/src/vm/TypeInference.h
@@ -1293,20 +1293,20 @@ FinishCompilation(JSContext* cx, HandleS
 
 // Update the actual types in any scripts queried by constraints with any
 // speculative types added during the definite properties analysis.
 void
 FinishDefinitePropertiesAnalysis(JSContext* cx, CompilerConstraintList* constraints);
 
 // Representation of a heap type property which may or may not be instantiated.
 // Heap properties for singleton types are instantiated lazily as they are used
-// by the compiler, but this is only done on the active thread. If we are
+// by the compiler, but this is only done on the main thread. If we are
 // compiling off thread and use a property which has not yet been instantiated,
 // it will be treated as empty and non-configured and will be instantiated when
-// rejoining to the active thread. If it is in fact not empty, the compilation
+// rejoining to the main thread. If it is in fact not empty, the compilation
 // will fail; to avoid this, we try to instantiate singleton property types
 // during generation of baseline caches.
 class HeapTypeSetKey
 {
     friend class TypeSet::ObjectKey;
 
     // Object and property being accessed.
     TypeSet::ObjectKey* object_;
--- a/js/src/wasm/WasmSignalHandlers.cpp
+++ b/js/src/wasm/WasmSignalHandlers.cpp
@@ -1031,17 +1031,17 @@ HandleMachException(JSContext* cx, const
 
     if (request.body.exception != EXC_BAD_ACCESS &&
         request.body.exception != EXC_BAD_INSTRUCTION)
     {
         return false;
     }
 
     // The faulting thread is suspended so we can access cx fields that can
-    // normally only be accessed by the cx's active thread.
+    // normally only be accessed by the cx's main thread.
     AutoNoteSingleThreadedRegion anstr;
 
     const CodeSegment* codeSegment = LookupCodeSegment(pc);
     if (!codeSegment || !codeSegment->isModule())
         return false;
 
     const ModuleSegment* moduleSegment = codeSegment->asModule();