--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -97,17 +97,17 @@ class BackgroundDecommitTask : public GC
explicit BackgroundDecommitTask(JSRuntime *rt) : GCParallelTask(rt) {}
void setChunksToScan(ChunkVector &chunks);
protected:
void run() override;
private:
- UnprotectedData<ChunkVector> toDecommit;
+ ActiveThreadOrGCTaskData<ChunkVector> toDecommit;
};
/*
* Encapsulates all of the GC tunables. These are effectively constant and
* should only be modified by setParameter.
*/
class GCSchedulingTunables
{
@@ -115,70 +115,70 @@ class GCSchedulingTunables
* Soft limit on the number of bytes we are allowed to allocate in the GC
* heap. Attempts to allocate gcthings over this limit will return null and
* subsequently invoke the standard OOM machinery, independent of available
* physical memory.
*/
UnprotectedData<size_t> gcMaxBytes_;
/* Maximum nursery size for each zone group. */
- UnprotectedData<size_t> gcMaxNurseryBytes_;
+ ActiveThreadData<size_t> gcMaxNurseryBytes_;
/*
* The base value used to compute zone->trigger.gcBytes(). When
* usage.gcBytes() surpasses threshold.gcBytes() for a zone, the zone may
* be scheduled for a GC, depending on the exact circumstances.
*/
- UnprotectedData<size_t> gcZoneAllocThresholdBase_;
+ ActiveThreadOrGCTaskData<size_t> gcZoneAllocThresholdBase_;
/* Fraction of threshold.gcBytes() which triggers an incremental GC. */
UnprotectedData<double> zoneAllocThresholdFactor_;
/*
* Number of bytes to allocate between incremental slices in GCs triggered
* by the zone allocation threshold.
*/
UnprotectedData<size_t> zoneAllocDelayBytes_;
/*
* Totally disables |highFrequencyGC|, the HeapGrowthFactor, and other
* tunables that make GC non-deterministic.
*/
- UnprotectedData<bool> dynamicHeapGrowthEnabled_;
+ ActiveThreadData<bool> dynamicHeapGrowthEnabled_;
/*
 * We enter high-frequency mode if we GC twice within this many
* microseconds. This value is stored directly in microseconds.
*/
- UnprotectedData<uint64_t> highFrequencyThresholdUsec_;
+ ActiveThreadData<uint64_t> highFrequencyThresholdUsec_;
/*
* When in the |highFrequencyGC| mode, these parameterize the per-zone
* "HeapGrowthFactor" computation.
*/
- UnprotectedData<uint64_t> highFrequencyLowLimitBytes_;
- UnprotectedData<uint64_t> highFrequencyHighLimitBytes_;
- UnprotectedData<double> highFrequencyHeapGrowthMax_;
- UnprotectedData<double> highFrequencyHeapGrowthMin_;
+ ActiveThreadData<uint64_t> highFrequencyLowLimitBytes_;
+ ActiveThreadData<uint64_t> highFrequencyHighLimitBytes_;
+ ActiveThreadData<double> highFrequencyHeapGrowthMax_;
+ ActiveThreadData<double> highFrequencyHeapGrowthMin_;
/*
* When not in |highFrequencyGC| mode, this is the global (stored per-zone)
* "HeapGrowthFactor".
*/
- UnprotectedData<double> lowFrequencyHeapGrowth_;
+ ActiveThreadData<double> lowFrequencyHeapGrowth_;
/*
* Doubles the length of IGC slices when in the |highFrequencyGC| mode.
*/
- UnprotectedData<bool> dynamicMarkSliceEnabled_;
+ ActiveThreadData<bool> dynamicMarkSliceEnabled_;
/*
* Controls whether painting can trigger IGC slices.
*/
- UnprotectedData<bool> refreshFrameSlicesEnabled_;
+ ActiveThreadData<bool> refreshFrameSlicesEnabled_;
/*
* Controls the number of empty chunks reserved for future allocation.
*/
UnprotectedData<uint32_t> minEmptyChunkCount_;
UnprotectedData<uint32_t> maxEmptyChunkCount_;
public:
@@ -521,17 +521,17 @@ class GCSchedulingTunables
class GCSchedulingState
{
/*
 * Influences how we schedule and run GCs in several subtle ways. The most
 * important factor is how it controls the "HeapGrowthFactor". The
* growth factor is a measure of how large (as a percentage of the last GC)
* the heap is allowed to grow before we try to schedule another GC.
*/
- UnprotectedData<bool> inHighFrequencyGCMode_;
+ ActiveThreadData<bool> inHighFrequencyGCMode_;
public:
GCSchedulingState()
: inHighFrequencyGCMode_(false)
{}
bool inHighFrequencyGCMode() const { return inHighFrequencyGCMode_; }
@@ -540,29 +540,29 @@ class GCSchedulingState
inHighFrequencyGCMode_ =
tunables.isDynamicHeapGrowthEnabled() && lastGCTime &&
lastGCTime + tunables.highFrequencyThresholdUsec() > currentTime;
}
};
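
For concreteness, a worked illustration of the mode update above, with hypothetical numbers (the real threshold comes from the highFrequencyThresholdUsec_ tunable):

// Illustration only; the values below are made up.
//   highFrequencyThresholdUsec() = 1000000   (one second)
//   lastGCTime                   = 5000000   (previous GC at t = 5.0s)
//   currentTime                  = 5400000   (now, t = 5.4s)
// 5000000 + 1000000 > 5400000 holds, so the two GCs were less than one
// second apart and inHighFrequencyGCMode_ becomes true, provided dynamic
// heap growth is enabled. High-frequency mode in turn selects the more
// aggressive highFrequency* heap growth parameters.
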
template<typename F>
struct Callback {
- UnprotectedData<F> op;
- UnprotectedData<void*> data;
+ ActiveThreadData<F> op;
+ ActiveThreadData<void*> data;
Callback()
: op(nullptr), data(nullptr)
{}
Callback(F op, void* data)
: op(op), data(data)
{}
};
template<typename F>
-using CallbackVector = UnprotectedData<Vector<Callback<F>, 4, SystemAllocPolicy>>;
+using CallbackVector = ActiveThreadData<Vector<Callback<F>, 4, SystemAllocPolicy>>;
template <typename T, typename Iter0, typename Iter1>
class ChainedIter
{
Iter0 iter0_;
Iter1 iter1_;
public:
@@ -974,17 +974,17 @@ class GCRuntime
public:
JSRuntime* const rt;
/* Embedders can use this zone however they wish. */
UnprotectedData<JS::Zone*> systemZone;
// List of all zone groups (protected by the GC lock).
- UnprotectedData<ZoneGroupVector> groups;
+ ActiveThreadData<ZoneGroupVector> groups;
// The unique atoms zone, which has no zone group.
WriteOnceData<Zone*> atomsZone;
private:
UnprotectedData<gcstats::Statistics> stats_;
public:
gcstats::Statistics& stats() { return stats_.ref(); }
@@ -1018,52 +1018,52 @@ class GCRuntime
// is moved back to the emptyChunks pool and scheduled for eventual
// release.
UnprotectedData<ChunkPool> availableChunks_;
// When all arenas in a chunk are used, it is moved to the fullChunks pool
// so as to reduce the cost of operations on the available lists.
UnprotectedData<ChunkPool> fullChunks_;
- UnprotectedData<RootedValueMap> rootsHash;
+ ActiveThreadData<RootedValueMap> rootsHash;
- UnprotectedData<size_t> maxMallocBytes;
+ ActiveThreadData<size_t> maxMallocBytes;
// An incrementing id used to assign unique ids to cells that require one.
mozilla::Atomic<uint64_t, mozilla::ReleaseAcquire> nextCellUniqueId_;
/*
 * Number of committed arenas in all GC chunks, including empty chunks.
*/
mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> numArenasFreeCommitted;
- UnprotectedData<VerifyPreTracer*> verifyPreData;
+ ActiveThreadData<VerifyPreTracer*> verifyPreData;
private:
UnprotectedData<bool> chunkAllocationSinceLastGC;
- UnprotectedData<int64_t> lastGCTime;
+ ActiveThreadData<int64_t> lastGCTime;
- UnprotectedData<JSGCMode> mode;
+ ActiveThreadData<JSGCMode> mode;
mozilla::Atomic<size_t, mozilla::ReleaseAcquire> numActiveZoneIters;
/* During shutdown, the GC needs to clean up every possible object. */
- UnprotectedData<bool> cleanUpEverything;
+ ActiveThreadData<bool> cleanUpEverything;
// Gray marking must be done after all black marking is complete. However,
// we do not have write barriers on XPConnect roots. Therefore, XPConnect
// roots must be accumulated in the first slice of incremental GC. We
// accumulate these roots in each zone's gcGrayRoots vector and then mark
// them later, after black marking is complete for each compartment. This
// accumulation can fail, but in that case we switch to non-incremental GC.
enum class GrayBufferState {
Unused,
Okay,
Failed
};
- UnprotectedData<GrayBufferState> grayBufferState;
+ ActiveThreadData<GrayBufferState> grayBufferState;
bool hasBufferedGrayRoots() const { return grayBufferState == GrayBufferState::Okay; }
// Clear each zone's gray buffers, but do not change the current state.
void resetBufferedGrayRoots() const;
// Reset the gray buffering state to Unused.
void clearBufferedGrayRoots() {
grayBufferState = GrayBufferState::Unused;
@@ -1074,138 +1074,138 @@ class GCRuntime
* The gray bits can become invalid if UnmarkGray overflows the stack. A
* full GC will reset this bit, since it fills in all the gray bits.
*/
UnprotectedData<bool> grayBitsValid;
mozilla::Atomic<JS::gcreason::Reason, mozilla::Relaxed> majorGCTriggerReason;
public:
- UnprotectedData<JS::gcreason::Reason> minorGCTriggerReason;
+ ActiveThreadData<JS::gcreason::Reason> minorGCTriggerReason;
private:
/* Perform full GC if rt->keepAtoms() becomes false. */
- UnprotectedData<bool> fullGCForAtomsRequested_;
+ ActiveThreadData<bool> fullGCForAtomsRequested_;
/* Incremented at the start of every minor GC. */
- UnprotectedData<uint64_t> minorGCNumber;
+ ActiveThreadData<uint64_t> minorGCNumber;
/* Incremented at the start of every major GC. */
- UnprotectedData<uint64_t> majorGCNumber;
+ ActiveThreadData<uint64_t> majorGCNumber;
/* The major GC number at which to release observed type information. */
- UnprotectedData<uint64_t> jitReleaseNumber;
+ ActiveThreadData<uint64_t> jitReleaseNumber;
/* Incremented on every GC slice. */
- UnprotectedData<uint64_t> number;
+ ActiveThreadData<uint64_t> number;
/* The number at the time of the most recent GC's first slice. */
- UnprotectedData<uint64_t> startNumber;
+ ActiveThreadData<uint64_t> startNumber;
/* Whether the currently running GC can finish in multiple slices. */
- UnprotectedData<bool> isIncremental;
+ ActiveThreadData<bool> isIncremental;
 /* Whether all zones are being collected in the first GC slice. */
- UnprotectedData<bool> isFull;
+ ActiveThreadData<bool> isFull;
/* Whether the heap will be compacted at the end of GC. */
- UnprotectedData<bool> isCompacting;
+ ActiveThreadData<bool> isCompacting;
/* The invocation kind of the current GC, taken from the first slice. */
- UnprotectedData<JSGCInvocationKind> invocationKind;
+ ActiveThreadData<JSGCInvocationKind> invocationKind;
/* The initial GC reason, taken from the first slice. */
- UnprotectedData<JS::gcreason::Reason> initialReason;
+ ActiveThreadData<JS::gcreason::Reason> initialReason;
/*
* The current incremental GC phase. This is also used internally in
* non-incremental GC.
*/
- UnprotectedData<State> incrementalState;
+ ActiveThreadOrGCTaskData<State> incrementalState;
/* Indicates that the last incremental slice exhausted the mark stack. */
- UnprotectedData<bool> lastMarkSlice;
+ ActiveThreadData<bool> lastMarkSlice;
/* Whether any sweeping will take place in the separate GC helper thread. */
- UnprotectedData<bool> sweepOnBackgroundThread;
+ ActiveThreadData<bool> sweepOnBackgroundThread;
/* Whether observed type information is being released in the current GC. */
- UnprotectedData<bool> releaseObservedTypes;
+ ActiveThreadData<bool> releaseObservedTypes;
/* Whether any black->gray edges were found during marking. */
- UnprotectedData<BlackGrayEdgeVector> foundBlackGrayEdges;
+ ActiveThreadData<BlackGrayEdgeVector> foundBlackGrayEdges;
/* Singly linked list of zones to be swept in the background. */
- UnprotectedData<ZoneList> backgroundSweepZones;
+ ActiveThreadOrGCTaskData<ZoneList> backgroundSweepZones;
/*
* Free LIFO blocks are transferred to this allocator before being freed on
* the background GC thread after sweeping.
*/
- UnprotectedData<LifoAlloc> blocksToFreeAfterSweeping;
+ ActiveThreadOrGCTaskData<LifoAlloc> blocksToFreeAfterSweeping;
private:
/* Index of current zone group (for stats). */
- UnprotectedData<unsigned> zoneGroupIndex;
+ ActiveThreadData<unsigned> zoneGroupIndex;
/*
* Incremental sweep state.
*/
- UnprotectedData<JS::Zone*> zoneGroups;
- UnprotectedData<JS::Zone*> currentZoneGroup;
- UnprotectedData<bool> sweepingTypes;
- UnprotectedData<unsigned> finalizePhase;
- UnprotectedData<JS::Zone*> sweepZone;
- UnprotectedData<AllocKind> sweepKind;
- UnprotectedData<bool> abortSweepAfterCurrentGroup;
+ ActiveThreadData<JS::Zone*> zoneGroups;
+ ActiveThreadOrGCTaskData<JS::Zone*> currentZoneGroup;
+ ActiveThreadData<bool> sweepingTypes;
+ ActiveThreadData<unsigned> finalizePhase;
+ ActiveThreadData<JS::Zone*> sweepZone;
+ ActiveThreadData<AllocKind> sweepKind;
+ ActiveThreadData<bool> abortSweepAfterCurrentGroup;
/*
* Concurrent sweep infrastructure.
*/
void startTask(GCParallelTask& task, gcstats::Phase phase, AutoLockHelperThreadState& locked);
void joinTask(GCParallelTask& task, gcstats::Phase phase, AutoLockHelperThreadState& locked);
/*
* List head of arenas allocated during the sweep phase.
*/
- UnprotectedData<Arena*> arenasAllocatedDuringSweep;
+ ActiveThreadData<Arena*> arenasAllocatedDuringSweep;
/*
* Incremental compacting state.
*/
- UnprotectedData<bool> startedCompacting;
- UnprotectedData<ZoneList> zonesToMaybeCompact;
- UnprotectedData<Arena*> relocatedArenasToRelease;
+ ActiveThreadData<bool> startedCompacting;
+ ActiveThreadData<ZoneList> zonesToMaybeCompact;
+ ActiveThreadData<Arena*> relocatedArenasToRelease;
#ifdef JS_GC_ZEAL
- UnprotectedData<MarkingValidator*> markingValidator;
+ ActiveThreadData<MarkingValidator*> markingValidator;
#endif
/*
* Indicates that a GC slice has taken place in the middle of an animation
* frame, rather than at the beginning. In this case, the next slice will be
* delayed so that we don't get back-to-back slices.
*/
- UnprotectedData<bool> interFrameGC;
+ ActiveThreadData<bool> interFrameGC;
/* Default budget for incremental GC slice. See js/SliceBudget.h. */
- UnprotectedData<int64_t> defaultTimeBudget_;
+ ActiveThreadData<int64_t> defaultTimeBudget_;
/*
* We disable incremental GC if we encounter a Class with a trace hook
* that does not implement write barriers.
*/
- UnprotectedData<bool> incrementalAllowed;
+ ActiveThreadData<bool> incrementalAllowed;
/*
 * Whether compacting GC is enabled globally.
*/
- UnprotectedData<bool> compactingEnabled;
+ ActiveThreadData<bool> compactingEnabled;
- UnprotectedData<bool> poked;
+ ActiveThreadData<bool> poked;
/*
* These options control the zealousness of the GC. At every allocation,
* nextScheduled is decremented. When it reaches zero we do a full GC.
*
* At this point, if zeal_ is one of the types that trigger periodic
* collection, then nextScheduled is reset to the value of zealFrequency.
* Otherwise, no additional GCs take place.
@@ -1222,26 +1222,26 @@ class GCRuntime
* in jsgc.cpp for more information about this.
*
* zeal_ values from 8 to 10 periodically run different types of
* incremental GC.
*
* zeal_ value 14 performs periodic shrinking collections.
*/
#ifdef JS_GC_ZEAL
- UnprotectedData<uint32_t> zealModeBits;
- UnprotectedData<int> zealFrequency;
- UnprotectedData<int> nextScheduled;
- UnprotectedData<bool> deterministicOnly;
- UnprotectedData<int> incrementalLimit;
+ ActiveThreadData<uint32_t> zealModeBits;
+ ActiveThreadData<int> zealFrequency;
+ ActiveThreadData<int> nextScheduled;
+ ActiveThreadData<bool> deterministicOnly;
+ ActiveThreadData<int> incrementalLimit;
- UnprotectedData<Vector<JSObject*, 0, SystemAllocPolicy>> selectedForMarking;
+ ActiveThreadData<Vector<JSObject*, 0, SystemAllocPolicy>> selectedForMarking;
#endif
- UnprotectedData<bool> fullCompartmentChecks;
+ ActiveThreadData<bool> fullCompartmentChecks;
Callback<JSGCCallback> gcCallback;
Callback<JS::DoCycleCollectionCallback> gcDoCycleCollectionCallback;
Callback<JSObjectsTenuredCallback> tenuredCallback;
CallbackVector<JSFinalizeCallback> finalizeCallbacks;
CallbackVector<JSWeakPointerZoneGroupCallback> updateWeakPointerZoneGroupCallbacks;
CallbackVector<JSWeakPointerCompartmentCallback> updateWeakPointerCompartmentCallbacks;
@@ -1262,36 +1262,36 @@ class GCRuntime
* tracing through black roots and the other is for tracing through gray
* roots. The black/gray distinction is only relevant to the cycle
* collector.
*/
CallbackVector<JSTraceDataOp> blackRootTracers;
Callback<JSTraceDataOp> grayRootTracer;
/* Always preserve JIT code during GCs, for testing. */
- UnprotectedData<bool> alwaysPreserveCode;
+ ActiveThreadData<bool> alwaysPreserveCode;
#ifdef DEBUG
- UnprotectedData<bool> arenasEmptyAtShutdown;
+ ActiveThreadData<bool> arenasEmptyAtShutdown;
#endif
/* Synchronize GC heap access among GC helper threads and main threads. */
friend class js::AutoLockGC;
js::Mutex lock;
BackgroundAllocTask allocTask;
BackgroundDecommitTask decommitTask;
GCHelperState helperState;
/*
* During incremental sweeping, this field temporarily holds the arenas of
* the current AllocKind being swept in order of increasing free space.
*/
- UnprotectedData<SortedArenaList> incrementalSweepList;
+ ActiveThreadData<SortedArenaList> incrementalSweepList;
friend class js::GCHelperState;
friend class MarkingValidator;
friend class AutoTraceSession;
friend class AutoEnterIteration;
};
/* Prevent compartments and zones from being collected during iteration. */
--- a/js/src/gc/Marking.h
+++ b/js/src/gc/Marking.h
@@ -52,23 +52,23 @@ static const size_t INCREMENTAL_MARK_STA
* stack list with the pointer to stack top in GCMarker::unmarkedArenaStackTop.
* GCMarker::delayMarkingChildren adds arenas to the stack as necessary while
* markDelayedChildren pops the arenas from the stack until it empties.
*/
class MarkStack
{
friend class GCMarker;
- UnprotectedData<uintptr_t*> stack_;
- UnprotectedData<uintptr_t*> tos_;
- UnprotectedData<uintptr_t*> end_;
+ ActiveThreadData<uintptr_t*> stack_;
+ ActiveThreadData<uintptr_t*> tos_;
+ ActiveThreadData<uintptr_t*> end_;
// The capacity we start with and reset() to.
- UnprotectedData<size_t> baseCapacity_;
- UnprotectedData<size_t> maxCapacity_;
+ ActiveThreadData<size_t> baseCapacity_;
+ ActiveThreadData<size_t> maxCapacity_;
public:
explicit MarkStack(size_t maxCapacity)
: stack_(nullptr),
tos_(nullptr),
end_(nullptr),
baseCapacity_(0),
maxCapacity_(maxCapacity)
@@ -331,39 +331,39 @@ class GCMarker : public JSTracer
MOZ_MUST_USE bool restoreValueArray(JSObject* obj, void** vpp, void** endp);
void saveValueRanges();
inline void processMarkStackTop(SliceBudget& budget);
/* The mark stack. Pointers in this stack are "gray" in the GC sense. */
MarkStack stack;
/* The color is only applied to objects and functions. */
- UnprotectedData<uint32_t> color;
+ ActiveThreadData<uint32_t> color;
/* Pointer to the top of the stack of arenas we are delaying marking on. */
- UnprotectedData<js::gc::Arena*> unmarkedArenaStackTop;
+ ActiveThreadData<js::gc::Arena*> unmarkedArenaStackTop;
/*
* If the weakKeys table OOMs, disable the linear algorithm and fall back
* to iterating until the next GC.
*/
- UnprotectedData<bool> linearWeakMarkingDisabled_;
+ ActiveThreadData<bool> linearWeakMarkingDisabled_;
#ifdef DEBUG
/* Count of arenas that are currently in the stack. */
- UnprotectedData<size_t> markLaterArenas;
+ ActiveThreadData<size_t> markLaterArenas;
/* Assert that start and stop are called with correct ordering. */
- UnprotectedData<bool> started;
+ ActiveThreadData<bool> started;
/*
* If this is true, all marked objects must belong to a compartment being
* GCed. This is used to look for compartment bugs.
*/
- UnprotectedData<bool> strictCompartmentChecking;
+ ActiveThreadData<bool> strictCompartmentChecking;
#endif // DEBUG
};
#ifdef DEBUG
// Return true if this trace is happening on behalf of gray buffering during
// the marking phase of incremental GC.
bool
IsBufferGrayRootsTracer(JSTracer* trc);
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -336,17 +336,17 @@ struct Zone : public JS::shadow::Zone,
js::ZoneGroupData<mozilla::LinkedList<js::WeakMapBase>> gcWeakMapList_;
public:
mozilla::LinkedList<js::WeakMapBase>& gcWeakMapList() { return gcWeakMapList_.ref(); }
typedef js::Vector<JSCompartment*, 1, js::SystemAllocPolicy> CompartmentVector;
private:
// The set of compartments in this zone.
- js::UnprotectedData<CompartmentVector> compartments_;
+ js::ActiveThreadOrGCTaskData<CompartmentVector> compartments_;
public:
CompartmentVector& compartments() { return compartments_.ref(); }
// This zone's gray roots.
typedef js::Vector<js::gc::Cell*, 0, js::SystemAllocPolicy> GrayRootVector;
private:
js::ZoneGroupData<GrayRootVector> gcGrayRoots_;
public:
@@ -405,17 +405,17 @@ struct Zone : public JS::shadow::Zone,
bool addTypeDescrObject(JSContext* cx, HandleObject obj);
// Malloc counter to measure memory pressure for GC scheduling. It runs from
// gcMaxMallocBytes down to zero. This counter should be used only when it's
// not possible to know the size of a free.
mozilla::Atomic<ptrdiff_t, mozilla::ReleaseAcquire> gcMallocBytes;
// GC trigger threshold for allocations on the C heap.
- js::UnprotectedData<size_t> gcMaxMallocBytes;
+ js::ActiveThreadData<size_t> gcMaxMallocBytes;
// Whether a GC has been triggered as a result of gcMallocBytes falling
// below zero.
//
// This should be a bool, but Atomic only supports 32-bit and pointer-sized
// types.
mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> gcMallocGCTriggered;
@@ -577,17 +577,17 @@ struct Zone : public JS::shadow::Zone,
void setKeepShapeTables(bool b) {
keepShapeTables_ = b;
}
private:
js::ZoneGroupData<js::jit::JitZone*> jitZone_;
js::UnprotectedData<GCState> gcState_;
- js::UnprotectedData<bool> gcScheduled_;
+ js::ActiveThreadData<bool> gcScheduled_;
js::ZoneGroupData<bool> gcPreserveCode_;
js::ZoneGroupData<bool> jitUsingBarriers_;
js::ZoneGroupData<bool> keepShapeTables_;
// Allow zones to be linked into a list
friend class js::gc::ZoneList;
static Zone * const NotOnList;
js::ZoneGroupOrGCTaskData<Zone*> listNext_;
--- a/js/src/gc/ZoneGroup.cpp
+++ b/js/src/gc/ZoneGroup.cpp
@@ -12,17 +12,17 @@
namespace js {
ZoneGroup::ZoneGroup(JSRuntime* runtime)
: runtime(runtime),
context(TlsContext.get()),
enterCount(this, 1),
zones_(),
- nursery_(this),
+ nursery_(this, this),
storeBuffer_(this, runtime, nursery()),
blocksToFreeAfterMinorGC((size_t) JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
caches_(this),
#ifdef DEBUG
ionBailAfter_(this, 0),
#endif
jitZoneGroup(this, nullptr),
debuggerList_(this),
--- a/js/src/gc/ZoneGroup.h
+++ b/js/src/gc/ZoneGroup.h
@@ -49,35 +49,35 @@ class ZoneGroup
ZoneGroupData<size_t> enterCount;
void enter();
void leave();
bool ownedByCurrentThread();
// All zones in the group.
private:
- UnprotectedData<ZoneVector> zones_;
+ ActiveThreadOrGCTaskData<ZoneVector> zones_;
public:
ZoneVector& zones() { return zones_.ref(); }
explicit ZoneGroup(JSRuntime* runtime);
~ZoneGroup();
bool init(size_t maxNurseryBytes);
private:
- UnprotectedData<Nursery> nursery_;
+ ZoneGroupData<Nursery> nursery_;
ZoneGroupData<gc::StoreBuffer> storeBuffer_;
public:
Nursery& nursery() { return nursery_.ref(); }
gc::StoreBuffer& storeBuffer() { return storeBuffer_.ref(); }
// Free LIFO blocks are transferred to this allocator before being freed
// after minor GC.
- UnprotectedData<LifoAlloc> blocksToFreeAfterMinorGC;
+ ActiveThreadData<LifoAlloc> blocksToFreeAfterMinorGC;
void minorGC(JS::gcreason::Reason reason,
gcstats::Phase phase = gcstats::PHASE_MINOR_GC) JS_HAZ_GC_CALL;
void evictNursery(JS::gcreason::Reason reason = JS::gcreason::EVICT_NURSERY) {
minorGC(reason, gcstats::PHASE_EVICT_NURSERY);
}
void freeAllLifoBlocksAfterMinorGC(LifoAlloc* lifo);
--- a/js/src/jit/JitCompartment.h
+++ b/js/src/jit/JitCompartment.h
@@ -82,20 +82,20 @@ class PatchableBackedge : public InlineL
class JitRuntime
{
private:
friend class JitCompartment;
// Executable allocator for all code except wasm code and Ion code with
// patchable backedges (see below).
- UnprotectedData<ExecutableAllocator> execAlloc_;
+ ActiveThreadData<ExecutableAllocator> execAlloc_;
// Executable allocator for Ion scripts with patchable backedges.
- UnprotectedData<ExecutableAllocator> backedgeExecAlloc_;
+ ActiveThreadData<ExecutableAllocator> backedgeExecAlloc_;
// Shared exception-handler tail.
ExclusiveAccessLockWriteOnceData<JitCode*> exceptionTail_;
// Shared post-bailout-handler tail.
ExclusiveAccessLockWriteOnceData<JitCode*> bailoutTail_;
// Shared profiler exit frame tail.
--- a/js/src/jsgc.h
+++ b/js/src/jsgc.h
@@ -646,17 +646,17 @@ class ArenaLists
BackgroundFinalizeState;
/* The current background finalization state, accessed atomically. */
UnprotectedData<AllAllocKindArray<BackgroundFinalizeState>> backgroundFinalizeState_;
BackgroundFinalizeState& backgroundFinalizeState(AllocKind i) { return backgroundFinalizeState_.ref()[i]; }
const BackgroundFinalizeState& backgroundFinalizeState(AllocKind i) const { return backgroundFinalizeState_.ref()[i]; }
/* For each arena kind, a list of arenas remaining to be swept. */
- UnprotectedData<AllAllocKindArray<Arena*>> arenaListsToSweep_;
+ ActiveThreadOrGCTaskData<AllAllocKindArray<Arena*>> arenaListsToSweep_;
Arena*& arenaListsToSweep(AllocKind i) { return arenaListsToSweep_.ref()[i]; }
Arena* arenaListsToSweep(AllocKind i) const { return arenaListsToSweep_.ref()[i]; }
/* During incremental sweeping, a list of the arenas already swept. */
ZoneGroupOrGCTaskData<AllocKind> incrementalSweptArenaKind;
ZoneGroupOrGCTaskData<ArenaList> incrementalSweptArenas;
// Arena lists which have yet to be swept, but need additional foreground
@@ -872,17 +872,17 @@ class GCHelperState
JSRuntime* const rt;
// Condvar for notifying the main thread when work has finished. This is
// associated with the runtime's GC lock --- the worker thread state
// condvars can't be used here due to lock ordering issues.
js::ConditionVariable done;
// Activity for the helper to do, protected by the GC lock.
- UnprotectedData<State> state_;
+ ActiveThreadOrGCTaskData<State> state_;
// Whether work is being performed on some thread.
GCLockData<bool> hasThread;
void startBackgroundThread(State newState, const AutoLockGC& lock,
const AutoLockHelperThreadState& helperLock);
void waitForBackgroundThread(js::AutoLockGC& lock);
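
The lock/condvar handshake described above is the standard pattern; a generic, self-contained sketch using std:: types rather than the js:: wrappers (names here are illustrative, not the real GCHelperState methods):

#include <condition_variable>
#include <mutex>

struct HelperHandshakeSketch
{
    std::mutex gcLock;               // stands in for the runtime's GC lock
    std::condition_variable done;    // signalled when the helper finishes
    bool hasThread = false;          // whether a helper is currently working

    void waitForHelper() {
        std::unique_lock<std::mutex> guard(gcLock);
        // wait() atomically releases the lock while sleeping and
        // reacquires it before re-checking the predicate.
        done.wait(guard, [this] { return !hasThread; });
    }

    void helperFinished() {
        {
            std::lock_guard<std::mutex> guard(gcLock);
            hasThread = false;
        }
        done.notify_all();
    }
};
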
@@ -944,17 +944,17 @@ class GCParallelTask
enum TaskState {
NotStarted,
Dispatched,
Finished,
};
UnprotectedData<TaskState> state;
// Amount of time this task took to execute.
- UnprotectedData<mozilla::TimeDuration> duration_;
+ ActiveThreadOrGCTaskData<mozilla::TimeDuration> duration_;
explicit GCParallelTask(const GCParallelTask&) = delete;
protected:
// A flag to signal a request for early completion of the off-thread task.
mozilla::Atomic<bool> cancel_;
virtual void run() = 0;
--- a/js/src/threading/ProtectedData.cpp
+++ b/js/src/threading/ProtectedData.cpp
@@ -31,16 +31,37 @@ OnBackgroundThread()
return true;
}
return false;
}
template <AllowedBackgroundThread Background>
void
+CheckActiveThread<Background>::check() const
+{
+ // When interrupting a thread on Windows, changes are made to the runtime
+ // and active thread's state from another thread while the active thread is
+ // suspended. We need a way to mark these accesses as being tantamount to
+ // accesses by the active thread. See bug 1323066.
+#ifndef XP_WIN
+ if (OnBackgroundThread<Background>())
+ return;
+
+ JSContext* cx = TlsContext.get();
+ MOZ_ASSERT(cx == cx->runtime()->activeContext);
+#endif // XP_WIN
+}
+
+template class CheckActiveThread<AllowedBackgroundThread::None>;
+template class CheckActiveThread<AllowedBackgroundThread::GCTask>;
+template class CheckActiveThread<AllowedBackgroundThread::IonCompile>;
+
+template <AllowedBackgroundThread Background>
+void
CheckZoneGroup<Background>::check() const
{
if (OnBackgroundThread<Background>())
return;
if (group) {
// This check is disabled for now because helper thread parse tasks
// access data in the same zone group that the single main thread is
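
The new CheckActiveThread::check() above is driven by the ProtectedData wrapper on every access. A minimal sketch of that mechanism, assuming a hypothetical wrapper class (the real ProtectedDataNoCheckArgs may differ in detail):

// Sketch only, not the actual ProtectedData implementation.
template <typename Check, typename T>
class CheckedDataSketch
{
    Check check_;
    T value_;

  public:
    // All reads and writes funnel through ref(), which asserts in debug
    // builds that the current thread may touch the data.
    T& ref() {
#ifdef DEBUG
        check_.check();
#endif
        return value_;
    }
    operator T&() { return ref(); }
    CheckedDataSketch& operator=(const T& v) { ref() = v; return *this; }
};
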
--- a/js/src/threading/ProtectedData.h
+++ b/js/src/threading/ProtectedData.h
@@ -197,16 +197,38 @@ enum class AllowedBackgroundThread
{
None,
GCTask,
IonCompile,
GCTaskOrIonCompile
};
template <AllowedBackgroundThread Background>
+class CheckActiveThread
+{
+ public:
+ void check() const;
+};
+
+// Data which may only be accessed by the runtime's cooperatively scheduled
+// active thread.
+template <typename T>
+using ActiveThreadData =
+ ProtectedDataNoCheckArgs<CheckActiveThread<AllowedBackgroundThread::None>, T>;
+
+// Data which may only be accessed by the runtime's cooperatively scheduled
+// active thread, or by various helper thread tasks.
+template <typename T>
+using ActiveThreadOrGCTaskData =
+ ProtectedDataNoCheckArgs<CheckActiveThread<AllowedBackgroundThread::GCTask>, T>;
+template <typename T>
+using ActiveThreadOrIonCompileData =
+ ProtectedDataNoCheckArgs<CheckActiveThread<AllowedBackgroundThread::IonCompile>, T>;
+
+template <AllowedBackgroundThread Background>
class CheckZoneGroup
{
#ifdef DEBUG
ZoneGroup* group;
public:
explicit CheckZoneGroup(ZoneGroup* group) : group(group) {}
void check() const;
@@ -218,29 +240,23 @@ class CheckZoneGroup
// Data which may only be accessed by threads with exclusive access to the
// associated zone group.
template <typename T>
using ZoneGroupData =
ProtectedDataZoneGroupArg<CheckZoneGroup<AllowedBackgroundThread::None>, T>;
// Data which may only be accessed by threads with exclusive access to the
-// associated zone group, or by GC helper thread tasks.
+// associated zone group, or by various helper thread tasks.
template <typename T>
using ZoneGroupOrGCTaskData =
ProtectedDataZoneGroupArg<CheckZoneGroup<AllowedBackgroundThread::GCTask>, T>;
-
-// Data which may only be accessed by threads with exclusive access to the
-// associated zone group, or by Ion compilation helper thread tasks.
template <typename T>
using ZoneGroupOrIonCompileData =
ProtectedDataZoneGroupArg<CheckZoneGroup<AllowedBackgroundThread::IonCompile>, T>;
-
-// Data which may only be accessed by threads with exclusive access to the
-// associated zone group, or by either GC helper or Ion compilation tasks.
template <typename T>
using ZoneGroupOrGCTaskOrIonCompileData =
ProtectedDataZoneGroupArg<CheckZoneGroup<AllowedBackgroundThread::GCTaskOrIonCompile>, T>;
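
Declaring fields with the new active-thread aliases mirrors the zone-group variants; a hypothetical example class, for illustration only:

// Illustrative only; not a class from the tree.
class ExampleGCState
{
    // Readable and writable only from the runtime's active thread.
    js::ActiveThreadData<bool> scheduled_;

    // Also accessible from GC helper tasks running while the active
    // thread is blocked on them.
    js::ActiveThreadOrGCTaskData<size_t> pendingBytes_;

  public:
    bool scheduled() const { return scheduled_; }
    void setScheduled(bool b) { scheduled_ = b; }
};
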
// Runtime wide locks which might protect some data.
enum class GlobalLock
{
GCLock,
--- a/js/src/vm/HelperThreads.cpp
+++ b/js/src/vm/HelperThreads.cpp
@@ -1178,20 +1178,20 @@ js::GCParallelTask::runFromMainThread(JS
run();
duration_ = mozilla::TimeStamp::Now() - timeStart;
}
void
js::GCParallelTask::runFromHelperThread(AutoLockHelperThreadState& locked)
{
JSContext cx(runtime(), JS::ContextOptions());
+ gc::AutoSetThreadIsPerformingGC performingGC;
{
AutoUnlockHelperThreadState parallelSection(locked);
- gc::AutoSetThreadIsPerformingGC performingGC;
mozilla::TimeStamp timeStart = mozilla::TimeStamp::Now();
cx.heapState = JS::HeapState::MajorCollecting;
run();
cx.heapState = JS::HeapState::Idle;
duration_ = mozilla::TimeStamp::Now() - timeStart;
}
state = Finished;
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -90,16 +90,17 @@ ReturnZeroSize(const void* p)
return 0;
}
JSRuntime::JSRuntime(JSRuntime* parentRuntime)
: parentRuntime(parentRuntime),
#ifdef DEBUG
updateChildRuntimeCount(parentRuntime),
#endif
+ activeContext(nullptr),
profilerSampleBufferGen_(0),
profilerSampleBufferLapCount_(1),
telemetryCallback(nullptr),
getIncumbentGlobalCallback(nullptr),
enqueuePromiseJobCallback(nullptr),
enqueuePromiseJobCallbackData(nullptr),
promiseRejectionTrackerCallback(nullptr),
promiseRejectionTrackerCallbackData(nullptr),
@@ -186,16 +187,18 @@ JSRuntime::JSRuntime(JSRuntime* parentRu
}
bool
JSRuntime::init(JSContext* cx, uint32_t maxbytes, uint32_t maxNurseryBytes)
{
if (CanUseExtraThreads() && !EnsureHelperThreadsInitialized())
return false;
+ activeContext = cx;
+
singletonContext = cx;
defaultFreeOp_ = js_new<js::FreeOp>(this);
if (!defaultFreeOp_)
return false;
ScopedJSDeletePtr<ZoneGroup> zoneGroup(js_new<ZoneGroup>(this));
if (!zoneGroup)
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -292,16 +292,21 @@ struct JSRuntime : public js::MallocProv
if (parent_)
parent_->childRuntimeCount--;
}
};
AutoUpdateChildRuntimeCount updateChildRuntimeCount;
#endif
+ // The context for the thread which currently has exclusive access to most
+ // contents of the runtime. When execution on the runtime is cooperatively
+ // scheduled, this is the thread which is currently running.
+ mozilla::Atomic<JSContext*, mozilla::ReleaseAcquire> activeContext;
+
/*
* The profiler sampler generation after the latest sample.
*
 * The lapCount indicates the largest number of 'laps'
* (wrapping from high to low) that occurred when writing entries
* into the sample buffer. All JitcodeGlobalMap entries referenced
* from a given sample are assigned the generation of the sample buffer
* at the START of the run. If multiple laps occur, then some entries
@@ -349,88 +354,88 @@ struct JSRuntime : public js::MallocProv
break;
if (profilerSampleBufferLapCount_.compareExchange(curLapCount, lapCount))
break;
}
}
/* Call this to accumulate telemetry data. */
- js::UnprotectedData<JSAccumulateTelemetryDataCallback> telemetryCallback;
+ js::ActiveThreadData<JSAccumulateTelemetryDataCallback> telemetryCallback;
public:
// Accumulates data for Firefox telemetry. |id| is the ID of a JS_TELEMETRY_*
// histogram. |key| provides an additional key to identify the histogram.
// |sample| is the data to add to the histogram.
void addTelemetry(int id, uint32_t sample, const char* key = nullptr);
void setTelemetryCallback(JSRuntime* rt, JSAccumulateTelemetryDataCallback callback);
public:
- js::UnprotectedData<JSGetIncumbentGlobalCallback> getIncumbentGlobalCallback;
- js::UnprotectedData<JSEnqueuePromiseJobCallback> enqueuePromiseJobCallback;
- js::UnprotectedData<void*> enqueuePromiseJobCallbackData;
+ js::ActiveThreadData<JSGetIncumbentGlobalCallback> getIncumbentGlobalCallback;
+ js::ActiveThreadData<JSEnqueuePromiseJobCallback> enqueuePromiseJobCallback;
+ js::ActiveThreadData<void*> enqueuePromiseJobCallbackData;
- js::UnprotectedData<JSPromiseRejectionTrackerCallback> promiseRejectionTrackerCallback;
- js::UnprotectedData<void*> promiseRejectionTrackerCallbackData;
+ js::ActiveThreadData<JSPromiseRejectionTrackerCallback> promiseRejectionTrackerCallback;
+ js::ActiveThreadData<void*> promiseRejectionTrackerCallbackData;
- js::UnprotectedData<JS::StartAsyncTaskCallback> startAsyncTaskCallback;
+ js::ActiveThreadData<JS::StartAsyncTaskCallback> startAsyncTaskCallback;
js::UnprotectedData<JS::FinishAsyncTaskCallback> finishAsyncTaskCallback;
js::ExclusiveData<js::PromiseTaskPtrVector> promiseTasksToDestroy;
JSObject* getIncumbentGlobal(JSContext* cx);
bool enqueuePromiseJob(JSContext* cx, js::HandleFunction job, js::HandleObject promise,
js::HandleObject incumbentGlobal);
void addUnhandledRejectedPromise(JSContext* cx, js::HandleObject promise);
void removeUnhandledRejectedPromise(JSContext* cx, js::HandleObject promise);
/* Had an out-of-memory error which did not populate an exception. */
mozilla::Atomic<bool> hadOutOfMemory;
/*
* Allow relazifying functions in compartments that are active. This is
* only used by the relazifyFunctions() testing function.
*/
- js::UnprotectedData<bool> allowRelazificationForTesting;
+ js::ActiveThreadData<bool> allowRelazificationForTesting;
/* Compartment destroy callback. */
- js::UnprotectedData<JSDestroyCompartmentCallback> destroyCompartmentCallback;
+ js::ActiveThreadData<JSDestroyCompartmentCallback> destroyCompartmentCallback;
/* Compartment memory reporting callback. */
- js::UnprotectedData<JSSizeOfIncludingThisCompartmentCallback> sizeOfIncludingThisCompartmentCallback;
+ js::ActiveThreadData<JSSizeOfIncludingThisCompartmentCallback> sizeOfIncludingThisCompartmentCallback;
/* Zone destroy callback. */
- js::UnprotectedData<JSZoneCallback> destroyZoneCallback;
+ js::ActiveThreadData<JSZoneCallback> destroyZoneCallback;
/* Zone sweep callback. */
- js::UnprotectedData<JSZoneCallback> sweepZoneCallback;
+ js::ActiveThreadData<JSZoneCallback> sweepZoneCallback;
/* Call this to get the name of a compartment. */
- js::UnprotectedData<JSCompartmentNameCallback> compartmentNameCallback;
+ js::ActiveThreadData<JSCompartmentNameCallback> compartmentNameCallback;
/* Callback for doing memory reporting on external strings. */
- js::UnprotectedData<JSExternalStringSizeofCallback> externalStringSizeofCallback;
+ js::ActiveThreadData<JSExternalStringSizeofCallback> externalStringSizeofCallback;
- js::UnprotectedData<mozilla::UniquePtr<js::SourceHook>> sourceHook;
+ js::ActiveThreadData<mozilla::UniquePtr<js::SourceHook>> sourceHook;
- js::UnprotectedData<const JSSecurityCallbacks*> securityCallbacks;
- js::UnprotectedData<const js::DOMCallbacks*> DOMcallbacks;
- js::UnprotectedData<JSDestroyPrincipalsOp> destroyPrincipals;
- js::UnprotectedData<JSReadPrincipalsOp> readPrincipals;
+ js::ActiveThreadData<const JSSecurityCallbacks*> securityCallbacks;
+ js::ActiveThreadData<const js::DOMCallbacks*> DOMcallbacks;
+ js::ActiveThreadData<JSDestroyPrincipalsOp> destroyPrincipals;
+ js::ActiveThreadData<JSReadPrincipalsOp> readPrincipals;
/* Optional warning reporter. */
- js::UnprotectedData<JS::WarningReporter> warningReporter;
+ js::ActiveThreadData<JS::WarningReporter> warningReporter;
private:
/* Gecko profiling metadata */
js::UnprotectedData<js::GeckoProfiler> geckoProfiler_;
public:
js::GeckoProfiler& geckoProfiler() { return geckoProfiler_.ref(); }
// Heap GC roots for PersistentRooted pointers.
- js::UnprotectedData<mozilla::EnumeratedArray<JS::RootKind, JS::RootKind::Limit,
+ js::ActiveThreadData<mozilla::EnumeratedArray<JS::RootKind, JS::RootKind::Limit,
mozilla::LinkedList<JS::PersistentRooted<void*>>>> heapRoots;
void tracePersistentRoots(JSTracer* trc);
void finishPersistentRoots();
void finishRoots();
public:
@@ -440,40 +445,40 @@ struct JSRuntime : public js::MallocProv
js::UnprotectedData<JS::AsmJSCacheOps> asmJSCacheOps;
private:
js::UnprotectedData<const JSPrincipals*> trustedPrincipals_;
public:
void setTrustedPrincipals(const JSPrincipals* p) { trustedPrincipals_ = p; }
const JSPrincipals* trustedPrincipals() const { return trustedPrincipals_; }
- js::UnprotectedData<const JSWrapObjectCallbacks*> wrapObjectCallbacks;
- js::UnprotectedData<js::PreserveWrapperCallback> preserveWrapperCallback;
+ js::ActiveThreadData<const JSWrapObjectCallbacks*> wrapObjectCallbacks;
+ js::ActiveThreadData<js::PreserveWrapperCallback> preserveWrapperCallback;
- js::UnprotectedData<js::ScriptEnvironmentPreparer*> scriptEnvironmentPreparer;
+ js::ActiveThreadData<js::ScriptEnvironmentPreparer*> scriptEnvironmentPreparer;
- js::UnprotectedData<js::CTypesActivityCallback> ctypesActivityCallback;
+ js::ActiveThreadData<js::CTypesActivityCallback> ctypesActivityCallback;
private:
- js::UnprotectedData<const js::Class*> windowProxyClass_;
+ js::WriteOnceData<const js::Class*> windowProxyClass_;
public:
const js::Class* maybeWindowProxyClass() const {
return windowProxyClass_;
}
void setWindowProxyClass(const js::Class* clasp) {
windowProxyClass_ = clasp;
}
private:
/*
* Head of circular list of all enabled Debuggers that have
* onNewGlobalObject handler methods established.
*/
- js::UnprotectedData<JSCList> onNewGlobalObjectWatchers_;
+ js::ActiveThreadData<JSCList> onNewGlobalObjectWatchers_;
public:
JSCList& onNewGlobalObjectWatchers() { return onNewGlobalObjectWatchers_.ref(); }
private:
/*
* Lock taken when using per-runtime or per-zone data that could otherwise
* be accessed simultaneously by multiple threads.
*
@@ -503,26 +508,26 @@ struct JSRuntime : public js::MallocProv
return (!exclusiveThreadsPresent() && mainThreadHasExclusiveAccess) ||
exclusiveAccessLock.ownedByCurrentThread();
}
#endif
// How many compartments there are across all zones. This number includes
// off-main-thread context compartments, so it isn't necessarily equal to the
// number of compartments visited by CompartmentsIter.
- js::UnprotectedData<size_t> numCompartments;
+ js::ActiveThreadData<size_t> numCompartments;
/* Locale-specific callbacks for string conversion. */
- js::UnprotectedData<const JSLocaleCallbacks*> localeCallbacks;
+ js::ActiveThreadData<const JSLocaleCallbacks*> localeCallbacks;
/* Default locale for Internationalization API */
- js::UnprotectedData<char*> defaultLocale;
+ js::ActiveThreadData<char*> defaultLocale;
/* Default JSVersion. */
- js::UnprotectedData<JSVersion> defaultVersion_;
+ js::ActiveThreadData<JSVersion> defaultVersion_;
private:
/* Code coverage output. */
js::UnprotectedData<js::coverage::LCovRuntime> lcovOutput_;
public:
js::coverage::LCovRuntime& lcovOutput() { return lcovOutput_.ref(); }
private:
@@ -775,17 +780,17 @@ struct JSRuntime : public js::MallocProv
bool transformToPermanentAtoms(JSContext* cx);
// Cached well-known symbols (ES6 rev 24 6.1.5.1). Like permanent atoms,
// these are shared with the parentRuntime, if any.
js::WriteOnceData<js::WellKnownSymbols*> wellKnownSymbols;
/* Shared Intl data for this runtime. */
- js::UnprotectedData<js::SharedIntlData> sharedIntlData;
+ js::ActiveThreadData<js::SharedIntlData> sharedIntlData;
void traceSharedIntlData(JSTracer* trc);
// Table of bytecode and other data that may be shared across scripts
// within the runtime. This may be modified by threads using
// AutoLockForExclusiveAccess.
private:
js::ExclusiveAccessLockData<js::ScriptDataTable> scriptDataTable_;
@@ -846,17 +851,17 @@ struct JSRuntime : public js::MallocProv
void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf, JS::RuntimeSizes* runtime);
private:
// Settings for how helper threads can be used.
mozilla::Atomic<bool> offthreadIonCompilationEnabled_;
mozilla::Atomic<bool> parallelParsingEnabled_;
- js::UnprotectedData<bool> autoWritableJitCodeActive_;
+ js::ActiveThreadData<bool> autoWritableJitCodeActive_;
public:
// Note: these values may be toggled dynamically (in response to about:config
// prefs changing).
void setOffthreadIonCompilationEnabled(bool value) {
offthreadIonCompilationEnabled_ = value;
}
@@ -871,22 +876,22 @@ struct JSRuntime : public js::MallocProv
}
void toggleAutoWritableJitCodeActive(bool b) {
MOZ_ASSERT(autoWritableJitCodeActive_ != b, "AutoWritableJitCode should not be nested.");
autoWritableJitCodeActive_ = b;
}
/* See comment for JS::SetLargeAllocationFailureCallback in jsapi.h. */
- js::UnprotectedData<JS::LargeAllocationFailureCallback> largeAllocationFailureCallback;
- js::UnprotectedData<void*> largeAllocationFailureCallbackData;
+ js::ActiveThreadData<JS::LargeAllocationFailureCallback> largeAllocationFailureCallback;
+ js::ActiveThreadData<void*> largeAllocationFailureCallbackData;
/* See comment for JS::SetOutOfMemoryCallback in jsapi.h. */
- js::UnprotectedData<JS::OutOfMemoryCallback> oomCallback;
- js::UnprotectedData<void*> oomCallbackData;
+ js::ActiveThreadData<JS::OutOfMemoryCallback> oomCallback;
+ js::ActiveThreadData<void*> oomCallbackData;
/*
* These variations of malloc/calloc/realloc will call the
* large-allocation-failure callback on OOM and retry the allocation.
*/
static const unsigned LARGE_ALLOCATION = 25 * 1024 * 1024;
template <typename T>
@@ -914,23 +919,23 @@ struct JSRuntime : public js::MallocProv
}
return static_cast<T*>(onOutOfMemoryCanGC(js::AllocFunction::Realloc, bytes, p));
}
/*
* Debugger.Memory functions like takeCensus use this embedding-provided
* function to assess the size of malloc'd blocks of memory.
*/
- js::UnprotectedData<mozilla::MallocSizeOf> debuggerMallocSizeOf;
+ js::ActiveThreadData<mozilla::MallocSizeOf> debuggerMallocSizeOf;
/* Last time at which an animation was played for this runtime. */
mozilla::Atomic<int64_t> lastAnimationTime;
private:
- js::UnprotectedData<js::PerformanceMonitoring> performanceMonitoring_;
+ js::ActiveThreadData<js::PerformanceMonitoring> performanceMonitoring_;
public:
js::PerformanceMonitoring& performanceMonitoring() { return performanceMonitoring_.ref(); }
private:
/* List of Ion compilations waiting to get linked. */
typedef mozilla::LinkedList<js::jit::IonBuilder> IonBuilderList;
js::HelperThreadLockData<IonBuilderList> ionLazyLinkList_;