Merge mozilla-inbound to mozilla-central. a=merge
author: Daniel Varga <dvarga@mozilla.com>
date: Thu, 11 Apr 2019 00:54:55 +0300
changeset: 468832:30ca3c3abfe63d2d5923e28dcc565972ea4cf811
parent: 468809:9d3dbe3fef2671cf9cad2383415d412ddc6a2aa2 (current diff)
parent: 468831:f3fcf307ee8091d8d3c03af13818099a90c2db21 (diff)
child: 468860:58239a04c1951189c63f4c3f4542422fd1905d19
child: 468886:606f85641d0b62ec89b2ee0e9084bd64359cb578
push id: 35851
push user: dvarga@mozilla.com
push date: Wed, 10 Apr 2019 21:56:12 +0000
treeherder: mozilla-central@30ca3c3abfe6
reviewers: merge
milestone: 68.0a1
first release with: 68.0a1 nightlies (build 20190410215612, changeset 30ca3c3abfe6) on linux32, linux64, mac, win32, win64
files:
js/src/builtin/TestingFunctions.cpp
js/src/frontend/BytecodeControlStructures.cpp
js/src/frontend/BytecodeEmitter.cpp
js/src/frontend/BytecodeEmitter.h
js/src/frontend/CForEmitter.cpp
js/src/frontend/DoWhileEmitter.cpp
js/src/frontend/EmitterScope.cpp
js/src/frontend/ExpressionStatementEmitter.cpp
js/src/frontend/ForInEmitter.cpp
js/src/frontend/ForOfEmitter.cpp
js/src/frontend/ForOfLoopControl.cpp
js/src/frontend/FunctionEmitter.cpp
js/src/frontend/IfEmitter.cpp
js/src/frontend/LabelEmitter.cpp
js/src/frontend/ObjectEmitter.cpp
js/src/frontend/SwitchEmitter.cpp
js/src/frontend/TryEmitter.cpp
js/src/frontend/WhileEmitter.cpp
js/src/vm/JSScript.cpp
--- a/dom/svg/SVGAnimatedLength.h
+++ b/dom/svg/SVGAnimatedLength.h
@@ -28,17 +28,17 @@ class SMILValue;
 namespace dom {
 class DOMSVGAnimatedLength;
 class DOMSVGLength;
 class SVGAnimationElement;
 class SVGViewportElement;
 
 class UserSpaceMetrics {
  public:
-  virtual ~UserSpaceMetrics() {}
+  virtual ~UserSpaceMetrics() = default;
 
   virtual float GetEmLength() const = 0;
   virtual float GetExLength() const = 0;
   virtual float GetAxisLength(uint8_t aCtxType) const = 0;
 };
 
 class UserSpaceMetricsWithSize : public UserSpaceMetrics {
  public:
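
Aside (not part of the patch): the "{}" to "= default" conversions throughout this changeset are mechanical cleanups. For the virtual destructors the gain is mostly consistency and readability; for default constructors such as SVGAnimatedLengthList's below, = default also lets the compiler keep the type trivially default-constructible, which a user-provided empty body rules out. A minimal standalone C++ sketch:

#include <type_traits>

struct EmptyBody {
  EmptyBody() {}  // user-provided: no longer trivially default-constructible
};

struct Defaulted {
  Defaulted() = default;  // compiler-generated: triviality is preserved
};

static_assert(!std::is_trivially_default_constructible<EmptyBody>::value,
              "an empty user-provided body makes the type non-trivial");
static_assert(std::is_trivially_default_constructible<Defaulted>::value,
              "a defaulted default constructor keeps the type trivial");
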
--- a/dom/svg/SVGAnimatedLengthList.h
+++ b/dom/svg/SVGAnimatedLengthList.h
@@ -37,17 +37,17 @@ class SVGElement;
  * consumers don't need to concern themselves with that.
  */
 class SVGAnimatedLengthList {
   // friends so that they can get write access to mBaseVal
   friend class dom::DOMSVGLength;
   friend class dom::DOMSVGLengthList;
 
  public:
-  SVGAnimatedLengthList() {}
+  SVGAnimatedLengthList() = default;
 
   /**
    * Because it's so important that mBaseVal and its DOMSVGLengthList wrapper
    * (if any) be kept in sync (see the comment in
    * DOMSVGAnimatedLengthList::InternalBaseValListWillChangeTo), this method
    * returns a const reference. Only our friend classes may get mutable
    * references to mBaseVal.
    */
--- a/dom/svg/SVGAnimatedPathSegList.h
+++ b/dom/svg/SVGAnimatedPathSegList.h
@@ -39,17 +39,17 @@ class SVGElement;
  * to know or worry about wrappers (or forget about them!) for the most part.
  */
 class SVGAnimatedPathSegList final {
   // friends so that they can get write access to mBaseVal and mAnimVal
   friend class DOMSVGPathSeg;
   friend class DOMSVGPathSegList;
 
  public:
-  SVGAnimatedPathSegList() {}
+  SVGAnimatedPathSegList() = default;
 
   /**
    * Because it's so important that mBaseVal and its DOMSVGPathSegList wrapper
    * (if any) be kept in sync (see the comment in
    * DOMSVGPathSegList::InternalListWillChangeTo), this method returns a const
    * reference. Only our friend classes may get mutable references to mBaseVal.
    */
   const SVGPathData& GetBaseValue() const { return mBaseVal; }
--- a/dom/svg/SVGAnimatedPointList.h
+++ b/dom/svg/SVGAnimatedPointList.h
@@ -38,17 +38,17 @@ class SVGElement;
  * to know or worry about wrappers (or forget about them!) for the most part.
  */
 class SVGAnimatedPointList {
   // friends so that they can get write access to mBaseVal and mAnimVal
   friend class DOMSVGPoint;
   friend class DOMSVGPointList;
 
  public:
-  SVGAnimatedPointList() {}
+  SVGAnimatedPointList() = default;
 
   /**
    * Because it's so important that mBaseVal and its DOMSVGPointList wrapper
    * (if any) be kept in sync (see the comment in
    * DOMSVGPointList::InternalListWillChangeTo), this method returns a const
    * reference. Only our friend classes may get mutable references to mBaseVal.
    */
   const SVGPointList& GetBaseValue() const { return mBaseVal; }
--- a/dom/svg/SVGComponentTransferFunctionElement.h
+++ b/dom/svg/SVGComponentTransferFunctionElement.h
@@ -29,17 +29,17 @@ typedef SVGFEUnstyledElement SVGComponen
 
 class SVGComponentTransferFunctionElement
     : public SVGComponentTransferFunctionElementBase {
  protected:
   explicit SVGComponentTransferFunctionElement(
       already_AddRefed<mozilla::dom::NodeInfo>&& aNodeInfo)
       : SVGComponentTransferFunctionElementBase(std::move(aNodeInfo)) {}
 
-  virtual ~SVGComponentTransferFunctionElement() {}
+  virtual ~SVGComponentTransferFunctionElement() = default;
 
  public:
   typedef gfx::ComponentTransferAttributes ComponentTransferAttributes;
 
   // interfaces:
   NS_DECLARE_STATIC_IID_ACCESSOR(
       NS_SVG_FE_COMPONENT_TRANSFER_FUNCTION_ELEMENT_CID)
 
--- a/dom/svg/SVGFilters.h
+++ b/dom/svg/SVGFilters.h
@@ -50,17 +50,17 @@ class SVGFE : public SVGFEBase {
   typedef mozilla::gfx::SourceSurface SourceSurface;
   typedef mozilla::gfx::Size Size;
   typedef mozilla::gfx::IntRect IntRect;
   typedef mozilla::gfx::ColorSpace ColorSpace;
   typedef mozilla::gfx::FilterPrimitiveDescription FilterPrimitiveDescription;
 
   explicit SVGFE(already_AddRefed<mozilla::dom::NodeInfo>&& aNodeInfo)
       : SVGFEBase(std::move(aNodeInfo)) {}
-  virtual ~SVGFE() {}
+  virtual ~SVGFE() = default;
 
  public:
   typedef mozilla::gfx::PrimitiveAttributes PrimitiveAttributes;
 
   ColorSpace GetInputColorSpace(int32_t aInputIndex,
                                 ColorSpace aUnchangedInputColorSpace) {
     return OperatesOnSRGB(aInputIndex,
                           aUnchangedInputColorSpace == ColorSpace::SRGB)
@@ -178,17 +178,17 @@ class SVGFEUnstyledElement : public SVGF
 typedef SVGFE SVGFELightingElementBase;
 
 class SVGFELightingElement : public SVGFELightingElementBase {
  protected:
   explicit SVGFELightingElement(
       already_AddRefed<mozilla::dom::NodeInfo>&& aNodeInfo)
       : SVGFELightingElementBase(std::move(aNodeInfo)) {}
 
-  virtual ~SVGFELightingElement() {}
+  virtual ~SVGFELightingElement() = default;
 
  public:
   // interfaces:
   NS_INLINE_DECL_REFCOUNTING_INHERITED(SVGFELightingElement,
                                        SVGFELightingElementBase)
 
   virtual bool AttributeAffectsRendering(int32_t aNameSpaceID,
                                          nsAtom* aAttribute) const override;
--- a/dom/svg/SVGIRect.h
+++ b/dom/svg/SVGIRect.h
@@ -15,17 +15,17 @@
 
 class nsIContent;
 
 namespace mozilla {
 namespace dom {
 
 class SVGIRect : public nsISupports, public nsWrapperCache {
  public:
-  virtual ~SVGIRect() {}
+  virtual ~SVGIRect() = default;
 
   JSObject* WrapObject(JSContext* aCx,
                        JS::Handle<JSObject*> aGivenProto) override {
     return SVGRect_Binding::Wrap(aCx, this, aGivenProto);
   }
 
   virtual nsIContent* GetParentObject() const = 0;
 
--- a/dom/svg/SVGIntegerPairSMILType.h
+++ b/dom/svg/SVGIntegerPairSMILType.h
@@ -36,14 +36,14 @@ class SVGIntegerPairSMILType : public SM
   virtual nsresult ComputeDistance(const SMILValue& aFrom, const SMILValue& aTo,
                                    double& aDistance) const override;
   virtual nsresult Interpolate(const SMILValue& aStartVal,
                                const SMILValue& aEndVal, double aUnitDistance,
                                SMILValue& aResult) const override;
 
  private:
   // Private constructor: prevent instances beyond my singleton.
-  constexpr SVGIntegerPairSMILType() {}
+  constexpr SVGIntegerPairSMILType() = default;
 };
 
 }  // namespace mozilla
 
 #endif  // MOZILLA_SVGINTEGERPAIRSMILTYPE_H_
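
Aside (not part of the patch): the SMILType classes touched here keep their constructors private and constexpr because each one is meant to exist only as a single statically allocated instance, and constexpr lets that instance be constant-initialized rather than constructed during static initialization. A sketch of that pattern with invented names (the real singleton member is not shown in this diff):

class ExampleSMILType {
 public:
  // The one shared instance, declared here and defined in exactly one .cpp.
  static ExampleSMILType sSingleton;

 private:
  // Private and constexpr: no other instances can be created, and the
  // singleton needs no dynamic initializer, so there is no
  // static-initialization-order hazard.
  constexpr ExampleSMILType() = default;
};

// In one translation unit:
ExampleSMILType ExampleSMILType::sSingleton;
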
--- a/dom/svg/SVGLengthList.h
+++ b/dom/svg/SVGLengthList.h
@@ -34,18 +34,18 @@ class DOMSVGLengthList;
  * The DOM wrapper class for this class is DOMSVGLengthList.
  */
 class SVGLengthList {
   friend class dom::DOMSVGLength;
   friend class dom::DOMSVGLengthList;
   friend class SVGAnimatedLengthList;
 
  public:
-  SVGLengthList() {}
-  ~SVGLengthList() {}
+  SVGLengthList() = default;
+  ~SVGLengthList() = default;
 
   // Only methods that don't make/permit modification to this list are public.
   // Only our friend classes can access methods that may change us.
 
   /// This may return an incomplete string on OOM, but that's acceptable.
   void GetValueAsString(nsAString& aValue) const;
 
   bool IsEmpty() const { return mLengths.IsEmpty(); }
--- a/dom/svg/SVGLengthListSMILType.h
+++ b/dom/svg/SVGLengthListSMILType.h
@@ -86,14 +86,14 @@ class SVGLengthListSMILType : public SMI
   virtual nsresult ComputeDistance(const SMILValue& aFrom, const SMILValue& aTo,
                                    double& aDistance) const override;
   virtual nsresult Interpolate(const SMILValue& aStartVal,
                                const SMILValue& aEndVal, double aUnitDistance,
                                SMILValue& aResult) const override;
 
  private:
   // Private constructor: prevent instances beyond my singleton.
-  constexpr SVGLengthListSMILType() {}
+  constexpr SVGLengthListSMILType() = default;
 };
 
 }  // namespace mozilla
 
 #endif  // MOZILLA_SVGLENGTHLISTSMILTYPE_H_
--- a/dom/svg/SVGMatrix.h
+++ b/dom/svg/SVGMatrix.h
@@ -58,17 +58,17 @@ class SVGMatrix final : public nsWrapper
    * Ctor for SVGMatrix objects that belong to a DOMSVGTransform.
    */
   explicit SVGMatrix(DOMSVGTransform& aTransform) : mTransform(&aTransform) {}
 
   /**
    * Ctors for SVGMatrix objects created independently of a DOMSVGTransform.
    */
   // Default ctor for gfxMatrix will produce identity mx
-  SVGMatrix() {}
+  SVGMatrix() = default;
 
   explicit SVGMatrix(const gfxMatrix& aMatrix) : mMatrix(aMatrix) {}
 
   const gfxMatrix& GetMatrix() const {
     return mTransform ? mTransform->Matrixgfx() : mMatrix;
   }
 
   // WebIDL
@@ -98,17 +98,17 @@ class SVGMatrix final : public nsWrapper
   already_AddRefed<SVGMatrix> RotateFromVector(float x, float y,
                                                ErrorResult& aRv);
   already_AddRefed<SVGMatrix> FlipX();
   already_AddRefed<SVGMatrix> FlipY();
   already_AddRefed<SVGMatrix> SkewX(float angle, ErrorResult& rv);
   already_AddRefed<SVGMatrix> SkewY(float angle, ErrorResult& rv);
 
  private:
-  ~SVGMatrix() {}
+  ~SVGMatrix() = default;
 
   void SetMatrix(const gfxMatrix& aMatrix) {
     if (mTransform) {
       mTransform->SetMatrix(aMatrix);
     } else {
       mMatrix = aMatrix;
     }
   }
--- a/dom/svg/SVGMotionSMILType.h
+++ b/dom/svg/SVGMotionSMILType.h
@@ -66,14 +66,14 @@ class SVGMotionSMILType : public SMILTyp
   // Used to generate a SMILValue for the point at the given distance along
   // the given path.
   static SMILValue ConstructSMILValue(Path* aPath, float aDist,
                                       RotateType aRotateType,
                                       float aRotateAngle);
 
  private:
   // Private constructor: prevent instances beyond my singleton.
-  constexpr SVGMotionSMILType() {}
+  constexpr SVGMotionSMILType() = default;
 };
 
 }  // namespace mozilla
 
 #endif  // MOZILLA_SVGMOTIONSMILTYPE_H_
--- a/dom/svg/SVGNumberList.h
+++ b/dom/svg/SVGNumberList.h
@@ -32,18 +32,18 @@ class DOMSVGNumberList;
  * The DOM wrapper class for this class is DOMSVGNumberList.
  */
 class SVGNumberList {
   friend class dom::DOMSVGNumber;
   friend class dom::DOMSVGNumberList;
   friend class SVGAnimatedNumberList;
 
  public:
-  SVGNumberList() {}
-  ~SVGNumberList() {}
+  SVGNumberList() = default;
+  ~SVGNumberList() = default;
 
   // Only methods that don't make/permit modification to this list are public.
   // Only our friend classes can access methods that may change us.
 
   /// This may return an incomplete string on OOM, but that's acceptable.
   void GetValueAsString(nsAString& aValue) const;
 
   bool IsEmpty() const { return mNumbers.IsEmpty(); }
--- a/dom/svg/SVGNumberListSMILType.h
+++ b/dom/svg/SVGNumberListSMILType.h
@@ -40,14 +40,14 @@ class SVGNumberListSMILType : public SMI
   virtual nsresult ComputeDistance(const SMILValue& aFrom, const SMILValue& aTo,
                                    double& aDistance) const override;
   virtual nsresult Interpolate(const SMILValue& aStartVal,
                                const SMILValue& aEndVal, double aUnitDistance,
                                SMILValue& aResult) const override;
 
  private:
   // Private constructor: prevent instances beyond my singleton.
-  constexpr SVGNumberListSMILType() {}
+  constexpr SVGNumberListSMILType() = default;
 };
 
 }  // namespace mozilla
 
 #endif  // MOZILLA_SVGNUMBERLISTSMILTYPE_H_
--- a/dom/svg/SVGNumberPairSMILType.h
+++ b/dom/svg/SVGNumberPairSMILType.h
@@ -33,14 +33,14 @@ class SVGNumberPairSMILType : public SMI
   virtual nsresult ComputeDistance(const SMILValue& aFrom, const SMILValue& aTo,
                                    double& aDistance) const override;
   virtual nsresult Interpolate(const SMILValue& aStartVal,
                                const SMILValue& aEndVal, double aUnitDistance,
                                SMILValue& aResult) const override;
 
  private:
   // Private constructor: prevent instances beyond my singleton.
-  constexpr SVGNumberPairSMILType() {}
+  constexpr SVGNumberPairSMILType() = default;
 };
 
 }  // namespace mozilla
 
 #endif  // MOZILLA_SVGNUMBERPAIRSMILTYPE_H_
--- a/dom/svg/SVGOrientSMILType.h
+++ b/dom/svg/SVGOrientSMILType.h
@@ -53,14 +53,14 @@ class SVGOrientSMILType : public SMILTyp
   virtual nsresult ComputeDistance(const SMILValue& aFrom, const SMILValue& aTo,
                                    double& aDistance) const override;
   virtual nsresult Interpolate(const SMILValue& aStartVal,
                                const SMILValue& aEndVal, double aUnitDistance,
                                SMILValue& aResult) const override;
 
  private:
   // Private constructor: prevent instances beyond my singleton.
-  constexpr SVGOrientSMILType() {}
+  constexpr SVGOrientSMILType() = default;
 };
 
 }  // namespace mozilla
 
 #endif  // MOZILLA_SVGORIENTSMILTYPE_H_
--- a/dom/svg/SVGPathData.h
+++ b/dom/svg/SVGPathData.h
@@ -86,18 +86,18 @@ class SVGPathData {
   typedef gfx::PathBuilder PathBuilder;
   typedef gfx::FillRule FillRule;
   typedef gfx::Float Float;
   typedef gfx::CapStyle CapStyle;
 
  public:
   typedef const float* const_iterator;
 
-  SVGPathData() {}
-  ~SVGPathData() {}
+  SVGPathData() = default;
+  ~SVGPathData() = default;
 
   // Only methods that don't make/permit modification to this list are public.
   // Only our friend classes can access methods that may change us.
 
   /// This may return an incomplete string on OOM, but that's acceptable.
   void GetValueAsString(nsAString& aValue) const;
 
   bool IsEmpty() const { return mData.IsEmpty(); }
--- a/dom/svg/SVGPathSegListSMILType.h
+++ b/dom/svg/SVGPathSegListSMILType.h
@@ -43,14 +43,14 @@ class SVGPathSegListSMILType : public SM
   virtual nsresult ComputeDistance(const SMILValue& aFrom, const SMILValue& aTo,
                                    double& aDistance) const override;
   virtual nsresult Interpolate(const SMILValue& aStartVal,
                                const SMILValue& aEndVal, double aUnitDistance,
                                SMILValue& aResult) const override;
 
  private:
   // Private constructor: prevent instances beyond my singleton.
-  constexpr SVGPathSegListSMILType() {}
+  constexpr SVGPathSegListSMILType() = default;
 };
 
 }  // namespace mozilla
 
 #endif  // MOZILLA_SVGPATHSEGLISTSMILTYPE_H_
--- a/dom/svg/SVGPathSegUtils.h
+++ b/dom/svg/SVGPathSegUtils.h
@@ -68,17 +68,17 @@ struct SVGPathTraversalState {
  *
  * The DOM wrapper classes for encoded path segments (data contained in
  * instances of SVGPathData) is DOMSVGPathSeg and its sub-classes. Note that
  * there are multiple different DOM classes for path segs - one for each of the
  * 19 SVG 1.1 segment types.
  */
 class SVGPathSegUtils {
  private:
-  SVGPathSegUtils() {}  // private to prevent instances
+  SVGPathSegUtils() = default;  // private to prevent instances
 
  public:
   static void GetValueAsString(const float* aSeg, nsAString& aValue);
 
   /**
    * Encode a segment type enum to a float.
    *
    * At some point in the future we will likely want to encode other
--- a/dom/svg/SVGPointList.h
+++ b/dom/svg/SVGPointList.h
@@ -32,18 +32,18 @@ class nsISVGPoint;
  */
 class SVGPointList {
   friend class mozilla::nsISVGPoint;
   friend class SVGAnimatedPointList;
   friend class DOMSVGPointList;
   friend class DOMSVGPoint;
 
  public:
-  SVGPointList() {}
-  ~SVGPointList() {}
+  SVGPointList() = default;
+  ~SVGPointList() = default;
 
   // Only methods that don't make/permit modification to this list are public.
   // Only our friend classes can access methods that may change us.
 
   /// This may return an incomplete string on OOM, but that's acceptable.
   void GetValueAsString(nsAString& aValue) const;
 
   bool IsEmpty() const { return mItems.IsEmpty(); }
--- a/dom/svg/SVGPointListSMILType.h
+++ b/dom/svg/SVGPointListSMILType.h
@@ -40,14 +40,14 @@ class SVGPointListSMILType : public SMIL
   virtual nsresult ComputeDistance(const SMILValue& aFrom, const SMILValue& aTo,
                                    double& aDistance) const override;
   virtual nsresult Interpolate(const SMILValue& aStartVal,
                                const SMILValue& aEndVal, double aUnitDistance,
                                SMILValue& aResult) const override;
 
  private:
   // Private constructor: prevent instances beyond my singleton.
-  constexpr SVGPointListSMILType() {}
+  constexpr SVGPointListSMILType() = default;
 };
 
 }  // namespace mozilla
 
 #endif  // MOZILLA_SVGPOINTLISTSMILTYPE_H_
--- a/dom/svg/SVGRect.h
+++ b/dom/svg/SVGRect.h
@@ -41,17 +41,17 @@ class SVGRect final : public SVGIRect {
 
   float Height() const final { return mHeight; }
 
   void SetHeight(float aHeight, ErrorResult& aRv) final { mHeight = aHeight; }
 
   virtual nsIContent* GetParentObject() const override { return mParent; }
 
  protected:
-  ~SVGRect() {}
+  ~SVGRect() = default;
 
   nsCOMPtr<nsIContent> mParent;
   float mX, mY, mWidth, mHeight;
 };
 
 }  // namespace dom
 }  // namespace mozilla
 
--- a/dom/svg/SVGSVGElement.h
+++ b/dom/svg/SVGSVGElement.h
@@ -60,17 +60,17 @@ class DOMSVGTranslatePoint final : publi
   virtual already_AddRefed<nsISVGPoint> MatrixTransform(
       SVGMatrix& matrix) override;
 
   virtual nsISupports* GetParentObject() override;
 
   RefPtr<SVGSVGElement> mElement;
 
  private:
-  ~DOMSVGTranslatePoint() {}
+  ~DOMSVGTranslatePoint() = default;
 };
 
 typedef SVGViewportElement SVGSVGElementBase;
 
 class SVGSVGElement final : public SVGSVGElementBase {
   friend class ::nsSVGOuterSVGFrame;
   friend class mozilla::SVGFragmentIdentifier;
   friend class mozilla::AutoSVGViewHandler;
--- a/dom/svg/SVGStringList.h
+++ b/dom/svg/SVGStringList.h
@@ -17,17 +17,17 @@ namespace mozilla {
  *
  * The DOM wrapper class for this class is DOMSVGStringList.
  */
 class SVGStringList {
   friend class DOMSVGStringList;
 
  public:
   SVGStringList() : mIsSet(false), mIsCommaSeparated(false) {}
-  ~SVGStringList() {}
+  ~SVGStringList() = default;
 
   void SetIsCommaSeparated(bool aIsCommaSeparated) {
     mIsCommaSeparated = aIsCommaSeparated;
   }
   nsresult SetValue(const nsAString& aValue);
 
   void Clear() {
     mStrings.Clear();
--- a/dom/svg/SVGTests.h
+++ b/dom/svg/SVGTests.h
@@ -100,17 +100,17 @@ class SVGTests : public nsISupports {
 
   virtual SVGElement* AsSVGElement() = 0;
 
   const SVGElement* AsSVGElement() const {
     return const_cast<SVGTests*>(this)->AsSVGElement();
   }
 
  protected:
-  virtual ~SVGTests() {}
+  virtual ~SVGTests() = default;
 
  private:
   enum { FEATURES, EXTENSIONS, LANGUAGE };
   SVGStringList mStringListAttributes[3];
   static nsStaticAtom* const sStringListNames[3];
 };
 
 NS_DEFINE_STATIC_IID_ACCESSOR(SVGTests, MOZILLA_DOMSVGTESTS_IID)
--- a/dom/svg/SVGTransformList.h
+++ b/dom/svg/SVGTransformList.h
@@ -27,18 +27,18 @@ class DOMSVGTransform;
  * The DOM wrapper class for this class is DOMSVGTransformList.
  */
 class SVGTransformList {
   friend class SVGAnimatedTransformList;
   friend class DOMSVGTransformList;
   friend class dom::DOMSVGTransform;
 
  public:
-  SVGTransformList() {}
-  ~SVGTransformList() {}
+  SVGTransformList() = default;
+  ~SVGTransformList() = default;
 
   // Only methods that don't make/permit modification to this list are public.
   // Only our friend classes can access methods that may change us.
 
   /// This may return an incomplete string on OOM, but that's acceptable.
   void GetValueAsString(nsAString& aValue) const;
 
   bool IsEmpty() const { return mItems.IsEmpty(); }
--- a/dom/svg/SVGTransformListSMILType.h
+++ b/dom/svg/SVGTransformListSMILType.h
@@ -111,14 +111,14 @@ class SVGTransformListSMILType : public 
                                   SMILValue& aValue);
   static bool AppendTransforms(const SVGTransformList& aList,
                                SMILValue& aValue);
   static bool GetTransforms(const SMILValue& aValue,
                             FallibleTArray<SVGTransform>& aTransforms);
 
  private:
   // Private constructor: prevent instances beyond my singleton.
-  constexpr SVGTransformListSMILType() {}
+  constexpr SVGTransformListSMILType() = default;
 };
 
 }  // end namespace mozilla
 
 #endif  // SVGLISTTRANSFORMSMILTYPE_H_
--- a/dom/svg/SVGTransformableElement.h
+++ b/dom/svg/SVGTransformableElement.h
@@ -22,17 +22,17 @@ class SVGGraphicsElement;
 class SVGMatrix;
 class SVGIRect;
 struct SVGBoundingBoxOptions;
 
 class SVGTransformableElement : public SVGElement {
  public:
   explicit SVGTransformableElement(already_AddRefed<dom::NodeInfo>&& aNodeInfo)
       : SVGElement(std::move(aNodeInfo)) {}
-  virtual ~SVGTransformableElement() {}
+  virtual ~SVGTransformableElement() = default;
 
   virtual nsresult Clone(dom::NodeInfo*, nsINode** aResult) const override = 0;
 
   // WebIDL
   already_AddRefed<DOMSVGAnimatedTransformList> Transform();
   SVGElement* GetNearestViewportElement();
   SVGElement* GetFarthestViewportElement();
   MOZ_CAN_RUN_SCRIPT
--- a/dom/svg/SVGViewBoxSMILType.h
+++ b/dom/svg/SVGViewBoxSMILType.h
@@ -33,14 +33,14 @@ class SVGViewBoxSMILType : public SMILTy
   virtual nsresult ComputeDistance(const SMILValue& aFrom, const SMILValue& aTo,
                                    double& aDistance) const override;
   virtual nsresult Interpolate(const SMILValue& aStartVal,
                                const SMILValue& aEndVal, double aUnitDistance,
                                SMILValue& aResult) const override;
 
  private:
   // Private constructor: prevent instances beyond my singleton.
-  constexpr SVGViewBoxSMILType() {}
+  constexpr SVGViewBoxSMILType() = default;
 };
 
 }  // namespace mozilla
 
 #endif  // MOZILLA_SVGVIEWBOXSMILTYPE_H_
--- a/dom/svg/SVGViewportElement.h
+++ b/dom/svg/SVGViewportElement.h
@@ -20,19 +20,19 @@
 #include "nsISVGPoint.h"
 #include "SVGPreserveAspectRatio.h"
 
 class nsSVGOuterSVGFrame;
 class nsSVGViewportFrame;
 
 namespace mozilla {
 class AutoPreserveAspectRatioOverride;
-class DOMSVGAnimatedPreserveAspectRatio;
 
 namespace dom {
+class DOMSVGAnimatedPreserveAspectRatio;
 class SVGAnimatedRect;
 class SVGViewElement;
 class SVGViewportElement;
 
 class svgFloatSize {
  public:
   svgFloatSize(float aWidth, float aHeight) : width(aWidth), height(aHeight) {}
   bool operator!=(const svgFloatSize& rhs) {
--- a/gfx/thebes/gfxPlatform.cpp
+++ b/gfx/thebes/gfxPlatform.cpp
@@ -918,16 +918,17 @@ void gfxPlatform::Init() {
   gPlatform = new gfxPlatformMac;
 #elif defined(MOZ_WIDGET_GTK)
   gPlatform = new gfxPlatformGtk;
 #elif defined(ANDROID)
   gPlatform = new gfxAndroidPlatform;
 #else
 #  error "No gfxPlatform implementation available"
 #endif
+  gPlatform->PopulateScreenInfo();
   gPlatform->InitAcceleration();
   gPlatform->InitWebRenderConfig();
   // When using WebRender, we defer initialization of the D3D11 devices until
   // the (rare) cases where they're used. Note that the GPU process where
   // WebRender runs doesn't initialize gfxPlatform and performs explicit
   // initialization of the bits it needs.
   if (!gfxVars::UseWebRender()) {
     gPlatform->EnsureDevicesInitialized();
@@ -955,17 +956,16 @@ void gfxPlatform::Init() {
   SkGraphics::Init();
 #  ifdef MOZ_ENABLE_FREETYPE
   SkInitCairoFT(gPlatform->FontHintingEnabled());
 #  endif
 #endif
 
   InitLayersIPC();
 
-  gPlatform->PopulateScreenInfo();
   gPlatform->ComputeTileSize();
 
 #ifdef MOZ_ENABLE_FREETYPE
   Factory::SetFTLibrary(gPlatform->GetFTLibrary());
 #endif
 
   gPlatform->mHasVariationFontSupport = gPlatform->CheckVariationFontSupport();
 
@@ -2506,17 +2506,17 @@ static bool CalculateWrQualifiedPrefValu
   // rollout pref enabled" case.
   if (Preferences::HasUserValue(WR_ROLLOUT_PREF_OVERRIDE)) {
     return Preferences::GetBool(WR_ROLLOUT_PREF_OVERRIDE);
   }
   return gfxPrefs::WebRenderAllQualified();
 }
 
 static FeatureState& WebRenderHardwareQualificationStatus(
-    bool aHasBattery, nsCString& aOutFailureId) {
+    const IntSize& aScreenSize, bool aHasBattery, nsCString& aOutFailureId) {
   FeatureState& featureWebRenderQualified =
       gfxConfig::GetFeature(Feature::WEBRENDER_QUALIFIED);
   featureWebRenderQualified.EnableByDefault();
 
   if (Preferences::HasUserValue(WR_ROLLOUT_HW_QUALIFIED_OVERRIDE)) {
     if (!Preferences::GetBool(WR_ROLLOUT_HW_QUALIFIED_OVERRIDE)) {
       featureWebRenderQualified.Disable(
           FeatureStatus::Blocked, "HW qualification pref override",
@@ -2571,17 +2571,18 @@ static FeatureState& WebRenderHardwareQu
               (deviceID >= 0x9830 && deviceID < 0x9870) ||
               (deviceID >= 0x9900 && deviceID < 0x9a00)) {
             // we have a desktop CAYMAN, SI, CIK, VI, or GFX9 device
           } else {
             featureWebRenderQualified.Disable(
                 FeatureStatus::Blocked, "Device too old",
                 NS_LITERAL_CSTRING("FEATURE_FAILURE_DEVICE_TOO_OLD"));
           }
-        } else if (adapterVendorID == u"0x8086") {  // Intel
+        } else if (adapterVendorID == u"0x8086" ||
+                   adapterVendorID == u"mesa/i965") {  // Intel
           const uint16_t supportedDevices[] = {
               0x191d,  // HD Graphics P530
               0x192d,  // Iris Pro Graphics P555
               0x1912,  // HD Graphics 530
               0x5912,  // HD Graphics 630
               0x3e92,  // UHD Graphics 630
               // HD Graphics 4600
               0x0412,
@@ -2600,16 +2601,28 @@ static FeatureState& WebRenderHardwareQu
             if (deviceID == id) {
               supported = true;
             }
           }
           if (!supported) {
             featureWebRenderQualified.Disable(
                 FeatureStatus::Blocked, "Device too old",
                 NS_LITERAL_CSTRING("FEATURE_FAILURE_DEVICE_TOO_OLD"));
+          } else if (adapterVendorID == u"mesa/i965") {
+            const int32_t maxPixels = 3440 * 1440;  // UWQHD
+            int32_t pixels = aScreenSize.width * aScreenSize.height;
+            if (pixels > maxPixels) {
+              featureWebRenderQualified.Disable(
+                  FeatureStatus::Blocked, "Screen size too large",
+                  NS_LITERAL_CSTRING("FEATURE_FAILURE_SCREEN_SIZE_TOO_LARGE"));
+            } else if (pixels <= 0) {
+              featureWebRenderQualified.Disable(
+                  FeatureStatus::Blocked, "Screen size unknown",
+                  NS_LITERAL_CSTRING("FEATURE_FAILURE_SCREEN_SIZE_UNKNOWN"));
+            }
           }
 #endif
         } else {
           featureWebRenderQualified.Disable(
               FeatureStatus::Blocked, "Unsupported vendor",
               NS_LITERAL_CSTRING("FEATURE_FAILURE_UNSUPPORTED_VENDOR"));
         }
       }
@@ -2648,17 +2661,18 @@ void gfxPlatform::InitWebRenderConfig() 
     if (gfxVars::UseWebRender()) {
       reporter.SetSuccessful();
     }
     return;
   }
 
   nsCString failureId;
   FeatureState& featureWebRenderQualified =
-      WebRenderHardwareQualificationStatus(HasBattery(), failureId);
+      WebRenderHardwareQualificationStatus(GetScreenSize(), HasBattery(),
+                                           failureId);
   FeatureState& featureWebRender = gfxConfig::GetFeature(Feature::WEBRENDER);
 
   featureWebRender.DisableByDefault(
       FeatureStatus::OptIn, "WebRender is an opt-in feature",
       NS_LITERAL_CSTRING("FEATURE_FAILURE_DEFAULT_OFF"));
 
   const bool wrQualifiedAll = CalculateWrQualifiedPrefValue();
 
@@ -2743,16 +2757,26 @@ void gfxPlatform::InitWebRenderConfig() 
     gfxVars::SetUseWebRender(true);
     reporter.SetSuccessful();
 
     if (XRE_IsParentProcess()) {
       Preferences::RegisterPrefixCallbackAndCall(
           WebRenderDebugPrefChangeCallback, WR_DEBUG_PREF);
     }
   }
+#if defined(XP_LINUX) && !defined(MOZ_WIDGET_ANDROID)
+  else if (gfxConfig::IsEnabled(Feature::HW_COMPOSITING)) {
+    // Hardware compositing should be disabled by default if we aren't using
+    // WebRender. We had to check if it is enabled at all, because it may
+    // already have been forced disabled (e.g. safe mode, headless). It may
+    // still be forced on by the user, and if so, this should have no effect.
+    gfxConfig::Disable(Feature::HW_COMPOSITING, FeatureStatus::Blocked,
+                       "Acceleration blocked by platform");
+  }
+#endif
 
 #ifdef XP_WIN
   if (Preferences::GetBool("gfx.webrender.dcomp-win.enabled", false)) {
     // XXX relax win version to windows 8.
     if (IsWin10OrLater() && gfxVars::UseWebRender() &&
         gfxVars::UseWebRenderANGLE()) {
       gfxVars::SetUseWebRenderDCompWin(true);
     }
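
Aside (not part of the patch): the new Linux/Mesa qualification above reduces to a pixel-count bound on the primary screen, which is why PopulateScreenInfo() is moved ahead of InitWebRenderConfig() in gfxPlatform::Init(). A minimal sketch of the same arithmetic, with the type and function names below invented for illustration:

#include <cstdint>

// Stand-in for the IntSize handed to WebRenderHardwareQualificationStatus.
struct ScreenSize {
  int32_t width;
  int32_t height;
};

enum class WrScreenVerdict { Qualified, TooLarge, Unknown };

// Mirrors the checks in the diff: block screens larger than UWQHD
// (3440x1440) and screens whose size could not be determined.
WrScreenVerdict ClassifyScreenForWebRender(const ScreenSize& aSize) {
  const int32_t maxPixels = 3440 * 1440;  // UWQHD
  int32_t pixels = aSize.width * aSize.height;
  if (pixels > maxPixels) {
    return WrScreenVerdict::TooLarge;  // FEATURE_FAILURE_SCREEN_SIZE_TOO_LARGE
  }
  if (pixels <= 0) {
    return WrScreenVerdict::Unknown;  // FEATURE_FAILURE_SCREEN_SIZE_UNKNOWN
  }
  return WrScreenVerdict::Qualified;
}
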
--- a/gfx/thebes/gfxPlatform.h
+++ b/gfx/thebes/gfxPlatform.h
@@ -731,17 +731,17 @@ class gfxPlatform : public mozilla::laye
   virtual bool DevicesInitialized() { return true; };
 
   static uint32_t TargetFrameRate();
 
  protected:
   gfxPlatform();
   virtual ~gfxPlatform();
 
-  virtual bool HasBattery() { return true; }
+  virtual bool HasBattery() { return false; }
 
   virtual void InitAcceleration();
   virtual void InitWebRenderConfig();
 
   /**
    * Called immediately before deleting the gfxPlatform object.
    */
   virtual void WillShutdown();
--- a/gfx/thebes/gfxPlatformGtk.cpp
+++ b/gfx/thebes/gfxPlatformGtk.cpp
@@ -325,17 +325,17 @@ uint32_t gfxPlatformGtk::MaxGenericSubst
       mMaxGenericSubstitutions = 3;
     }
   }
 
   return uint32_t(mMaxGenericSubstitutions);
 }
 
 bool gfxPlatformGtk::AccelerateLayersByDefault() {
-  return gfxPrefs::WebRenderAll();
+  return true;
 }
 
 void gfxPlatformGtk::GetPlatformCMSOutputProfile(void*& mem, size_t& size) {
   mem = nullptr;
   size = 0;
 
 #ifdef MOZ_X11
   GdkDisplay* display = gdk_display_get_default();
--- a/js/public/GCAPI.h
+++ b/js/public/GCAPI.h
@@ -302,16 +302,24 @@ typedef enum JSGCParamKey {
    * This value will be rounded to the nearest Nursery::SubChunkStep if below
    * gc::ChunkSize, otherwise it'll be rounded to the nearest gc::ChunkSize.
    *
    * Default: Nursery::SubChunkLimit
    * Pref: None
    */
   JSGC_MIN_NURSERY_BYTES = 31,
 
+  /*
+   * The minimum time to allow between triggering last ditch GCs in seconds.
+   *
+   * Default: 60 seconds
+   * Pref: None
+   */
+  JSGC_MIN_LAST_DITCH_GC_PERIOD = 32,
+
 } JSGCParamKey;
 
 /*
  * Generic trace operation that calls JS::TraceEdge on each traceable thing's
  * location reachable from data.
  */
 typedef void (*JSTraceDataOp)(JSTracer* trc, void* data);
 
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -483,17 +483,18 @@ static bool MinorGC(JSContext* cx, unsig
   _("highFrequencyHeapGrowthMax", JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MAX, true) \
   _("highFrequencyHeapGrowthMin", JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MIN, true) \
   _("lowFrequencyHeapGrowth", JSGC_LOW_FREQUENCY_HEAP_GROWTH, true)          \
   _("dynamicHeapGrowth", JSGC_DYNAMIC_HEAP_GROWTH, true)                     \
   _("dynamicMarkSlice", JSGC_DYNAMIC_MARK_SLICE, true)                       \
   _("allocationThreshold", JSGC_ALLOCATION_THRESHOLD, true)                  \
   _("minEmptyChunkCount", JSGC_MIN_EMPTY_CHUNK_COUNT, true)                  \
   _("maxEmptyChunkCount", JSGC_MAX_EMPTY_CHUNK_COUNT, true)                  \
-  _("compactingEnabled", JSGC_COMPACTING_ENABLED, true)
+  _("compactingEnabled", JSGC_COMPACTING_ENABLED, true)                      \
+  _("minLastDitchGCPeriod", JSGC_MIN_LAST_DITCH_GC_PERIOD, true)
 
 static const struct ParamInfo {
   const char* name;
   JSGCParamKey param;
   bool writable;
 } paramMap[] = {
 #define DEFINE_PARAM_INFO(name, key, writable) {name, key, writable},
     FOR_EACH_GC_PARAM(DEFINE_PARAM_INFO)
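
Aside (not part of the patch): paramMap in TestingFunctions.cpp is generated by an X-macro, so adding the single "minLastDitchGCPeriod" entry to FOR_EACH_GC_PARAM is enough for the shell's GC-parameter testing helper to read and write the new key. A self-contained sketch of the same X-macro pattern, with made-up parameters:

#include <cstdio>

// Same shape as FOR_EACH_GC_PARAM / DEFINE_PARAM_INFO above, but with
// invented entries purely for illustration.
#define FOR_EACH_EXAMPLE_PARAM(_) \
  _("alpha", 1, true)             \
  _("beta", 2, false)

struct ExampleParamInfo {
  const char* name;
  int key;
  bool writable;
};

#define DEFINE_EXAMPLE_PARAM_INFO(name, key, writable) {name, key, writable},
static const ExampleParamInfo exampleParamMap[] = {
    FOR_EACH_EXAMPLE_PARAM(DEFINE_EXAMPLE_PARAM_INFO)};
#undef DEFINE_EXAMPLE_PARAM_INFO

int main() {
  for (const auto& p : exampleParamMap) {
    std::printf("%s -> key %d (%s)\n", p.name, p.key,
                p.writable ? "writable" : "read-only");
  }
  return 0;
}
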
--- a/js/src/frontend/BytecodeControlStructures.cpp
+++ b/js/src/frontend/BytecodeControlStructures.cpp
@@ -36,17 +36,17 @@ LabelControl::LabelControl(BytecodeEmitt
       startOffset_(startOffset) {}
 
 LoopControl::LoopControl(BytecodeEmitter* bce, StatementKind loopKind)
     : BreakableControl(bce, loopKind), tdzCache_(bce) {
   MOZ_ASSERT(is<LoopControl>());
 
   LoopControl* enclosingLoop = findNearest<LoopControl>(enclosing());
 
-  stackDepth_ = bce->stackDepth;
+  stackDepth_ = bce->bytecodeSection().stackDepth();
   loopDepth_ = enclosingLoop ? enclosingLoop->loopDepth_ + 1 : 1;
 
   int loopSlots;
   if (loopKind == StatementKind::Spread) {
     // The iterator next method, the iterator, the result array, and
     // the current array index are on the stack.
     loopSlots = 4;
   } else if (loopKind == StatementKind::ForOfLoop) {
@@ -76,17 +76,17 @@ bool LoopControl::emitContinueTarget(Byt
     return false;
   }
   return true;
 }
 
 bool LoopControl::emitSpecialBreakForDone(BytecodeEmitter* bce) {
   // This doesn't pop stack values, nor handle any other controls.
   // Should be called on the toplevel of the loop.
-  MOZ_ASSERT(bce->stackDepth == stackDepth_);
+  MOZ_ASSERT(bce->bytecodeSection().stackDepth() == stackDepth_);
   MOZ_ASSERT(bce->innermostNestableControl == this);
 
   if (!bce->newSrcNote(SRC_BREAK)) {
     return false;
   }
   if (!bce->emitJump(JSOP_GOTO, &breaks)) {
     return false;
   }
@@ -104,43 +104,44 @@ bool LoopControl::emitEntryJump(Bytecode
 bool LoopControl::emitLoopHead(BytecodeEmitter* bce,
                                const Maybe<uint32_t>& nextPos) {
   if (nextPos) {
     if (!bce->updateSourceCoordNotes(*nextPos)) {
       return false;
     }
   }
 
-  head_ = {bce->offset()};
+  head_ = {bce->bytecodeSection().offset()};
   ptrdiff_t off;
   if (!bce->emitJumpTargetOp(JSOP_LOOPHEAD, &off)) {
     return false;
   }
 
   return true;
 }
 
 bool LoopControl::emitLoopEntry(BytecodeEmitter* bce,
                                 const Maybe<uint32_t>& nextPos) {
   if (nextPos) {
     if (!bce->updateSourceCoordNotes(*nextPos)) {
       return false;
     }
   }
 
-  JumpTarget entry = {bce->offset()};
+  JumpTarget entry = {bce->bytecodeSection().offset()};
   bce->patchJumpsToTarget(entryJump_, entry);
 
   MOZ_ASSERT(loopDepth_ > 0);
 
   ptrdiff_t off;
   if (!bce->emitJumpTargetOp(JSOP_LOOPENTRY, &off)) {
     return false;
   }
-  SetLoopEntryDepthHintAndFlags(bce->code(off), loopDepth_, canIonOsr_);
+  SetLoopEntryDepthHintAndFlags(bce->bytecodeSection().code(off), loopDepth_,
+                                canIonOsr_);
 
   return true;
 }
 
 bool LoopControl::emitLoopEnd(BytecodeEmitter* bce, JSOp op) {
   JumpList beq;
   if (!bce->emitBackwardJump(op, head_, &beq, &breakTarget_)) {
     return false;
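
Aside (not part of the patch): the widespread call-site churn in this file and in BytecodeEmitter.cpp below comes from one change: raw emitter state such as stackDepth, the current offset, and the note lists now lives in a nested BytecodeSection (with per-script data in PerScriptData), reached only through accessors. A much-simplified sketch of that shape, using accessor names that appear in the diff and omitting everything else:

#include <cstddef>
#include <cstdint>

class EmitterSketch {
 public:
  // Mutable bytecode-stream state is grouped here instead of living as
  // loose fields on the emitter itself.
  class BytecodeSection {
   public:
    int32_t stackDepth() const { return stackDepth_; }
    void setStackDepth(int32_t aDepth) { stackDepth_ = aDepth; }
    ptrdiff_t offset() const { return offset_; }

   private:
    int32_t stackDepth_ = 0;
    ptrdiff_t offset_ = 0;
  };

  // Call sites change from bce->stackDepth to
  // bce->bytecodeSection().stackDepth().
  BytecodeSection& bytecodeSection() { return bytecodeSection_; }
  const BytecodeSection& bytecodeSection() const { return bytecodeSection_; }

 private:
  BytecodeSection bytecodeSection_;
};
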
--- a/js/src/frontend/BytecodeEmitter.cpp
+++ b/js/src/frontend/BytecodeEmitter.cpp
@@ -86,42 +86,54 @@ static bool ParseNodeRequiresSpecialLine
   // handling to avoid strange stepping behavior.
   // Functions usually shouldn't have location information (bug 1431202).
 
   ParseNodeKind kind = pn->getKind();
   return kind == ParseNodeKind::WhileStmt || kind == ParseNodeKind::ForStmt ||
          kind == ParseNodeKind::Function;
 }
 
+BytecodeEmitter::BytecodeSection::BytecodeSection(JSContext* cx,
+                                                  uint32_t lineNum)
+    : code_(cx),
+      notes_(cx),
+      tryNoteList_(cx),
+      scopeNoteList_(cx),
+      resumeOffsetList_(cx),
+      currentLine_(lineNum) {}
+
+BytecodeEmitter::PerScriptData::PerScriptData(JSContext* cx)
+    : scopeList_(cx),
+      numberList_(cx),
+      atomIndices_(cx->frontendCollectionPool()) {}
+
+bool BytecodeEmitter::PerScriptData::init(JSContext* cx) {
+  return atomIndices_.acquire(cx);
+}
+
 BytecodeEmitter::BytecodeEmitter(
     BytecodeEmitter* parent, SharedContext* sc, HandleScript script,
     Handle<LazyScript*> lazyScript, uint32_t lineNum, EmitterMode emitterMode,
     FieldInitializers fieldInitializers /* = FieldInitializers::Invalid() */)
     : sc(sc),
       cx(sc->cx_),
       parent(parent),
       script(cx, script),
       lazyScript(cx, lazyScript),
-      code_(cx),
-      notes_(cx),
-      currentLine_(lineNum),
+      bytecodeSection_(cx, lineNum),
+      perScriptData_(cx),
       fieldInitializers_(fieldInitializers),
-      atomIndices(cx->frontendCollectionPool()),
       firstLine(lineNum),
-      numberList(cx),
-      scopeList(cx),
-      tryNoteList(cx),
-      scopeNoteList(cx),
-      resumeOffsetList(cx),
       emitterMode(emitterMode) {
   MOZ_ASSERT_IF(emitterMode == LazyFunction, lazyScript);
 
   if (sc->isFunctionBox()) {
     // Functions have IC entries for type monitoring |this| and arguments.
-    numICEntries = sc->asFunctionBox()->function()->nargs() + 1;
+    bytecodeSection().setNumICEntries(sc->asFunctionBox()->function()->nargs() +
+                                      1);
   }
 }
 
 BytecodeEmitter::BytecodeEmitter(BytecodeEmitter* parent,
                                  BCEParserHandle* handle, SharedContext* sc,
                                  HandleScript script,
                                  Handle<LazyScript*> lazyScript,
                                  uint32_t lineNum, EmitterMode emitterMode,
@@ -143,17 +155,17 @@ BytecodeEmitter::BytecodeEmitter(Bytecod
   this->parser = ep_.ptr();
 }
 
 void BytecodeEmitter::initFromBodyPosition(TokenPos bodyPosition) {
   setScriptStartOffsetIfUnset(bodyPosition.begin);
   setFunctionBodyEndPos(bodyPosition.end);
 }
 
-bool BytecodeEmitter::init() { return atomIndices.acquire(cx); }
+bool BytecodeEmitter::init() { return perScriptData_.init(cx); }
 
 template <typename T>
 T* BytecodeEmitter::findInnermostNestableControl() const {
   return NestableControl::findNearest<T>(innermostNestableControl);
 }
 
 template <typename T, typename Predicate /* (T*) -> bool */>
 T* BytecodeEmitter::findInnermostNestableControl(Predicate predicate) const {
@@ -189,79 +201,74 @@ bool BytecodeEmitter::markStepBreakpoint
 
   if (!newSrcNote(SRC_BREAKPOINT)) {
     return false;
   }
 
   // We track the location of the most recent separator for use in
   // markSimpleBreakpoint. Note that this means that the position must already
   // be set before markStepBreakpoint is called.
-  lastSeparatorOffet_ = code().length();
-  lastSeparatorLine_ = currentLine_;
-  lastSeparatorColumn_ = lastColumn_;
+  bytecodeSection().updateSeparatorPosition();
 
   return true;
 }
 
 bool BytecodeEmitter::markSimpleBreakpoint() {
   if (inPrologue()) {
     return true;
   }
 
   // If a breakable call ends up being the same location as the most recent
   // expression start, we need to skip marking it breakable in order to avoid
   // having two breakpoints with the same line/column position.
   // Note: This assumes that the position for the call has already been set.
-  bool isDuplicateLocation =
-      lastSeparatorLine_ == currentLine_ && lastSeparatorColumn_ == lastColumn_;
-
-  if (!isDuplicateLocation) {
+  if (!bytecodeSection().isDuplicateLocation()) {
     if (!newSrcNote(SRC_BREAKPOINT)) {
       return false;
     }
   }
 
   return true;
 }
 
 bool BytecodeEmitter::emitCheck(JSOp op, ptrdiff_t delta, ptrdiff_t* offset) {
-  *offset = code().length();
-
-  if (!code().growByUninitialized(delta)) {
+  *offset = bytecodeSection().code().length();
+
+  if (!bytecodeSection().code().growByUninitialized(delta)) {
     ReportOutOfMemory(cx);
     return false;
   }
 
   // If op is JOF_TYPESET (see the type barriers comment in TypeInference.h),
   // reserve a type set to store its result.
   if (CodeSpec[op].format & JOF_TYPESET) {
-    if (typesetCount < JSScript::MaxBytecodeTypeSets) {
-      typesetCount++;
+    if (bytecodeSection().typesetCount() < JSScript::MaxBytecodeTypeSets) {
+      bytecodeSection().addTypesetCount();
     }
   }
 
   if (BytecodeOpHasIC(op)) {
-    numICEntries++;
-  }
-
-  return true;
-}
-
-void BytecodeEmitter::updateDepth(ptrdiff_t target) {
+    bytecodeSection().addNumICEntries();
+  }
+
+  return true;
+}
+
+void BytecodeEmitter::BytecodeSection::updateDepth(ptrdiff_t target) {
   jsbytecode* pc = code(target);
 
   int nuses = StackUses(pc);
   int ndefs = StackDefs(pc);
 
-  stackDepth -= nuses;
-  MOZ_ASSERT(stackDepth >= 0);
-  stackDepth += ndefs;
-
-  if ((uint32_t)stackDepth > maxStackDepth) {
-    maxStackDepth = stackDepth;
+  stackDepth_ -= nuses;
+  MOZ_ASSERT(stackDepth_ >= 0);
+  stackDepth_ += ndefs;
+
+  if ((uint32_t)stackDepth_ > maxStackDepth_) {
+    maxStackDepth_ = stackDepth_;
   }
 }
 
 #ifdef DEBUG
 bool BytecodeEmitter::checkStrictOrSloppy(JSOp op) {
   if (IsCheckStrictOp(op) && !sc->strict()) {
     return false;
   }
@@ -275,128 +282,129 @@ bool BytecodeEmitter::checkStrictOrSlopp
 bool BytecodeEmitter::emit1(JSOp op) {
   MOZ_ASSERT(checkStrictOrSloppy(op));
 
   ptrdiff_t offset;
   if (!emitCheck(op, 1, &offset)) {
     return false;
   }
 
-  jsbytecode* code = this->code(offset);
+  jsbytecode* code = bytecodeSection().code(offset);
   code[0] = jsbytecode(op);
-  updateDepth(offset);
+  bytecodeSection().updateDepth(offset);
   return true;
 }
 
 bool BytecodeEmitter::emit2(JSOp op, uint8_t op1) {
   MOZ_ASSERT(checkStrictOrSloppy(op));
 
   ptrdiff_t offset;
   if (!emitCheck(op, 2, &offset)) {
     return false;
   }
 
-  jsbytecode* code = this->code(offset);
+  jsbytecode* code = bytecodeSection().code(offset);
   code[0] = jsbytecode(op);
   code[1] = jsbytecode(op1);
-  updateDepth(offset);
+  bytecodeSection().updateDepth(offset);
   return true;
 }
 
 bool BytecodeEmitter::emit3(JSOp op, jsbytecode op1, jsbytecode op2) {
   MOZ_ASSERT(checkStrictOrSloppy(op));
 
   /* These should filter through emitVarOp. */
   MOZ_ASSERT(!IsArgOp(op));
   MOZ_ASSERT(!IsLocalOp(op));
 
   ptrdiff_t offset;
   if (!emitCheck(op, 3, &offset)) {
     return false;
   }
 
-  jsbytecode* code = this->code(offset);
+  jsbytecode* code = bytecodeSection().code(offset);
   code[0] = jsbytecode(op);
   code[1] = op1;
   code[2] = op2;
-  updateDepth(offset);
+  bytecodeSection().updateDepth(offset);
   return true;
 }
 
 bool BytecodeEmitter::emitN(JSOp op, size_t extra, ptrdiff_t* offset) {
   MOZ_ASSERT(checkStrictOrSloppy(op));
   ptrdiff_t length = 1 + ptrdiff_t(extra);
 
   ptrdiff_t off;
   if (!emitCheck(op, length, &off)) {
     return false;
   }
 
-  jsbytecode* code = this->code(off);
+  jsbytecode* code = bytecodeSection().code(off);
   code[0] = jsbytecode(op);
   /* The remaining |extra| bytes are set by the caller */
 
   /*
    * Don't updateDepth if op's use-count comes from the immediate
    * operand yet to be stored in the extra bytes after op.
    */
   if (CodeSpec[op].nuses >= 0) {
-    updateDepth(off);
+    bytecodeSection().updateDepth(off);
   }
 
   if (offset) {
     *offset = off;
   }
   return true;
 }
 
 bool BytecodeEmitter::emitJumpTargetOp(JSOp op, ptrdiff_t* off) {
   MOZ_ASSERT(BytecodeIsJumpTarget(op));
 
-  size_t numEntries = numICEntries;
+  size_t numEntries = bytecodeSection().numICEntries();
   if (MOZ_UNLIKELY(numEntries > UINT32_MAX)) {
     reportError(nullptr, JSMSG_NEED_DIET, js_script_str);
     return false;
   }
 
   if (!emitN(op, CodeSpec[op].length - 1, off)) {
     return false;
   }
 
-  SET_ICINDEX(code(*off), numEntries);
+  SET_ICINDEX(bytecodeSection().code(*off), numEntries);
   return true;
 }
 
 bool BytecodeEmitter::emitJumpTarget(JumpTarget* target) {
-  ptrdiff_t off = offset();
+  ptrdiff_t off = bytecodeSection().offset();
 
   // Alias consecutive jump targets.
-  if (off == lastTarget.offset + ptrdiff_t(JSOP_JUMPTARGET_LENGTH)) {
-    target->offset = lastTarget.offset;
+  if (off == bytecodeSection().lastTargetOffset() +
+                 ptrdiff_t(JSOP_JUMPTARGET_LENGTH)) {
+    target->offset = bytecodeSection().lastTargetOffset();
     return true;
   }
 
   target->offset = off;
-  lastTarget.offset = off;
+  bytecodeSection().setLastTargetOffset(off);
 
   ptrdiff_t opOff;
   return emitJumpTargetOp(JSOP_JUMPTARGET, &opOff);
 }
 
 bool BytecodeEmitter::emitJumpNoFallthrough(JSOp op, JumpList* jump) {
   ptrdiff_t offset;
   if (!emitCheck(op, 5, &offset)) {
     return false;
   }
 
-  jsbytecode* code = this->code(offset);
+  jsbytecode* code = bytecodeSection().code(offset);
   code[0] = jsbytecode(op);
   MOZ_ASSERT(-1 <= jump->offset && jump->offset < offset);
-  jump->push(this->code(0), offset);
-  updateDepth(offset);
+  jump->push(bytecodeSection().code(0), offset);
+  bytecodeSection().updateDepth(offset);
   return true;
 }
 
 bool BytecodeEmitter::emitJump(JSOp op, JumpList* jump) {
   if (!emitJumpNoFallthrough(op, jump)) {
     return false;
   }
   if (BytecodeFallsThrough(op)) {
@@ -420,21 +428,22 @@ bool BytecodeEmitter::emitBackwardJump(J
   // target for break statements.
   if (!emitJumpTarget(fallthrough)) {
     return false;
   }
   return true;
 }
 
 void BytecodeEmitter::patchJumpsToTarget(JumpList jump, JumpTarget target) {
-  MOZ_ASSERT(-1 <= jump.offset && jump.offset <= offset());
-  MOZ_ASSERT(0 <= target.offset && target.offset <= offset());
-  MOZ_ASSERT_IF(jump.offset != -1 && target.offset + 4 <= offset(),
-                BytecodeIsJumpTarget(JSOp(*code(target.offset))));
-  jump.patchAll(code(0), target);
+  MOZ_ASSERT(-1 <= jump.offset && jump.offset <= bytecodeSection().offset());
+  MOZ_ASSERT(0 <= target.offset && target.offset <= bytecodeSection().offset());
+  MOZ_ASSERT_IF(
+      jump.offset != -1 && target.offset + 4 <= bytecodeSection().offset(),
+      BytecodeIsJumpTarget(JSOp(*bytecodeSection().code(target.offset))));
+  jump.patchAll(bytecodeSection().code(0), target);
 }
 
 bool BytecodeEmitter::emitJumpTargetAndPatch(JumpList jump) {
   if (jump.offset == -1) {
     return true;
   }
   JumpTarget target;
   if (!emitJumpTarget(&target)) {
@@ -454,33 +463,33 @@ bool BytecodeEmitter::emitCall(JSOp op, 
   return emit3(op, ARGC_LO(argc), ARGC_HI(argc));
 }
 
 bool BytecodeEmitter::emitCall(JSOp op, uint16_t argc, ParseNode* pn) {
   return emitCall(op, argc, pn ? Some(pn->pn_pos.begin) : Nothing());
 }
 
 bool BytecodeEmitter::emitDupAt(unsigned slotFromTop) {
-  MOZ_ASSERT(slotFromTop < unsigned(stackDepth));
+  MOZ_ASSERT(slotFromTop < unsigned(bytecodeSection().stackDepth()));
 
   if (slotFromTop == 0) {
     return emit1(JSOP_DUP);
   }
 
   if (slotFromTop >= JS_BIT(24)) {
     reportError(nullptr, JSMSG_TOO_MANY_LOCALS);
     return false;
   }
 
   ptrdiff_t off;
   if (!emitN(JSOP_DUPAT, 3, &off)) {
     return false;
   }
 
-  jsbytecode* pc = code(off);
+  jsbytecode* pc = bytecodeSection().code(off);
   SET_UINT24(pc, slotFromTop);
   return true;
 }
 
 bool BytecodeEmitter::emitPopN(unsigned n) {
   MOZ_ASSERT(n != 0);
 
   if (n == 1) {
@@ -511,91 +520,84 @@ static inline unsigned LengthOfSetLine(u
 bool BytecodeEmitter::updateLineNumberNotes(uint32_t offset) {
   // Don't emit line/column number notes in the prologue.
   if (inPrologue()) {
     return true;
   }
 
   ErrorReporter* er = &parser->errorReporter();
   bool onThisLine;
-  if (!er->isOnThisLine(offset, currentLine(), &onThisLine)) {
+  if (!er->isOnThisLine(offset, bytecodeSection().currentLine(), &onThisLine)) {
     er->errorNoOffset(JSMSG_OUT_OF_MEMORY);
     return false;
   }
 
   if (!onThisLine) {
     unsigned line = er->lineAt(offset);
-    unsigned delta = line - currentLine();
+    unsigned delta = line - bytecodeSection().currentLine();
 
     /*
      * Encode any change in the current source line number by using
      * either several SRC_NEWLINE notes or just one SRC_SETLINE note,
      * whichever consumes less space.
      *
      * NB: We handle backward line number deltas (possible with for
      * loops where the update part is emitted after the body, but its
      * line number is <= any line number in the body) here by letting
      * unsigned delta_ wrap to a very large number, which triggers a
      * SRC_SETLINE.
      */
-    setCurrentLine(line);
+    bytecodeSection().setCurrentLine(line);
     if (delta >= LengthOfSetLine(line)) {
       if (!newSrcNote2(SRC_SETLINE, ptrdiff_t(line))) {
         return false;
       }
     } else {
       do {
         if (!newSrcNote(SRC_NEWLINE)) {
           return false;
         }
       } while (--delta != 0);
     }
 
-    updateSeparatorPosition();
+    bytecodeSection().updateSeparatorPositionIfPresent();
   }
   return true;
 }
 
 /* Updates the line number and column number information in the source notes. */
 bool BytecodeEmitter::updateSourceCoordNotes(uint32_t offset) {
   if (!updateLineNumberNotes(offset)) {
     return false;
   }
 
   // Don't emit line/column number notes in the prologue.
   if (inPrologue()) {
     return true;
   }
 
   uint32_t columnIndex = parser->errorReporter().columnAt(offset);
-  ptrdiff_t colspan = ptrdiff_t(columnIndex) - ptrdiff_t(lastColumn_);
+  ptrdiff_t colspan =
+      ptrdiff_t(columnIndex) - ptrdiff_t(bytecodeSection().lastColumn());
   if (colspan != 0) {
     // If the column span is so large that we can't store it, then just
     // discard this information. This can happen with minimized or otherwise
     // machine-generated code. Even gigantic column numbers are still
     // valuable if you have a source map to relate them to something real;
     // but it's better to fail soft here.
     if (!SN_REPRESENTABLE_COLSPAN(colspan)) {
       return true;
     }
     if (!newSrcNote2(SRC_COLSPAN, SN_COLSPAN_TO_OFFSET(colspan))) {
       return false;
     }
-    lastColumn_ = columnIndex;
-    updateSeparatorPosition();
-  }
-  return true;
-}
-
-/* Updates the last separator position, if present */
-void BytecodeEmitter::updateSeparatorPosition() {
-  if (!inPrologue() && lastSeparatorOffet_ == code().length()) {
-    lastSeparatorLine_ = currentLine_;
-    lastSeparatorColumn_ = lastColumn_;
-  }
+    bytecodeSection().setLastColumn(columnIndex);
+    bytecodeSection().updateSeparatorPositionIfPresent();
+  }
+  return true;
 }
 
 Maybe<uint32_t> BytecodeEmitter::getOffsetForLoop(ParseNode* nextpn) {
   if (!nextpn) {
     return Nothing();
   }
 
   // Try to give the JSOP_LOOPHEAD and JSOP_LOOPENTRY the same line number as
@@ -621,17 +623,17 @@ bool BytecodeEmitter::emitUint16Operand(
   return true;
 }
 
 bool BytecodeEmitter::emitUint32Operand(JSOp op, uint32_t operand) {
   ptrdiff_t off;
   if (!emitN(op, 4, &off)) {
     return false;
   }
-  SET_UINT32(code(off), operand);
+  SET_UINT32(bytecodeSection().code(off), operand);
   return true;
 }
 
 namespace {
 
 class NonLocalExitControl {
  public:
   enum Kind {
@@ -658,27 +660,28 @@ class NonLocalExitControl {
 
   NonLocalExitControl(const NonLocalExitControl&) = delete;
 
   MOZ_MUST_USE bool leaveScope(EmitterScope* scope);
 
  public:
   NonLocalExitControl(BytecodeEmitter* bce, Kind kind)
       : bce_(bce),
-        savedScopeNoteIndex_(bce->scopeNoteList.length()),
-        savedDepth_(bce->stackDepth),
+        savedScopeNoteIndex_(bce->bytecodeSection().scopeNoteList().length()),
+        savedDepth_(bce->bytecodeSection().stackDepth()),
         openScopeNoteIndex_(bce->innermostEmitterScope()->noteIndex()),
         kind_(kind) {}
 
   ~NonLocalExitControl() {
-    for (uint32_t n = savedScopeNoteIndex_; n < bce_->scopeNoteList.length();
-         n++) {
-      bce_->scopeNoteList.recordEnd(n, bce_->offset());
-    }
-    bce_->stackDepth = savedDepth_;
+    for (uint32_t n = savedScopeNoteIndex_;
+         n < bce_->bytecodeSection().scopeNoteList().length(); n++) {
+      bce_->bytecodeSection().scopeNoteList().recordEnd(
+          n, bce_->bytecodeSection().offset());
+    }
+    bce_->bytecodeSection().setStackDepth(savedDepth_);
   }
 
   MOZ_MUST_USE bool prepareForNonLocalJump(NestableControl* target);
 
   MOZ_MUST_USE bool prepareForNonLocalJumpToOutermost() {
     return prepareForNonLocalJump(nullptr);
   }
 };
@@ -690,21 +693,22 @@ bool NonLocalExitControl::leaveScope(Emi
 
   // As we pop each scope due to the non-local jump, emit notes that
   // record the extent of the enclosing scope. These notes will have
   // their ends recorded in ~NonLocalExitControl().
   uint32_t enclosingScopeIndex = ScopeNote::NoScopeIndex;
   if (es->enclosingInFrame()) {
     enclosingScopeIndex = es->enclosingInFrame()->index();
   }
-  if (!bce_->scopeNoteList.append(enclosingScopeIndex, bce_->offset(),
-                                  openScopeNoteIndex_)) {
-    return false;
-  }
-  openScopeNoteIndex_ = bce_->scopeNoteList.length() - 1;
+  if (!bce_->bytecodeSection().scopeNoteList().append(
+          enclosingScopeIndex, bce_->bytecodeSection().offset(),
+          openScopeNoteIndex_)) {
+    return false;
+  }
+  openScopeNoteIndex_ = bce_->bytecodeSection().scopeNoteList().length() - 1;
 
   return true;
 }
 
 /*
  * Emit additional bytecode(s) for non-local jumps.
  */
 bool NonLocalExitControl::prepareForNonLocalJump(NestableControl* target) {
@@ -833,17 +837,17 @@ bool NonLocalExitControl::prepareForNonL
       target ? target->emitterScope() : bce_->varEmitterScope;
   for (; es != targetEmitterScope; es = es->enclosingInFrame()) {
     if (!leaveScope(es)) {
       return false;
     }
   }
 
   // Close FOR_OF_ITERCLOSE trynotes.
-  ptrdiff_t end = bce_->offset();
+  ptrdiff_t end = bce_->bytecodeSection().offset();
   for (ptrdiff_t start : forOfIterCloseScopeStarts) {
     if (!bce_->addTryNote(JSTRY_FOR_OF_ITERCLOSE, 0, start, end)) {
       return false;
     }
   }
 
   return true;
 }
@@ -879,38 +883,38 @@ bool BytecodeEmitter::emitIndex32(JSOp o
   const size_t len = 1 + UINT32_INDEX_LEN;
   MOZ_ASSERT(len == size_t(CodeSpec[op].length));
 
   ptrdiff_t offset;
   if (!emitCheck(op, len, &offset)) {
     return false;
   }
 
-  jsbytecode* code = this->code(offset);
+  jsbytecode* code = bytecodeSection().code(offset);
   code[0] = jsbytecode(op);
   SET_UINT32_INDEX(code, index);
-  updateDepth(offset);
+  bytecodeSection().updateDepth(offset);
   return true;
 }
 
 bool BytecodeEmitter::emitIndexOp(JSOp op, uint32_t index) {
   MOZ_ASSERT(checkStrictOrSloppy(op));
 
   const size_t len = CodeSpec[op].length;
   MOZ_ASSERT(len >= 1 + UINT32_INDEX_LEN);
 
   ptrdiff_t offset;
   if (!emitCheck(op, len, &offset)) {
     return false;
   }
 
-  jsbytecode* code = this->code(offset);
+  jsbytecode* code = bytecodeSection().code(offset);
   code[0] = jsbytecode(op);
   SET_UINT32_INDEX(code, index);
-  updateDepth(offset);
+  bytecodeSection().updateDepth(offset);
   return true;
 }
 
 bool BytecodeEmitter::emitAtomOp(JSAtom* atom, JSOp op) {
   MOZ_ASSERT(atom);
 
   // .generator lookups should be emitted as JSOP_GETALIASEDVAR instead of
   // JSOP_GETNAME etc, to bypass |with| objects on the scope chain.
@@ -935,77 +939,77 @@ bool BytecodeEmitter::emitAtomOp(JSAtom*
 bool BytecodeEmitter::emitAtomOp(uint32_t atomIndex, JSOp op) {
   MOZ_ASSERT(JOF_OPTYPE(op) == JOF_ATOM);
 
   return emitIndexOp(op, atomIndex);
 }
 
 bool BytecodeEmitter::emitInternedScopeOp(uint32_t index, JSOp op) {
   MOZ_ASSERT(JOF_OPTYPE(op) == JOF_SCOPE);
-  MOZ_ASSERT(index < scopeList.length());
+  MOZ_ASSERT(index < perScriptData().scopeList().length());
   return emitIndex32(op, index);
 }
 
 bool BytecodeEmitter::emitInternedObjectOp(uint32_t index, JSOp op) {
   MOZ_ASSERT(JOF_OPTYPE(op) == JOF_OBJECT);
-  MOZ_ASSERT(index < objectList.length);
+  MOZ_ASSERT(index < perScriptData().objectList().length);
   return emitIndex32(op, index);
 }
 
 bool BytecodeEmitter::emitObjectOp(ObjectBox* objbox, JSOp op) {
-  return emitInternedObjectOp(objectList.add(objbox), op);
+  return emitInternedObjectOp(perScriptData().objectList().add(objbox), op);
 }
 
 bool BytecodeEmitter::emitObjectPairOp(ObjectBox* objbox1, ObjectBox* objbox2,
                                        JSOp op) {
-  uint32_t index = objectList.add(objbox1);
-  objectList.add(objbox2);
+  uint32_t index = perScriptData().objectList().add(objbox1);
+  perScriptData().objectList().add(objbox2);
   return emitInternedObjectOp(index, op);
 }
 
 bool BytecodeEmitter::emitRegExp(uint32_t index) {
   return emitIndex32(JSOP_REGEXP, index);
 }
 
 bool BytecodeEmitter::emitLocalOp(JSOp op, uint32_t slot) {
   MOZ_ASSERT(JOF_OPTYPE(op) != JOF_ENVCOORD);
   MOZ_ASSERT(IsLocalOp(op));
 
   ptrdiff_t off;
   if (!emitN(op, LOCALNO_LEN, &off)) {
     return false;
   }
 
-  SET_LOCALNO(code(off), slot);
+  SET_LOCALNO(bytecodeSection().code(off), slot);
   return true;
 }
 
 bool BytecodeEmitter::emitArgOp(JSOp op, uint16_t slot) {
   MOZ_ASSERT(IsArgOp(op));
   ptrdiff_t off;
   if (!emitN(op, ARGNO_LEN, &off)) {
     return false;
   }
 
-  SET_ARGNO(code(off), slot);
+  SET_ARGNO(bytecodeSection().code(off), slot);
   return true;
 }
 
 bool BytecodeEmitter::emitEnvCoordOp(JSOp op, EnvironmentCoordinate ec) {
   MOZ_ASSERT(JOF_OPTYPE(op) == JOF_ENVCOORD);
 
   unsigned n = ENVCOORD_HOPS_LEN + ENVCOORD_SLOT_LEN;
   MOZ_ASSERT(int(n) + 1 /* op */ == CodeSpec[op].length);
 
   ptrdiff_t off;
   if (!emitN(op, n, &off)) {
     return false;
   }
 
-  jsbytecode* pc = code(off);
+  jsbytecode* pc = bytecodeSection().code(off);
   SET_ENVCOORD_HOPS(pc, ec.hops());
   pc += ENVCOORD_HOPS_LEN;
   SET_ENVCOORD_SLOT(pc, ec.slot());
   pc += ENVCOORD_SLOT_LEN;
   return true;
 }
 
 JSOp BytecodeEmitter::strictifySetNameOp(JSOp op) {
@@ -1675,23 +1679,23 @@ bool BytecodeEmitter::reportExtraWarning
 
 bool BytecodeEmitter::emitNewInit() {
   const size_t len = 1 + UINT32_INDEX_LEN;
   ptrdiff_t offset;
   if (!emitCheck(JSOP_NEWINIT, len, &offset)) {
     return false;
   }
 
-  jsbytecode* code = this->code(offset);
+  jsbytecode* code = bytecodeSection().code(offset);
   code[0] = JSOP_NEWINIT;
   code[1] = 0;
   code[2] = 0;
   code[3] = 0;
   code[4] = 0;
-  updateDepth(offset);
+  bytecodeSection().updateDepth(offset);
   return true;
 }
 
 bool BytecodeEmitter::iteratorResultShape(unsigned* shape) {
   // No need to do any guessing for the object kind, since we know exactly how
   // many properties we plan to have.
   gc::AllocKind kind = gc::GetGCObjectKind(2);
   RootedPlainObject obj(
@@ -1711,17 +1715,17 @@ bool BytecodeEmitter::iteratorResultShap
     return false;
   }
 
   ObjectBox* objbox = parser->newObjectBox(obj);
   if (!objbox) {
     return false;
   }
 
-  *shape = objectList.add(objbox);
+  *shape = perScriptData().objectList().add(objbox);
 
   return true;
 }
 
 bool BytecodeEmitter::emitPrepareIteratorResult() {
   unsigned shape;
   if (!iteratorResultShape(&shape)) {
     return false;
@@ -2012,20 +2016,20 @@ bool BytecodeEmitter::emitCallIncDec(Una
 }
 
 bool BytecodeEmitter::emitDouble(double d) {
   ptrdiff_t offset;
   if (!emitCheck(JSOP_DOUBLE, 9, &offset)) {
     return false;
   }
 
-  jsbytecode* code = this->code(offset);
+  jsbytecode* code = bytecodeSection().code(offset);
   code[0] = jsbytecode(JSOP_DOUBLE);
   SET_INLINE_VALUE(code, DoubleValue(d));
-  updateDepth(offset);
+  bytecodeSection().updateDepth(offset);
   return true;
 }
 
 bool BytecodeEmitter::emitNumberOp(double dval) {
   int32_t ival;
   if (NumberIsInt32(dval, &ival)) {
     if (ival == 0) {
       return emit1(JSOP_ZERO);
@@ -2042,23 +2046,23 @@ bool BytecodeEmitter::emitNumberOp(doubl
       if (!emitUint16Operand(JSOP_UINT16, u)) {
         return false;
       }
     } else if (u < JS_BIT(24)) {
       ptrdiff_t off;
       if (!emitN(JSOP_UINT24, 3, &off)) {
         return false;
       }
-      SET_UINT24(code(off), u);
+      SET_UINT24(bytecodeSection().code(off), u);
     } else {
       ptrdiff_t off;
       if (!emitN(JSOP_INT32, 4, &off)) {
         return false;
       }
-      SET_INT32(code(off), ival);
+      SET_INT32(bytecodeSection().code(off), ival);
     }
     return true;
   }
 
   return emitDouble(dval);
 }
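
emitNumberOp above picks progressively wider operand encodings for integral values and falls back to emitDouble otherwise. A self-contained classifier of that ladder is sketched below; it only covers the cases visible in this hunk (the real method also has shorter forms that the diff context elides), and fitsInt32 is merely a stand-in for NumberIsInt32.

#include <cmath>
#include <cstdint>
#include <cstdio>

enum class NumberEncoding { Zero, Uint16, Uint24, Int32, Double };

// True when d is exactly representable as an int32; negative zero is treated
// as a double-only value.
static bool fitsInt32(double d, int32_t* out) {
  if (!(d >= -2147483648.0 && d <= 2147483647.0)) return false;  // also NaN
  int32_t i = int32_t(d);
  if (double(i) != d) return false;
  if (i == 0 && std::signbit(d)) return false;
  *out = i;
  return true;
}

static NumberEncoding classify(double d) {
  int32_t ival;
  if (!fitsInt32(d, &ival)) return NumberEncoding::Double;
  if (ival == 0) return NumberEncoding::Zero;
  uint32_t u = uint32_t(ival);
  if (u < (1u << 16)) return NumberEncoding::Uint16;
  if (u < (1u << 24)) return NumberEncoding::Uint24;
  return NumberEncoding::Int32;
}

int main() {
  std::printf("%d %d %d\n", int(classify(0.0)), int(classify(70000.0)),
              int(classify(1.5)));
  return 0;
}
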
 
 /*
@@ -2249,23 +2253,23 @@ bool BytecodeEmitter::allocateResumeInde
       MaxResumeIndex < uint32_t(AbstractGeneratorObject::RESUME_INDEX_CLOSING),
       "resumeIndex should not include magic AbstractGeneratorObject "
       "resumeIndex values");
   static_assert(
       MaxResumeIndex <= INT32_MAX / sizeof(uintptr_t),
       "resumeIndex * sizeof(uintptr_t) must fit in an int32. JIT code relies "
       "on this when loading resume entries from BaselineScript");
 
-  *resumeIndex = resumeOffsetList.length();
+  *resumeIndex = bytecodeSection().resumeOffsetList().length();
   if (*resumeIndex > MaxResumeIndex) {
     reportError(nullptr, JSMSG_TOO_MANY_RESUME_INDEXES);
     return false;
   }
 
-  return resumeOffsetList.append(offset);
+  return bytecodeSection().resumeOffsetList().append(offset);
 }
 
 bool BytecodeEmitter::allocateResumeIndexRange(mozilla::Span<ptrdiff_t> offsets,
                                                uint32_t* firstResumeIndex) {
   *firstResumeIndex = 0;
 
   for (size_t i = 0, len = offsets.size(); i < len; i++) {
     uint32_t resumeIndex;
@@ -2288,25 +2292,25 @@ bool BytecodeEmitter::emitYieldOp(JSOp o
   MOZ_ASSERT(op == JSOP_INITIALYIELD || op == JSOP_YIELD || op == JSOP_AWAIT);
 
   ptrdiff_t off;
   if (!emitN(op, 3, &off)) {
     return false;
   }
 
   if (op == JSOP_INITIALYIELD || op == JSOP_YIELD) {
-    numYields++;
+    bytecodeSection().addNumYields();
   }
 
   uint32_t resumeIndex;
-  if (!allocateResumeIndex(offset(), &resumeIndex)) {
-    return false;
-  }
-
-  SET_RESUMEINDEX(code(off), resumeIndex);
+  if (!allocateResumeIndex(bytecodeSection().offset(), &resumeIndex)) {
+    return false;
+  }
+
+  SET_RESUMEINDEX(bytecodeSection().code(off), resumeIndex);
 
   return emit1(JSOP_DEBUGAFTERYIELD);
 }
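
emitYieldOp above, like the JSOP_GOSUB emission later in this file, allocates a resume index by appending the current bytecodeSection().offset() to resumeOffsetList() and using the previous length as the index. A minimal sketch of that allocation follows; kMaxResumeIndex is a hypothetical cap standing in for the real MaxResumeIndex constant.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical cap; the real MaxResumeIndex comes from the bytecode format.
constexpr uint32_t kMaxResumeIndex = 0xffffff;

// Appends |offset| and reports its index, failing once the cap is exceeded.
static bool allocateResumeIndex(std::vector<ptrdiff_t>& resumeOffsets,
                                ptrdiff_t offset, uint32_t* resumeIndex) {
  *resumeIndex = uint32_t(resumeOffsets.size());
  if (*resumeIndex > kMaxResumeIndex) {
    return false;  // too many resume points in one script
  }
  resumeOffsets.push_back(offset);
  return true;
}

int main() {
  std::vector<ptrdiff_t> resumeOffsets;
  uint32_t index;
  if (allocateResumeIndex(resumeOffsets, /*offset=*/128, &index)) {
    std::printf("resume index %u -> offset %td\n", index, resumeOffsets[index]);
  }
  return 0;
}
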
 
 bool BytecodeEmitter::emitSetThis(BinaryNode* setThisNode) {
   // ParseNodeKind::SetThis is used to update |this| after a super() call
   // in a derived class constructor.
 
@@ -2564,17 +2568,17 @@ bool BytecodeEmitter::emitDestructuringL
   // setting or initializing value.  Getting reference doesn't recur.
   if (target->isKind(ParseNodeKind::Name) ||
       target->isKind(ParseNodeKind::ArrayExpr) ||
       target->isKind(ParseNodeKind::ObjectExpr)) {
     return true;
   }
 
 #ifdef DEBUG
-  int depth = stackDepth;
+  int depth = bytecodeSection().stackDepth();
 #endif
 
   switch (target->getKind()) {
     case ParseNodeKind::DotExpr: {
       PropertyAccess* prop = &target->as<PropertyAccess>();
       bool isSuper = prop->isSuper();
       PropOpEmitter poe(this, PropOpEmitter::Kind::SimpleAssignment,
                         isSuper ? PropOpEmitter::ObjKind::Super
@@ -2642,17 +2646,17 @@ bool BytecodeEmitter::emitDestructuringL
           "rejects function calls as assignment "
           "targets in destructuring assignments");
       break;
 
     default:
       MOZ_CRASH("emitDestructuringLHSRef: bad lhs kind");
   }
 
-  MOZ_ASSERT(stackDepth == depth + int(*emitted));
+  MOZ_ASSERT(bytecodeSection().stackDepth() == depth + int(*emitted));
 
   return true;
 }
 
 bool BytecodeEmitter::emitSetOrInitializeDestructuring(
     ParseNode* target, DestructuringFlavor flav) {
   // Now emit the lvalue opcode sequence. If the lvalue is a nested
   // destructuring initialiser-form, call ourselves to handle it, then pop
@@ -2805,17 +2809,17 @@ bool BytecodeEmitter::emitIteratorNext(
     const Maybe<uint32_t>& callSourceCoordOffset,
     IteratorKind iterKind /* = IteratorKind::Sync */,
     bool allowSelfHosted /* = false */) {
   MOZ_ASSERT(allowSelfHosted || emitterMode != BytecodeEmitter::SelfHosting,
              ".next() iteration is prohibited in self-hosted code because it "
              "can run user-modifiable iteration code");
 
   //                [stack] ... NEXT ITER
-  MOZ_ASSERT(this->stackDepth >= 2);
+  MOZ_ASSERT(bytecodeSection().stackDepth() >= 2);
 
   if (!emitCall(JSOP_CALL, 0, callSourceCoordOffset)) {
     //              [stack] ... RESULT
     return false;
   }
 
   if (iterKind == IteratorKind::Async) {
     if (!emitAwaitInInnermostScope()) {
@@ -2828,17 +2832,17 @@ bool BytecodeEmitter::emitIteratorNext(
     //              [stack] ... RESULT
     return false;
   }
   return true;
 }
 
 bool BytecodeEmitter::emitPushNotUndefinedOrNull() {
   //                [stack] V
-  MOZ_ASSERT(this->stackDepth > 0);
+  MOZ_ASSERT(bytecodeSection().stackDepth() > 0);
 
   if (!emit1(JSOP_DUP)) {
     //              [stack] V V
     return false;
   }
   if (!emit1(JSOP_UNDEFINED)) {
     //              [stack] V V UNDEFINED
     return false;
@@ -3076,32 +3080,32 @@ bool BytecodeEmitter::emitIteratorCloseI
 
   return emit1(JSOP_POP);
   //                [stack] ...
 }
 
 template <typename InnerEmitter>
 bool BytecodeEmitter::wrapWithDestructuringTryNote(int32_t iterDepth,
                                                    InnerEmitter emitter) {
-  MOZ_ASSERT(this->stackDepth >= iterDepth);
+  MOZ_ASSERT(bytecodeSection().stackDepth() >= iterDepth);
 
   // Pad a nop at the beginning of the bytecode covered by the trynote so
   // that when unwinding environments, we may unwind to the scope
   // corresponding to the pc *before* the start, in case the first bytecode
   // emitted by |emitter| is the start of an inner scope. See comment above
   // UnwindEnvironmentToTryPc.
   if (!emit1(JSOP_TRY_DESTRUCTURING)) {
     return false;
   }
 
-  ptrdiff_t start = offset();
+  ptrdiff_t start = bytecodeSection().offset();
   if (!emitter(this)) {
     return false;
   }
-  ptrdiff_t end = offset();
+  ptrdiff_t end = bytecodeSection().offset();
   if (start != end) {
     return addTryNote(JSTRY_DESTRUCTURING, iterDepth, start, end);
   }
   return true;
 }
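
wrapWithDestructuringTryNote above records the offset before and after the inner emitter runs and appends a try note only when the inner code emitted something. Below is a self-contained model of that record-around pattern; the Section and TryNote types are simplified stand-ins, not the real JSTryNote layout.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

struct TryNote {
  uint32_t stackDepth;
  size_t start;
  size_t end;
};

struct Section {
  std::vector<uint8_t> code;
  std::vector<TryNote> tryNotes;
  size_t offset() const { return code.size(); }
};

// Runs |inner|, then covers whatever it emitted with a try note; nothing is
// recorded when the inner emitter produced no bytecode.
static bool wrapWithTryNote(Section& section, uint32_t depth,
                            const std::function<bool(Section&)>& inner) {
  size_t start = section.offset();
  if (!inner(section)) {
    return false;
  }
  size_t end = section.offset();
  if (start != end) {
    section.tryNotes.push_back({depth, start, end});
  }
  return true;
}

int main() {
  Section section;
  bool ok = wrapWithTryNote(section, /*depth=*/2, [](Section& s) {
    s.code.push_back(0x01);  // pretend this is the destructuring body
    return true;
  });
  return (ok && section.tryNotes.size() == 1) ? 0 : 1;
}
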
 
 bool BytecodeEmitter::emitDefault(ParseNode* defaultExpr, ParseNode* pattern) {
   //                [stack] VALUE
@@ -3197,17 +3201,17 @@ bool BytecodeEmitter::emitInitializer(Pa
   }
 
   return true;
 }
 
 bool BytecodeEmitter::emitDestructuringOpsArray(ListNode* pattern,
                                                 DestructuringFlavor flav) {
   MOZ_ASSERT(pattern->isKind(ParseNodeKind::ArrayExpr));
-  MOZ_ASSERT(this->stackDepth != 0);
+  MOZ_ASSERT(bytecodeSection().stackDepth() != 0);
 
   // Here's pseudo code for |let [a, b, , c=y, ...d] = x;|
   //
   // Lines that are annotated "covered by trynote" mean that upon throwing
   // an exception, IteratorClose is called on iter only if done is false.
   //
   //   let x, y;
   //   let a, b, c, d;
@@ -3321,17 +3325,17 @@ bool BytecodeEmitter::emitDestructuringO
   if (!emit1(JSOP_FALSE)) {
     //              [stack] ... OBJ NEXT ITER FALSE
     return false;
   }
 
   // JSTRY_DESTRUCTURING expects the iterator and the done value
   // to be the second to top and the top of the stack, respectively.
   // IteratorClose is called upon exception only if done is false.
-  int32_t tryNoteDepth = stackDepth;
+  int32_t tryNoteDepth = bytecodeSection().stackDepth();
 
   for (ParseNode* member : pattern->contents()) {
     bool isFirst = member == pattern->head();
     DebugOnly<bool> hasNext = !!member->pn_next;
 
     size_t emitted = 0;
 
     // Spec requires LHS reference to be evaluated first.
@@ -3630,17 +3634,17 @@ bool BytecodeEmitter::emitComputedProper
   return emitTree(computedPropName->kid()) && emit1(JSOP_TOID);
 }
 
 bool BytecodeEmitter::emitDestructuringOpsObject(ListNode* pattern,
                                                  DestructuringFlavor flav) {
   MOZ_ASSERT(pattern->isKind(ParseNodeKind::ObjectExpr));
 
   //                [stack] ... RHS
-  MOZ_ASSERT(this->stackDepth > 0);
+  MOZ_ASSERT(bytecodeSection().stackDepth() > 0);
 
   if (!emit1(JSOP_CHECKOBJCOERCIBLE)) {
     //              [stack] ... RHS
     return false;
   }
 
   bool needsRestPropertyExcludedSet =
       pattern->count() > 1 && pattern->last()->isKind(ParseNodeKind::Spread);
@@ -3810,17 +3814,17 @@ bool BytecodeEmitter::emitDestructuringO
 
   return true;
 }
 
 bool BytecodeEmitter::emitDestructuringObjRestExclusionSet(ListNode* pattern) {
   MOZ_ASSERT(pattern->isKind(ParseNodeKind::ObjectExpr));
   MOZ_ASSERT(pattern->last()->isKind(ParseNodeKind::Spread));
 
-  ptrdiff_t offset = this->offset();
+  ptrdiff_t offset = bytecodeSection().offset();
   if (!emitNewInit()) {
     return false;
   }
 
   // Try to construct the shape of the object as we go, so we can emit a
   // JSOP_NEWOBJECT with the final shape instead.
   // In the case of computed property names and indices, we cannot fix the
   // shape at bytecode compile time. When the shape cannot be determined,
@@ -4685,21 +4689,21 @@ MOZ_MUST_USE bool BytecodeEmitter::emitG
     return false;
   }
 
   if (!emitJump(JSOP_GOSUB, jump)) {
     return false;
   }
 
   uint32_t resumeIndex;
-  if (!allocateResumeIndex(offset(), &resumeIndex)) {
-    return false;
-  }
-
-  SET_RESUMEINDEX(code(off), resumeIndex);
+  if (!allocateResumeIndex(bytecodeSection().offset(), &resumeIndex)) {
+    return false;
+  }
+
+  SET_RESUMEINDEX(bytecodeSection().code(off), resumeIndex);
   return true;
 }
 
 bool BytecodeEmitter::emitIf(TernaryNode* ifNode) {
   IfEmitter ifThenElse(this);
 
   if (!ifThenElse.emitIf(Some(ifNode->kid1()->pn_pos.begin))) {
     return false;
@@ -4894,17 +4898,17 @@ bool BytecodeEmitter::emitWith(BinaryNod
   if (!emitTree(withNode->right())) {
     return false;
   }
 
   return emitterScope.leave(this);
 }
 
 bool BytecodeEmitter::emitCopyDataProperties(CopyOption option) {
-  DebugOnly<int32_t> depth = this->stackDepth;
+  DebugOnly<int32_t> depth = bytecodeSection().stackDepth();
 
   uint32_t argc;
   if (option == CopyOption::Filtered) {
     MOZ_ASSERT(depth > 2);
     //              [stack] TARGET SOURCE SET
     argc = 3;
 
     if (!emitAtomOp(cx->names().CopyDataProperties, JSOP_GETINTRINSIC)) {
@@ -4949,25 +4953,25 @@ bool BytecodeEmitter::emitCopyDataProper
     return false;
   }
 
   if (!emit1(JSOP_POP)) {
     //              [stack]
     return false;
   }
 
-  MOZ_ASSERT(depth - int(argc) == this->stackDepth);
+  MOZ_ASSERT(depth - int(argc) == bytecodeSection().stackDepth());
   return true;
 }
 
 bool BytecodeEmitter::emitBigIntOp(BigInt* bigint) {
-  if (!numberList.append(BigIntValue(bigint))) {
-    return false;
-  }
-  return emitIndex32(JSOP_BIGINT, numberList.length() - 1);
+  if (!perScriptData().numberList().append(BigIntValue(bigint))) {
+    return false;
+  }
+  return emitIndex32(JSOP_BIGINT, perScriptData().numberList().length() - 1);
 }
 
 bool BytecodeEmitter::emitIterator() {
   // Convert iterable to iterator.
   if (!emit1(JSOP_DUP)) {
     //              [stack] OBJ OBJ
     return false;
   }
@@ -5140,34 +5144,34 @@ bool BytecodeEmitter::emitSpread(bool al
     //              [stack] NEXT ITER ARR I
     return false;
   }
 
   // When we enter the goto above, we have NEXT ITER ARR I on the stack. But
   // when we reach this point on the loop backedge (if spreading produces at
   // least one value), we've additionally pushed a RESULT iteration value.
   // Increment manually to reflect this.
-  this->stackDepth++;
+  bytecodeSection().setStackDepth(bytecodeSection().stackDepth() + 1);
 
   {
 #ifdef DEBUG
-    auto loopDepth = this->stackDepth;
+    auto loopDepth = bytecodeSection().stackDepth();
 #endif
 
     // Emit code to assign result.value to the iteration variable.
     if (!emitAtomOp(cx->names().value, JSOP_GETPROP)) {
       //            [stack] NEXT ITER ARR I VALUE
       return false;
     }
     if (!emit1(JSOP_INITELEM_INC)) {
       //            [stack] NEXT ITER ARR (I+1)
       return false;
     }
 
-    MOZ_ASSERT(this->stackDepth == loopDepth - 1);
+    MOZ_ASSERT(bytecodeSection().stackDepth() == loopDepth - 1);
 
     // Spread operations can't contain |continue|, so don't bother setting loop
     // and enclosing "update" offsets, as we do with for-loops.
 
     // COME FROM the beginning of the loop to here.
     if (!loopInfo.emitLoopEntry(this, Nothing())) {
       //            [stack] NEXT ITER ARR I
       return false;
@@ -5194,31 +5198,31 @@ bool BytecodeEmitter::emitSpread(bool al
       return false;
     }
 
     if (!loopInfo.emitLoopEnd(this, JSOP_IFEQ)) {
       //            [stack] NEXT ITER ARR I RESULT
       return false;
     }
 
-    MOZ_ASSERT(this->stackDepth == loopDepth);
+    MOZ_ASSERT(bytecodeSection().stackDepth() == loopDepth);
   }
 
   // Let Ion know where the closing jump of this loop is.
   if (!setSrcNoteOffset(noteIndex, SrcNote::ForOf::BackJumpOffset,
                         loopInfo.loopEndOffsetFromEntryJump())) {
     return false;
   }
 
   // No breaks or continues should occur in spreads.
   MOZ_ASSERT(loopInfo.breaks.offset == -1);
   MOZ_ASSERT(loopInfo.continues.offset == -1);
 
-  if (!addTryNote(JSTRY_FOR_OF, stackDepth, loopInfo.headOffset(),
-                  loopInfo.breakTargetOffset())) {
+  if (!addTryNote(JSTRY_FOR_OF, bytecodeSection().stackDepth(),
+                  loopInfo.headOffset(), loopInfo.breakTargetOffset())) {
     return false;
   }
 
   if (!emit2(JSOP_PICK, 4)) {
     //              [stack] ITER ARR FINAL_INDEX RESULT NEXT
     return false;
   }
   if (!emit2(JSOP_PICK, 4)) {
@@ -5229,17 +5233,17 @@ bool BytecodeEmitter::emitSpread(bool al
   return emitPopN(3);
   //                [stack] ARR FINAL_INDEX
 }
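
The manual depth bump above (for the extra RESULT value on the loop backedge) is the kind of bookkeeping BytecodeSection now owns through setStackDepth and maxStackDepth. The toy tracker below shows only the invariant that every explicit depth change refreshes the running maximum; it is not the real updateDepth logic, which derives depth changes from opcode definitions.

#include <algorithm>
#include <cassert>
#include <cstdint>

class DepthTracker {
 public:
  int32_t stackDepth() const { return stackDepth_; }
  uint32_t maxStackDepth() const { return maxStackDepth_; }

  // Every depth update also refreshes the running maximum, so the final frame
  // size accounts for temporary pushes like the loop-backedge RESULT value.
  void setStackDepth(int32_t depth) {
    stackDepth_ = depth;
    maxStackDepth_ = std::max(maxStackDepth_, uint32_t(std::max(depth, int32_t(0))));
  }

 private:
  int32_t stackDepth_ = 0;
  uint32_t maxStackDepth_ = 0;
};

int main() {
  DepthTracker depth;
  depth.setStackDepth(4);  // NEXT ITER ARR I
  depth.setStackDepth(5);  // extra RESULT on the backedge
  depth.setStackDepth(4);  // consumed by INITELEM_INC
  assert(depth.maxStackDepth() == 5);
  return 0;
}
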
 
 bool BytecodeEmitter::emitInitializeForInOrOfTarget(TernaryNode* forHead) {
   MOZ_ASSERT(forHead->isKind(ParseNodeKind::ForIn) ||
              forHead->isKind(ParseNodeKind::ForOf));
 
-  MOZ_ASSERT(this->stackDepth >= 1,
+  MOZ_ASSERT(bytecodeSection().stackDepth() >= 1,
              "must have a per-iteration value for initializing");
 
   ParseNode* target = forHead->kid1();
   MOZ_ASSERT(!forHead->kid2());
 
   // If the for-in/of loop didn't have a variable declaration, per-loop
   // initialization is just assigning the iteration value to a target
   // expression.
@@ -5277,24 +5281,24 @@ bool BytecodeEmitter::emitInitializeForI
     if (!noe.prepareForRhs()) {
       return false;
     }
     if (noe.emittedBindOp()) {
       // Per-iteration initialization in for-in/of loops computes the
       // iteration value *before* initializing.  Thus the initializing
       // value may be buried under a bind-specific value on the stack.
       // Swap it to the top of the stack.
-      MOZ_ASSERT(stackDepth >= 2);
+      MOZ_ASSERT(bytecodeSection().stackDepth() >= 2);
       if (!emit1(JSOP_SWAP)) {
         return false;
       }
     } else {
       // In cases of emitting a frame slot or environment slot,
       // nothing needs be done.
-      MOZ_ASSERT(stackDepth >= 1);
+      MOZ_ASSERT(bytecodeSection().stackDepth() >= 1);
     }
     if (!noe.emitAssignment()) {
       return false;
     }
 
     // The caller handles removing the iteration value from the stack.
     return true;
   }
@@ -5962,17 +5966,17 @@ bool BytecodeEmitter::emitReturn(UnaryNo
    * We can't simply transfer control flow to our caller in that case,
    * because we must gosub to those finally clauses from inner to outer,
    * with the correct stack pointer (i.e., after popping any with,
    * for/in, etc., slots nested inside the finally's try).
    *
    * In this case we mutate JSOP_RETURN into JSOP_SETRVAL and add an
    * extra JSOP_RETRVAL after the fixups.
    */
-  ptrdiff_t top = offset();
+  ptrdiff_t top = bytecodeSection().offset();
 
   bool needsFinalYield =
       sc->isFunctionBox() && sc->asFunctionBox()->needsFinalYield();
   bool isDerivedClassConstructor =
       sc->isFunctionBox() && sc->asFunctionBox()->isDerivedClassConstructor();
 
   if (!emit1((needsFinalYield || isDerivedClassConstructor) ? JSOP_SETRVAL
                                                             : JSOP_RETURN)) {
@@ -6023,22 +6027,23 @@ bool BytecodeEmitter::emitReturn(UnaryNo
 
     if (!emitGetNameAtLocation(cx->names().dotGenerator, loc)) {
       return false;
     }
     if (!emitYieldOp(JSOP_FINALYIELDRVAL)) {
       return false;
     }
   } else if (isDerivedClassConstructor) {
-    MOZ_ASSERT(code()[top] == JSOP_SETRVAL);
+    MOZ_ASSERT(bytecodeSection().code()[top] == JSOP_SETRVAL);
     if (!emit1(JSOP_RETRVAL)) {
       return false;
     }
-  } else if (top + static_cast<ptrdiff_t>(JSOP_RETURN_LENGTH) != offset()) {
-    code()[top] = JSOP_SETRVAL;
+  } else if (top + static_cast<ptrdiff_t>(JSOP_RETURN_LENGTH) !=
+             bytecodeSection().offset()) {
+    bytecodeSection().code()[top] = JSOP_SETRVAL;
     if (!emit1(JSOP_RETRVAL)) {
       return false;
     }
   }
 
   return true;
 }
 
@@ -6208,17 +6213,17 @@ bool BytecodeEmitter::emitYieldStar(Pars
   // Step 6.
   // Initial send value is undefined.
   if (!emit1(JSOP_UNDEFINED)) {
     //              [stack] NEXT ITER RECEIVED
     return false;
   }
 
   int32_t savedDepthTemp;
-  int32_t startDepth = stackDepth;
+  int32_t startDepth = bytecodeSection().stackDepth();
   MOZ_ASSERT(startDepth >= 3);
 
   TryEmitter tryCatch(this, TryEmitter::Kind::TryCatchFinally,
                       TryEmitter::ControlKind::NonSyntactic);
   if (!tryCatch.emitJumpOverCatchAndFinally()) {
     //              [stack] NEXT ITER RESULT
     return false;
   }
@@ -6240,17 +6245,17 @@ bool BytecodeEmitter::emitYieldStar(Pars
     }
   }
 
   if (!tryCatch.emitTry()) {
     //              [stack] NEXT ITER RESULT
     return false;
   }
 
-  MOZ_ASSERT(this->stackDepth == startDepth);
+  MOZ_ASSERT(bytecodeSection().stackDepth() == startDepth);
 
   // Step 7.a.vi.
   // Step 7.b.ii.7.
   // Step 7.c.ix.
   //   25.5.3.7 AsyncGeneratorYield, step 5.
   if (iterKind == IteratorKind::Async) {
     if (!emitAwaitInInnermostScope()) {
       //            [stack] NEXT ITER RESULT
@@ -6273,17 +6278,17 @@ bool BytecodeEmitter::emitYieldStar(Pars
     return false;
   }
 
   if (!tryCatch.emitCatch()) {
     //              [stack] NEXT ITER RESULT
     return false;
   }
 
-  MOZ_ASSERT(stackDepth == startDepth);
+  MOZ_ASSERT(bytecodeSection().stackDepth() == startDepth);
 
   if (!emit1(JSOP_EXCEPTION)) {
     //              [stack] NEXT ITER RESULT EXCEPTION
     return false;
   }
   if (!emitDupAt(2)) {
     //              [stack] NEXT ITER RESULT EXCEPTION ITER
     return false;
@@ -6292,17 +6297,17 @@ bool BytecodeEmitter::emitYieldStar(Pars
     //              [stack] NEXT ITER RESULT EXCEPTION ITER ITER
     return false;
   }
   if (!emitAtomOp(cx->names().throw_, JSOP_CALLPROP)) {
     //              [stack] NEXT ITER RESULT EXCEPTION ITER THROW
     return false;
   }
 
-  savedDepthTemp = stackDepth;
+  savedDepthTemp = bytecodeSection().stackDepth();
   InternalIfEmitter ifThrowMethodIsNotDefined(this);
   if (!emitPushNotUndefinedOrNull()) {
     //              [stack] NEXT ITER RESULT EXCEPTION ITER THROW
     //                    NOT-UNDEF-OR-NULL
     return false;
   }
 
   if (!ifThrowMethodIsNotDefined.emitThenElse()) {
@@ -6343,28 +6348,28 @@ bool BytecodeEmitter::emitYieldStar(Pars
   if (!emit1(JSOP_SWAP)) {
     //              [stack] NEXT ITER RESULT OLDRESULT
     return false;
   }
   if (!emit1(JSOP_POP)) {
     //              [stack] NEXT ITER RESULT
     return false;
   }
-  MOZ_ASSERT(this->stackDepth == startDepth);
+  MOZ_ASSERT(bytecodeSection().stackDepth() == startDepth);
 
   JumpList checkResult;
   // Note that there is no GOSUB to the finally block here. If the iterator has
   // a "throw" method, it does not perform IteratorClose.
   if (!emitJump(JSOP_GOTO, &checkResult)) {
     //              [stack] NEXT ITER RESULT
     //              [stack] # goto checkResult
     return false;
   }
 
-  stackDepth = savedDepthTemp;
+  bytecodeSection().setStackDepth(savedDepthTemp);
   if (!ifThrowMethodIsNotDefined.emitElse()) {
     //              [stack] NEXT ITER RESULT EXCEPTION ITER THROW
     return false;
   }
 
   if (!emit1(JSOP_POP)) {
     //              [stack] NEXT ITER RESULT EXCEPTION ITER
     return false;
@@ -6379,22 +6384,22 @@ bool BytecodeEmitter::emitYieldStar(Pars
   }
   // Steps 7.b.iii.5-6.
   if (!emitUint16Operand(JSOP_THROWMSG, JSMSG_ITERATOR_NO_THROW)) {
     //              [stack] NEXT ITER RESULT EXCEPTION
     //              [stack] # throw
     return false;
   }
 
-  stackDepth = savedDepthTemp;
+  bytecodeSection().setStackDepth(savedDepthTemp);
   if (!ifThrowMethodIsNotDefined.emitEnd()) {
     return false;
   }
 
-  stackDepth = startDepth;
+  bytecodeSection().setStackDepth(startDepth);
   if (!tryCatch.emitFinally()) {
     return false;
   }
 
   // Step 7.c.i.
   //
   // Call iterator.return() for receiving a "forced return" completion from
   // the generator.
@@ -6511,17 +6516,17 @@ bool BytecodeEmitter::emitYieldStar(Pars
       //            [stack] NEXT ITER OLDRESULT FTYPE FVALUE RESULT
       return false;
     }
   }
   if (!emit1(JSOP_SETRVAL)) {
     //              [stack] NEXT ITER OLDRESULT FTYPE FVALUE
     return false;
   }
-  savedDepthTemp = this->stackDepth;
+  savedDepthTemp = bytecodeSection().stackDepth();
   if (!ifReturnDone.emitElse()) {
     //              [stack] NEXT ITER OLDRESULT FTYPE FVALUE RESULT
     return false;
   }
   if (!emit2(JSOP_UNPICK, 3)) {
     //              [stack] NEXT ITER RESULT OLDRESULT FTYPE FVALUE
     return false;
   }
@@ -6533,17 +6538,17 @@ bool BytecodeEmitter::emitYieldStar(Pars
     // goto tryStart;
     JumpList beq;
     JumpTarget breakTarget{-1};
     if (!emitBackwardJump(JSOP_GOTO, tryStart, &beq, &breakTarget)) {
       //            [stack] NEXT ITER RESULT
       return false;
     }
   }
-  this->stackDepth = savedDepthTemp;
+  bytecodeSection().setStackDepth(savedDepthTemp);
   if (!ifReturnDone.emitEnd()) {
     return false;
   }
 
   if (!ifReturnMethodIsDefined.emitElse()) {
     //              [stack] NEXT ITER RESULT FTYPE FVALUE ITER RET
     return false;
   }
@@ -6608,17 +6613,17 @@ bool BytecodeEmitter::emitYieldStar(Pars
     }
   }
 
   // Step 7.a.iii.
   if (!emitCheckIsObj(CheckIsObjectKind::IteratorNext)) {
     //              [stack] NEXT ITER RESULT
     return false;
   }
-  MOZ_ASSERT(this->stackDepth == startDepth);
+  MOZ_ASSERT(bytecodeSection().stackDepth() == startDepth);
 
   // Steps 7.a.iv-v.
   // Steps 7.b.ii.5-6.
   if (!emitJumpTargetAndPatch(checkResult)) {
     //              [stack] NEXT ITER RESULT
     //              [stack] # checkResult:
     return false;
   }
@@ -6653,17 +6658,17 @@ bool BytecodeEmitter::emitYieldStar(Pars
     //              [stack] RESULT
     return false;
   }
   if (!emitAtomOp(cx->names().value, JSOP_GETPROP)) {
     //              [stack] VALUE
     return false;
   }
 
-  MOZ_ASSERT(this->stackDepth == startDepth - 2);
+  MOZ_ASSERT(bytecodeSection().stackDepth() == startDepth - 2);
 
   return true;
 }
 
 bool BytecodeEmitter::emitStatementList(ListNode* stmtList) {
   for (ParseNode* stmt : stmtList->contents()) {
     if (!emitTree(stmt)) {
       return false;
@@ -6702,17 +6707,17 @@ bool BytecodeEmitter::emitExpressionStat
     /*
      * Don't eliminate apparently useless expressions if they are labeled
      * expression statements. The startOffset() test catches the case
      * where we are nesting in emitTree for a labeled compound statement.
      */
     if (innermostNestableControl &&
         innermostNestableControl->is<LabelControl>() &&
         innermostNestableControl->as<LabelControl>().startOffset() >=
-            offset()) {
+            bytecodeSection().offset()) {
       useful = true;
     }
   }
 
   if (useful) {
     ValueUsage valueUsage =
         wantval ? ValueUsage::WantValue : ValueUsage::IgnoreValue;
     ExpressionStatementEmitter ese(this, valueUsage);
@@ -8177,18 +8182,18 @@ bool BytecodeEmitter::replaceNewInitWith
   if (!objbox) {
     return false;
   }
 
   static_assert(
       JSOP_NEWINIT_LENGTH == JSOP_NEWOBJECT_LENGTH,
       "newinit and newobject must have equal length to edit in-place");
 
-  uint32_t index = objectList.add(objbox);
-  jsbytecode* code = this->code(offset);
+  uint32_t index = perScriptData().objectList().add(objbox);
+  jsbytecode* code = bytecodeSection().code(offset);
 
   MOZ_ASSERT(code[0] == JSOP_NEWINIT);
   code[0] = JSOP_NEWOBJECT;
   SET_UINT32(code, index);
 
   return true;
 }
 
@@ -9228,17 +9233,18 @@ bool BytecodeEmitter::emitTree(
 
     case ParseNodeKind::BigIntExpr:
       if (!emitBigIntOp(pn->as<BigIntLiteral>().box()->value())) {
         return false;
       }
       break;
 
     case ParseNodeKind::RegExpExpr:
-      if (!emitRegExp(objectList.add(pn->as<RegExpLiteral>().objbox()))) {
+      if (!emitRegExp(perScriptData().objectList().add(
+              pn->as<RegExpLiteral>().objbox()))) {
         return false;
       }
       break;
 
     case ParseNodeKind::TrueExpr:
       if (!emit1(JSOP_TRUE)) {
         return false;
       }
@@ -9329,33 +9335,35 @@ static bool AllocSrcNote(JSContext* cx, 
 
   *index = notes.length() - 1;
   return true;
 }
 
 bool BytecodeEmitter::addTryNote(JSTryNoteKind kind, uint32_t stackDepth,
                                  size_t start, size_t end) {
   MOZ_ASSERT(!inPrologue());
-  return tryNoteList.append(kind, stackDepth, start, end);
+  return bytecodeSection().tryNoteList().append(kind, stackDepth, start, end);
 }
 
 bool BytecodeEmitter::newSrcNote(SrcNoteType type, unsigned* indexp) {
-  SrcNotesVector& notes = this->notes();
+  // Prologue shouldn't have source notes.
+  MOZ_ASSERT(!inPrologue());
+  SrcNotesVector& notes = bytecodeSection().notes();
   unsigned index;
   if (!AllocSrcNote(cx, notes, &index)) {
     return false;
   }
 
   /*
    * Compute delta from the last annotated bytecode's offset.  If it's too
    * big to fit in sn, allocate one or more xdelta notes and reset sn.
    */
-  ptrdiff_t offset = this->offset();
-  ptrdiff_t delta = offset - lastNoteOffset();
-  lastNoteOffset_ = offset;
+  ptrdiff_t offset = bytecodeSection().offset();
+  ptrdiff_t delta = offset - bytecodeSection().lastNoteOffset();
+  bytecodeSection().setLastNoteOffset(offset);
   if (delta >= SN_DELTA_LIMIT) {
     do {
       ptrdiff_t xdelta = Min(delta, SN_XDELTA_MASK);
       SN_MAKE_XDELTA(&notes[index], xdelta);
       delta -= xdelta;
       if (!AllocSrcNote(cx, notes, &index)) {
         return false;
       }
@@ -9415,17 +9423,17 @@ bool BytecodeEmitter::newSrcNote3(SrcNot
 
 bool BytecodeEmitter::setSrcNoteOffset(unsigned index, unsigned which,
                                        ptrdiff_t offset) {
   if (!SN_REPRESENTABLE_OFFSET(offset)) {
     reportError(nullptr, JSMSG_NEED_DIET, js_script_str);
     return false;
   }
 
-  SrcNotesVector& notes = this->notes();
+  SrcNotesVector& notes = bytecodeSection().notes();
 
   /* Find the offset numbered which (i.e., skip exactly which offsets). */
   jssrcnote* sn = &notes[index];
   MOZ_ASSERT(SN_TYPE(sn) != SRC_XDELTA);
   MOZ_ASSERT((int)which < js_SrcNoteSpec[SN_TYPE(sn)].arity);
   for (sn++; which; sn++, which--) {
     if (*sn & SN_4BYTE_OFFSET_FLAG) {
       sn += 3;
@@ -9453,20 +9461,20 @@ bool BytecodeEmitter::setSrcNoteOffset(u
     *sn++ = (jssrcnote)(offset >> 16);
     *sn++ = (jssrcnote)(offset >> 8);
   }
   *sn = (jssrcnote)offset;
   return true;
 }
 
 void BytecodeEmitter::copySrcNotes(jssrcnote* destination, uint32_t nsrcnotes) {
-  unsigned count = notes_.length();
+  unsigned count = bytecodeSection().notes().length();
   // nsrcnotes includes SN_MAKE_TERMINATOR in addition to the srcnotes.
   MOZ_ASSERT(nsrcnotes == count + 1);
-  PodCopy(destination, notes_.begin(), count);
+  PodCopy(destination, bytecodeSection().notes().begin(), count);
   SN_MAKE_TERMINATOR(&destination[count]);
 }
 
 void CGNumberList::finish(mozilla::Span<GCPtrValue> array) {
   MOZ_ASSERT(length() == array.size());
 
   for (unsigned i = 0; i < length(); i++) {
     array[i].init(vector[i]);
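
Before the header changes below, one more pattern from the newSrcNote hunk above is worth spelling out: each note's delta is measured from bytecodeSection().lastNoteOffset(), and deltas too large for a single note are split into xdelta chunks. A simplified sketch of that chunking follows; the limits and note representation are illustrative, not the real srcnote format.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

// Illustrative limits; the real SN_DELTA_LIMIT / SN_XDELTA_MASK differ.
constexpr ptrdiff_t kDeltaLimit = 16;
constexpr ptrdiff_t kXDeltaMask = 127;

struct Note {
  bool isXDelta;
  ptrdiff_t delta;
};

// Splits the distance from the previous annotated offset into zero or more
// xdelta notes followed by one ordinary note holding the remainder.
static void appendNoteAt(std::vector<Note>& notes, ptrdiff_t& lastNoteOffset,
                         ptrdiff_t offset) {
  ptrdiff_t delta = offset - lastNoteOffset;
  lastNoteOffset = offset;
  while (delta >= kDeltaLimit) {
    ptrdiff_t xdelta = std::min(delta, kXDeltaMask);
    notes.push_back({true, xdelta});
    delta -= xdelta;
  }
  notes.push_back({false, delta});
}

int main() {
  std::vector<Note> notes;
  ptrdiff_t lastNoteOffset = 0;
  appendNoteAt(notes, lastNoteOffset, 300);  // needs xdelta chunks
  appendNoteAt(notes, lastNoteOffset, 305);  // small delta, single note
  std::printf("%zu notes emitted\n", notes.size());
  return 0;
}
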
--- a/js/src/frontend/BytecodeEmitter.h
+++ b/js/src/frontend/BytecodeEmitter.h
@@ -119,62 +119,274 @@ struct MOZ_STACK_CLASS BytecodeEmitter {
 
   // The JSScript we're ultimately producing.
   Rooted<JSScript*> script;
 
   // The lazy script if mode is LazyFunction, nullptr otherwise.
   Rooted<LazyScript*> lazyScript;
 
  private:
-  BytecodeVector code_;  /* bytecode */
-  SrcNotesVector notes_; /* source notes, see below */
+  // Bytecode and all data directly associated with a specific opcode/index
+  // inside the bytecode are stored in this class.
+  class BytecodeSection {
+   public:
+    BytecodeSection(JSContext* cx, uint32_t lineNum);
+
+    // ---- Bytecode ----
+
+    BytecodeVector& code() { return code_; }
+    const BytecodeVector& code() const { return code_; }
+
+    jsbytecode* code(ptrdiff_t offset) { return code_.begin() + offset; }
+    ptrdiff_t offset() const { return code_.end() - code_.begin(); }
+
+    // ---- Source notes ----
+
+    SrcNotesVector& notes() { return notes_; }
+    const SrcNotesVector& notes() const { return notes_; }
+
+    ptrdiff_t lastNoteOffset() const { return lastNoteOffset_; }
+    void setLastNoteOffset(ptrdiff_t offset) { lastNoteOffset_ = offset; }
+
+    // ---- Jump ----
+
+    ptrdiff_t lastTargetOffset() const { return lastTarget_.offset; }
+    void setLastTargetOffset(ptrdiff_t offset) { lastTarget_.offset = offset; }
 
-  // Code offset for last source note
-  ptrdiff_t lastNoteOffset_ = 0;
+    // Check if the last emitted opcode is a jump target.
+    bool lastOpcodeIsJumpTarget() const {
+      return offset() - lastTarget_.offset == ptrdiff_t(JSOP_JUMPTARGET_LENGTH);
+    }
+
+    // Jump targets should not be part of the emitted statement, as they can
+    // be aliased by multiple statements. If we included the jump target as
+    // part of the statement, the enclosing statement might not contain all
+    // the opcodes of the enclosed statements.
+    ptrdiff_t lastNonJumpTargetOffset() const {
+      return lastOpcodeIsJumpTarget() ? lastTarget_.offset : offset();
+    }
+
+    // ---- Stack ----
+
+    int32_t stackDepth() const { return stackDepth_; }
+    void setStackDepth(int32_t depth) { stackDepth_ = depth; }
+
+    uint32_t maxStackDepth() const { return maxStackDepth_; }
+
+    void updateDepth(ptrdiff_t target);
+
+    // ---- Try notes ----
+
+    CGTryNoteList& tryNoteList() { return tryNoteList_; }
+    const CGTryNoteList& tryNoteList() const { return tryNoteList_; }
+
+    // ---- Scope ----
+
+    CGScopeNoteList& scopeNoteList() { return scopeNoteList_; }
+    const CGScopeNoteList& scopeNoteList() const { return scopeNoteList_; }
+
+    // ---- Generator ----
 
-  // Line number for srcnotes.
-  //
-  // WARNING: If this becomes out of sync with already-emitted srcnotes,
-  // we can get undefined behavior.
-  uint32_t currentLine_ = 0;
+    CGResumeOffsetList& resumeOffsetList() { return resumeOffsetList_; }
+    const CGResumeOffsetList& resumeOffsetList() const {
+      return resumeOffsetList_;
+    }
+
+    uint32_t numYields() const { return numYields_; }
+    void addNumYields() { numYields_++; }
+
+    // ---- Line and column ----
+
+    uint32_t currentLine() const { return currentLine_; }
+    uint32_t lastColumn() const { return lastColumn_; }
+    void setCurrentLine(uint32_t line) {
+      currentLine_ = line;
+      lastColumn_ = 0;
+    }
+    void setLastColumn(uint32_t column) { lastColumn_ = column; }
+
+    void updateSeparatorPosition() {
+      lastSeparatorOffet_ = code().length();
+      lastSeparatorLine_ = currentLine_;
+      lastSeparatorColumn_ = lastColumn_;
+    }
+
+    void updateSeparatorPositionIfPresent() {
+      if (lastSeparatorOffet_ == code().length()) {
+        lastSeparatorLine_ = currentLine_;
+        lastSeparatorColumn_ = lastColumn_;
+      }
+    }
+
+    bool isDuplicateLocation() const {
+      return lastSeparatorLine_ == currentLine_ &&
+             lastSeparatorColumn_ == lastColumn_;
+    }
+
+    // ---- JIT ----
+
+    size_t numICEntries() const { return numICEntries_; }
+    void addNumICEntries() { numICEntries_++; }
+    void setNumICEntries(size_t entries) { numICEntries_ = entries; }
+
+    uint16_t typesetCount() const { return typesetCount_; }
+    void addTypesetCount() { typesetCount_++; }
+
+   private:
+    // ---- Bytecode ----
+
+    // Bytecode.
+    BytecodeVector code_;
+
+    // ---- Source notes ----
+
+    // Source notes
+    SrcNotesVector notes_;
+
+    // Code offset for last source note
+    ptrdiff_t lastNoteOffset_ = 0;
+
+    // ---- Jump ----
 
-  // Zero-based column index on currentLine of last SRC_COLSPAN-annotated
-  // opcode.
-  //
-  // WARNING: If this becomes out of sync with already-emitted srcnotes,
-  // we can get undefined behavior.
-  uint32_t lastColumn_ = 0;
+    // Last jump target emitted.
+    JumpTarget lastTarget_ = {-1 - ptrdiff_t(JSOP_JUMPTARGET_LENGTH)};
+
+    // ---- Stack ----
+
+    // Maximum number of expression stack slots so far.
+    uint32_t maxStackDepth_ = 0;
+
+    // Current stack depth in script frame.
+    int32_t stackDepth_ = 0;
+
+    // ---- Try notes ----
+
+    // List of emitted try notes.
+    CGTryNoteList tryNoteList_;
+
+    // ---- Scope ----
+
+    // List of emitted block scope notes.
+    CGScopeNoteList scopeNoteList_;
+
+    // ---- Generator ----
+
+    // Certain ops (yield, await, gosub) have an entry in the script's
+    // resumeOffsets list. This can be used to map from the op's resumeIndex to
+    // the bytecode offset of the next pc. This indirection makes it easy to
+    // resume in the JIT (because BaselineScript stores a resumeIndex => native
+    // code array).
+    CGResumeOffsetList resumeOffsetList_;
+
+    // Number of yield instructions emitted. Does not include JSOP_AWAIT.
+    uint32_t numYields_ = 0;
+
+    // ---- Line and column ----
+
+    // Line number for srcnotes.
+    //
+    // WARNING: If this becomes out of sync with already-emitted srcnotes,
+    // we can get undefined behavior.
+    uint32_t currentLine_;
+
+    // Zero-based column index on currentLine_ of last SRC_COLSPAN-annotated
+    // opcode.
+    //
+    // WARNING: If this becomes out of sync with already-emitted srcnotes,
+    // we can get undefined behavior.
+    uint32_t lastColumn_ = 0;
+
+    // The offset, line, and column numbers of the last opcode emitted for a
+    // step-execution breakpoint.
+    uint32_t lastSeparatorOffet_ = 0;
+    uint32_t lastSeparatorLine_ = 0;
+    uint32_t lastSeparatorColumn_ = 0;
+
+    // ---- JIT ----
+
+    // Number of JOF_IC opcodes emitted.
+    size_t numICEntries_ = 0;
 
-  uint32_t lastSeparatorOffet_ = 0;
-  uint32_t lastSeparatorLine_ = 0;
-  uint32_t lastSeparatorColumn_ = 0;
+    // Number of JOF_TYPESET opcodes generated.
+    uint16_t typesetCount_ = 0;
+  };
+
+  BytecodeSection bytecodeSection_;
+
+ public:
+  BytecodeSection& bytecodeSection() { return bytecodeSection_; }
+  const BytecodeSection& bytecodeSection() const { return bytecodeSection_; }
+
+ private:
+  // Data that is not directly associated with a specific opcode/index in the
+  // bytecode, but is referenced from the bytecode, is stored in this class.
+  class PerScriptData {
+   public:
+    explicit PerScriptData(JSContext* cx);
+
+    MOZ_MUST_USE bool init(JSContext* cx);
+
+    // ---- Scope ----
+
+    CGScopeList& scopeList() { return scopeList_; }
+    const CGScopeList& scopeList() const { return scopeList_; }
+
+    // ---- Literals ----
+
+    CGNumberList& numberList() { return numberList_; }
+    const CGNumberList& numberList() const { return numberList_; }
 
+    CGObjectList& objectList() { return objectList_; }
+    const CGObjectList& objectList() const { return objectList_; }
+
+    PooledMapPtr<AtomIndexMap>& atomIndices() { return atomIndices_; }
+    const PooledMapPtr<AtomIndexMap>& atomIndices() const {
+      return atomIndices_;
+    }
+
+   private:
+    // ---- Scope ----
+
+    // List of emitted scopes.
+    CGScopeList scopeList_;
+
+    // ---- Literals ----
+
+    // List of double and bigint values used by script.
+    CGNumberList numberList_;
+
+    // List of emitted objects.
+    CGObjectList objectList_;
+
+    // Map from atom to index.
+    PooledMapPtr<AtomIndexMap> atomIndices_;
+  };
+
+  PerScriptData perScriptData_;
+
+ public:
+  PerScriptData& perScriptData() { return perScriptData_; }
+  const PerScriptData& perScriptData() const { return perScriptData_; }
+
+ private:
   // switchToMain sets this to the bytecode offset of the main section.
   mozilla::Maybe<uint32_t> mainOffset_ = {};
 
   /* field info for enclosing class */
   const FieldInitializers fieldInitializers_;
 
  public:
-  // Last jump target emitted.
-  JumpTarget lastTarget = {-1 - ptrdiff_t(JSOP_JUMPTARGET_LENGTH)};
-
   // Private storage for parser wrapper. DO NOT REFERENCE INTERNALLY. May not be
   // initialized. Use |parser| instead.
   mozilla::Maybe<EitherParser> ep_ = {};
   BCEParserHandle* parser = nullptr;
 
-  PooledMapPtr<AtomIndexMap> atomIndices; /* literals indexed for mapping */
   unsigned firstLine = 0; /* first line, for JSScript::initFromEmitter */
 
   uint32_t maxFixedSlots = 0; /* maximum number of fixed frame slots so far */
-  uint32_t maxStackDepth =
-      0; /* maximum number of expression stack slots so far */
-
-  int32_t stackDepth = 0; /* current stack depth in script frame */
 
   uint32_t bodyScopeIndex =
       UINT32_MAX; /* index into scopeList of the body scope */
 
   EmitterScope* varEmitterScope = nullptr;
   NestableControl* innermostNestableControl = nullptr;
   EmitterScope* innermostEmitterScope_ = nullptr;
   TDZCheckCache* innermostTDZCheckCache = nullptr;
@@ -190,38 +402,16 @@ struct MOZ_STACK_CLASS BytecodeEmitter {
   EmitterScope* innermostEmitterScope() const {
     MOZ_ASSERT(!unstableEmitterScope);
     return innermostEmitterScopeNoCheck();
   }
   EmitterScope* innermostEmitterScopeNoCheck() const {
     return innermostEmitterScope_;
   }
 
-  CGNumberList numberList;       /* double and bigint values used by script */
-  CGObjectList objectList;       /* list of emitted objects */
-  CGScopeList scopeList;         /* list of emitted scopes */
-  CGTryNoteList tryNoteList;     /* list of emitted try notes */
-  CGScopeNoteList scopeNoteList; /* list of emitted block scope notes */
-
-  // Certain ops (yield, await, gosub) have an entry in the script's
-  // resumeOffsets list. This can be used to map from the op's resumeIndex to
-  // the bytecode offset of the next pc. This indirection makes it easy to
-  // resume in the JIT (because BaselineScript stores a resumeIndex => native
-  // code array).
-  CGResumeOffsetList resumeOffsetList;
-
-  // Number of JOF_IC opcodes emitted.
-  size_t numICEntries = 0;
-
-  // Number of yield instructions emitted. Does not include JSOP_AWAIT.
-  uint32_t numYields = 0;
-
-  // Number of JOF_TYPESET opcodes generated.
-  uint16_t typesetCount = 0;
-
   // Script contains singleton initializer JSOP_OBJECT.
   bool hasSingletons = false;
 
   // Script contains finally block.
   bool hasTryFinally = false;
 
   // True while emitting a lambda which is only expected to run once.
   bool emittingRunOnceLambda = false;
@@ -355,34 +545,36 @@ struct MOZ_STACK_CLASS BytecodeEmitter {
   }
 
   void setVarEmitterScope(EmitterScope* emitterScope) {
     MOZ_ASSERT(emitterScope);
     MOZ_ASSERT(!varEmitterScope);
     varEmitterScope = emitterScope;
   }
 
-  Scope* outermostScope() const { return scopeList.vector[0]; }
+  Scope* outermostScope() const {
+    return perScriptData().scopeList().vector[0];
+  }
   Scope* innermostScope() const;
   Scope* bodyScope() const {
-    MOZ_ASSERT(bodyScopeIndex < scopeList.length());
-    return scopeList.vector[bodyScopeIndex];
+    MOZ_ASSERT(bodyScopeIndex < perScriptData().scopeList().length());
+    return perScriptData().scopeList().vector[bodyScopeIndex];
   }
 
   MOZ_ALWAYS_INLINE
   MOZ_MUST_USE bool makeAtomIndex(JSAtom* atom, uint32_t* indexp) {
-    MOZ_ASSERT(atomIndices);
-    AtomIndexMap::AddPtr p = atomIndices->lookupForAdd(atom);
+    MOZ_ASSERT(perScriptData().atomIndices());
+    AtomIndexMap::AddPtr p = perScriptData().atomIndices()->lookupForAdd(atom);
     if (p) {
       *indexp = p->value();
       return true;
     }
 
-    uint32_t index = atomIndices->count();
-    if (!atomIndices->add(p, atom, index)) {
+    uint32_t index = perScriptData().atomIndices()->count();
+    if (!perScriptData().atomIndices()->add(p, atom, index)) {
       ReportOutOfMemory(cx);
       return false;
     }
 
     *indexp = index;
     return true;
   }
 
@@ -395,55 +587,23 @@ struct MOZ_STACK_CLASS BytecodeEmitter {
 
   bool needsImplicitThis();
 
   MOZ_MUST_USE bool emitThisEnvironmentCallee();
   MOZ_MUST_USE bool emitSuperBase();
 
   void tellDebuggerAboutCompiledScript(JSContext* cx);
 
-  BytecodeVector& code() { return code_; }
-  const BytecodeVector& code() const { return code_; }
-
-  jsbytecode* code(ptrdiff_t offset) { return code_.begin() + offset; }
-  ptrdiff_t offset() const { return code_.end() - code_.begin(); }
-
   uint32_t mainOffset() const { return *mainOffset_; }
 
   bool inPrologue() const { return mainOffset_.isNothing(); }
 
   void switchToMain() {
     MOZ_ASSERT(inPrologue());
-    mainOffset_.emplace(code_.length());
-  }
-
-  SrcNotesVector& notes() {
-    // Prologue shouldn't have source notes.
-    MOZ_ASSERT(!inPrologue());
-    return notes_;
-  }
-  ptrdiff_t lastNoteOffset() const { return lastNoteOffset_; }
-  unsigned currentLine() const { return currentLine_; }
-
-  void setCurrentLine(uint32_t line) {
-    currentLine_ = line;
-    lastColumn_ = 0;
-  }
-
-  // Check if the last emitted opcode is a jump target.
-  bool lastOpcodeIsJumpTarget() const {
-    return offset() - lastTarget.offset == ptrdiff_t(JSOP_JUMPTARGET_LENGTH);
-  }
-
-  // JumpTarget should not be part of the emitted statement, as they can be
-  // aliased by multiple statements. If we included the jump target as part of
-  // the statement we might have issues where the enclosing statement might
-  // not contain all the opcodes of the enclosed statements.
-  ptrdiff_t lastNonJumpTargetOffset() const {
-    return lastOpcodeIsJumpTarget() ? lastTarget.offset : offset();
+    mainOffset_.emplace(bytecodeSection().code().length());
   }
 
   void setFunctionBodyEndPos(uint32_t pos) {
     functionBodyEndPos = mozilla::Some(pos);
   }
 
   void setScriptStartOffsetIfUnset(uint32_t pos) {
     if (scriptStartOffset.isNothing()) {
@@ -504,22 +664,20 @@ struct MOZ_STACK_CLASS BytecodeEmitter {
   // encompasses the entire source.
   MOZ_MUST_USE bool emitScript(ParseNode* body);
 
   // Emit function code for the tree rooted at body.
   enum class TopLevelFunction { No, Yes };
   MOZ_MUST_USE bool emitFunctionScript(FunctionNode* funNode,
                                        TopLevelFunction isTopLevel);
 
-  void updateDepth(ptrdiff_t target);
   MOZ_MUST_USE bool markStepBreakpoint();
   MOZ_MUST_USE bool markSimpleBreakpoint();
   MOZ_MUST_USE bool updateLineNumberNotes(uint32_t offset);
   MOZ_MUST_USE bool updateSourceCoordNotes(uint32_t offset);
-  void updateSeparatorPosition();
 
   JSOp strictifySetNameOp(JSOp op);
 
   MOZ_MUST_USE bool emitCheck(JSOp op, ptrdiff_t delta, ptrdiff_t* offset);
 
   // Emit one bytecode.
   MOZ_MUST_USE bool emit1(JSOp op);
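
To make the header reorganization above concrete: state tied to specific bytecode offsets (code, notes, stack depth) now sits behind bytecodeSection(), while tables merely referenced from bytecode (scopes, objects, atoms) sit behind perScriptData(). The toy model below mirrors that layout with hypothetical members in place of the real CG* list types.

#include <cstddef>
#include <cstdint>
#include <vector>

class Emitter {
 public:
  // State tied to specific bytecode offsets.
  class BytecodeSection {
   public:
    std::vector<uint8_t>& code() { return code_; }
    ptrdiff_t offset() const { return ptrdiff_t(code_.size()); }
    int32_t stackDepth() const { return stackDepth_; }
    void setStackDepth(int32_t depth) { stackDepth_ = depth; }

   private:
    std::vector<uint8_t> code_;
    int32_t stackDepth_ = 0;
  };

  // Tables referenced from bytecode but not tied to particular offsets.
  class PerScriptData {
   public:
    std::vector<uint32_t>& objectList() { return objectList_; }

   private:
    std::vector<uint32_t> objectList_;
  };

  BytecodeSection& bytecodeSection() { return bytecodeSection_; }
  PerScriptData& perScriptData() { return perScriptData_; }

 private:
  BytecodeSection bytecodeSection_;
  PerScriptData perScriptData_;
};

int main() {
  Emitter emitter;
  emitter.bytecodeSection().code().push_back(0x01);
  emitter.perScriptData().objectList().push_back(0);
  return emitter.bytecodeSection().offset() == 1 ? 0 : 1;
}
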
 
--- a/js/src/frontend/CForEmitter.cpp
+++ b/js/src/frontend/CForEmitter.cpp
@@ -70,17 +70,17 @@ bool CForEmitter::emitBody(Cond cond, co
   if (!bce_->newSrcNote(SRC_FOR, &noteIndex_)) {
     return false;
   }
   if (!bce_->emit1(JSOP_NOP)) {
     //              [stack]
     return false;
   }
 
-  biasedTop_ = bce_->offset();
+  biasedTop_ = bce_->bytecodeSection().offset();
 
   if (cond_ == Cond::Present) {
     // Goto the loop condition, which branches back to iterate.
     if (!loopInfo_->emitEntryJump(bce_)) {
       return false;
     }
   }
 
@@ -158,30 +158,30 @@ bool CForEmitter::emitCond(const Maybe<u
     if (!bce_->emit1(JSOP_POP)) {
       //            [stack]
       return false;
     }
 
     // Restore the absolute line number for source note readers.
     if (endPos) {
       uint32_t lineNum = bce_->parser->errorReporter().lineAt(*endPos);
-      if (bce_->currentLine() != lineNum) {
+      if (bce_->bytecodeSection().currentLine() != lineNum) {
         if (!bce_->newSrcNote2(SRC_SETLINE, ptrdiff_t(lineNum))) {
           return false;
         }
-        bce_->setCurrentLine(lineNum);
+        bce_->bytecodeSection().setCurrentLine(lineNum);
       }
     }
   }
 
   if (update_ == Update::Present) {
     tdzCache_.reset();
   }
 
-  condOffset_ = bce_->offset();
+  condOffset_ = bce_->bytecodeSection().offset();
 
   if (cond_ == Cond::Present) {
     if (!loopInfo_->emitLoopEntry(bce_, condPos)) {
       //            [stack]
       return false;
     }
   } else if (update_ == Update::Missing) {
     // If there is no condition clause and no update clause, mark
@@ -223,17 +223,18 @@ bool CForEmitter::emitEnd() {
   // The third note offset helps us find the loop-closing jump.
   if (!bce_->setSrcNoteOffset(noteIndex_, SrcNote::For::BackJumpOffset,
                               loopInfo_->loopEndOffset() - biasedTop_))
 
   {
     return false;
   }
 
-  if (!bce_->addTryNote(JSTRY_LOOP, bce_->stackDepth, loopInfo_->headOffset(),
+  if (!bce_->addTryNote(JSTRY_LOOP, bce_->bytecodeSection().stackDepth(),
+                        loopInfo_->headOffset(),
                         loopInfo_->breakTargetOffset())) {
     return false;
   }
 
   if (!loopInfo_->patchBreaksAndContinues(bce_)) {
     //              [stack]
     return false;
   }
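
CForEmitter::emitCond above emits a SRC_SETLINE note only when the line actually changed, then updates it via bytecodeSection().setCurrentLine(). The sketch below captures just that emit-on-change pattern, with a hypothetical note list standing in for the real srcnote machinery.

#include <cstdint>
#include <vector>

struct LineNote {
  uint32_t line;
};

class LineTracker {
 public:
  explicit LineTracker(uint32_t startLine) : currentLine_(startLine) {}

  // Records a note only when the line differs from the one already current,
  // mirroring how redundant set-line notes are avoided.
  void updateLine(uint32_t lineNum, std::vector<LineNote>& notes) {
    if (currentLine_ != lineNum) {
      notes.push_back({lineNum});
      currentLine_ = lineNum;
    }
  }

 private:
  uint32_t currentLine_;
};

int main() {
  std::vector<LineNote> notes;
  LineTracker tracker(10);
  tracker.updateLine(10, notes);  // no note: same line
  tracker.updateLine(12, notes);  // one note emitted
  return notes.size() == 1 ? 0 : 1;
}
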
--- a/js/src/frontend/DoWhileEmitter.cpp
+++ b/js/src/frontend/DoWhileEmitter.cpp
@@ -70,17 +70,18 @@ bool DoWhileEmitter::emitCond() {
 
 bool DoWhileEmitter::emitEnd() {
   MOZ_ASSERT(state_ == State::Cond);
 
   if (!loopInfo_->emitLoopEnd(bce_, JSOP_IFNE)) {
     return false;
   }
 
-  if (!bce_->addTryNote(JSTRY_LOOP, bce_->stackDepth, loopInfo_->headOffset(),
+  if (!bce_->addTryNote(JSTRY_LOOP, bce_->bytecodeSection().stackDepth(),
+                        loopInfo_->headOffset(),
                         loopInfo_->breakTargetOffset())) {
     return false;
   }
 
   // Update the annotations with the update and back edge positions, for
   // IonBuilder.
   if (!bce_->setSrcNoteOffset(noteIndex_, SrcNote::DoWhile::CondOffset,
                               loopInfo_->continueTargetOffsetFromLoopHead())) {
--- a/js/src/frontend/EmitterScope.cpp
+++ b/js/src/frontend/EmitterScope.cpp
@@ -337,37 +337,37 @@ NameLocation EmitterScope::searchAndCach
 template <typename ScopeCreator>
 bool EmitterScope::internScope(BytecodeEmitter* bce, ScopeCreator createScope) {
   RootedScope enclosing(bce->cx, enclosingScope(bce));
   Scope* scope = createScope(bce->cx, enclosing);
   if (!scope) {
     return false;
   }
   hasEnvironment_ = scope->hasEnvironment();
-  scopeIndex_ = bce->scopeList.length();
-  return bce->scopeList.append(scope);
+  scopeIndex_ = bce->perScriptData().scopeList().length();
+  return bce->perScriptData().scopeList().append(scope);
 }
 
 template <typename ScopeCreator>
 bool EmitterScope::internBodyScope(BytecodeEmitter* bce,
                                    ScopeCreator createScope) {
   MOZ_ASSERT(bce->bodyScopeIndex == UINT32_MAX,
              "There can be only one body scope");
-  bce->bodyScopeIndex = bce->scopeList.length();
+  bce->bodyScopeIndex = bce->perScriptData().scopeList().length();
   return internScope(bce, createScope);
 }
 
 bool EmitterScope::appendScopeNote(BytecodeEmitter* bce) {
   MOZ_ASSERT(ScopeKindIsInBody(scope(bce)->kind()) && enclosingInFrame(),
              "Scope notes are not needed for body-level scopes.");
-  noteIndex_ = bce->scopeNoteList.length();
-  return bce->scopeNoteList.append(index(), bce->offset(),
-                                   enclosingInFrame()
-                                       ? enclosingInFrame()->noteIndex()
-                                       : ScopeNote::NoScopeNoteIndex);
+  noteIndex_ = bce->bytecodeSection().scopeNoteList().length();
+  return bce->bytecodeSection().scopeNoteList().append(
+      index(), bce->bytecodeSection().offset(),
+      enclosingInFrame() ? enclosingInFrame()->noteIndex()
+                         : ScopeNote::NoScopeNoteIndex);
 }
 
 bool EmitterScope::deadZoneFrameSlotRange(BytecodeEmitter* bce,
                                           uint32_t slotStart,
                                           uint32_t slotEnd) const {
   // Lexical bindings throw ReferenceErrors if they are used before
   // initialization. See ES6 8.1.1.1.6.
   //
@@ -1052,27 +1052,28 @@ bool EmitterScope::leave(BytecodeEmitter
 
   // Finish up the scope if we are leaving it in LIFO fashion.
   if (!nonLocal) {
     // Popping scopes due to non-local jumps generate additional scope
     // notes. See NonLocalExitControl::prepareForNonLocalJump.
     if (ScopeKindIsInBody(kind)) {
       // The extra function var scope is never popped once it's pushed,
       // so its scope note extends until the end of any possible code.
-      uint32_t offset =
-          kind == ScopeKind::FunctionBodyVar ? UINT32_MAX : bce->offset();
-      bce->scopeNoteList.recordEnd(noteIndex_, offset);
+      uint32_t offset = kind == ScopeKind::FunctionBodyVar
+                            ? UINT32_MAX
+                            : bce->bytecodeSection().offset();
+      bce->bytecodeSection().scopeNoteList().recordEnd(noteIndex_, offset);
     }
   }
 
   return true;
 }
 
 Scope* EmitterScope::scope(const BytecodeEmitter* bce) const {
-  return bce->scopeList.vector[index()];
+  return bce->perScriptData().scopeList().vector[index()];
 }
 
 NameLocation EmitterScope::lookup(BytecodeEmitter* bce, JSAtom* name) {
   if (Maybe<NameLocation> loc = lookupInCache(bce, name)) {
     return *loc;
   }
   return searchAndCache(bce, name);
 }
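
The EmitterScope hunks above keep the existing scope-note protocol, now routed through bytecodeSection().scopeNoteList(): append a note recording the scope index, start offset, and enclosing note when a scope is entered, then record its end offset on leave (UINT32_MAX for the function var scope that never pops). A small self-contained model of that bookkeeping follows; the list API here is simplified relative to the real CGScopeNoteList.

#include <cstdint>
#include <vector>

constexpr uint32_t kNoScopeNoteIndex = UINT32_MAX;

struct ScopeNote {
  uint32_t scopeIndex;
  uint32_t start;
  uint32_t end;     // filled in by recordEnd()
  uint32_t parent;  // enclosing note, or kNoScopeNoteIndex
};

class ScopeNoteList {
 public:
  uint32_t append(uint32_t scopeIndex, uint32_t start, uint32_t parent) {
    notes_.push_back({scopeIndex, start, /*end=*/0, parent});
    return uint32_t(notes_.size() - 1);
  }
  void recordEnd(uint32_t noteIndex, uint32_t end) {
    notes_[noteIndex].end = end;
  }
  const std::vector<ScopeNote>& notes() const { return notes_; }

 private:
  std::vector<ScopeNote> notes_;
};

int main() {
  ScopeNoteList list;
  uint32_t note = list.append(/*scopeIndex=*/0, /*start=*/10, kNoScopeNoteIndex);
  list.recordEnd(note, /*end=*/42);
  return list.notes()[note].end == 42 ? 0 : 1;
}
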
--- a/js/src/frontend/ExpressionStatementEmitter.cpp
+++ b/js/src/frontend/ExpressionStatementEmitter.cpp
@@ -24,25 +24,25 @@ bool ExpressionStatementEmitter::prepare
 
   if (beginPos) {
     if (!bce_->updateSourceCoordNotes(*beginPos)) {
       return false;
     }
   }
 
 #ifdef DEBUG
-  depth_ = bce_->stackDepth;
+  depth_ = bce_->bytecodeSection().stackDepth();
   state_ = State::Expr;
 #endif
   return true;
 }
 
 bool ExpressionStatementEmitter::emitEnd() {
   MOZ_ASSERT(state_ == State::Expr);
-  MOZ_ASSERT(bce_->stackDepth == depth_ + 1);
+  MOZ_ASSERT(bce_->bytecodeSection().stackDepth() == depth_ + 1);
 
   //                [stack] VAL
 
   JSOp op = valueUsage_ == ValueUsage::WantValue ? JSOP_SETRVAL : JSOP_POP;
   if (!bce_->emit1(op)) {
     //              [stack] # if WantValue
     //              [stack] VAL
     //              [stack] # otherwise
--- a/js/src/frontend/ForInEmitter.cpp
+++ b/js/src/frontend/ForInEmitter.cpp
@@ -89,17 +89,17 @@ bool ForInEmitter::emitInitialize() {
 
     // For uncaptured bindings, put them back in TDZ.
     if (!headLexicalEmitterScope_->deadZoneFrameSlots(bce_)) {
       return false;
     }
   }
 
 #ifdef DEBUG
-  loopDepth_ = bce_->stackDepth;
+  loopDepth_ = bce_->bytecodeSection().stackDepth();
 #endif
   MOZ_ASSERT(loopDepth_ >= 2);
 
   if (!bce_->emit1(JSOP_ITERNEXT)) {
     //              [stack] ITER ITERVAL
     return false;
   }
 
@@ -107,29 +107,29 @@ bool ForInEmitter::emitInitialize() {
   state_ = State::Initialize;
 #endif
   return true;
 }
 
 bool ForInEmitter::emitBody() {
   MOZ_ASSERT(state_ == State::Initialize);
 
-  MOZ_ASSERT(bce_->stackDepth == loopDepth_,
+  MOZ_ASSERT(bce_->bytecodeSection().stackDepth() == loopDepth_,
              "iterator and iterval must be left on the stack");
 
 #ifdef DEBUG
   state_ = State::Body;
 #endif
   return true;
 }
 
 bool ForInEmitter::emitEnd(const Maybe<uint32_t>& forPos) {
   MOZ_ASSERT(state_ == State::Body);
 
-  loopInfo_->setContinueTarget(bce_->offset());
+  loopInfo_->setContinueTarget(bce_->bytecodeSection().offset());
 
   if (forPos) {
     // Make sure this code is attributed to the "for".
     if (!bce_->updateSourceCoordNotes(*forPos)) {
       return false;
     }
   }
 
@@ -166,18 +166,19 @@ bool ForInEmitter::emitEnd(const Maybe<u
   }
 
   // Pop the enumeration value.
   if (!bce_->emit1(JSOP_POP)) {
     //              [stack] ITER
     return false;
   }
 
-  if (!bce_->addTryNote(JSTRY_FOR_IN, bce_->stackDepth, loopInfo_->headOffset(),
-                        bce_->offset())) {
+  if (!bce_->addTryNote(JSTRY_FOR_IN, bce_->bytecodeSection().stackDepth(),
+                        loopInfo_->headOffset(),
+                        bce_->bytecodeSection().offset())) {
     return false;
   }
 
   if (!bce_->emit1(JSOP_ENDITER)) {
     //              [stack]
     return false;
   }
 
--- a/js/src/frontend/ForOfEmitter.cpp
+++ b/js/src/frontend/ForOfEmitter.cpp
@@ -53,17 +53,17 @@ bool ForOfEmitter::emitInitialize(const 
     }
   } else {
     if (!bce_->emitIterator()) {
       //            [stack] NEXT ITER
       return false;
     }
   }
 
-  int32_t iterDepth = bce_->stackDepth;
+  int32_t iterDepth = bce_->bytecodeSection().stackDepth();
 
   // For-of loops have the iterator next method, the iterator itself, and
   // the result.value on the stack.
   // Push an undefined to balance the stack.
   if (!bce_->emit1(JSOP_UNDEFINED)) {
     //              [stack] NEXT ITER UNDEF
     return false;
   }
@@ -106,17 +106,17 @@ bool ForOfEmitter::emitInitialize(const 
 
     // For uncaptured bindings, put them back in TDZ.
     if (!headLexicalEmitterScope_->deadZoneFrameSlots(bce_)) {
       return false;
     }
   }
 
 #ifdef DEBUG
-  loopDepth_ = bce_->stackDepth;
+  loopDepth_ = bce_->bytecodeSection().stackDepth();
 #endif
 
   // Make sure this code is attributed to the "for".
   if (forPos) {
     if (!bce_->updateSourceCoordNotes(*forPos)) {
       return false;
     }
   }
@@ -190,17 +190,17 @@ bool ForOfEmitter::emitInitialize(const 
   state_ = State::Initialize;
 #endif
   return true;
 }
 
 bool ForOfEmitter::emitBody() {
   MOZ_ASSERT(state_ == State::Initialize);
 
-  MOZ_ASSERT(bce_->stackDepth == loopDepth_,
+  MOZ_ASSERT(bce_->bytecodeSection().stackDepth() == loopDepth_,
              "the stack must be balanced around the initializing "
              "operation");
 
   // Remove VALUE from the stack to release it.
   if (!bce_->emit1(JSOP_POP)) {
     //              [stack] NEXT ITER
     return false;
   }
@@ -213,24 +213,24 @@ bool ForOfEmitter::emitBody() {
   state_ = State::Body;
 #endif
   return true;
 }
 
 bool ForOfEmitter::emitEnd(const Maybe<uint32_t>& iteratedPos) {
   MOZ_ASSERT(state_ == State::Body);
 
-  MOZ_ASSERT(bce_->stackDepth == loopDepth_,
+  MOZ_ASSERT(bce_->bytecodeSection().stackDepth() == loopDepth_,
              "the stack must be balanced around the for-of body");
 
   if (!loopInfo_->emitEndCodeNeedingIteratorClose(bce_)) {
     return false;
   }
 
-  loopInfo_->setContinueTarget(bce_->offset());
+  loopInfo_->setContinueTarget(bce_->bytecodeSection().offset());
 
   // We use the iterated value's position to attribute JSOP_LOOPENTRY,
   // which corresponds to the iteration protocol.
   // This is a bit misleading for 2nd and later iterations and might need
   // some fix (bug 1482003).
   if (!loopInfo_->emitLoopEntry(bce_, iteratedPos)) {
     return false;
   }
@@ -239,29 +239,30 @@ bool ForOfEmitter::emitEnd(const Maybe<u
     //              [stack] NEXT ITER UNDEF FALSE
     return false;
   }
   if (!loopInfo_->emitLoopEnd(bce_, JSOP_IFEQ)) {
     //              [stack] NEXT ITER UNDEF
     return false;
   }
 
-  MOZ_ASSERT(bce_->stackDepth == loopDepth_);
+  MOZ_ASSERT(bce_->bytecodeSection().stackDepth() == loopDepth_);
 
   // Let Ion know where the closing jump of this loop is.
   if (!bce_->setSrcNoteOffset(noteIndex_, SrcNote::ForOf::BackJumpOffset,
                               loopInfo_->loopEndOffsetFromEntryJump())) {
     return false;
   }
 
   if (!loopInfo_->patchBreaksAndContinues(bce_)) {
     return false;
   }
 
-  if (!bce_->addTryNote(JSTRY_FOR_OF, bce_->stackDepth, loopInfo_->headOffset(),
+  if (!bce_->addTryNote(JSTRY_FOR_OF, bce_->bytecodeSection().stackDepth(),
+                        loopInfo_->headOffset(),
                         loopInfo_->breakTargetOffset())) {
     return false;
   }
 
   if (!bce_->emitPopN(3)) {
     //              [stack]
     return false;
   }
--- a/js/src/frontend/ForOfLoopControl.cpp
+++ b/js/src/frontend/ForOfLoopControl.cpp
@@ -25,32 +25,32 @@ bool ForOfLoopControl::emitBeginCodeNeed
   tryCatch_.emplace(bce, TryEmitter::Kind::TryCatch,
                     TryEmitter::ControlKind::NonSyntactic);
 
   if (!tryCatch_->emitTry()) {
     return false;
   }
 
   MOZ_ASSERT(numYieldsAtBeginCodeNeedingIterClose_ == UINT32_MAX);
-  numYieldsAtBeginCodeNeedingIterClose_ = bce->numYields;
+  numYieldsAtBeginCodeNeedingIterClose_ = bce->bytecodeSection().numYields();
 
   return true;
 }
 
 bool ForOfLoopControl::emitEndCodeNeedingIteratorClose(BytecodeEmitter* bce) {
   if (!tryCatch_->emitCatch()) {
     //              [stack] ITER ...
     return false;
   }
 
   if (!bce->emit1(JSOP_EXCEPTION)) {
     //              [stack] ITER ... EXCEPTION
     return false;
   }
-  unsigned slotFromTop = bce->stackDepth - iterDepth_;
+  unsigned slotFromTop = bce->bytecodeSection().stackDepth() - iterDepth_;
   if (!bce->emitDupAt(slotFromTop)) {
     //              [stack] ITER ... EXCEPTION ITER
     return false;
   }
 
   // If ITER is undefined, it means the exception is thrown by
   // IteratorClose for non-local jump, and we shouldn't perform
   // IteratorClose again here.
@@ -64,17 +64,18 @@ bool ForOfLoopControl::emitEndCodeNeedin
   }
 
   InternalIfEmitter ifIteratorIsNotClosed(bce);
   if (!ifIteratorIsNotClosed.emitThen()) {
     //              [stack] ITER ... EXCEPTION
     return false;
   }
 
-  MOZ_ASSERT(slotFromTop == unsigned(bce->stackDepth - iterDepth_));
+  MOZ_ASSERT(slotFromTop ==
+             unsigned(bce->bytecodeSection().stackDepth() - iterDepth_));
   if (!bce->emitDupAt(slotFromTop)) {
     //              [stack] ITER ... EXCEPTION ITER
     return false;
   }
   if (!emitIteratorCloseInInnermostScopeWithTryNote(bce,
                                                     CompletionKind::Throw)) {
     return false;  // ITER ... EXCEPTION
   }
@@ -87,17 +88,17 @@ bool ForOfLoopControl::emitEndCodeNeedin
   if (!bce->emit1(JSOP_THROW)) {
     //              [stack] ITER ...
     return false;
   }
 
   // If any yields were emitted, then this for-of loop is inside a star
   // generator and must handle the case of Generator.return. Like in
   // yield*, it is handled with a finally block.
-  uint32_t numYieldsEmitted = bce->numYields;
+  uint32_t numYieldsEmitted = bce->bytecodeSection().numYields();
   if (numYieldsEmitted > numYieldsAtBeginCodeNeedingIterClose_) {
     if (!tryCatch_->emitFinally()) {
       return false;
     }
 
     InternalIfEmitter ifGeneratorClosing(bce);
     if (!bce->emit1(JSOP_ISGENCLOSING)) {
       //            [stack] ITER ... FTYPE FVALUE CLOSING
@@ -130,22 +131,22 @@ bool ForOfLoopControl::emitEndCodeNeedin
   numYieldsAtBeginCodeNeedingIterClose_ = UINT32_MAX;
 
   return true;
 }
 
 bool ForOfLoopControl::emitIteratorCloseInInnermostScopeWithTryNote(
     BytecodeEmitter* bce,
     CompletionKind completionKind /* = CompletionKind::Normal */) {
-  ptrdiff_t start = bce->offset();
+  ptrdiff_t start = bce->bytecodeSection().offset();
   if (!emitIteratorCloseInScope(bce, *bce->innermostEmitterScope(),
                                 completionKind)) {
     return false;
   }
-  ptrdiff_t end = bce->offset();
+  ptrdiff_t end = bce->bytecodeSection().offset();
   return bce->addTryNote(JSTRY_FOR_OF_ITERCLOSE, 0, start, end);
 }
 
 bool ForOfLoopControl::emitIteratorCloseInScope(
     BytecodeEmitter* bce, EmitterScope& currentScope,
     CompletionKind completionKind /* = CompletionKind::Normal */) {
   return bce->emitIteratorCloseInScope(currentScope, iterKind_, completionKind,
                                        allowSelfHosted_);
@@ -188,17 +189,17 @@ bool ForOfLoopControl::emitPrepareForNon
     //              [stack] ITER UNDEF
     return false;
   }
   if (!bce->emit1(JSOP_SWAP)) {
     //              [stack] UNDEF ITER
     return false;
   }
 
-  *tryNoteStart = bce->offset();
+  *tryNoteStart = bce->bytecodeSection().offset();
   if (!emitIteratorCloseInScope(bce, currentScope, CompletionKind::Normal)) {
     //              [stack] UNDEF
     return false;
   }
 
   if (isTarget) {
     // At the level of the target block, there's bytecode after the
     // loop that will pop the next method, the iterator, and the
--- a/js/src/frontend/FunctionEmitter.cpp
+++ b/js/src/frontend/FunctionEmitter.cpp
@@ -215,17 +215,17 @@ bool FunctionEmitter::emitAsmJSModule() 
 #ifdef DEBUG
   state_ = State::End;
 #endif
   return true;
 }
 
 bool FunctionEmitter::emitFunction() {
   // Make the function object a literal in the outer script's pool.
-  unsigned index = bce_->objectList.add(funbox_);
+  unsigned index = bce_->perScriptData().objectList().add(funbox_);
 
   //                [stack]
 
   if (isHoisted_ == IsHoisted::No) {
     return emitNonHoisted(index);
     //              [stack] FUN?
   }
 
--- a/js/src/frontend/IfEmitter.cpp
+++ b/js/src/frontend/IfEmitter.cpp
@@ -36,38 +36,38 @@ bool BranchEmitterBase::emitThenInternal
   }
   if (!bce_->emitJump(JSOP_IFEQ, &jumpAroundThen_)) {
     return false;
   }
 
   // To restore stack depth in else part, save depth of the then part.
 #ifdef DEBUG
   // If DEBUG, this is also necessary to calculate |pushed_|.
-  thenDepth_ = bce_->stackDepth;
+  thenDepth_ = bce_->bytecodeSection().stackDepth();
 #else
   if (type == SRC_COND || type == SRC_IF_ELSE) {
-    thenDepth_ = bce_->stackDepth;
+    thenDepth_ = bce_->bytecodeSection().stackDepth();
   }
 #endif
 
   // Enclose then-branch with TDZCheckCache.
   if (kind_ == Kind::MayContainLexicalAccessInBranch) {
     tdzCache_.emplace(bce_);
   }
 
   return true;
 }
 
 void BranchEmitterBase::calculateOrCheckPushed() {
 #ifdef DEBUG
   if (!calculatedPushed_) {
-    pushed_ = bce_->stackDepth - thenDepth_;
+    pushed_ = bce_->bytecodeSection().stackDepth() - thenDepth_;
     calculatedPushed_ = true;
   } else {
-    MOZ_ASSERT(pushed_ == bce_->stackDepth - thenDepth_);
+    MOZ_ASSERT(pushed_ == bce_->bytecodeSection().stackDepth() - thenDepth_);
   }
 #endif
 }
 
 bool BranchEmitterBase::emitElseInternal() {
   calculateOrCheckPushed();
 
   // The end of TDZCheckCache for then-clause.
@@ -87,17 +87,17 @@ bool BranchEmitterBase::emitElseInternal
   if (!bce_->emitJumpTargetAndPatch(jumpAroundThen_)) {
     return false;
   }
 
   // Clear jumpAroundThen_ offset, to tell emitEnd there was an else part.
   jumpAroundThen_ = JumpList();
 
   // Restore stack depth of the then part.
-  bce_->stackDepth = thenDepth_;
+  bce_->bytecodeSection().setStackDepth(thenDepth_);
 
   // Enclose else-branch with TDZCheckCache.
   if (kind_ == Kind::MayContainLexicalAccessInBranch) {
     tdzCache_.emplace(bce_);
   }
 
   return true;
 }
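
The DEBUG-only bookkeeping above records the stack depth before the then-branch, derives how many values it pushed, and checks that the else-branch pushes the same number after the depth is restored. A minimal standalone sketch of that check, with a simplified stand-in for the emitter:

// Standalone sketch (simplified, not the real IfEmitter) of the DEBUG-only
// check above: both branches of an if/else must leave the same number of
// values on the stack, so the emitter records the depth before the "then"
// branch, derives how much it pushed, and checks the "else" branch against it.
#include <cassert>

struct FakeEmitter {
  int stackDepth = 0;
};

struct BranchChecker {
  explicit BranchChecker(FakeEmitter& bce)
      : bce_(bce), thenDepth_(bce.stackDepth) {}

  void finishThen() {
    pushed_ = bce_.stackDepth - thenDepth_;
    bce_.stackDepth = thenDepth_;  // restore depth before emitting "else"
  }

  void finishElse() const {
    assert(bce_.stackDepth - thenDepth_ == pushed_ &&
           "then and else branches must push the same number of values");
  }

  FakeEmitter& bce_;
  int thenDepth_;
  int pushed_ = 0;
};

int main() {
  FakeEmitter bce;
  BranchChecker check(bce);
  bce.stackDepth += 1;  // "then" branch pushes one value
  check.finishThen();
  bce.stackDepth += 1;  // "else" branch also pushes one value
  check.finishElse();
}
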
--- a/js/src/frontend/LabelEmitter.cpp
+++ b/js/src/frontend/LabelEmitter.cpp
@@ -22,30 +22,30 @@ bool LabelEmitter::emitLabel(JSAtom* nam
   uint32_t index;
   if (!bce_->makeAtomIndex(name, &index)) {
     return false;
   }
   if (!bce_->emitN(JSOP_LABEL, 4, &top_)) {
     return false;
   }
 
-  controlInfo_.emplace(bce_, name, bce_->offset());
+  controlInfo_.emplace(bce_, name, bce_->bytecodeSection().offset());
 
 #ifdef DEBUG
   state_ = State::Label;
 #endif
   return true;
 }
 
 bool LabelEmitter::emitEnd() {
   MOZ_ASSERT(state_ == State::Label);
 
   // Patch the JSOP_LABEL offset.
-  jsbytecode* labelpc = bce_->code(top_);
-  int32_t offset = bce_->lastNonJumpTargetOffset() - top_;
+  jsbytecode* labelpc = bce_->bytecodeSection().code(top_);
+  int32_t offset = bce_->bytecodeSection().lastNonJumpTargetOffset() - top_;
   MOZ_ASSERT(*labelpc == JSOP_LABEL);
   SET_CODE_OFFSET(labelpc, offset);
 
   // Patch the break/continue to this label.
   if (!controlInfo_->patchBreaks(bce_)) {
     return false;
   }
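
emitEnd above patches the JSOP_LABEL operand after the fact: the emitter remembers where the opcode was written and fills in the relative offset once the end of the labelled statement is known. A minimal standalone sketch of that backpatching step, over a plain byte vector rather than real bytecode:

// Standalone sketch of the backpatching step above: emit an opcode with a
// placeholder 4-byte operand, remember where it lives, and once the jump
// target is known write the relative offset back into the bytecode.
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

static void setCodeOffset(uint8_t* pc, int32_t offset) {
  // Write the operand immediately after the opcode byte.
  std::memcpy(pc + 1, &offset, sizeof(offset));
}

int main() {
  std::vector<uint8_t> code;

  // Emit a label-like opcode plus a 4-byte placeholder operand.
  size_t top = code.size();
  code.push_back(0xA5);              // pretend opcode
  code.insert(code.end(), 4, 0x00);  // operand, patched later

  // Emit some body bytecode.
  code.insert(code.end(), 7, 0x01);

  // Patch: operand is the distance from the label to the current end.
  int32_t offset = int32_t(code.size() - top);
  setCodeOffset(code.data() + top, offset);

  std::cout << "patched offset: " << offset << '\n';  // prints 12
}
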
 
--- a/js/src/frontend/ObjectEmitter.cpp
+++ b/js/src/frontend/ObjectEmitter.cpp
@@ -407,17 +407,17 @@ bool ObjectEmitter::emitObject(size_t pr
   MOZ_ASSERT(propertyState_ == PropertyState::Start);
   MOZ_ASSERT(objectState_ == ObjectState::Start);
 
   //                [stack]
 
   // Emit code for {p:a, '%q':b, 2:c} that is equivalent to constructing
   // a new object and defining (in source order) each property on the object
   // (or mutating the object's [[Prototype]], in the case of __proto__).
-  top_ = bce_->offset();
+  top_ = bce_->bytecodeSection().offset();
   if (!bce_->emitNewInit()) {
     //              [stack] OBJ
     return false;
   }
 
   // Try to construct the shape of the object as we go, so we can emit a
   // JSOP_NEWOBJECT with the final shape instead.
   // In the case of computed property names and indices, we cannot fix the
--- a/js/src/frontend/SwitchEmitter.cpp
+++ b/js/src/frontend/SwitchEmitter.cpp
@@ -146,66 +146,66 @@ bool SwitchEmitter::validateCaseCount(ui
 
 bool SwitchEmitter::emitCond() {
   MOZ_ASSERT(state_ == State::CaseCount);
 
   kind_ = Kind::Cond;
 
   // After entering the scope if necessary, push the switch control.
   controlInfo_.emplace(bce_, StatementKind::Switch);
-  top_ = bce_->offset();
+  top_ = bce_->bytecodeSection().offset();
 
   if (!caseOffsets_.resize(caseCount_)) {
     ReportOutOfMemory(bce_->cx);
     return false;
   }
 
   // The note has two offsets: first tells total switch code length;
   // second tells offset to first JSOP_CASE.
   if (!bce_->newSrcNote3(SRC_CONDSWITCH, 0, 0, &noteIndex_)) {
     return false;
   }
 
-  MOZ_ASSERT(top_ == bce_->offset());
+  MOZ_ASSERT(top_ == bce_->bytecodeSection().offset());
   if (!bce_->emitN(JSOP_CONDSWITCH, 0)) {
     return false;
   }
 
   tdzCacheCaseAndBody_.emplace(bce_);
 
   state_ = State::Cond;
   return true;
 }
 
 bool SwitchEmitter::emitTable(const TableGenerator& tableGen) {
   MOZ_ASSERT(state_ == State::CaseCount);
   kind_ = Kind::Table;
 
   // After entering the scope if necessary, push the switch control.
   controlInfo_.emplace(bce_, StatementKind::Switch);
-  top_ = bce_->offset();
+  top_ = bce_->bytecodeSection().offset();
 
   // The note has one offset that tells total switch code length.
   if (!bce_->newSrcNote2(SRC_TABLESWITCH, 0, &noteIndex_)) {
     return false;
   }
 
   if (!caseOffsets_.resize(tableGen.tableLength())) {
     ReportOutOfMemory(bce_->cx);
     return false;
   }
 
-  MOZ_ASSERT(top_ == bce_->offset());
+  MOZ_ASSERT(top_ == bce_->bytecodeSection().offset());
   if (!bce_->emitN(JSOP_TABLESWITCH,
                    JSOP_TABLESWITCH_LENGTH - sizeof(jsbytecode))) {
     return false;
   }
 
   // Skip default offset.
-  jsbytecode* pc = bce_->code(top_ + JUMP_OFFSET_LEN);
+  jsbytecode* pc = bce_->bytecodeSection().code(top_ + JUMP_OFFSET_LEN);
 
   // Fill in switch bounds, which we know fit in 16-bit offsets.
   SET_JUMP_OFFSET(pc, tableGen.low());
   SET_JUMP_OFFSET(pc + JUMP_OFFSET_LEN, tableGen.high());
 
   state_ = State::Table;
   return true;
 }
@@ -218,19 +218,19 @@ bool SwitchEmitter::emitCaseOrDefaultJum
       return false;
     }
     return true;
   }
 
   if (caseIndex > 0) {
     // Link the last JSOP_CASE's SRC_NEXTCASE to current JSOP_CASE for the
     // benefit of IonBuilder.
-    if (!bce_->setSrcNoteOffset(caseNoteIndex_,
-                                SrcNote::NextCase::NextCaseOffset,
-                                bce_->offset() - lastCaseOffset_)) {
+    if (!bce_->setSrcNoteOffset(
+            caseNoteIndex_, SrcNote::NextCase::NextCaseOffset,
+            bce_->bytecodeSection().offset() - lastCaseOffset_)) {
       return false;
     }
   }
 
   if (!bce_->newSrcNote2(SRC_NEXTCASE, 0, &caseNoteIndex_)) {
     return false;
   }
 
@@ -238,21 +238,22 @@ bool SwitchEmitter::emitCaseOrDefaultJum
   if (!bce_->emitJump(JSOP_CASE, &caseJump)) {
     return false;
   }
   caseOffsets_[caseIndex] = caseJump.offset;
   lastCaseOffset_ = caseJump.offset;
 
   if (caseIndex == 0) {
     // Switch note's second offset is to first JSOP_CASE.
-    unsigned noteCount = bce_->notes().length();
+    unsigned noteCount = bce_->bytecodeSection().notes().length();
     if (!bce_->setSrcNoteOffset(noteIndex_, 1, lastCaseOffset_ - top_)) {
       return false;
     }
-    unsigned noteCountDelta = bce_->notes().length() - noteCount;
+    unsigned noteCountDelta =
+        bce_->bytecodeSection().notes().length() - noteCount;
     if (noteCountDelta != 0) {
       caseNoteIndex_ += noteCountDelta;
     }
   }
 
   return true;
 }
 
@@ -393,28 +394,29 @@ bool SwitchEmitter::emitEnd() {
   // Set the default offset (to end of switch if no default).
   jsbytecode* pc;
   if (kind_ == Kind::Cond) {
     pc = nullptr;
     bce_->patchJumpsToTarget(condSwitchDefaultOffset_,
                              defaultJumpTargetOffset_);
   } else {
     // Fill in the default jump target.
-    pc = bce_->code(top_);
+    pc = bce_->bytecodeSection().code(top_);
     SET_JUMP_OFFSET(pc, defaultJumpTargetOffset_.offset - top_);
     pc += JUMP_OFFSET_LEN;
   }
 
   // Set the SRC_SWITCH note's offset operand to tell end of switch.
   // This code is shared between table switch and cond switch.
   static_assert(unsigned(SrcNote::TableSwitch::EndOffset) ==
                     unsigned(SrcNote::CondSwitch::EndOffset),
                 "{TableSwitch,CondSwitch}::EndOffset should be same");
-  if (!bce_->setSrcNoteOffset(noteIndex_, SrcNote::TableSwitch::EndOffset,
-                              bce_->lastNonJumpTargetOffset() - top_)) {
+  if (!bce_->setSrcNoteOffset(
+          noteIndex_, SrcNote::TableSwitch::EndOffset,
+          bce_->bytecodeSection().lastNonJumpTargetOffset() - top_)) {
     return false;
   }
 
   if (kind_ == Kind::Table) {
     // Skip over the already-initialized switch bounds.
     pc += 2 * JUMP_OFFSET_LEN;
 
     // Use the 'default' offset for missing cases.
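
The table-switch path above lays out a default offset, the low/high bounds, and a dense table of per-case offsets, with missing cases falling back to the default. A minimal standalone sketch of how such a table resolves a value, using plain integers instead of real jump offsets:

// Standalone sketch of the tableswitch layout being filled in above: the
// operands hold a default target, the low/high bounds, and a dense table of
// case targets; values outside [low, high] (or with an empty entry) fall
// back to the default.
#include <cstdint>
#include <iostream>
#include <vector>

struct TableSwitch {
  int32_t defaultTarget;
  int32_t low;
  int32_t high;
  std::vector<int32_t> caseTargets;  // one entry per value in [low, high]

  int32_t resolve(int32_t value) const {
    if (value < low || value > high) {
      return defaultTarget;
    }
    int32_t target = caseTargets[value - low];
    return target != 0 ? target : defaultTarget;  // missing case -> default
  }
};

int main() {
  TableSwitch ts{/*defaultTarget=*/100, /*low=*/3, /*high=*/6, {10, 0, 20, 30}};
  std::cout << ts.resolve(3) << ' '    // 10
            << ts.resolve(4) << ' '    // empty entry -> default, 100
            << ts.resolve(9) << '\n';  // out of range -> default, 100
}
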
--- a/js/src/frontend/TryEmitter.cpp
+++ b/js/src/frontend/TryEmitter.cpp
@@ -48,47 +48,48 @@ bool TryEmitter::emitTry() {
 
   // Since an exception can be thrown at any place inside the try block,
   // we need to restore the stack and the scope chain before we transfer
   // the control to the exception handler.
   //
   // For that we store in a try note associated with the catch or
   // finally block the stack depth upon the try entry. The interpreter
   // uses this depth to properly unwind the stack and the scope chain.
-  depth_ = bce_->stackDepth;
+  depth_ = bce_->bytecodeSection().stackDepth();
 
   // Record the try location, then emit the try block.
   if (!bce_->newSrcNote(SRC_TRY, &noteIndex_)) {
     return false;
   }
   if (!bce_->emit1(JSOP_TRY)) {
     return false;
   }
-  tryStart_ = bce_->offset();
+  tryStart_ = bce_->bytecodeSection().offset();
 
 #ifdef DEBUG
   state_ = State::Try;
 #endif
   return true;
 }
 
 bool TryEmitter::emitTryEnd() {
   MOZ_ASSERT(state_ == State::Try);
-  MOZ_ASSERT(depth_ == bce_->stackDepth);
+  MOZ_ASSERT(depth_ == bce_->bytecodeSection().stackDepth());
 
   // GOSUB to finally, if present.
   if (hasFinally() && controlInfo_) {
     if (!bce_->emitGoSub(&controlInfo_->gosubs)) {
       return false;
     }
   }
 
   // Source note points to the jump at the end of the try block.
-  if (!bce_->setSrcNoteOffset(noteIndex_, SrcNote::Try::EndOfTryJumpOffset,
-                              bce_->offset() - tryStart_ + JSOP_TRY_LENGTH)) {
+  if (!bce_->setSrcNoteOffset(
+          noteIndex_, SrcNote::Try::EndOfTryJumpOffset,
+          bce_->bytecodeSection().offset() - tryStart_ + JSOP_TRY_LENGTH)) {
     return false;
   }
 
   // Emit jump over catch and/or finally.
   if (!bce_->emitJump(JSOP_GOTO, &catchAndFinallyJump_)) {
     return false;
   }
 
@@ -100,17 +101,17 @@ bool TryEmitter::emitTryEnd() {
 }
 
 bool TryEmitter::emitCatch() {
   MOZ_ASSERT(state_ == State::Try);
   if (!emitTryEnd()) {
     return false;
   }
 
-  MOZ_ASSERT(bce_->stackDepth == depth_);
+  MOZ_ASSERT(bce_->bytecodeSection().stackDepth() == depth_);
 
   if (controlKind_ == ControlKind::Syntactic) {
     // Clear the frame's return value that might have been set by the
     // try block:
     //
     //   eval("try { 1; throw 2 } catch(e) {}"); // undefined, not 1
     if (!bce_->emit1(JSOP_UNDEFINED)) {
       return false;
@@ -133,17 +134,17 @@ bool TryEmitter::emitCatchEnd() {
     return true;
   }
 
   // gosub <finally>, if required.
   if (hasFinally()) {
     if (!bce_->emitGoSub(&controlInfo_->gosubs)) {
       return false;
     }
-    MOZ_ASSERT(bce_->stackDepth == depth_);
+    MOZ_ASSERT(bce_->bytecodeSection().stackDepth() == depth_);
 
     // Jump over the finally block.
     if (!bce_->emitJump(JSOP_GOTO, &catchAndFinallyJump_)) {
       return false;
     }
   }
 
   return true;
@@ -172,17 +173,17 @@ bool TryEmitter::emitFinally(
     }
   } else {
     MOZ_ASSERT(state_ == State::Catch);
     if (!emitCatchEnd()) {
       return false;
     }
   }
 
-  MOZ_ASSERT(bce_->stackDepth == depth_);
+  MOZ_ASSERT(bce_->bytecodeSection().stackDepth() == depth_);
 
   if (!bce_->emitJumpTarget(&finallyStart_)) {
     return false;
   }
 
   if (controlInfo_) {
     // Fix up the gosubs that might have been emitted before non-local
     // jumps to the finally code.
@@ -248,17 +249,17 @@ bool TryEmitter::emitEnd() {
     }
   } else {
     MOZ_ASSERT(state_ == State::Finally);
     if (!emitFinallyEnd()) {
       return false;
     }
   }
 
-  MOZ_ASSERT(bce_->stackDepth == depth_);
+  MOZ_ASSERT(bce_->bytecodeSection().stackDepth() == depth_);
 
   // ReconstructPCStack needs a NOP here to mark the end of the last
   // catch block.
   if (!bce_->emit1(JSOP_NOP)) {
     return false;
   }
 
   // Fix up the end-of-try/catch jumps to come here.
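
The comment at the top of emitTry explains that each try note stores the stack depth at try entry along with the bytecode range it covers, so the interpreter can unwind to the right depth. A minimal standalone sketch of that bookkeeping, with simplified note records rather than the real try-note format:

// Standalone sketch of the try-note bookkeeping described above: each note
// records the bytecode range it covers and the stack depth at try entry, so
// an unwinder can find the innermost covering note and trim the stack back
// to that depth.
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

struct TryNote {
  uint32_t start;       // first covered bytecode offset
  uint32_t end;         // one past the last covered offset
  uint32_t stackDepth;  // depth to restore when unwinding to the handler
};

static std::optional<TryNote> findInnermost(const std::vector<TryNote>& notes,
                                            uint32_t pc) {
  std::optional<TryNote> best;
  for (const TryNote& note : notes) {
    if (pc >= note.start && pc < note.end) {
      if (!best || note.end - note.start < best->end - best->start) {
        best = note;  // smaller range == more deeply nested
      }
    }
  }
  return best;
}

int main() {
  std::vector<TryNote> notes = {{10, 80, 0}, {20, 40, 2}};
  if (auto note = findInnermost(notes, 25)) {
    std::cout << "unwind stack to depth " << note->stackDepth << '\n';  // 2
  }
}
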
--- a/js/src/frontend/WhileEmitter.cpp
+++ b/js/src/frontend/WhileEmitter.cpp
@@ -95,17 +95,18 @@ bool WhileEmitter::emitCond(const Maybe<
 
 bool WhileEmitter::emitEnd() {
   MOZ_ASSERT(state_ == State::Cond);
 
   if (!loopInfo_->emitLoopEnd(bce_, JSOP_IFNE)) {
     return false;
   }
 
-  if (!bce_->addTryNote(JSTRY_LOOP, bce_->stackDepth, loopInfo_->headOffset(),
+  if (!bce_->addTryNote(JSTRY_LOOP, bce_->bytecodeSection().stackDepth(),
+                        loopInfo_->headOffset(),
                         loopInfo_->breakTargetOffset())) {
     return false;
   }
 
   if (!bce_->setSrcNoteOffset(noteIndex_, SrcNote::While::BackJumpOffset,
                               loopInfo_->loopEndOffsetFromEntryJump())) {
     return false;
   }
--- a/js/src/gc/Allocator.cpp
+++ b/js/src/gc/Allocator.cpp
@@ -2,31 +2,35 @@
  * vim: set ts=8 sts=2 et sw=2 tw=80:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "gc/Allocator.h"
 
 #include "mozilla/DebugOnly.h"
+#include "mozilla/TimeStamp.h"
 
 #include "gc/GCInternals.h"
 #include "gc/GCTrace.h"
 #include "gc/Nursery.h"
 #include "jit/JitRealm.h"
 #include "threading/CpuCount.h"
 #include "vm/JSContext.h"
 #include "vm/Runtime.h"
 #include "vm/StringType.h"
 
 #include "gc/ArenaList-inl.h"
 #include "gc/Heap-inl.h"
 #include "gc/PrivateIterators-inl.h"
 #include "vm/JSObject-inl.h"
 
+using mozilla::TimeDuration;
+using mozilla::TimeStamp;
+
 using namespace js;
 using namespace gc;
 
 template <AllowGC allowGC /* = CanGC */>
 JSObject* js::AllocateObject(JSContext* cx, AllocKind kind,
                              size_t nDynamicSlots, InitialHeap heap,
                              const Class* clasp) {
   MOZ_ASSERT(IsObjectAllocKind(kind));
@@ -263,42 +267,61 @@ T* GCRuntime::tryNewTenuredThing(JSConte
   // Bump allocate in the arena's current free-list span.
   T* t = reinterpret_cast<T*>(cx->freeLists().allocate(kind));
   if (MOZ_UNLIKELY(!t)) {
     // Get the next available free list and allocate out of it. This may
     // acquire a new arena, which will lock the chunk list. If there are no
     // chunks available it may also allocate new memory directly.
     t = reinterpret_cast<T*>(refillFreeListFromAnyThread(cx, kind));
 
-    if (MOZ_UNLIKELY(!t && allowGC)) {
-      if (!cx->helperThread()) {
-        // We have no memory available for a new chunk; perform an
-        // all-compartments, non-incremental, shrinking GC and wait for
-        // sweeping to finish.
-        JS::PrepareForFullGC(cx);
-        cx->runtime()->gc.gc(GC_SHRINK, JS::GCReason::LAST_DITCH);
-        cx->runtime()->gc.waitBackgroundSweepOrAllocEnd();
-
+    if (MOZ_UNLIKELY(!t)) {
+      if (allowGC) {
+        cx->runtime()->gc.attemptLastDitchGC(cx);
         t = tryNewTenuredThing<T, NoGC>(cx, kind, thingSize);
       }
       if (!t) {
-        ReportOutOfMemory(cx);
+        if (allowGC) {
+          ReportOutOfMemory(cx);
+        }
+        return nullptr;
       }
     }
   }
 
   checkIncrementalZoneState(cx, t);
   gcTracer.traceTenuredAlloc(t, kind);
   // We count this regardless of the profiler's state, assuming that it costs
   // just as much to count it, as to check the profiler's state and decide not
   // to count it.
   cx->noteTenuredAlloc();
   return t;
 }
 
+void GCRuntime::attemptLastDitchGC(JSContext* cx) {
+  // Either there was no memory available for a new chunk or the heap hit its
+  // size limit. Try to perform an all-compartments, non-incremental, shrinking
+  // GC and wait for it to finish.
+
+  if (cx->helperThread()) {
+    return;
+  }
+
+  if (!lastLastDitchTime.IsNull() &&
+      TimeStamp::Now() - lastLastDitchTime <= tunables.minLastDitchGCPeriod()) {
+    return;
+  }
+
+  JS::PrepareForFullGC(cx);
+  gc(GC_SHRINK, JS::GCReason::LAST_DITCH);
+  waitBackgroundAllocEnd();
+  waitBackgroundFreeEnd();
+
+  lastLastDitchTime = mozilla::TimeStamp::Now();
+}
+
 template <AllowGC allowGC>
 bool GCRuntime::checkAllocatorState(JSContext* cx, AllocKind kind) {
   if (allowGC) {
     if (!gcIfNeededAtAllocation(cx)) {
       return false;
     }
   }
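
attemptLastDitchGC above throttles the shrinking GC: if the previous last-ditch GC ran within the minimum period, the allocation simply fails instead of triggering another collection. A minimal standalone sketch of that throttle, using std::chrono in place of mozilla::TimeStamp and TimeDuration:

// Standalone sketch of the throttling added in attemptLastDitchGC, using
// std::chrono instead of mozilla::TimeStamp/TimeDuration: the expensive
// shrinking GC is skipped when the previous last-ditch GC ran too recently.
#include <chrono>
#include <iostream>
#include <optional>

using Clock = std::chrono::steady_clock;

struct LastDitchThrottle {
  std::chrono::seconds minPeriod{60};
  std::optional<Clock::time_point> lastRun;

  // Returns true if the caller should actually run the last-ditch GC.
  bool shouldRun() {
    Clock::time_point now = Clock::now();
    if (lastRun && now - *lastRun <= minPeriod) {
      return false;  // too soon after the previous one
    }
    lastRun = now;
    return true;
  }
};

int main() {
  LastDitchThrottle throttle;
  std::cout << throttle.shouldRun() << '\n';  // 1: first attempt runs
  std::cout << throttle.shouldRun() << '\n';  // 0: within the minimum period
}
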
 
--- a/js/src/gc/GC.cpp
+++ b/js/src/gc/GC.cpp
@@ -359,16 +359,19 @@ static const uint32_t NurseryFreeThresho
 static const float NurseryFreeThresholdForIdleCollectionFraction = 0.25f;
 
 /* JSGC_PRETENURE_THRESHOLD */
 static const float PretenureThreshold = 0.6f;
 
 /* JSGC_PRETENURE_GROUP_THRESHOLD */
 static const float PretenureGroupThreshold = 3000;
 
+/* JSGC_MIN_LAST_DITCH_GC_PERIOD */
+static const TimeDuration MinLastDitchGCPeriod = TimeDuration::FromSeconds(60);
+
 }  // namespace TuningDefaults
 }  // namespace gc
 }  // namespace js
 
 /*
  * We start to incremental collection for a zone when a proportion of its
  * threshold is reached. This is configured by the
  * JSGC_ALLOCATION_THRESHOLD_FACTOR and
@@ -1516,16 +1519,19 @@ bool GCSchedulingTunables::setParameter(
       break;
     }
     case JSGC_PRETENURE_GROUP_THRESHOLD:
       if (value <= 0) {
         return false;
       }
       pretenureGroupThreshold_ = value;
       break;
+    case JSGC_MIN_LAST_DITCH_GC_PERIOD:
+      minLastDitchGCPeriod_ = TimeDuration::FromSeconds(value);
+      break;
     default:
       MOZ_CRASH("Unknown GC parameter.");
   }
 
   return true;
 }
 
 void GCSchedulingTunables::setMaxMallocBytes(size_t value) {
@@ -1608,17 +1614,18 @@ GCSchedulingTunables::GCSchedulingTunabl
       dynamicMarkSliceEnabled_(TuningDefaults::DynamicMarkSliceEnabled),
       minEmptyChunkCount_(TuningDefaults::MinEmptyChunkCount),
       maxEmptyChunkCount_(TuningDefaults::MaxEmptyChunkCount),
       nurseryFreeThresholdForIdleCollection_(
           TuningDefaults::NurseryFreeThresholdForIdleCollection),
       nurseryFreeThresholdForIdleCollectionFraction_(
           TuningDefaults::NurseryFreeThresholdForIdleCollectionFraction),
       pretenureThreshold_(TuningDefaults::PretenureThreshold),
-      pretenureGroupThreshold_(TuningDefaults::PretenureGroupThreshold) {}
+      pretenureGroupThreshold_(TuningDefaults::PretenureGroupThreshold),
+      minLastDitchGCPeriod_(TuningDefaults::MinLastDitchGCPeriod) {}
 
 void GCRuntime::resetParameter(JSGCParamKey key, AutoLockGC& lock) {
   switch (key) {
     case JSGC_MAX_MALLOC_BYTES:
       setMaxMallocBytes(TuningDefaults::MaxMallocBytes, lock);
       break;
     case JSGC_SLICE_TIME_BUDGET:
       defaultTimeBudget_ = TuningDefaults::DefaultTimeBudget;
@@ -1703,16 +1710,19 @@ void GCSchedulingTunables::resetParamete
           TuningDefaults::NurseryFreeThresholdForIdleCollectionFraction;
       break;
     case JSGC_PRETENURE_THRESHOLD:
       pretenureThreshold_ = TuningDefaults::PretenureThreshold;
       break;
     case JSGC_PRETENURE_GROUP_THRESHOLD:
       pretenureGroupThreshold_ = TuningDefaults::PretenureGroupThreshold;
       break;
+    case JSGC_MIN_LAST_DITCH_GC_PERIOD:
+      minLastDitchGCPeriod_ = TuningDefaults::MinLastDitchGCPeriod;
+      break;
     default:
       MOZ_CRASH("Unknown GC parameter.");
   }
 }
 
 uint32_t GCRuntime::getParameter(JSGCParamKey key, const AutoLockGC& lock) {
   switch (key) {
     case JSGC_MAX_BYTES:
@@ -1778,16 +1788,18 @@ uint32_t GCRuntime::getParameter(JSGCPar
       return tunables.nurseryFreeThresholdForIdleCollection();
     case JSGC_NURSERY_FREE_THRESHOLD_FOR_IDLE_COLLECTION_PERCENT:
       return uint32_t(tunables.nurseryFreeThresholdForIdleCollectionFraction() *
                       100.0f);
     case JSGC_PRETENURE_THRESHOLD:
       return uint32_t(tunables.pretenureThreshold() * 100);
     case JSGC_PRETENURE_GROUP_THRESHOLD:
       return tunables.pretenureGroupThreshold();
+    case JSGC_MIN_LAST_DITCH_GC_PERIOD:
+      return tunables.minLastDitchGCPeriod().ToSeconds();
     default:
       MOZ_CRASH("Unknown parameter key");
   }
 }
 
 void GCRuntime::setMarkStackLimit(size_t limit, AutoLockGC& lock) {
   MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
   AutoUnlockGC unlock(lock);
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -305,18 +305,17 @@ class GCRuntime {
 
  public:
   // Internal public interface
   State state() const { return incrementalState; }
   bool isHeapCompacting() const { return state() == State::Compact; }
   bool isForegroundSweeping() const { return state() == State::Sweep; }
   bool isBackgroundSweeping() { return sweepTask.isRunning(); }
   void waitBackgroundSweepEnd();
-  void waitBackgroundSweepOrAllocEnd() {
-    waitBackgroundSweepEnd();
+  void waitBackgroundAllocEnd() {
     allocTask.cancelAndWait();
   }
   void waitBackgroundFreeEnd();
 
   void lockGC() { lock.lock(); }
 
   void unlockGC() { lock.unlock(); }
 
@@ -530,16 +529,17 @@ class GCRuntime {
   template <typename T>
   static void checkIncrementalZoneState(JSContext* cx, T* t);
   static TenuredCell* refillFreeListFromAnyThread(JSContext* cx,
                                                   AllocKind thingKind);
   static TenuredCell* refillFreeListFromMainThread(JSContext* cx,
                                                    AllocKind thingKind);
   static TenuredCell* refillFreeListFromHelperThread(JSContext* cx,
                                                      AllocKind thingKind);
+  void attemptLastDitchGC(JSContext* cx);
 
   /*
    * Return the list of chunks that can be released outside the GC lock.
    * Must be called either during the GC or with the GC lock taken.
    */
   friend class BackgroundDecommitTask;
   ChunkPool expireEmptyChunkPool(const AutoLockGC& lock);
   void freeEmptyChunks(const AutoLockGC& lock);
@@ -1040,16 +1040,18 @@ class GCRuntime {
 
   void minorGC(JS::GCReason reason,
                gcstats::PhaseKind phase = gcstats::PhaseKind::MINOR_GC)
       JS_HAZ_GC_CALL;
   void evictNursery(JS::GCReason reason = JS::GCReason::EVICT_NURSERY) {
     minorGC(reason, gcstats::PhaseKind::EVICT_NURSERY);
   }
 
+  mozilla::TimeStamp lastLastDitchTime;
+
   friend class MarkingValidator;
   friend class AutoEnterIteration;
 };
 
 /* Prevent compartments and zones from being collected during iteration. */
 class MOZ_RAII AutoEnterIteration {
   GCRuntime* gc;
 
--- a/js/src/gc/Scheduling.h
+++ b/js/src/gc/Scheduling.h
@@ -451,16 +451,24 @@ class GCSchedulingTunables {
   /*
    * JSGC_PRETENURE_GROUP_THRESHOLD
    *
    * During a single nursery collection, if this many objects from the same
    * object group are tenured, then that group will be pretenured.
    */
   UnprotectedData<uint32_t> pretenureGroupThreshold_;
 
+  /*
+   * JSGC_MIN_LAST_DITCH_GC_PERIOD
+   *
+   * A last-ditch GC is skipped if it would run less than this many seconds
+   * after the previous one.
+   */
+  MainThreadData<mozilla::TimeDuration> minLastDitchGCPeriod_;
+
  public:
   GCSchedulingTunables();
 
   size_t gcMaxBytes() const { return gcMaxBytes_; }
   size_t maxMallocBytes() const { return maxMallocBytes_; }
   size_t gcMinNurseryBytes() const { return gcMinNurseryBytes_; }
   size_t gcMaxNurseryBytes() const { return gcMaxNurseryBytes_; }
   size_t gcZoneAllocThresholdBase() const { return gcZoneAllocThresholdBase_; }
@@ -497,16 +505,20 @@ class GCSchedulingTunables {
   float nurseryFreeThresholdForIdleCollectionFraction() const {
     return nurseryFreeThresholdForIdleCollectionFraction_;
   }
 
   bool attemptPretenuring() const { return pretenureThreshold_ < 1.0f; }
   float pretenureThreshold() const { return pretenureThreshold_; }
   uint32_t pretenureGroupThreshold() const { return pretenureGroupThreshold_; }
 
+  mozilla::TimeDuration minLastDitchGCPeriod() const {
+    return minLastDitchGCPeriod_;
+  }
+
   MOZ_MUST_USE bool setParameter(JSGCParamKey key, uint32_t value,
                                  const AutoLockGC& lock);
   void resetParameter(JSGCParamKey key, const AutoLockGC& lock);
 
   void setMaxMallocBytes(size_t value);
 
  private:
   void setHighFrequencyLowLimit(size_t value);
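
The new JSGC_MIN_LAST_DITCH_GC_PERIOD tunable is stored as a duration but set and reported in whole seconds, as the GC.cpp hunks above show. A minimal standalone sketch of that conversion, with a simplified stand-in for GCSchedulingTunables:

// Standalone sketch (simplified stand-in, not the real GCSchedulingTunables)
// of how a duration-valued tunable like JSGC_MIN_LAST_DITCH_GC_PERIOD is
// stored as a duration but configured and reported in whole seconds.
#include <chrono>
#include <cstdint>
#include <iostream>

class Tunables {
 public:
  static constexpr std::chrono::seconds kDefaultMinLastDitchGCPeriod{60};

  void setMinLastDitchGCPeriod(uint32_t seconds) {
    minLastDitchGCPeriod_ = std::chrono::seconds(seconds);
  }
  void resetMinLastDitchGCPeriod() {
    minLastDitchGCPeriod_ = kDefaultMinLastDitchGCPeriod;
  }
  uint32_t minLastDitchGCPeriodSeconds() const {
    return uint32_t(minLastDitchGCPeriod_.count());
  }

 private:
  std::chrono::seconds minLastDitchGCPeriod_ = kDefaultMinLastDitchGCPeriod;
};

int main() {
  Tunables tunables;
  tunables.setMinLastDitchGCPeriod(5);
  std::cout << tunables.minLastDitchGCPeriodSeconds() << '\n';  // 5
  tunables.resetMinLastDitchGCPeriod();
  std::cout << tunables.minLastDitchGCPeriodSeconds() << '\n';  // 60
}
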
--- a/js/src/gc/Verifier.cpp
+++ b/js/src/gc/Verifier.cpp
@@ -784,18 +784,24 @@ bool js::gc::CheckWeakMapEntryMarking(co
     fprintf(stderr, "WeakMap object is marked differently to the map\n");
     fprintf(stderr, "(map %p is %s, object %p is %s)\n", map,
             CellColorName(mapColor), object,
             CellColorName(GetCellColor(object)));
     ok = false;
   }
 
   CellColor keyColor = GetCellColor(key);
-  CellColor valueColor =
-      valueZone->isGCMarking() ? GetCellColor(value) : CellColor::Black;
+
+  // Values belonging to other runtimes or in uncollected zones are treated as
+  // black.
+  CellColor valueColor = CellColor::Black;
+  if (value->runtimeFromAnyThread() == zone->runtimeFromAnyThread() &&
+      valueZone->isGCMarking()) {
+    valueColor = GetCellColor(value);
+  }
 
   if (valueColor < Min(mapColor, keyColor)) {
     fprintf(stderr, "WeakMap value is less marked than map and key\n");
     fprintf(stderr, "(map %p is %s, key %p is %s, value %p is %s)\n", map,
             CellColorName(mapColor), key, CellColorName(keyColor), value,
             CellColorName(valueColor));
     ok = false;
   }
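
CheckWeakMapEntryMarking above enforces that a weak map entry's value is marked at least as strongly as the weaker of the map and the key, with values the current collection cannot mark treated as black. A minimal standalone sketch of that invariant check:

// Standalone sketch of the invariant checked above: a weak map entry's value
// must be marked at least as strongly as the weaker of the map and the key,
// and values that the current collection cannot mark are treated as black.
#include <algorithm>
#include <iostream>

enum class CellColor { White = 0, Gray = 1, Black = 2 };

static const char* name(CellColor c) {
  switch (c) {
    case CellColor::White: return "white";
    case CellColor::Gray:  return "gray";
    default:               return "black";
  }
}

static bool checkWeakMapEntry(CellColor mapColor, CellColor keyColor,
                              CellColor valueColor, bool valueIsCollectable) {
  if (!valueIsCollectable) {
    valueColor = CellColor::Black;  // e.g. other runtime or uncollected zone
  }
  CellColor required = std::min(mapColor, keyColor);
  if (valueColor < required) {
    std::cerr << "value is " << name(valueColor) << " but must be at least "
              << name(required) << '\n';
    return false;
  }
  return true;
}

int main() {
  std::cout << checkWeakMapEntry(CellColor::Black, CellColor::Black,
                                 CellColor::Gray, /*valueIsCollectable=*/true)
            << '\n';  // 0: under-marked value
  std::cout << checkWeakMapEntry(CellColor::Black, CellColor::Black,
                                 CellColor::White, /*valueIsCollectable=*/false)
            << '\n';  // 1: uncollectable value is treated as black
}
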
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/gc/bug-1505622.js
@@ -0,0 +1,45 @@
+// Test that we don't repeatedly trigger last-ditch GCs.
+
+function allocUntilFail() {
+    gc();
+    let initGCNumber = gcparam("gcNumber");
+    let error;
+    try {
+        let a = [];
+        while (true) {
+            a.push(Symbol()); // Symbols are tenured.
+        }
+    } catch(err) {
+        error = err;
+    }
+    let finalGCNumber = gcparam("gcNumber");
+    gc();
+    assertEq(error, "out of memory");
+    return finalGCNumber - initGCNumber;
+}
+
+// Turn off any zeal, which would disrupt the GC number checks.
+gczeal(0);
+
+// Set a small heap limit.
+gcparam("maxBytes", 1024 * 1024);
+
+// Set the time limit for skipping last ditch GCs to 5 seconds.
+gcparam("minLastDitchGCPeriod", 5);
+assertEq(gcparam("minLastDitchGCPeriod"), 5);
+
+// Allocate up to the heap limit. This triggers a last ditch GC.
+let gcCount = allocUntilFail();
+assertEq(gcCount, 1);
+
+// Allocate up to the limit again. The second time we fail without
+// triggering a GC.
+gcCount = allocUntilFail();
+assertEq(gcCount, 0);
+
+// Wait for time limit to expire.
+sleep(6);
+
+// Check we trigger a GC again.
+gcCount = allocUntilFail();
+assertEq(gcCount, 1);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/gc/bug-1543014.js
@@ -0,0 +1,11 @@
+// |jit-test| skip-if: helperThreadCount() === 0
+gczeal(0);
+evalInWorker(`
+  var sym4 = Symbol.match;
+  function basicSweeping() {};
+  var wm1 = new WeakMap();
+  wm1.set(basicSweeping, sym4);
+  startgc(100000, 'shrinking');
+`);
+gczeal(2);
+var d1 = newGlobal({});
--- a/js/src/tests/lib/progressbar.py
+++ b/js/src/tests/lib/progressbar.py
@@ -34,16 +34,18 @@ class ProgressBar(object):
         # [{str:str}] Description of how to lay out each field in the counters map.
         self.counters_fmt = fmt
         # int: The value of 'current' equal to 100%.
         self.limit = limit
         # int: max digits in limit
         self.limit_digits = int(math.ceil(math.log10(self.limit)))
         # datetime: The start time.
         self.t0 = datetime.now()
+        # datetime: Optional, the last time update() ran.
+        self.last_update_time = None
 
         # Compute the width of the counters and build the format string.
         self.counters_width = 1  # [
         for layout in self.counters_fmt:
             self.counters_width += self.limit_digits
             # | (or ']' for the last one)
             self.counters_width += 1
 
@@ -74,27 +76,32 @@ class ProgressBar(object):
         pct = int(100.0 * current / self.limit)
         sys.stdout.write('{:3d}% '.format(pct))
 
         barlen = int(1.0 * self.barlen * current / self.limit) - 1
         bar = '=' * barlen + '>' + ' ' * (self.barlen - barlen - 1)
         sys.stdout.write(bar + '|')
 
         # Update the bar.
-        dt = datetime.now() - self.t0
+        now = datetime.now()
+        dt = now - self.t0
         dt = dt.seconds + dt.microseconds * 1e-6
         sys.stdout.write('{:6.1f}s'.format(dt))
         Terminal.clear_right()
 
         # Force redisplay, since we didn't write a \n.
         sys.stdout.flush()
 
+        self.last_update_time = now
+
     def poke(self):
         if not self.prior:
             return
+        if datetime.now() - self.last_update_time < self.update_granularity():
+            return
         self.update(*self.prior)
 
     def finish(self, complete=True):
         if not self.prior:
             sys.stdout.write('No test run... You can try adding'
                              ' --run-slow-tests or --run-skipped to run more tests\n')
             return
         final_count = self.limit if complete else self.prior[0]
--- a/js/src/vm/CompilationAndEvaluation.cpp
+++ b/js/src/vm/CompilationAndEvaluation.cpp
@@ -503,19 +503,20 @@ JS_PUBLIC_API bool JS::CloneAndExecuteSc
       return false;
     }
 
     js::Debugger::onNewScript(cx, script);
   }
   return ExecuteScript(cx, envChain, script, rval.address());
 }
 
+template <typename Unit>
 static bool Evaluate(JSContext* cx, ScopeKind scopeKind, HandleObject env,
                      const ReadOnlyCompileOptions& optionsArg,
-                     SourceText<char16_t>& srcBuf, MutableHandleValue rval) {
+                     SourceText<Unit>& srcBuf, MutableHandleValue rval) {
   CompileOptions options(cx, optionsArg);
   MOZ_ASSERT(!cx->zone()->isAtomsZone());
   AssertHeapIsIdle();
   CHECK_THREAD(cx);
   cx->check(env);
   MOZ_ASSERT_IF(!IsGlobalLexicalEnvironment(env),
                 scopeKind == ScopeKind::NonSyntactic);
 
@@ -543,24 +544,18 @@ static bool Evaluate(JSContext* cx, Hand
     return false;
   }
   return ::Evaluate(cx, scope->kind(), env, optionsArg, srcBuf, rval);
 }
 
 extern JS_PUBLIC_API bool JS::EvaluateUtf8(
     JSContext* cx, const ReadOnlyCompileOptions& options, const char* bytes,
     size_t length, MutableHandle<Value> rval) {
-  auto chars = UniqueTwoByteChars(
-      UTF8CharsToNewTwoByteCharsZ(cx, UTF8Chars(bytes, length), &length).get());
-  if (!chars) {
-    return false;
-  }
-
-  SourceText<char16_t> srcBuf;
-  if (!srcBuf.init(cx, std::move(chars), length)) {
+  SourceText<Utf8Unit> srcBuf;
+  if (!srcBuf.init(cx, bytes, length, SourceOwnership::Borrowed)) {
     return false;
   }
 
   RootedObject globalLexical(cx, &cx->global()->lexicalEnvironment());
   return ::Evaluate(cx, ScopeKind::Global, globalLexical, options, srcBuf,
                     rval);
 }
 
@@ -589,12 +584,24 @@ JS_PUBLIC_API bool JS::EvaluateUtf8Path(
     if (!file.open(cx, filename) || !file.readAll(cx, buffer)) {
       return false;
     }
   }
 
   CompileOptions options(cx, optionsArg);
   options.setFileAndLine(filename, 1);
 
-  return EvaluateUtf8(cx, options,
-                      reinterpret_cast<const char*>(buffer.begin()),
-                      buffer.length(), rval);
+  auto contents = reinterpret_cast<const char*>(buffer.begin());
+  size_t length = buffer.length();
+  auto chars = UniqueTwoByteChars(
+      UTF8CharsToNewTwoByteCharsZ(cx, UTF8Chars(contents, length), &length)
+          .get());
+  if (!chars) {
+    return false;
+  }
+
+  SourceText<char16_t> srcBuf;
+  if (!srcBuf.init(cx, std::move(chars), length)) {
+    return false;
+  }
+
+  return Evaluate(cx, options, srcBuf, rval);
 }
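
Templating Evaluate on the source unit type lets EvaluateUtf8 hand a borrowed UTF-8 buffer straight to the compiler, while EvaluateUtf8Path still inflates to char16_t first. A minimal standalone sketch of parameterizing an entry point on the unit type, with stand-in types rather than the real SourceText API:

// Standalone sketch of the templating above: by parameterizing the entry
// point on the source unit type, a UTF-8 buffer (single-byte units) and a
// UTF-16 buffer (char16_t units) can take the same path without first
// inflating the UTF-8 text to two-byte characters.
#include <cstddef>
#include <iostream>

template <typename Unit>
struct SourceBuffer {
  const Unit* units;
  size_t length;
};

template <typename Unit>
size_t evaluate(const SourceBuffer<Unit>& src) {
  // Stand-in for real evaluation: just report how many units were consumed.
  return src.length;
}

int main() {
  const char utf8[] = "x = 1";
  const char16_t utf16[] = u"x = 1";

  SourceBuffer<char> narrow{utf8, sizeof(utf8) - 1};
  SourceBuffer<char16_t> wide{utf16, (sizeof(utf16) / sizeof(char16_t)) - 1};

  std::cout << evaluate(narrow) << ' ' << evaluate(wide) << '\n';  // "5 5"
}
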
--- a/js/src/vm/JSScript.cpp
+++ b/js/src/vm/JSScript.cpp
@@ -3233,48 +3233,48 @@ PrivateScriptData* PrivateScriptData::ne
   // Construct the PrivateScriptData. Trailing arrays are uninitialized but
   // GCPtrs are put into a safe state.
   return new (raw) PrivateScriptData(nscopes, nconsts, nobjects, ntrynotes,
                                      nscopenotes, nresumeoffsets);
 }
 
 /* static */ bool PrivateScriptData::InitFromEmitter(
     JSContext* cx, js::HandleScript script, frontend::BytecodeEmitter* bce) {
-  uint32_t nscopes = bce->scopeList.length();
-  uint32_t nconsts = bce->numberList.length();
-  uint32_t nobjects = bce->objectList.length;
-  uint32_t ntrynotes = bce->tryNoteList.length();
-  uint32_t nscopenotes = bce->scopeNoteList.length();
-  uint32_t nresumeoffsets = bce->resumeOffsetList.length();
+  uint32_t nscopes = bce->perScriptData().scopeList().length();
+  uint32_t nconsts = bce->perScriptData().numberList().length();
+  uint32_t nobjects = bce->perScriptData().objectList().length;
+  uint32_t ntrynotes = bce->bytecodeSection().tryNoteList().length();
+  uint32_t nscopenotes = bce->bytecodeSection().scopeNoteList().length();
+  uint32_t nresumeoffsets = bce->bytecodeSection().resumeOffsetList().length();
 
   // Create and initialize PrivateScriptData
   if (!JSScript::createPrivateScriptData(cx, script, nscopes, nconsts, nobjects,
                                          ntrynotes, nscopenotes,
                                          nresumeoffsets)) {
     return false;
   }
 
   js::PrivateScriptData* data = script->data_;
   if (nscopes) {
-    bce->scopeList.finish(data->scopes());
+    bce->perScriptData().scopeList().finish(data->scopes());
   }
   if (nconsts) {
-    bce->numberList.finish(data->consts());
+    bce->perScriptData().numberList().finish(data->consts());
   }
   if (nobjects) {
-    bce->objectList.finish(data->objects());
+    bce->perScriptData().objectList().finish(data->objects());
   }
   if (ntrynotes) {
-    bce->tryNoteList.finish(data->tryNotes());
+    bce->bytecodeSection().tryNoteList().finish(data->tryNotes());
   }
   if (nscopenotes) {
-    bce->scopeNoteList.finish(data->scopeNotes());
+    bce->bytecodeSection().scopeNoteList().finish(data->scopeNotes());
   }
   if (nresumeoffsets) {
-    bce->resumeOffsetList.finish(data->resumeOffsets());
+    bce->bytecodeSection().resumeOffsetList().finish(data->resumeOffsets());
   }
 
   return true;
 }
 
 void PrivateScriptData::trace(JSTracer* trc) {
   auto scopearray = scopes();
   TraceRange(trc, scopearray.size(), scopearray.data(), "scopes");
@@ -3560,33 +3560,34 @@ bool JSScript::fullyInitFromEmitter(JSCo
 
   // If initialization fails, we must call JSScript::freeScriptData in order to
   // neuter the script. Various things that iterate raw scripts in a GC arena
   // use the presence of this data to detect if initialization is complete.
   auto scriptDataGuard =
       mozilla::MakeScopeExit([&] { script->freeScriptData(); });
 
   /* The counts of indexed things must be checked during code generation. */
-  MOZ_ASSERT(bce->atomIndices->count() <= INDEX_LIMIT);
-  MOZ_ASSERT(bce->objectList.length <= INDEX_LIMIT);
+  MOZ_ASSERT(bce->perScriptData().atomIndices()->count() <= INDEX_LIMIT);
+  MOZ_ASSERT(bce->perScriptData().objectList().length <= INDEX_LIMIT);
 
   uint64_t nslots =
-      bce->maxFixedSlots + static_cast<uint64_t>(bce->maxStackDepth);
+      bce->maxFixedSlots +
+      static_cast<uint64_t>(bce->bytecodeSection().maxStackDepth());
   if (nslots > UINT32_MAX) {
     bce->reportError(nullptr, JSMSG_NEED_DIET, js_script_str);
     return false;
   }
 
   // Initialize POD fields
   script->lineno_ = bce->firstLine;
   script->mainOffset_ = bce->mainOffset();
   script->nfixed_ = bce->maxFixedSlots;
   script->nslots_ = nslots;
   script->bodyScopeIndex_ = bce->bodyScopeIndex;
-  script->numBytecodeTypeSets_ = bce->typesetCount;
+  script->numBytecodeTypeSets_ = bce->bytecodeSection().typesetCount();
 
   // Initialize script flags from BytecodeEmitter
   script->setFlag(ImmutableFlags::Strict, bce->sc->strict());
   script->setFlag(ImmutableFlags::BindingsAccessedDynamically,
                   bce->sc->bindingsAccessedDynamically());
   script->setFlag(ImmutableFlags::HasSingletons, bce->hasSingletons);
   script->setFlag(ImmutableFlags::IsForEval, bce->sc->isEvalContext());
   script->setFlag(ImmutableFlags::IsModule, bce->sc->isModuleContext());
@@ -3623,17 +3624,17 @@ bool JSScript::fullyInitFromEmitter(JSCo
     } else {
       fun->setScript(script);
     }
   }
 
   // Part of the parse result – the scope containing each inner function – must
   // be stored in the inner function itself. Do this now that compilation is
   // complete and can no longer fail.
-  bce->objectList.finishInnerFunctions();
+  bce->perScriptData().objectList().finishInnerFunctions();
 
 #ifdef JS_STRUCTURED_SPEW
   // We want this to happen after line number initialization to allow filtering
   // to work.
   script->setSpewEnabled(StructuredSpewer::enabled(script));
 #endif
 
 #ifdef DEBUG
@@ -4517,34 +4518,34 @@ bool JSScript::hasBreakpointsAt(jsbyteco
     return false;
   }
 
   return site->enabledCount > 0;
 }
 
 /* static */ bool SharedScriptData::InitFromEmitter(
     JSContext* cx, js::HandleScript script, frontend::BytecodeEmitter* bce) {
-  uint32_t natoms = bce->atomIndices->count();
-  uint32_t codeLength = bce->code().length();
+  uint32_t natoms = bce->perScriptData().atomIndices()->count();
+  uint32_t codeLength = bce->bytecodeSection().code().length();
 
   // The + 1 is to account for the final SN_MAKE_TERMINATOR that is appended
   // when the notes are copied to their final destination by copySrcNotes.
-  uint32_t noteLength = bce->notes().length() + 1;
+  uint32_t noteLength = bce->bytecodeSection().notes().length() + 1;
 
   // Create and initialize SharedScriptData
   if (!script->createSharedScriptData(cx, codeLength, noteLength, natoms)) {
     return false;
   }
 
   js::SharedScriptData* data = script->scriptData_;
 
   // Initialize trailing arrays
-  std::copy_n(bce->code().begin(), codeLength, data->code());
+  std::copy_n(bce->bytecodeSection().code().begin(), codeLength, data->code());
   bce->copySrcNotes(data->notes(), noteLength);
-  InitAtomMap(*bce->atomIndices, data->atoms());
+  InitAtomMap(*bce->perScriptData().atomIndices(), data->atoms());
 
   return true;
 }
 
 void SharedScriptData::traceChildren(JSTracer* trc) {
   MOZ_ASSERT(refCount() != 0);
   for (uint32_t i = 0; i < natoms(); ++i) {
     TraceNullableEdge(trc, &atoms()[i], "atom");
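
InitFromEmitter above asks the emitter for the length of each list, allocates the script data once, and then copies ("finishes") each non-empty list into its trailing array. A minimal standalone sketch of that count-allocate-copy pattern, with plain vectors instead of the real trailing-array storage:

// Standalone sketch of the pattern used by InitFromEmitter above: the emitter
// side is asked for the length of each list up front, storage is allocated
// once, and then each list is copied ("finished") into its destination array.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct EmitterLists {
  std::vector<uint32_t> scopeNotes;
  std::vector<uint32_t> resumeOffsets;
};

struct ScriptData {
  std::vector<uint32_t> scopeNotes;
  std::vector<uint32_t> resumeOffsets;

  static ScriptData create(size_t nScopeNotes, size_t nResumeOffsets) {
    ScriptData data;
    data.scopeNotes.resize(nScopeNotes);
    data.resumeOffsets.resize(nResumeOffsets);
    return data;
  }
};

static void finishInto(const std::vector<uint32_t>& src,
                       std::vector<uint32_t>& dst) {
  std::copy(src.begin(), src.end(), dst.begin());
}

int main() {
  EmitterLists lists{{1, 2, 3}, {7}};
  ScriptData data =
      ScriptData::create(lists.scopeNotes.size(), lists.resumeOffsets.size());
  if (!lists.scopeNotes.empty()) {
    finishInto(lists.scopeNotes, data.scopeNotes);
  }
  if (!lists.resumeOffsets.empty()) {
    finishInto(lists.resumeOffsets, data.resumeOffsets);
  }
  std::cout << data.scopeNotes.size() << ' ' << data.resumeOffsets.size()
            << '\n';  // "3 1"
}
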
--- a/widget/GfxInfoX11.cpp
+++ b/widget/GfxInfoX11.cpp
@@ -302,16 +302,44 @@ const nsTArray<GfxDriverInfo> &GfxInfo::
 
     // fglrx baseline (chosen arbitrarily as 2013-07-22 release).
     APPEND_TO_DRIVER_BLOCKLIST(
         OperatingSystem::Linux,
         (nsAString &)GfxDriverInfo::GetDeviceVendor(VendorATI),
         GfxDriverInfo::allDevices, GfxDriverInfo::allFeatures,
         nsIGfxInfo::FEATURE_BLOCKED_DRIVER_VERSION, DRIVER_LESS_THAN,
         V(13, 15, 100, 1), "FEATURE_FAILURE_OLD_FGLRX", "fglrx 13.15.100.1");
+
+    ////////////////////////////////////
+    // FEATURE_WEBRENDER
+
+    // Mesa baseline (chosen arbitrarily as that which ships with
+    // Ubuntu 18.04 LTS).
+    APPEND_TO_DRIVER_BLOCKLIST(
+        OperatingSystem::Linux,
+        (nsAString &)GfxDriverInfo::GetDeviceVendor(VendorMesaAll),
+        GfxDriverInfo::allDevices, nsIGfxInfo::FEATURE_WEBRENDER,
+        nsIGfxInfo::FEATURE_BLOCKED_DRIVER_VERSION, DRIVER_LESS_THAN,
+        V(18, 2, 8, 0), "FEATURE_FAILURE_WEBRENDER_OLD_MESA", "Mesa 18.2.8.0");
+
+    // Disable on all NVIDIA devices for now.
+    APPEND_TO_DRIVER_BLOCKLIST(
+        OperatingSystem::Linux,
+        (nsAString &)GfxDriverInfo::GetDeviceVendor(VendorNVIDIA),
+        GfxDriverInfo::allDevices, nsIGfxInfo::FEATURE_WEBRENDER,
+        nsIGfxInfo::FEATURE_BLOCKED_DEVICE, DRIVER_COMPARISON_IGNORED,
+        V(0, 0, 0, 0), "FEATURE_FAILURE_WEBRENDER_NO_LINUX_NVIDIA", "");
+
+    // Disable on all ATI devices for now.
+    APPEND_TO_DRIVER_BLOCKLIST(
+        OperatingSystem::Linux,
+        (nsAString &)GfxDriverInfo::GetDeviceVendor(VendorATI),
+        GfxDriverInfo::allDevices, nsIGfxInfo::FEATURE_WEBRENDER,
+        nsIGfxInfo::FEATURE_BLOCKED_DEVICE, DRIVER_COMPARISON_IGNORED,
+        V(0, 0, 0, 0), "FEATURE_FAILURE_WEBRENDER_NO_LINUX_ATI", "");
   }
   return *sDriverInfo;
 }
 
 bool GfxInfo::DoesVendorMatch(const nsAString &aBlocklistVendor,
                               const nsAString &aAdapterVendor) {
   if (mIsMesa &&
       aBlocklistVendor.Equals(GfxDriverInfo::GetDeviceVendor(VendorMesaAll),
--- a/widget/windows/GfxInfo.cpp
+++ b/widget/windows/GfxInfo.cpp
@@ -1597,19 +1597,19 @@ const nsTArray<GfxDriverInfo>& GfxInfo::
         (nsAString&)GfxDriverInfo::GetDeviceVendor(VendorNVIDIA),
         GfxDriverInfo::allDevices, nsIGfxInfo::FEATURE_DX_P010,
         nsIGfxInfo::FEATURE_BLOCKED_DEVICE, DRIVER_LESS_THAN,
         GfxDriverInfo::allDriverVersions, "FEATURE_UNQUALIFIED_P010_NVIDIA");
 
     ////////////////////////////////////
     // FEATURE_WEBRENDER
 
-    // We are blocking all non-Nvidia cards in gfxPlatform.cpp where we check
-    // for the WEBRENDER_QUALIFIED feature. However we also want to block some
-    // specific Nvidia cards for being too low-powered, so we do that here.
+    // We are blocking most hardware explicitly in gfxPlatform.cpp where we
+    // check for the WEBRENDER_QUALIFIED feature. However, we also want to block
+    // some specific Nvidia cards for being too low-powered, so we do that here.
     APPEND_TO_DRIVER_BLOCKLIST2(
         OperatingSystem::Windows10,
         (nsAString&)GfxDriverInfo::GetDeviceVendor(VendorNVIDIA),
         (GfxDeviceFamily*)GfxDriverInfo::GetDeviceFamily(NvidiaBlockWebRender),
         nsIGfxInfo::FEATURE_WEBRENDER, nsIGfxInfo::FEATURE_BLOCKED_DEVICE,
         DRIVER_LESS_THAN, GfxDriverInfo::allDriverVersions,
         "FEATURE_UNQUALIFIED_WEBRENDER_NVIDIA_BLOCKED");