Merge mozilla-central to autoland. a=merge on a CLOSED TREE
author: Razvan Maries <rmaries@mozilla.com>
Wed, 13 Feb 2019 07:23:59 +0200
changeset 458840 1af69c96ec5f
parent 458839 cacd6882c461 (current diff)
parent 458731 875a93046d84 (diff)
child 458841 fc540ce0e429
push id: 35548
push user: opoprus@mozilla.com
push date: Wed, 13 Feb 2019 09:48:26 +0000
treeherder: mozilla-central@93e37c529818 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: merge
milestone: 67.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Merge mozilla-central to autoland. a=merge on a CLOSED TREE
js/src/frontend/BytecodeEmitter.cpp
js/src/shell/js.cpp
taskcluster/ci/test/test-sets.yml
toolkit/xre/nsAppRunner.cpp
--- a/browser/base/content/test/performance/browser_appmenu.js
+++ b/browser/base/content/test/performance/browser_appmenu.js
@@ -21,17 +21,17 @@ const EXPECTED_APPMENU_OPEN_REFLOWS = [
   },
 
   {
     stack: [
       "adjustArrowPosition@chrome://global/content/bindings/popup.xml",
       "onxblpopuppositioned@chrome://global/content/bindings/popup.xml",
     ],
 
-    maxCount: 3, // This number should only ever go down - never up.
+    maxCount: 17, // This number should only ever go down - never up.
   },
 
   {
     stack: [
       "_calculateMaxHeight@resource:///modules/PanelMultiView.jsm",
       "handleEvent@resource:///modules/PanelMultiView.jsm",
     ],
 
new file mode 100644
--- /dev/null
+++ b/browser/config/mozconfigs/win64-aarch64/artifact
@@ -0,0 +1,13 @@
+. "$topsrcdir/build/mozconfig.artifact.automation"
+
+# Needed to set SourceRepository in application.ini (used by Talos)
+export MOZILLA_OFFICIAL=1
+
+. "$topsrcdir/browser/config/mozconfigs/win64-aarch64/common-win64"
+. "$topsrcdir/browser/config/mozconfigs/common"
+. "$topsrcdir/build/mozconfig.win-common"
+. "$topsrcdir/build/win64-aarch64/mozconfig.vs-latest"
+. "$topsrcdir/build/mozconfig.common.override"
+
+. "$topsrcdir/build/mozconfig.artifact"
+ac_add_options --enable-artifact-build-symbols
new file mode 100644
--- /dev/null
+++ b/browser/config/mozconfigs/win64-aarch64/debug-artifact
@@ -0,0 +1,12 @@
+. "$topsrcdir/build/mozconfig.artifact.automation"
+
+. "$topsrcdir/browser/config/mozconfigs/win64-aarch64/common-win64"
+. "$topsrcdir/browser/config/mozconfigs/common"
+. "$topsrcdir/build/mozconfig.win-common"
+. "$topsrcdir/build/win64-aarch64/mozconfig.vs-latest"
+. "$topsrcdir/build/mozconfig.common.override"
+
+. "$topsrcdir/build/mozconfig.artifact"
+ac_add_options --enable-artifact-build-symbols
+
+ac_add_options --enable-debug
--- a/build/autoconf/frameptr.m4
+++ b/build/autoconf/frameptr.m4
@@ -7,16 +7,26 @@ dnl disabling frame pointers in this arc
 dnl options
 
 AC_DEFUN([MOZ_SET_FRAMEPTR_FLAGS], [
   if test "$GNU_CC"; then
     MOZ_ENABLE_FRAME_PTR="-fno-omit-frame-pointer -funwind-tables"
     MOZ_DISABLE_FRAME_PTR="-fomit-frame-pointer -funwind-tables"
   else
     case "$target" in
+    dnl some versions of clang-cl don't support -Oy-; accommodate them.
+    aarch64-windows*)
+      if test "$CC_TYPE" = "clang-cl"; then
+        MOZ_ENABLE_FRAME_PTR="-Xclang -mdisable-fp-elim"
+        MOZ_DISABLE_FRAME_PTR="-Xclang -mdisable-fp-elim"
+      else
+        MOZ_ENABLE_FRAME_PTR="-Oy-"
+        MOZ_DISABLE_FRAME_PTR="-Oy"
+      fi
+    ;;
     dnl Oy (Frame-Pointer Omission) is only support on x86 compilers
     *-mingw32*)
       MOZ_ENABLE_FRAME_PTR="-Oy-"
       MOZ_DISABLE_FRAME_PTR="-Oy"
     ;;
     esac
   fi
 
--- a/gfx/layers/ipc/CompositorBridgeChild.cpp
+++ b/gfx/layers/ipc/CompositorBridgeChild.cpp
@@ -38,16 +38,17 @@
 #include "nsTArray.h"         // for nsTArray, nsTArray_Impl
 #include "nsXULAppAPI.h"      // for XRE_GetIOMessageLoop, etc
 #include "FrameLayerBuilder.h"
 #include "mozilla/dom/TabChild.h"
 #include "mozilla/dom/TabParent.h"
 #include "mozilla/dom/ContentChild.h"
 #include "mozilla/Unused.h"
 #include "mozilla/DebugOnly.h"
+#include "nsThreadUtils.h"
 #if defined(XP_WIN)
 #  include "WinUtils.h"
 #endif
 #include "mozilla/widget/CompositorWidget.h"
 #ifdef MOZ_WIDGET_SUPPORTS_OOP_COMPOSITING
 #  include "mozilla/widget/CompositorWidgetChild.h"
 #endif
 #include "VsyncSource.h"
@@ -103,16 +104,26 @@ CompositorBridgeChild::~CompositorBridge
     gfxCriticalError() << "CompositorBridgeChild was not deinitialized";
   }
 }
 
 bool CompositorBridgeChild::IsSameProcess() const {
   return OtherPid() == base::GetCurrentProcId();
 }
 
+void CompositorBridgeChild::PrepareFinalDestroy() {
+  // Because of high priority DidComposite, we need to repost to
+  // high priority queue to ensure the actor is destroyed after possible
+  // pending DidComposite message.
+  nsCOMPtr<nsIRunnable> runnable =
+      NewRunnableMethod("CompositorBridgeChild::AfterDestroy", this,
+                        &CompositorBridgeChild::AfterDestroy);
+  NS_DispatchToCurrentThreadQueue(runnable.forget(), EventQueuePriority::High);
+}
+
 void CompositorBridgeChild::AfterDestroy() {
   // Note that we cannot rely upon mCanSend here because we already set that to
   // false to prevent normal IPDL calls from being made after SendWillClose.
   // The only time we should not issue Send__delete__ is if the actor is already
   // destroyed, e.g. the compositor process crashed.
   if (!mActorDestroyed) {
     Send__delete__(this);
     mActorDestroyed = true;
@@ -149,18 +160,18 @@ void CompositorBridgeChild::Destroy() {
   // Flush async paints before we destroy texture data.
   FlushAsyncPaints();
 
   if (!mCanSend) {
     // We may have already called destroy but still have lingering references
     // or CompositorBridgeChild::ActorDestroy was called. Ensure that we do our
     // post destroy clean up no matter what. It is safe to call multiple times.
     MessageLoop::current()->PostTask(
-        NewRunnableMethod("CompositorBridgeChild::AfterDestroy", selfRef,
-                          &CompositorBridgeChild::AfterDestroy));
+        NewRunnableMethod("CompositorBridgeChild::PrepareFinalDestroy", selfRef,
+                          &CompositorBridgeChild::PrepareFinalDestroy));
     return;
   }
 
   AutoTArray<PLayerTransactionChild*, 16> transactions;
   ManagedPLayerTransactionChild(transactions);
   for (int i = transactions.Length() - 1; i >= 0; --i) {
     RefPtr<LayerTransactionChild> layers =
         static_cast<LayerTransactionChild*>(transactions[i]);
@@ -196,18 +207,18 @@ void CompositorBridgeChild::Destroy() {
   // destruction of shared memory). We need to ensure this gets processed by the
   // CompositorBridgeChild before it gets destroyed. It suffices to ensure that
   // events already in the MessageLoop get processed before the
   // CompositorBridgeChild is destroyed, so we add a task to the MessageLoop to
   // handle compositor destruction.
 
   // From now on we can't send any message message.
   MessageLoop::current()->PostTask(
-      NewRunnableMethod("CompositorBridgeChild::AfterDestroy", selfRef,
-                        &CompositorBridgeChild::AfterDestroy));
+      NewRunnableMethod("CompositorBridgeChild::PrepareFinalDestroy", selfRef,
+                        &CompositorBridgeChild::PrepareFinalDestroy));
 }
 
 // static
 void CompositorBridgeChild::ShutDown() {
   if (sCompositorBridge) {
     sCompositorBridge->Destroy();
     SpinEventLoopUntil([&]() { return !sCompositorBridge; });
   }
--- a/gfx/layers/ipc/CompositorBridgeChild.h
+++ b/gfx/layers/ipc/CompositorBridgeChild.h
@@ -253,16 +253,17 @@ class CompositorBridgeChild final : publ
   // Private destructor, to discourage deletion outside of Release():
   virtual ~CompositorBridgeChild();
 
   // Must only be called from the paint thread. If the main thread is delaying
   // IPC messages, this forwards all such delayed IPC messages to the I/O thread
   // and resumes IPC.
   void ResumeIPCAfterAsyncPaint();
 
+  void PrepareFinalDestroy();
   void AfterDestroy();
 
   PLayerTransactionChild* AllocPLayerTransactionChild(
       const nsTArray<LayersBackend>& aBackendHints, const LayersId& aId);
 
   bool DeallocPLayerTransactionChild(PLayerTransactionChild* aChild);
 
   virtual void ActorDestroy(ActorDestroyReason aWhy) override;
--- a/gfx/layers/ipc/PCompositorBridge.ipdl
+++ b/gfx/layers/ipc/PCompositorBridge.ipdl
@@ -108,18 +108,18 @@ child:
   // TextureSources are recreated.
   async InvalidateLayers(LayersId layersId);
 
   // The compositor completed a layers transaction. id is the layers id
   // of the child layer tree that was composited (or 0 when notifying
   // the root layer tree).
   // transactionId is the id of the transaction before this composite, or 0
   // if there was no transaction since the last composite.
-  async DidComposite(LayersId id, TransactionId transactionId,
-                     TimeStamp compositeStart, TimeStamp compositeEnd);
+  prio(high) async DidComposite(LayersId id, TransactionId transactionId,
+                                TimeStamp compositeStart, TimeStamp compositeEnd);
 
   async NotifyFrameStats(FrameStats[] aFrameStats);
 
   /**
    * Parent informs the child that the graphics objects are ready for
    * compositing.  This usually means that the graphics objects (textures
    * and the like) are available on the GPU.  This is used for chrome UI.
    * @see RequestNotifyAfterRemotePaint
--- a/gfx/layers/mlgpu/FrameBuilder.cpp
+++ b/gfx/layers/mlgpu/FrameBuilder.cpp
@@ -315,16 +315,20 @@ MaskOperation* FrameBuilder::AddMaskOper
 
 void FrameBuilder::RetainTemporaryLayer(LayerMLGPU* aLayer) {
   // This should only be used with temporary layers. Temporary layers do not
   // have parents.
   MOZ_ASSERT(!aLayer->GetLayer()->GetParent());
   mTemporaryLayers.push_back(aLayer->GetLayer());
 }
 
+MLGRenderTarget* FrameBuilder::GetWidgetRT() {
+  return mWidgetRenderView->GetRenderTarget();
+}
+
 LayerConstants* FrameBuilder::AllocateLayerInfo(ItemInfo& aItem) {
   if (((mCurrentLayerBuffer.Length() + 1) * sizeof(LayerConstants)) >
       mDevice->GetMaxConstantBufferBindSize()) {
     FinishCurrentLayerBuffer();
     mLayerBufferMap.Clear();
     mCurrentLayerBuffer.ClearAndRetainStorage();
   }
 
--- a/gfx/layers/mlgpu/FrameBuilder.h
+++ b/gfx/layers/mlgpu/FrameBuilder.h
@@ -61,16 +61,18 @@ class FrameBuilder final {
   // These are called during rendering, and may return null if a buffer
   // couldn't be allocated.
   ConstantBufferSection GetLayerBufferByIndex(size_t aIndex) const;
   ConstantBufferSection GetMaskRectBufferByIndex(size_t aIndex) const;
 
   // Hold a layer alive until the frame ends.
   void RetainTemporaryLayer(LayerMLGPU* aLayer);
 
+  MLGRenderTarget* GetWidgetRT();
+
  private:
   void AssignLayer(Layer* aLayer, RenderViewMLGPU* aView,
                    const RenderTargetIntRect& aClipRect,
                    Maybe<gfx::Polygon>&& aGeometry);
 
   void ProcessChildList(ContainerLayer* aContainer, RenderViewMLGPU* aView,
                         const RenderTargetIntRect& aParentClipRect,
                         const Maybe<gfx::Polygon>& aParentGeometry);
--- a/gfx/layers/mlgpu/LayerManagerMLGPU.cpp
+++ b/gfx/layers/mlgpu/LayerManagerMLGPU.cpp
@@ -86,16 +86,17 @@ bool LayerManagerMLGPU::Initialize() {
 }
 
 void LayerManagerMLGPU::Destroy() {
   if (IsDestroyed()) {
     return;
   }
 
   LayerManager::Destroy();
+  mProfilerScreenshotGrabber.Destroy();
 
   if (mDevice && mDevice->IsValid()) {
     mDevice->Flush();
   }
   if (mSwapChain) {
     mSwapChain->Destroy();
     mSwapChain = nullptr;
   }
@@ -259,16 +260,17 @@ void LayerManagerMLGPU::Composite() {
   }
 
   AL_LOG("Computed invalid region: %s\n", Stringify(mInvalidRegion).c_str());
 
   // Now that we have the final invalid region, give it to the swap chain which
   // will tell us if we still need to render.
   if (!mSwapChain->ApplyNewInvalidRegion(std::move(mInvalidRegion),
                                          diagnosticRect)) {
+    mProfilerScreenshotGrabber.NotifyEmptyFrame();
     return;
   }
 
   AutoUnlockAllTextures autoUnlock(mDevice);
 
   mDevice->BeginFrame();
 
   RenderLayers();
@@ -341,16 +343,19 @@ void LayerManagerMLGPU::RenderLayers() {
   if (mDrawDiagnostics) {
     IntSize size = mSwapChain->GetBackBufferInvalidRegion().GetBounds().Size();
     uint32_t numPixels = size.width * size.height;
     mDevice->StartDiagnostics(numPixels);
   }
 
   // Execute all render passes.
   builder.Render();
+
+  mProfilerScreenshotGrabber.MaybeGrabScreenshot(
+      mDevice, builder.GetWidgetRT()->GetTexture());
   mCurrentFrame = nullptr;
 
   if (mDrawDiagnostics) {
     mDiagnostics->RecordCompositeTime(
         (TimeStamp::Now() - start).ToMilliseconds());
     mDevice->EndDiagnostics();
   }
 }
@@ -500,13 +505,14 @@ bool LayerManagerMLGPU::PreRender() {
     return false;
   }
   mWidgetContext = Some(context);
   return true;
 }
 
 void LayerManagerMLGPU::PostRender() {
   mWidget->PostRender(mWidgetContext.ptr());
+  mProfilerScreenshotGrabber.MaybeProcessQueue();
   mWidgetContext = Nothing();
 }
 
 }  // namespace layers
 }  // namespace mozilla
--- a/gfx/layers/mlgpu/LayerManagerMLGPU.h
+++ b/gfx/layers/mlgpu/LayerManagerMLGPU.h
@@ -5,16 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MOZILLA_GFX_LAYERMANAGERMLGPU_H
 #define MOZILLA_GFX_LAYERMANAGERMLGPU_H
 
 #include "Layers.h"
 #include "mozilla/layers/LayerManagerComposite.h"
 #include "LayerMLGPU.h"
+#include "mozilla/layers/MLGPUScreenshotGrabber.h"
 
 namespace mozilla {
 namespace layers {
 
 class FrameBuilder;
 class LayerManagerMLGPU;
 class RenderPassMLGPU;
 class SharedBufferMLGPU;
@@ -119,14 +120,17 @@ class LayerManagerMLGPU final : public H
   gfx::IntRect mTargetRect;
   FrameBuilder* mCurrentFrame;
 
   // The debug frame number is incremented every frame and is included in the
   // WorldConstants bound to vertex shaders. This allows us to correlate
   // a frame in RenderDoc to spew in the console.
   uint32_t mDebugFrameNumber;
   RefPtr<MLGBuffer> mDiagnosticVertices;
+
+  // Screenshotting for the profiler.
+  MLGPUScreenshotGrabber mProfilerScreenshotGrabber;
 };
 
 }  // namespace layers
 }  // namespace mozilla
 
 #endif
new file mode 100644
--- /dev/null
+++ b/gfx/layers/mlgpu/MLGPUScreenshotGrabber.cpp
@@ -0,0 +1,335 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MLGPUScreenshotGrabber.h"
+
+#include "mozilla/RefPtr.h"
+#include "mozilla/TimeStamp.h"
+#include "mozilla/UniquePtr.h"
+
+#include "mozilla/layers/ProfilerScreenshots.h"
+#include "mozilla/gfx/Point.h"
+#include "mozilla/gfx/Swizzle.h"
+#include "SharedBufferMLGPU.h"
+#include "ShaderDefinitionsMLGPU.h"
+#include "nsTArray.h"
+
+namespace mozilla {
+
+using namespace gfx;
+
+namespace layers {
+
+using namespace mlg;
+
+/**
+ * The actual implementation of screenshot grabbing.
+ * The MLGPUScreenshotGrabberImpl object is destroyed if the profiler is
+ * disabled and MaybeGrabScreenshot notices it.
+ */
+class MLGPUScreenshotGrabberImpl final {
+ public:
+  explicit MLGPUScreenshotGrabberImpl(const IntSize& aReadbackTextureSize);
+  ~MLGPUScreenshotGrabberImpl();
+
+  void GrabScreenshot(MLGDevice* aDevice, MLGTexture* aTexture);
+  void ProcessQueue();
+
+ private:
+  struct QueueItem final {
+    mozilla::TimeStamp mTimeStamp;
+    RefPtr<MLGTexture> mScreenshotReadbackTexture;
+    gfx::IntSize mScreenshotSize;
+    gfx::IntSize mWindowSize;
+    RefPtr<MLGDevice> mDevice;
+    uintptr_t mWindowIdentifier;
+  };
+
+  RefPtr<MLGTexture> ScaleDownWindowTargetToSize(MLGDevice* aCompositor,
+                                                 const gfx::IntSize& aDestSize,
+                                                 MLGTexture* aWindowTarget,
+                                                 size_t aLevel);
+
+  struct CachedLevel {
+    RefPtr<MLGRenderTarget> mRenderTarget;
+    RefPtr<MLGBuffer> mVertexBuffer;
+    RefPtr<MLGBuffer> mWorldConstants;
+  };
+  bool BlitTexture(MLGDevice* aDevice, CachedLevel& aDest, MLGTexture* aSource,
+                   const IntSize& aSourceSize, const IntSize& aDestSize);
+
+  already_AddRefed<MLGTexture> TakeNextReadbackTexture(MLGDevice* aCompositor);
+  void ReturnReadbackTexture(MLGTexture* aReadbackTexture);
+
+  nsTArray<CachedLevel> mCachedLevels;
+  nsTArray<RefPtr<MLGTexture>> mAvailableReadbackTextures;
+  Maybe<QueueItem> mCurrentFrameQueueItem;
+  nsTArray<QueueItem> mQueue;
+  UniquePtr<ProfilerScreenshots> mProfilerScreenshots;
+  const IntSize mReadbackTextureSize;
+};
+
+MLGPUScreenshotGrabber::MLGPUScreenshotGrabber() {}
+
+MLGPUScreenshotGrabber::~MLGPUScreenshotGrabber() {}
+
+void MLGPUScreenshotGrabber::MaybeGrabScreenshot(MLGDevice* aDevice,
+                                                 MLGTexture* aTexture) {
+  if (ProfilerScreenshots::IsEnabled()) {
+    if (!mImpl) {
+      mImpl = MakeUnique<MLGPUScreenshotGrabberImpl>(
+          ProfilerScreenshots::ScreenshotSize());
+    }
+    mImpl->GrabScreenshot(aDevice, aTexture);
+  } else if (mImpl) {
+    Destroy();
+  }
+}
+
+void MLGPUScreenshotGrabber::MaybeProcessQueue() {
+  if (ProfilerScreenshots::IsEnabled()) {
+    if (!mImpl) {
+      mImpl = MakeUnique<MLGPUScreenshotGrabberImpl>(
+          ProfilerScreenshots::ScreenshotSize());
+    }
+    mImpl->ProcessQueue();
+  } else if (mImpl) {
+    Destroy();
+  }
+}
+
+void MLGPUScreenshotGrabber::NotifyEmptyFrame() {
+#ifdef MOZ_GECKO_PROFILER
+  profiler_add_marker("NoCompositorScreenshot because nothing changed",
+                      js::ProfilingStackFrame::Category::GRAPHICS);
+#endif
+}
+
+void MLGPUScreenshotGrabber::Destroy() { mImpl = nullptr; }
+
+MLGPUScreenshotGrabberImpl::MLGPUScreenshotGrabberImpl(
+    const IntSize& aReadbackTextureSize)
+    : mReadbackTextureSize(aReadbackTextureSize) {}
+
+MLGPUScreenshotGrabberImpl::~MLGPUScreenshotGrabberImpl() {
+  // Any queue items in mQueue or mCurrentFrameQueueItem will be lost.
+  // That's ok: Either the profiler has stopped and we don't care about these
+  // screenshots, or the window is closing and we don't really need the last
+  // few frames from the window.
+}
+
+// Scale down aWindowTexture into a MLGTexture of size
+// mReadbackTextureSize * (1 << aLevel) and return that MLGTexture.
+// Don't scale down by more than a factor of 2 with a single scaling operation,
+// because it'll look bad. If higher scales are needed, use another
+// intermediate target by calling this function recursively with aLevel + 1.
+RefPtr<MLGTexture> MLGPUScreenshotGrabberImpl::ScaleDownWindowTargetToSize(
+    MLGDevice* aDevice, const IntSize& aDestSize, MLGTexture* aWindowTexture,
+    size_t aLevel) {
+  aDevice->SetScissorRect(Nothing());
+  aDevice->SetDepthTestMode(MLGDepthTestMode::Disabled);
+  aDevice->SetTopology(MLGPrimitiveTopology::UnitQuad);
+  // DiagnosticText happens to be the simplest shader we have to draw a quad.
+  aDevice->SetVertexShader(VertexShaderID::DiagnosticText);
+  aDevice->SetPixelShader(PixelShaderID::DiagnosticText);
+  aDevice->SetBlendState(MLGBlendState::Copy);
+  aDevice->SetSamplerMode(0, SamplerMode::LinearClamp);
+
+  if (aLevel == mCachedLevels.Length()) {
+    RefPtr<MLGRenderTarget> rt =
+        aDevice->CreateRenderTarget(mReadbackTextureSize * (1 << aLevel));
+    mCachedLevels.AppendElement(CachedLevel{rt, nullptr, nullptr});
+  }
+  MOZ_RELEASE_ASSERT(aLevel < mCachedLevels.Length());
+
+  RefPtr<MLGTexture> sourceTarget = aWindowTexture;
+  IntSize sourceSize = aWindowTexture->GetSize();
+  if (aWindowTexture->GetSize().width > aDestSize.width * 2) {
+    sourceSize = aDestSize * 2;
+    sourceTarget = ScaleDownWindowTargetToSize(aDevice, sourceSize,
+                                               aWindowTexture, aLevel + 1);
+  }
+
+  if (sourceTarget) {
+    if (BlitTexture(aDevice, mCachedLevels[aLevel], sourceTarget, sourceSize,
+                    aDestSize)) {
+      return mCachedLevels[aLevel].mRenderTarget->GetTexture();
+    }
+  }
+  return nullptr;
+}
+
+bool MLGPUScreenshotGrabberImpl::BlitTexture(MLGDevice* aDevice,
+                                             CachedLevel& aLevel,
+                                             MLGTexture* aSource,
+                                             const IntSize& aSourceSize,
+                                             const IntSize& aDestSize) {
+  MOZ_ASSERT(aLevel.mRenderTarget);
+  MLGRenderTarget* rt = aLevel.mRenderTarget;
+  MOZ_ASSERT(aDestSize <= rt->GetSize());
+
+  struct TextureRect {
+    Rect bounds;
+    Rect texCoords;
+  };
+
+  if (!aLevel.mVertexBuffer) {
+    TextureRect rect;
+    rect.bounds = Rect(Point(), Size(aDestSize));
+    rect.texCoords =
+        Rect(0.0, 0.0, Float(aSourceSize.width) / aSource->GetSize().width,
+             Float(aSourceSize.height) / aSource->GetSize().height);
+
+    VertexStagingBuffer instances;
+    if (!instances.AppendItem(rect)) {
+      return false;
+    }
+
+    RefPtr<MLGBuffer> vertices = aDevice->CreateBuffer(
+        MLGBufferType::Vertex, instances.NumItems() * instances.SizeOfItem(),
+        MLGUsage::Immutable, instances.GetBufferStart());
+    if (!vertices) {
+      return false;
+    }
+
+    aLevel.mVertexBuffer = vertices;
+  }
+
+  if (!aLevel.mWorldConstants) {
+    WorldConstants vsConstants;
+    Matrix4x4 projection = Matrix4x4::Translation(-1.0, 1.0, 0.0);
+    projection.PreScale(2.0 / float(rt->GetSize().width),
+                        2.0 / float(rt->GetSize().height), 1.0f);
+    projection.PreScale(1.0f, -1.0f, 1.0f);
+
+    memcpy(vsConstants.projection, &projection._11, 64);
+    vsConstants.targetOffset = Point();
+    vsConstants.sortIndexOffset = 0;
+    vsConstants.debugFrameNumber = 0;
+
+    aLevel.mWorldConstants =
+        aDevice->CreateBuffer(MLGBufferType::Constant, sizeof(vsConstants),
+                              MLGUsage::Immutable, &vsConstants);
+
+    if (!aLevel.mWorldConstants) {
+      return false;
+    }
+  }
+
+  aDevice->SetRenderTarget(rt);
+  aDevice->SetPSTexture(0, aSource);
+  aDevice->SetViewport(IntRect(IntPoint(0, 0), rt->GetSize()));
+  aDevice->SetVertexBuffer(1, aLevel.mVertexBuffer, sizeof(TextureRect));
+  aDevice->SetVSConstantBuffer(kWorldConstantBufferSlot,
+                               aLevel.mWorldConstants);
+  aDevice->DrawInstanced(4, 1, 0, 0);
+  return true;
+}
+
+void MLGPUScreenshotGrabberImpl::GrabScreenshot(MLGDevice* aDevice,
+                                                MLGTexture* aTexture) {
+  Size windowSize(aTexture->GetSize());
+  float scale = std::min(mReadbackTextureSize.width / windowSize.width,
+                         mReadbackTextureSize.height / windowSize.height);
+  IntSize scaledSize = IntSize::Round(windowSize * scale);
+
+  // The initial target is non-GPU readable. This copy could probably be
+  // avoided if we had created the swap chain differently. However we
+  // don't know if that may inadvertently affect performance in the
+  // non-profiling case.
+  RefPtr<MLGTexture> windowTexture = aDevice->CreateTexture(
+      aTexture->GetSize(), SurfaceFormat::B8G8R8A8, MLGUsage::Default,
+      MLGTextureFlags::ShaderResource);
+  aDevice->CopyTexture(windowTexture, IntPoint(), aTexture,
+                       IntRect(IntPoint(), aTexture->GetSize()));
+
+  RefPtr<MLGTexture> scaledTarget =
+      ScaleDownWindowTargetToSize(aDevice, scaledSize, windowTexture, 0);
+
+  if (!scaledTarget) {
+    PROFILER_ADD_MARKER(
+        "NoCompositorScreenshot because ScaleDownWindowTargetToSize failed",
+        GRAPHICS);
+    return;
+  }
+
+  RefPtr<MLGTexture> readbackTexture = TakeNextReadbackTexture(aDevice);
+  if (!readbackTexture) {
+    PROFILER_ADD_MARKER(
+        "NoCompositorScreenshot because AsyncReadbackReadbackTexture creation "
+        "failed",
+        GRAPHICS);
+    return;
+  }
+
+  aDevice->CopyTexture(readbackTexture, IntPoint(), scaledTarget,
+                       IntRect(IntPoint(), mReadbackTextureSize));
+
+  // This QueueItem will be added to the queue at the end of the next call to
+  // ProcessQueue(). This ensures that the ReadbackTexture isn't mapped into
+  // main memory until the next frame. If we did it in this frame, we'd block on
+  // the GPU.
+  mCurrentFrameQueueItem =
+      Some(QueueItem{TimeStamp::Now(), readbackTexture.forget(), scaledSize,
+                     aTexture->GetSize(), aDevice,
+                     reinterpret_cast<uintptr_t>(static_cast<void*>(this))});
+}
+
+already_AddRefed<MLGTexture>
+MLGPUScreenshotGrabberImpl::TakeNextReadbackTexture(MLGDevice* aDevice) {
+  if (!mAvailableReadbackTextures.IsEmpty()) {
+    RefPtr<MLGTexture> readbackTexture = mAvailableReadbackTextures[0];
+    mAvailableReadbackTextures.RemoveElementAt(0);
+    return readbackTexture.forget();
+  }
+  return aDevice
+      ->CreateTexture(mReadbackTextureSize, SurfaceFormat::B8G8R8A8,
+                      MLGUsage::Staging, MLGTextureFlags::None)
+      .forget();
+}
+
+void MLGPUScreenshotGrabberImpl::ReturnReadbackTexture(
+    MLGTexture* aReadbackTexture) {
+  mAvailableReadbackTextures.AppendElement(aReadbackTexture);
+}
+
+void MLGPUScreenshotGrabberImpl::ProcessQueue() {
+  if (!mQueue.IsEmpty()) {
+    if (!mProfilerScreenshots) {
+      mProfilerScreenshots = MakeUnique<ProfilerScreenshots>();
+    }
+    for (const auto& item : mQueue) {
+      mProfilerScreenshots->SubmitScreenshot(
+          item.mWindowIdentifier, item.mWindowSize, item.mScreenshotSize,
+          item.mTimeStamp, [&item](DataSourceSurface* aTargetSurface) {
+            MLGMappedResource map;
+            if (!item.mDevice->Map(item.mScreenshotReadbackTexture,
+                                   MLGMapType::READ, &map)) {
+              return false;
+            }
+            DataSourceSurface::ScopedMap destMap(aTargetSurface,
+                                                 DataSourceSurface::WRITE);
+            bool result =
+                SwizzleData(map.mData, map.mStride, SurfaceFormat::B8G8R8A8,
+                            destMap.GetData(), destMap.GetStride(),
+                            aTargetSurface->GetFormat(), item.mScreenshotSize);
+
+            item.mDevice->Unmap(item.mScreenshotReadbackTexture);
+            return result;
+          });
+      ReturnReadbackTexture(item.mScreenshotReadbackTexture);
+    }
+  }
+  mQueue.Clear();
+
+  if (mCurrentFrameQueueItem) {
+    mQueue.AppendElement(std::move(*mCurrentFrameQueueItem));
+    mCurrentFrameQueueItem = Nothing();
+  }
+}
+
+}  // namespace layers
+}  // namespace mozilla
new file mode 100644
--- /dev/null
+++ b/gfx/layers/mlgpu/MLGPUScreenshotGrabber.h
@@ -0,0 +1,59 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_layers_MLGPUScreenshotGrabber_h
+#define mozilla_layers_MLGPUScreenshotGrabber_h
+
+#include "mozilla/UniquePtr.h"
+#include "mozilla/layers/MLGDevice.h"
+
+namespace mozilla {
+namespace layers {
+
+class MLGPUScreenshotGrabberImpl;
+
+/**
+ * Used by LayerManagerComposite to grab snapshots from the compositor and
+ * submit them to the Gecko profiler.
+ * Doesn't do any work if the profiler is not running or the "screenshots"
+ * feature is not enabled.
+ * Screenshots are scaled down to fit within a fixed size, and read back to
+ * main memory using async readback. Scaling is done in multiple scale-by-0.5x
+ * steps using CompositingRenderTargets and Compositor::BlitFromRenderTarget,
+ * and readback is done using AsyncReadbackBuffers.
+ */
+class MLGPUScreenshotGrabber final {
+ public:
+  MLGPUScreenshotGrabber();
+  ~MLGPUScreenshotGrabber();
+
+  // Scale the contents of aTexture into an appropriately sized MLGTexture
+  // and read its contents into an AsyncReadbackBuffer. The AsyncReadbackBuffer
+  // is not mapped into main memory until the second call to
+  // MaybeProcessQueue() after this call to MaybeGrabScreenshot().
+  void MaybeGrabScreenshot(MLGDevice* aDevice, MLGTexture* aTexture);
+
+  // Map the contents of any outstanding AsyncReadbackBuffers from previous
+  // composites into main memory and submit each screenshot to the profiler.
+  void MaybeProcessQueue();
+
+  // Insert a special profiler marker for a composite that didn't do any actual
+  // compositing, so that the profiler knows why no screenshot was taken for
+  // this frame.
+  void NotifyEmptyFrame();
+
+  // Destroy all Compositor-related resources that this class is holding on to.
+  void Destroy();
+
+ private:
+  // non-null while ProfilerScreenshots::IsEnabled() returns true
+  UniquePtr<MLGPUScreenshotGrabberImpl> mImpl;
+};
+
+}  // namespace layers
+}  // namespace mozilla
+
+#endif  // mozilla_layers_MLGPUScreenshotGrabber_h
--- a/gfx/layers/moz.build
+++ b/gfx/layers/moz.build
@@ -210,16 +210,17 @@ EXPORTS.mozilla.layers += [
     'LayersHelpers.h',
     'LayersTypes.h',
     'MemoryPressureObserver.h',
     'mlgpu/LayerManagerMLGPU.h',
     'mlgpu/LayerMLGPU.h',
     'mlgpu/MemoryReportingMLGPU.h',
     'mlgpu/MLGDevice.h',
     'mlgpu/MLGDeviceTypes.h',
+    'mlgpu/MLGPUScreenshotGrabber.h',
     'mlgpu/ShaderDefinitionsMLGPU.h',
     'mlgpu/UtilityMLGPU.h',
     'opengl/CompositingRenderTargetOGL.h',
     'opengl/CompositorOGL.h',
     'opengl/MacIOSurfaceTextureClientOGL.h',
     'opengl/MacIOSurfaceTextureHostOGL.h',
     'opengl/TextureClientOGL.h',
     'opengl/TextureHostOGL.h',
@@ -454,16 +455,17 @@ UNIFIED_SOURCES += [
     'mlgpu/ContainerLayerMLGPU.cpp',
     'mlgpu/FrameBuilder.cpp',
     'mlgpu/ImageLayerMLGPU.cpp',
     'mlgpu/LayerManagerMLGPU.cpp',
     'mlgpu/LayerMLGPU.cpp',
     'mlgpu/MaskOperation.cpp',
     'mlgpu/MemoryReportingMLGPU.cpp',
     'mlgpu/MLGDevice.cpp',
+    'mlgpu/MLGPUScreenshotGrabber.cpp',
     'mlgpu/PaintedLayerMLGPU.cpp',
     'mlgpu/RenderPassMLGPU.cpp',
     'mlgpu/RenderViewMLGPU.cpp',
     'mlgpu/SharedBufferMLGPU.cpp',
     'mlgpu/StagingBuffer.cpp',
     'mlgpu/TexturedLayerMLGPU.cpp',
     'mlgpu/TextureSourceProviderMLGPU.cpp',
     'opengl/CompositingRenderTargetOGL.cpp',
--- a/gfx/layers/wr/WebRenderDrawEventRecorder.h
+++ b/gfx/layers/wr/WebRenderDrawEventRecorder.h
@@ -3,16 +3,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MOZILLA_LAYERS_WEBRENDERDRAWTARGETRECORDER_H
 #define MOZILLA_LAYERS_WEBRENDERDRAWTARGETRECORDER_H
 
 #include "mozilla/gfx/DrawEventRecorder.h"
 #include "mozilla/gfx/InlineTranslator.h"
+#include "mozilla/webrender/webrender_ffi.h"
 
 namespace mozilla {
 namespace layers {
 
 struct BlobFont {
   wr::FontInstanceKey mFontInstanceKey;
   gfx::ReferencePtr mScaledFontPtr;
 };
--- a/js/src/ctypes/CTypes.cpp
+++ b/js/src/ctypes/CTypes.cpp
@@ -3236,17 +3236,17 @@ static bool ImplicitConvert(JSContext* c
       }
       *static_cast<bool*>(buffer) = result;
       break;
     }
 #define CHAR16_CASE(name, type, ffiType)                                     \
   case TYPE_##name: {                                                        \
     /* Convert from a 1-character string, regardless of encoding, */         \
     /* or from an integer, provided the result fits in 'type'. */            \
-    type result;                                                             \
+    type result = 0;                                                             \
     if (val.isString()) {                                                    \
       JSString* str = val.toString();                                        \
       if (str->length() != 1)                                                \
         return ConvError(cx, #name, val, convType, funObj, argIndex, arrObj, \
                          arrIndex);                                          \
       JSLinearString* linear = str->ensureLinear(cx);                        \
       if (!linear) return false;                                             \
       result = linear->latin1OrTwoByteChar(0);                               \
--- a/js/src/frontend/BytecodeEmitter.cpp
+++ b/js/src/frontend/BytecodeEmitter.cpp
@@ -3074,29 +3074,59 @@ bool BytecodeEmitter::emitDefault(ParseN
 
   if (!ifUndefined.emitEnd()) {
     //              [stack] VALUE/DEFAULTVALUE
     return false;
   }
   return true;
 }
 
-bool BytecodeEmitter::setOrEmitSetFunName(ParseNode* maybeFun,
-                                          HandleAtom name) {
-  MOZ_ASSERT(maybeFun->isDirectRHSAnonFunction());
-
-  if (maybeFun->is<FunctionNode>()) {
+bool BytecodeEmitter::emitAnonymousFunctionWithName(ParseNode* node,
+                                                    HandleAtom name) {
+  MOZ_ASSERT(node->isDirectRHSAnonFunction());
+
+  if (node->is<FunctionNode>()) {
+    if (!emitTree(node)) {
+      return false;
+    }
+
     // Function doesn't have 'name' property at this point.
     // Set function's name at compile time.
-    return setFunName(maybeFun->as<FunctionNode>().funbox()->function(), name);
-  }
-
-  MOZ_ASSERT(maybeFun->isKind(ParseNodeKind::ClassDecl));
-
-  return emitSetClassConstructorName(name);
+    return setFunName(node->as<FunctionNode>().funbox()->function(), name);
+  }
+
+  MOZ_ASSERT(node->is<ClassNode>());
+
+  return emitClass(&node->as<ClassNode>(), ClassNameKind::InferredName, name);
+}
+
+bool BytecodeEmitter::emitAnonymousFunctionWithComputedName(
+    ParseNode* node, FunctionPrefixKind prefixKind) {
+  MOZ_ASSERT(node->isDirectRHSAnonFunction());
+
+  if (node->is<FunctionNode>()) {
+    if (!emitTree(node)) {
+      //            [stack] NAME FUN
+      return false;
+    }
+    if (!emitDupAt(1)) {
+      //            [stack] NAME FUN NAME
+      return false;
+    }
+    if (!emit2(JSOP_SETFUNNAME, uint8_t(prefixKind))) {
+      //            [stack] NAME FUN
+      return false;
+    }
+    return true;
+  }
+
+  MOZ_ASSERT(node->is<ClassNode>());
+  MOZ_ASSERT(prefixKind == FunctionPrefixKind::None);
+
+  return emitClass(&node->as<ClassNode>(), ClassNameKind::ComputedName);
 }
 
 bool BytecodeEmitter::setFunName(JSFunction* fun, JSAtom* name) {
   // The inferred name may already be set if this function is an interpreted
   // lazy function and we OOM'ed after we set the inferred name the first
   // time.
   if (fun->hasInferredName()) {
     MOZ_ASSERT(fun->isInterpretedLazy());
@@ -3104,43 +3134,26 @@ bool BytecodeEmitter::setFunName(JSFunct
 
     return true;
   }
 
   fun->setInferredName(name);
   return true;
 }
 
-bool BytecodeEmitter::emitSetClassConstructorName(JSAtom* name) {
-  uint32_t nameIndex;
-  if (!makeAtomIndex(name, &nameIndex)) {
-    return false;
-  }
-  if (!emitIndexOp(JSOP_STRING, nameIndex)) {
-    //              [stack] FUN NAME
-    return false;
-  }
-  uint8_t kind = uint8_t(FunctionPrefixKind::None);
-  if (!emit2(JSOP_SETFUNNAME, kind)) {
-    //              [stack] FUN
-    return false;
-  }
-  return true;
-}
-
 bool BytecodeEmitter::emitInitializer(ParseNode* initializer,
                                       ParseNode* pattern) {
-  if (!emitTree(initializer)) {
-    return false;
-  }
-
   if (initializer->isDirectRHSAnonFunction()) {
     MOZ_ASSERT(!pattern->isInParens());
     RootedAtom name(cx, pattern->as<NameNode>().name());
-    if (!setOrEmitSetFunName(initializer, name)) {
+    if (!emitAnonymousFunctionWithName(initializer, name)) {
+      return false;
+    }
+  } else {
+    if (!emitTree(initializer)) {
       return false;
     }
   }
 
   return true;
 }
 
 bool BytecodeEmitter::emitDestructuringOpsArray(ListNode* pattern,
@@ -4056,27 +4069,28 @@ bool BytecodeEmitter::emitAssignment(Par
     NameOpEmitter noe(this, name,
                       isCompound ? NameOpEmitter::Kind::CompoundAssignment
                                  : NameOpEmitter::Kind::SimpleAssignment);
     if (!noe.prepareForRhs()) {
       //            [stack] ENV? VAL?
       return false;
     }
 
-    // Emit the RHS. If we emitted a BIND[G]NAME, then the scope is on
-    // the top of the stack and we need to pick the right RHS value.
-    uint8_t offset = noe.emittedBindOp() ? 2 : 1;
-    if (!EmitAssignmentRhs(this, rhs, offset)) {
-      //            [stack] ENV? VAL? RHS
-      return false;
-    }
     if (rhs && rhs->isDirectRHSAnonFunction()) {
       MOZ_ASSERT(!nameNode->isInParens());
       MOZ_ASSERT(!isCompound);
-      if (!setOrEmitSetFunName(rhs, name)) {
+      if (!emitAnonymousFunctionWithName(rhs, name)) {
+        //          [stack] ENV? VAL? RHS
+        return false;
+      }
+    } else {
+      // Emit the RHS. If we emitted a BIND[G]NAME, then the scope is on
+      // the top of the stack and we need to pick the right RHS value.
+      uint8_t offset = noe.emittedBindOp() ? 2 : 1;
+      if (!EmitAssignmentRhs(this, rhs, offset)) {
         //          [stack] ENV? VAL? RHS
         return false;
       }
     }
 
     // Emit the compound assignment op if there is one.
     if (isCompound) {
       if (!emit1(compoundOp)) {
@@ -7656,27 +7670,64 @@ bool BytecodeEmitter::emitPropertyList(L
       }
       continue;
     }
 
     BinaryNode* prop = &propdef->as<BinaryNode>();
 
     ParseNode* key = prop->left();
     ParseNode* propVal = prop->right();
-    bool isPropertyAnonFunctionOrClass = propVal->isDirectRHSAnonFunction();
     JSOp op = propdef->getOp();
     MOZ_ASSERT(op == JSOP_INITPROP || op == JSOP_INITPROP_GETTER ||
                op == JSOP_INITPROP_SETTER);
 
-    auto emitValue = [this, &propVal, &pe]() {
+    auto emitValue = [this, &key, &propVal, op, &pe]() {
       //            [stack] CTOR? OBJ CTOR? KEY?
 
-      if (!emitTree(propVal)) {
-        //          [stack] CTOR? OBJ CTOR? KEY? VAL
-        return false;
+      if (propVal->isDirectRHSAnonFunction()) {
+        if (key->isKind(ParseNodeKind::NumberExpr)) {
+          MOZ_ASSERT(op == JSOP_INITPROP);
+
+          NumericLiteral* literal = &key->as<NumericLiteral>();
+          RootedAtom keyAtom(cx, NumberToAtom(cx, literal->value()));
+          if (!keyAtom) {
+            return false;
+          }
+          if (!emitAnonymousFunctionWithName(propVal, keyAtom)) {
+            //      [stack] CTOR? OBJ CTOR? KEY VAL
+            return false;
+          }
+        } else if (key->isKind(ParseNodeKind::ObjectPropertyName) ||
+                   key->isKind(ParseNodeKind::StringExpr)) {
+          MOZ_ASSERT(op == JSOP_INITPROP);
+
+          RootedAtom keyAtom(cx, key->as<NameNode>().atom());
+          if (!emitAnonymousFunctionWithName(propVal, keyAtom)) {
+            //      [stack] CTOR? OBJ CTOR? VAL
+            return false;
+          }
+        } else {
+          MOZ_ASSERT(key->isKind(ParseNodeKind::ComputedName));
+
+          FunctionPrefixKind prefix = op == JSOP_INITPROP
+                                          ? FunctionPrefixKind::None
+                                          : op == JSOP_INITPROP_GETTER
+                                                ? FunctionPrefixKind::Get
+                                                : FunctionPrefixKind::Set;
+
+          if (!emitAnonymousFunctionWithComputedName(propVal, prefix)) {
+            //      [stack] CTOR? OBJ CTOR? KEY VAL
+            return false;
+          }
+        }
+      } else {
+        if (!emitTree(propVal)) {
+          //        [stack] CTOR? OBJ CTOR? KEY? VAL
+          return false;
+        }
       }
 
       if (propVal->is<FunctionNode>() &&
           propVal->as<FunctionNode>().funbox()->needsHomeObject()) {
         FunctionBox* funbox = propVal->as<FunctionNode>().funbox();
         MOZ_ASSERT(funbox->function()->allowSuperProperty());
 
         if (!pe.emitInitHomeObject(funbox->asyncKind())) {
@@ -7707,30 +7758,28 @@ bool BytecodeEmitter::emitPropertyList(L
       }
       if (!emitValue()) {
         //          [stack] CTOR? OBJ CTOR? KEY VAL
         return false;
       }
 
       switch (op) {
         case JSOP_INITPROP:
-          if (!pe.emitInitIndexProp(isPropertyAnonFunctionOrClass)) {
+          if (!pe.emitInitIndexProp()) {
             //      [stack] CTOR? OBJ
             return false;
           }
           break;
         case JSOP_INITPROP_GETTER:
-          MOZ_ASSERT(!isPropertyAnonFunctionOrClass);
           if (!pe.emitInitIndexGetter()) {
             //      [stack] CTOR? OBJ
             return false;
           }
           break;
         case JSOP_INITPROP_SETTER:
-          MOZ_ASSERT(!isPropertyAnonFunctionOrClass);
           if (!pe.emitInitIndexSetter()) {
             //      [stack] CTOR? OBJ
             return false;
           }
           break;
         default:
           MOZ_CRASH("Invalid op");
       }
@@ -7753,53 +7802,31 @@ bool BytecodeEmitter::emitPropertyList(L
         //          [stack] CTOR? OBJ CTOR?
         return false;
       }
       if (!emitValue()) {
         //          [stack] CTOR? OBJ CTOR? VAL
         return false;
       }
 
-      RootedFunction anonFunction(cx);
-      if (isPropertyAnonFunctionOrClass) {
-        MOZ_ASSERT(op == JSOP_INITPROP);
-
-        if (propVal->is<FunctionNode>()) {
-          // When the value is function, we set the function's name
-          // at the compile-time, instead of emitting SETFUNNAME.
-          FunctionBox* funbox = propVal->as<FunctionNode>().funbox();
-          anonFunction = funbox->function();
-        } else {
-          // Only object literal can have a property where key is
-          // name and value is an anonymous class.
-          //
-          //   ({ foo: class {} });
-          MOZ_ASSERT(type == ObjectLiteral);
-          MOZ_ASSERT(propVal->isKind(ParseNodeKind::ClassDecl));
-        }
-      }
-
       RootedAtom keyAtom(cx, key->as<NameNode>().atom());
       switch (op) {
         case JSOP_INITPROP:
-          if (!pe.emitInitProp(keyAtom, isPropertyAnonFunctionOrClass,
-                               anonFunction)) {
+          if (!pe.emitInitProp(keyAtom)) {
             //      [stack] CTOR? OBJ
             return false;
           }
           break;
         case JSOP_INITPROP_GETTER:
-          MOZ_ASSERT(!isPropertyAnonFunctionOrClass);
           if (!pe.emitInitGetter(keyAtom)) {
             //      [stack] CTOR? OBJ
             return false;
           }
           break;
         case JSOP_INITPROP_SETTER:
-          MOZ_ASSERT(!isPropertyAnonFunctionOrClass);
           if (!pe.emitInitSetter(keyAtom)) {
             //      [stack] CTOR? OBJ
             return false;
           }
           break;
         default:
           MOZ_CRASH("Invalid op");
       }
@@ -7825,30 +7852,28 @@ bool BytecodeEmitter::emitPropertyList(L
     }
     if (!emitValue()) {
       //            [stack] CTOR? OBJ CTOR? KEY VAL
       return false;
     }
 
     switch (op) {
       case JSOP_INITPROP:
-        if (!pe.emitInitComputedProp(isPropertyAnonFunctionOrClass)) {
+        if (!pe.emitInitComputedProp()) {
           //        [stack] CTOR? OBJ
           return false;
         }
         break;
       case JSOP_INITPROP_GETTER:
-        MOZ_ASSERT(isPropertyAnonFunctionOrClass);
         if (!pe.emitInitComputedGetter()) {
           //        [stack] CTOR? OBJ
           return false;
         }
         break;
       case JSOP_INITPROP_SETTER:
-        MOZ_ASSERT(isPropertyAnonFunctionOrClass);
         if (!pe.emitInitComputedSetter()) {
           //        [stack] CTOR? OBJ
           return false;
         }
         break;
       default:
         MOZ_CRASH("Invalid op");
     }
@@ -8495,28 +8520,37 @@ static MOZ_ALWAYS_INLINE FunctionNode* F
     }
   }
 
   return nullptr;
 }
 
 // This follows ES6 14.5.14 (ClassDefinitionEvaluation) and ES6 14.5.15
 // (BindingClassDeclarationEvaluation).
-bool BytecodeEmitter::emitClass(ClassNode* classNode) {
-  ClassNames* names = classNode->names();
+bool BytecodeEmitter::emitClass(
+    ClassNode* classNode,
+    ClassNameKind nameKind /* = ClassNameKind::BindingName */,
+    HandleAtom nameForAnonymousClass /* = nullptr */) {
+  MOZ_ASSERT((nameKind == ClassNameKind::InferredName) ==
+             (nameForAnonymousClass != nullptr));
+
   ParseNode* heritageExpression = classNode->heritage();
   ListNode* classMembers = classNode->memberList();
   FunctionNode* constructor = FindConstructor(cx, classMembers);
 
+  // If |nameKind != ClassNameKind::ComputedName|
   //                [stack]
+  // Else
+  //                [stack] NAME
 
   ClassEmitter ce(this);
   RootedAtom innerName(cx);
   ClassEmitter::Kind kind = ClassEmitter::Kind::Expression;
-  if (names) {
+  if (ClassNames* names = classNode->names()) {
+    MOZ_ASSERT(nameKind == ClassNameKind::BindingName);
     innerName = names->innerBinding()->name();
     MOZ_ASSERT(innerName);
 
     if (names->outerBinding()) {
       MOZ_ASSERT(names->outerBinding()->name());
       MOZ_ASSERT(names->outerBinding()->name() == innerName);
       kind = ClassEmitter::Kind::Declaration;
     }
@@ -8527,41 +8561,49 @@ bool BytecodeEmitter::emitClass(ClassNod
     }
   }
 
   // This is kind of silly. In order to the get the home object defined on
   // the constructor, we have to make it second, but we want the prototype
   // on top for EmitPropertyList, because we expect static properties to be
   // rarer. The result is a few more swaps than we would like. Such is life.
   bool isDerived = !!heritageExpression;
+  bool hasNameOnStack = nameKind == ClassNameKind::ComputedName;
   if (isDerived) {
     if (!emitTree(heritageExpression)) {
       //            [stack] HERITAGE
       return false;
     }
-    if (!ce.emitDerivedClass(innerName)) {
+    if (!ce.emitDerivedClass(innerName, nameForAnonymousClass,
+                             hasNameOnStack)) {
       //            [stack] HERITAGE HOMEOBJ
       return false;
     }
   } else {
-    if (!ce.emitClass(innerName)) {
+    if (!ce.emitClass(innerName, nameForAnonymousClass, hasNameOnStack)) {
       //            [stack] HOMEOBJ
       return false;
     }
   }
 
   // Stack currently has HOMEOBJ followed by optional HERITAGE. When HERITAGE
   // is not used, an implicit value of %FunctionPrototype% is implied.
   if (constructor) {
     bool needsHomeObject = constructor->funbox()->needsHomeObject();
     // HERITAGE is consumed inside emitFunction.
     if (!emitFunction(constructor, isDerived)) {
       //            [stack] HOMEOBJ CTOR
       return false;
     }
+    if (nameKind == ClassNameKind::InferredName) {
+      if (!setFunName(constructor->funbox()->function(),
+                      nameForAnonymousClass)) {
+        return false;
+      }
+    }
     if (!ce.emitInitConstructor(needsHomeObject)) {
       //            [stack] CTOR HOMEOBJ
       return false;
     }
   } else {
     if (!ce.emitInitDefaultConstructor(Some(classNode->pn_pos.begin),
                                        Some(classNode->pn_pos.end))) {
       //            [stack] CTOR HOMEOBJ
@@ -8581,33 +8623,35 @@ bool BytecodeEmitter::emitClass(ClassNod
   }
 
   return true;
 }
 
 bool BytecodeEmitter::emitExportDefault(BinaryNode* exportNode) {
   MOZ_ASSERT(exportNode->isKind(ParseNodeKind::ExportDefaultStmt));
 
-  ParseNode* nameNode = exportNode->left();
-  if (!emitTree(nameNode)) {
-    return false;
+  ParseNode* valueNode = exportNode->left();
+  if (valueNode->isDirectRHSAnonFunction()) {
+    MOZ_ASSERT(exportNode->right());
+
+    HandlePropertyName name = cx->names().default_;
+    if (!emitAnonymousFunctionWithName(valueNode, name)) {
+      return false;
+    }
+  } else {
+    if (!emitTree(valueNode)) {
+      return false;
+    }
   }
 
   if (ParseNode* binding = exportNode->right()) {
     if (!emitLexicalInitialization(&binding->as<NameNode>())) {
       return false;
     }
 
-    if (nameNode->isDirectRHSAnonFunction()) {
-      HandlePropertyName name = cx->names().default_;
-      if (!setOrEmitSetFunName(nameNode, name)) {
-        return false;
-      }
-    }
-
     if (!emit1(JSOP_POP)) {
       return false;
     }
   }
 
   return true;
 }
 
--- a/js/src/frontend/BytecodeEmitter.h
+++ b/js/src/frontend/BytecodeEmitter.h
@@ -762,20 +762,23 @@ struct MOZ_STACK_CLASS BytecodeEmitter {
 
   // Check if the value on top of the stack is "undefined". If so, replace
   // that value on the stack with the value defined by |defaultExpr|.
   // |pattern| is a lhs node of the default expression.  If it's an
   // identifier and |defaultExpr| is an anonymous function, |SetFunctionName|
   // is called at compile time.
   MOZ_MUST_USE bool emitDefault(ParseNode* defaultExpr, ParseNode* pattern);
 
-  MOZ_MUST_USE bool setOrEmitSetFunName(ParseNode* maybeFun, HandleAtom name);
+  MOZ_MUST_USE bool emitAnonymousFunctionWithName(ParseNode* node,
+                                                  HandleAtom name);
+
+  MOZ_MUST_USE bool emitAnonymousFunctionWithComputedName(
+      ParseNode* node, FunctionPrefixKind prefixKind);
 
   MOZ_MUST_USE bool setFunName(JSFunction* fun, JSAtom* name);
-  MOZ_MUST_USE bool emitSetClassConstructorName(JSAtom* name);
   MOZ_MUST_USE bool emitInitializer(ParseNode* initializer, ParseNode* pattern);
 
   MOZ_MUST_USE bool emitCallSiteObject(CallSiteNode* callSiteObj);
   MOZ_MUST_USE bool emitTemplateString(ListNode* templateString);
   MOZ_MUST_USE bool emitAssignment(ParseNode* lhs, JSOp compoundOp,
                                    ParseNode* rhs);
 
   MOZ_MUST_USE bool emitReturn(UnaryNode* returnNode);
@@ -846,17 +849,30 @@ struct MOZ_STACK_CLASS BytecodeEmitter {
   // emitSpread expects the current index (I) of the array, the array itself
   // and the iterator to be on the stack in that order (iterator on the bottom).
   // It will pop the iterator and I, then iterate over the iterator by calling
   // |.next()| and put the results into the I-th element of array with
   // incrementing I, then push the result I (it will be original I +
   // iteration count). The stack after iteration will look like |ARRAY INDEX|.
   MOZ_MUST_USE bool emitSpread(bool allowSelfHosted = false);
 
-  MOZ_MUST_USE bool emitClass(ClassNode* classNode);
+  enum class ClassNameKind {
+    // The class name is defined through its BindingIdentifier, if present.
+    BindingName,
+
+    // The class is anonymous and has a statically inferred name.
+    InferredName,
+
+    // The class is anonymous and has a dynamically computed name.
+    ComputedName
+  };
+
+  MOZ_MUST_USE bool emitClass(
+      ClassNode* classNode, ClassNameKind nameKind = ClassNameKind::BindingName,
+      HandleAtom nameForAnonymousClass = nullptr);
   MOZ_MUST_USE bool emitSuperElemOperands(
       PropertyByValue* elem, EmitElemOption opts = EmitElemOption::Get);
   MOZ_MUST_USE bool emitSuperGetElem(PropertyByValue* elem,
                                      bool isCall = false);
 
   MOZ_MUST_USE bool emitCalleeAndThis(ParseNode* callee, ParseNode* call,
                                       CallOrNewEmitter& cone);
 
--- a/js/src/frontend/ObjectEmitter.cpp
+++ b/js/src/frontend/ObjectEmitter.cpp
@@ -280,80 +280,67 @@ bool PropertyEmitter::emitInitHomeObject
     propertyState_ = PropertyState::InitHomeObjForIndex;
   } else {
     propertyState_ = PropertyState::InitHomeObjForComputed;
   }
 #endif
   return true;
 }
 
-bool PropertyEmitter::emitInitProp(
-    JS::Handle<JSAtom*> key, bool isPropertyAnonFunctionOrClass /* = false */,
-    JS::Handle<JSFunction*> anonFunction /* = nullptr */) {
-  return emitInit(isClass_ ? JSOP_INITHIDDENPROP : JSOP_INITPROP, key,
-                  isPropertyAnonFunctionOrClass, anonFunction);
+bool PropertyEmitter::emitInitProp(JS::Handle<JSAtom*> key) {
+  return emitInit(isClass_ ? JSOP_INITHIDDENPROP : JSOP_INITPROP, key);
 }
 
 bool PropertyEmitter::emitInitGetter(JS::Handle<JSAtom*> key) {
   obj_ = nullptr;
   return emitInit(isClass_ ? JSOP_INITHIDDENPROP_GETTER : JSOP_INITPROP_GETTER,
-                  key, false, nullptr);
+                  key);
 }
 
 bool PropertyEmitter::emitInitSetter(JS::Handle<JSAtom*> key) {
   obj_ = nullptr;
   return emitInit(isClass_ ? JSOP_INITHIDDENPROP_SETTER : JSOP_INITPROP_SETTER,
-                  key, false, nullptr);
+                  key);
 }
 
-bool PropertyEmitter::emitInitIndexProp(
-    bool isPropertyAnonFunctionOrClass /* = false */) {
-  return emitInitIndexOrComputed(isClass_ ? JSOP_INITHIDDENELEM : JSOP_INITELEM,
-                                 FunctionPrefixKind::None,
-                                 isPropertyAnonFunctionOrClass);
+bool PropertyEmitter::emitInitIndexProp() {
+  return emitInitIndexOrComputed(isClass_ ? JSOP_INITHIDDENELEM
+                                          : JSOP_INITELEM);
 }
 
 bool PropertyEmitter::emitInitIndexGetter() {
   obj_ = nullptr;
-  return emitInitIndexOrComputed(
-      isClass_ ? JSOP_INITHIDDENELEM_GETTER : JSOP_INITELEM_GETTER,
-      FunctionPrefixKind::Get, false);
+  return emitInitIndexOrComputed(isClass_ ? JSOP_INITHIDDENELEM_GETTER
+                                          : JSOP_INITELEM_GETTER);
 }
 
 bool PropertyEmitter::emitInitIndexSetter() {
   obj_ = nullptr;
-  return emitInitIndexOrComputed(
-      isClass_ ? JSOP_INITHIDDENELEM_SETTER : JSOP_INITELEM_SETTER,
-      FunctionPrefixKind::Set, false);
+  return emitInitIndexOrComputed(isClass_ ? JSOP_INITHIDDENELEM_SETTER
+                                          : JSOP_INITELEM_SETTER);
 }
 
-bool PropertyEmitter::emitInitComputedProp(
-    bool isPropertyAnonFunctionOrClass /* = false */) {
-  return emitInitIndexOrComputed(isClass_ ? JSOP_INITHIDDENELEM : JSOP_INITELEM,
-                                 FunctionPrefixKind::None,
-                                 isPropertyAnonFunctionOrClass);
+bool PropertyEmitter::emitInitComputedProp() {
+  return emitInitIndexOrComputed(isClass_ ? JSOP_INITHIDDENELEM
+                                          : JSOP_INITELEM);
 }
 
 bool PropertyEmitter::emitInitComputedGetter() {
   obj_ = nullptr;
-  return emitInitIndexOrComputed(
-      isClass_ ? JSOP_INITHIDDENELEM_GETTER : JSOP_INITELEM_GETTER,
-      FunctionPrefixKind::Get, true);
+  return emitInitIndexOrComputed(isClass_ ? JSOP_INITHIDDENELEM_GETTER
+                                          : JSOP_INITELEM_GETTER);
 }
 
 bool PropertyEmitter::emitInitComputedSetter() {
   obj_ = nullptr;
-  return emitInitIndexOrComputed(
-      isClass_ ? JSOP_INITHIDDENELEM_SETTER : JSOP_INITELEM_SETTER,
-      FunctionPrefixKind::Set, true);
+  return emitInitIndexOrComputed(isClass_ ? JSOP_INITHIDDENELEM_SETTER
+                                          : JSOP_INITELEM_SETTER);
 }
 
-bool PropertyEmitter::emitInit(JSOp op, JS::Handle<JSAtom*> key,
-                               bool isPropertyAnonFunctionOrClass,
-                               JS::Handle<JSFunction*> anonFunction) {
+bool PropertyEmitter::emitInit(JSOp op, JS::Handle<JSAtom*> key) {
   MOZ_ASSERT(propertyState_ == PropertyState::PropValue ||
              propertyState_ == PropertyState::InitHomeObj);
 
   MOZ_ASSERT(op == JSOP_INITPROP || op == JSOP_INITHIDDENPROP ||
              op == JSOP_INITPROP_GETTER || op == JSOP_INITHIDDENPROP_GETTER ||
              op == JSOP_INITPROP_SETTER || op == JSOP_INITHIDDENPROP_SETTER);
 
   //                [stack] CTOR? OBJ CTOR? VAL
@@ -371,73 +358,43 @@ bool PropertyEmitter::emitInit(JSOp op, 
                                   JSPROP_ENUMERATE)) {
       return false;
     }
     if (obj_->inDictionaryMode()) {
       obj_ = nullptr;
     }
   }
 
-  if (isPropertyAnonFunctionOrClass) {
-    MOZ_ASSERT(op == JSOP_INITPROP || op == JSOP_INITHIDDENPROP);
-
-    if (anonFunction) {
-      if (!bce_->setFunName(anonFunction, key)) {
-        return false;
-      }
-    } else {
-      // NOTE: This is setting the constructor's name of the class which is
-      //       the property value.  Not of the enclosing class.
-      if (!bce_->emitSetClassConstructorName(key)) {
-        //          [stack] CTOR? OBJ CTOR? FUN
-        return false;
-      }
-    }
-  }
-
   if (!bce_->emitIndex32(op, index)) {
     //              [stack] CTOR? OBJ CTOR?
     return false;
   }
 
   if (!emitPopClassConstructor()) {
     return false;
   }
 
 #ifdef DEBUG
   propertyState_ = PropertyState::Init;
 #endif
   return true;
 }
 
-bool PropertyEmitter::emitInitIndexOrComputed(
-    JSOp op, FunctionPrefixKind prefixKind,
-    bool isPropertyAnonFunctionOrClass) {
+bool PropertyEmitter::emitInitIndexOrComputed(JSOp op) {
   MOZ_ASSERT(propertyState_ == PropertyState::IndexValue ||
              propertyState_ == PropertyState::InitHomeObjForIndex ||
              propertyState_ == PropertyState::ComputedValue ||
              propertyState_ == PropertyState::InitHomeObjForComputed);
 
   MOZ_ASSERT(op == JSOP_INITELEM || op == JSOP_INITHIDDENELEM ||
              op == JSOP_INITELEM_GETTER || op == JSOP_INITHIDDENELEM_GETTER ||
              op == JSOP_INITELEM_SETTER || op == JSOP_INITHIDDENELEM_SETTER);
 
   //                [stack] CTOR? OBJ CTOR? KEY VAL
 
-  if (isPropertyAnonFunctionOrClass) {
-    if (!bce_->emitDupAt(1)) {
-      //            [stack] CTOR? OBJ CTOR? KEY FUN FUN
-      return false;
-    }
-    if (!bce_->emit2(JSOP_SETFUNNAME, uint8_t(prefixKind))) {
-      //            [stack] CTOR? OBJ CTOR? KEY FUN
-      return false;
-    }
-  }
-
   if (!bce_->emit1(op)) {
     //              [stack] CTOR? OBJ CTOR?
     return false;
   }
 
   if (!emitPopClassConstructor()) {
     return false;
   }
@@ -531,17 +488,20 @@ AutoSaveLocalStrictMode::~AutoSaveLocalS
 }
 
 void AutoSaveLocalStrictMode::restore() {
   MOZ_ALWAYS_TRUE(sc_->setLocalStrictMode(savedStrictness_));
   sc_ = nullptr;
 }
 
 ClassEmitter::ClassEmitter(BytecodeEmitter* bce)
-    : PropertyEmitter(bce), strictMode_(bce->sc), name_(bce->cx) {
+    : PropertyEmitter(bce),
+      strictMode_(bce->sc),
+      name_(bce->cx),
+      nameForAnonymousClass_(bce->cx) {
   isClass_ = true;
 }
 
 bool ClassEmitter::emitScopeForNamedClass(
     JS::Handle<LexicalScope::Data*> scopeBindings) {
   MOZ_ASSERT(propertyState_ == PropertyState::Start);
   MOZ_ASSERT(classState_ == ClassState::Start);
 
@@ -552,45 +512,57 @@ bool ClassEmitter::emitScopeForNamedClas
   }
 
 #ifdef DEBUG
   classState_ = ClassState::Scope;
 #endif
   return true;
 }
 
-bool ClassEmitter::emitClass(JS::Handle<JSAtom*> name) {
+bool ClassEmitter::emitClass(JS::Handle<JSAtom*> name,
+                             JS::Handle<JSAtom*> nameForAnonymousClass,
+                             bool hasNameOnStack) {
   MOZ_ASSERT(propertyState_ == PropertyState::Start);
   MOZ_ASSERT(classState_ == ClassState::Start ||
              classState_ == ClassState::Scope);
+  MOZ_ASSERT_IF(nameForAnonymousClass || hasNameOnStack, !name);
+  MOZ_ASSERT(!(nameForAnonymousClass && hasNameOnStack));
 
   //                [stack]
 
-  setName(name);
+  name_ = name;
+  nameForAnonymousClass_ = nameForAnonymousClass;
+  hasNameOnStack_ = hasNameOnStack;
   isDerived_ = false;
 
   if (!bce_->emitNewInit()) {
     //              [stack] HOMEOBJ
     return false;
   }
 
 #ifdef DEBUG
   classState_ = ClassState::Class;
 #endif
   return true;
 }
 
-bool ClassEmitter::emitDerivedClass(JS::Handle<JSAtom*> name) {
+bool ClassEmitter::emitDerivedClass(JS::Handle<JSAtom*> name,
+                                    JS::Handle<JSAtom*> nameForAnonymousClass,
+                                    bool hasNameOnStack) {
   MOZ_ASSERT(propertyState_ == PropertyState::Start);
   MOZ_ASSERT(classState_ == ClassState::Start ||
              classState_ == ClassState::Scope);
+  MOZ_ASSERT_IF(nameForAnonymousClass || hasNameOnStack, !name);
+  MOZ_ASSERT(!nameForAnonymousClass || !hasNameOnStack);
 
   //                [stack]
 
-  setName(name);
+  name_ = name;
+  nameForAnonymousClass_ = nameForAnonymousClass;
+  hasNameOnStack_ = hasNameOnStack;
   isDerived_ = true;
 
   InternalIfEmitter ifThenElse(bce_);
 
   // Heritage must be null or a non-generator constructor
   if (!bce_->emit1(JSOP_CHECKCLASSHERITAGE)) {
     //              [stack] HERITAGE
     return false;
@@ -655,23 +627,16 @@ bool ClassEmitter::emitDerivedClass(JS::
   }
 
 #ifdef DEBUG
   classState_ = ClassState::Class;
 #endif
   return true;
 }
 
-void ClassEmitter::setName(JS::Handle<JSAtom*> name) {
-  name_ = name;
-  if (!name_) {
-    name_ = bce_->cx->names().empty;
-  }
-}
-
 bool ClassEmitter::emitInitConstructor(bool needsHomeObject) {
   MOZ_ASSERT(propertyState_ == PropertyState::Start);
   MOZ_ASSERT(classState_ == ClassState::Class);
 
   //                [stack] HOMEOBJ CTOR
 
   if (needsHomeObject) {
     if (!bce_->emitDupAt(1)) {
@@ -706,58 +671,103 @@ bool ClassEmitter::emitInitDefaultConstr
     // actually make the constructor during execution, we can give it the
     // correct toString output.
     if (!bce_->newSrcNote3(SRC_CLASS_SPAN, ptrdiff_t(*classStart),
                            ptrdiff_t(*classEnd))) {
       return false;
     }
   }
 
+  RootedAtom className(bce_->cx, name_);
+  if (!className) {
+    if (nameForAnonymousClass_) {
+      className = nameForAnonymousClass_;
+    } else {
+      className = bce_->cx->names().empty;
+    }
+  }
+
   if (isDerived_) {
     //              [stack] HERITAGE PROTO
-    if (!bce_->emitAtomOp(name_, JSOP_DERIVEDCONSTRUCTOR)) {
+    if (!bce_->emitAtomOp(className, JSOP_DERIVEDCONSTRUCTOR)) {
       //            [stack] HOMEOBJ CTOR
       return false;
     }
   } else {
     //              [stack] HOMEOBJ
-    if (!bce_->emitAtomOp(name_, JSOP_CLASSCONSTRUCTOR)) {
+    if (!bce_->emitAtomOp(className, JSOP_CLASSCONSTRUCTOR)) {
       //            [stack] HOMEOBJ CTOR
       return false;
     }
   }
 
+  // The empty string is used as a placeholder, so if the inferred name for this
+  // anonymous class expression is also the empty string, we need to set it
+  // explicitly here.
+  if (nameForAnonymousClass_ == bce_->cx->names().empty) {
+    if (!emitSetEmptyClassConstructorNameForDefaultCtor()) {
+      return false;
+    }
+  }
+
   if (!initProtoAndCtor()) {
     //              [stack] CTOR HOMEOBJ
     return false;
   }
 
 #ifdef DEBUG
   classState_ = ClassState::InitConstructor;
 #endif
   return true;
 }
 
+bool ClassEmitter::emitSetEmptyClassConstructorNameForDefaultCtor() {
+  uint32_t nameIndex;
+  if (!bce_->makeAtomIndex(bce_->cx->names().empty, &nameIndex)) {
+    return false;
+  }
+  if (!bce_->emitIndexOp(JSOP_STRING, nameIndex)) {
+    //              [stack] CTOR NAME
+    return false;
+  }
+  if (!bce_->emit2(JSOP_SETFUNNAME, uint8_t(FunctionPrefixKind::None))) {
+    //              [stack] CTOR
+    return false;
+  }
+  return true;
+}
+
 bool ClassEmitter::initProtoAndCtor() {
-  //                [stack] HOMEOBJ CTOR
+  //                [stack] NAME? HOMEOBJ CTOR
+
+  if (hasNameOnStack_) {
+    if (!bce_->emitDupAt(2)) {
+      //            [stack] NAME HOMEOBJ CTOR NAME
+      return false;
+    }
+    if (!bce_->emit2(JSOP_SETFUNNAME, uint8_t(FunctionPrefixKind::None))) {
+      //            [stack] NAME HOMEOBJ CTOR
+      return false;
+    }
+  }
 
   if (!bce_->emit1(JSOP_SWAP)) {
-    //              [stack] CTOR HOMEOBJ
+    //              [stack] NAME? CTOR HOMEOBJ
     return false;
   }
   if (!bce_->emit1(JSOP_DUP2)) {
-    //              [stack] CTOR HOMEOBJ CTOR HOMEOBJ
+    //              [stack] NAME? CTOR HOMEOBJ CTOR HOMEOBJ
     return false;
   }
   if (!bce_->emitAtomOp(bce_->cx->names().prototype, JSOP_INITLOCKEDPROP)) {
-    //              [stack] CTOR HOMEOBJ CTOR
+    //              [stack] NAME? CTOR HOMEOBJ CTOR
     return false;
   }
   if (!bce_->emitAtomOp(bce_->cx->names().constructor, JSOP_INITHIDDENPROP)) {
-    //              [stack] CTOR HOMEOBJ
+    //              [stack] NAME? CTOR HOMEOBJ
     return false;
   }
 
   return true;
 }
 
 bool ClassEmitter::emitEnd(Kind kind) {
   MOZ_ASSERT(propertyState_ == PropertyState::Start ||
@@ -766,17 +776,17 @@ bool ClassEmitter::emitEnd(Kind kind) {
 
   //                [stack] CTOR HOMEOBJ
 
   if (!bce_->emit1(JSOP_POP)) {
     //              [stack] CTOR
     return false;
   }
 
-  if (name_ != bce_->cx->names().empty) {
+  if (name_) {
     MOZ_ASSERT(tdzCacheForInnerName_.isSome());
     MOZ_ASSERT(innerNameScope_.isSome());
 
     if (!bce_->emitLexicalInitialization(name_)) {
       //            [stack] CTOR
       return false;
     }
 
--- a/js/src/frontend/ObjectEmitter.h
+++ b/js/src/frontend/ObjectEmitter.h
@@ -244,58 +244,38 @@ class MOZ_STACK_CLASS PropertyEmitter {
       const mozilla::Maybe<uint32_t>& keyPos, Kind kind = Kind::Prototype);
   MOZ_MUST_USE bool prepareForComputedPropValue();
 
   MOZ_MUST_USE bool emitInitHomeObject(
       FunctionAsyncKind kind = FunctionAsyncKind::SyncFunction);
 
   // @param key
   //        Property key
-  // @param isPropertyAnonFunctionOrClass
-  //        True if the property value is an anonymous function or
-  //        an anonymous class
-  // @param anonFunction
-  //        The anonymous function object for property value
-  MOZ_MUST_USE bool emitInitProp(
-      JS::Handle<JSAtom*> key, bool isPropertyAnonFunctionOrClass = false,
-      JS::Handle<JSFunction*> anonFunction = nullptr);
+  MOZ_MUST_USE bool emitInitProp(JS::Handle<JSAtom*> key);
   MOZ_MUST_USE bool emitInitGetter(JS::Handle<JSAtom*> key);
   MOZ_MUST_USE bool emitInitSetter(JS::Handle<JSAtom*> key);
 
-  MOZ_MUST_USE bool emitInitIndexProp(
-      bool isPropertyAnonFunctionOrClass = false);
+  MOZ_MUST_USE bool emitInitIndexProp();
   MOZ_MUST_USE bool emitInitIndexGetter();
   MOZ_MUST_USE bool emitInitIndexSetter();
 
-  MOZ_MUST_USE bool emitInitComputedProp(
-      bool isPropertyAnonFunctionOrClass = false);
+  MOZ_MUST_USE bool emitInitComputedProp();
   MOZ_MUST_USE bool emitInitComputedGetter();
   MOZ_MUST_USE bool emitInitComputedSetter();
 
  private:
   MOZ_MUST_USE MOZ_ALWAYS_INLINE bool prepareForProp(
       const mozilla::Maybe<uint32_t>& keyPos, bool isStatic, bool isComputed);
 
   // @param op
   //        Opcode for initializing property
-  // @param prefixKind
-  //        None, Get, or Set
   // @param key
   //        Atom of the property if the property key is not computed
-  // @param isPropertyAnonFunctionOrClass
-  //        True if the property is either an anonymous function or an
-  //        anonymous class
-  // @param anonFunction
-  //        Anonymous function object for the property
-  MOZ_MUST_USE bool emitInit(JSOp op, JS::Handle<JSAtom*> key,
-                             bool isPropertyAnonFunctionOrClass,
-                             JS::Handle<JSFunction*> anonFunction);
-  MOZ_MUST_USE bool emitInitIndexOrComputed(JSOp op,
-                                            FunctionPrefixKind prefixKind,
-                                            bool isPropertyAnonFunctionOrClass);
+  MOZ_MUST_USE bool emitInit(JSOp op, JS::Handle<JSAtom*> key);
+  MOZ_MUST_USE bool emitInitIndexOrComputed(JSOp op);
 
   MOZ_MUST_USE bool emitPopClassConstructor();
 };
 
 // Class for emitting bytecode for object literal.
 //
 // Usage: (check for the return value is omitted for simplicity)
 //
@@ -315,17 +295,17 @@ class MOZ_STACK_CLASS PropertyEmitter {
 //     oe.emitEnd();
 //
 //   `{ prop: function() {} }`, when property value is anonymous function
 //     ObjectEmitter oe(this);
 //     oe.emitObject(1);
 //
 //     oe.prepareForPropValue(Some(offset_of_prop));
 //     emit(function);
-//     oe.emitInitProp(atom_of_prop, true, function_object);
+//     oe.emitInitProp(atom_of_prop);
 //
 //     oe.emitEnd();
 //
 //   `{ get prop() { ... }, set prop(v) { ... } }`
 //     ObjectEmitter oe(this);
 //     oe.emitObject(2);
 //
 //     oe.prepareForPropValue(Some(offset_of_prop));
@@ -341,17 +321,17 @@ class MOZ_STACK_CLASS PropertyEmitter {
 //   `{ 1: 10, get 2() { ... }, set 3(v) { ... } }`
 //     ObjectEmitter oe(this);
 //     oe.emitObject(3);
 //
 //     oe.prepareForIndexPropKey(Some(offset_of_prop));
 //     emit(1);
 //     oe.prepareForIndexPropValue();
 //     emit(10);
-//     oe.emitInitIndexedProp(atom_of_prop);
+//     oe.emitInitIndexedProp();
 //
 //     oe.prepareForIndexPropKey(Some(offset_of_opening_bracket));
 //     emit(2);
 //     oe.prepareForIndexPropValue();
 //     emit(function_for_getter);
 //     oe.emitInitIndexGetter();
 //
 //     oe.prepareForIndexPropKey(Some(offset_of_opening_bracket));
@@ -455,70 +435,70 @@ class MOZ_RAII AutoSaveLocalStrictMode {
 };
 
 // Class for emitting bytecode for JS class.
 //
 // Usage: (check for the return value is omitted for simplicity)
 //
 //   `class {}`
 //     ClassEmitter ce(this);
-//     ce.emitClass();
+//     ce.emitClass(nullptr, nullptr, false);
 //
 //     ce.emitInitDefaultConstructor(Some(offset_of_class),
 //                                   Some(offset_of_closing_bracket));
 //
 //     ce.emitEnd(ClassEmitter::Kind::Expression);
 //
 //   `class { constructor() { ... } }`
 //     ClassEmitter ce(this);
-//     ce.emitClass();
+//     ce.emitClass(nullptr, nullptr, false);
 //
 //     emit(function_for_constructor);
 //     ce.emitInitConstructor(/* needsHomeObject = */ false);
 //
 //     ce.emitEnd(ClassEmitter::Kind::Expression);
 //
 //   `class X { constructor() { ... } }`
 //     ClassEmitter ce(this);
 //     ce.emitScopeForNamedClass(scopeBindingForName);
-//     ce.emitClass(atom_of_X);
+//     ce.emitClass(atom_of_X, nullptr, false);
 //
 //     ce.emitInitDefaultConstructor(Some(offset_of_class),
 //                                   Some(offset_of_closing_bracket));
 //
 //     ce.emitEnd(ClassEmitter::Kind::Expression);
 //
 //   `class X { constructor() { ... } }`
 //     ClassEmitter ce(this);
 //     ce.emitScopeForNamedClass(scopeBindingForName);
-//     ce.emitClass(atom_of_X);
+//     ce.emitClass(atom_of_X, nullptr, false);
 //
 //     emit(function_for_constructor);
 //     ce.emitInitConstructor(/* needsHomeObject = */ false);
 //
 //     ce.emitEnd(ClassEmitter::Kind::Expression);
 //
 //   `class X extends Y { constructor() { ... } }`
 //     ClassEmitter ce(this);
 //     ce.emitScopeForNamedClass(scopeBindingForName);
 //
 //     emit(Y);
-//     ce.emitDerivedClass(atom_of_X);
+//     ce.emitDerivedClass(atom_of_X, nullptr, false);
 //
 //     emit(function_for_constructor);
 //     ce.emitInitConstructor(/* needsHomeObject = */ false);
 //
 //     ce.emitEnd(ClassEmitter::Kind::Expression);
 //
 //   `class X extends Y { constructor() { ... super.f(); ... } }`
 //     ClassEmitter ce(this);
 //     ce.emitScopeForNamedClass(scopeBindingForName);
 //
 //     emit(Y);
-//     ce.emitDerivedClass(atom_of_X);
+//     ce.emitDerivedClass(atom_of_X, nullptr, false);
 //
 //     emit(function_for_constructor);
 //     // pass true if constructor contains super.prop access
 //     ce.emitInitConstructor(/* needsHomeObject = */ true);
 //
 //     ce.emitEnd(ClassEmitter::Kind::Expression);
 //
 //   `m() {}` in class
@@ -675,27 +655,37 @@ class MOZ_STACK_CLASS ClassEmitter : pub
 
     // After calling emitEnd.
     End,
   };
   ClassState classState_ = ClassState::Start;
 #endif
 
   JS::Rooted<JSAtom*> name_;
+  JS::Rooted<JSAtom*> nameForAnonymousClass_;
+  bool hasNameOnStack_ = false;
 
  public:
   explicit ClassEmitter(BytecodeEmitter* bce);
 
   MOZ_MUST_USE bool emitScopeForNamedClass(
       JS::Handle<LexicalScope::Data*> scopeBindings);
 
   // @param name
   //        Name of the class (nullptr if this is anonymous class)
-  MOZ_MUST_USE bool emitClass(JS::Handle<JSAtom*> name);
-  MOZ_MUST_USE bool emitDerivedClass(JS::Handle<JSAtom*> name);
+  // @param nameForAnonymousClass
+  //        Statically inferred name of the class (only for anonymous classes)
+  // @param hasNameOnStack
+  //        If true the name is on the stack (only for anonymous classes)
+  MOZ_MUST_USE bool emitClass(JS::Handle<JSAtom*> name,
+                              JS::Handle<JSAtom*> nameForAnonymousClass,
+                              bool hasNameOnStack);
+  MOZ_MUST_USE bool emitDerivedClass(JS::Handle<JSAtom*> name,
+                                     JS::Handle<JSAtom*> nameForAnonymousClass,
+                                     bool hasNameOnStack);
 
   // @param needsHomeObject
   //        True if the constructor contains `super.foo`
   MOZ_MUST_USE bool emitInitConstructor(bool needsHomeObject);
 
   // Parameters are the offset in the source code for each character below:
   //
   //   class X { foo() {} }
@@ -707,16 +697,16 @@ class MOZ_STACK_CLASS ClassEmitter : pub
   //
   MOZ_MUST_USE bool emitInitDefaultConstructor(
       const mozilla::Maybe<uint32_t>& classStart,
       const mozilla::Maybe<uint32_t>& classEnd);
 
   MOZ_MUST_USE bool emitEnd(Kind kind);
 
  private:
-  void setName(JS::Handle<JSAtom*> name);
+  MOZ_MUST_USE bool emitSetEmptyClassConstructorNameForDefaultCtor();
   MOZ_MUST_USE bool initProtoAndCtor();
 };
 
 } /* namespace frontend */
 } /* namespace js */
 
 #endif /* frontend_ObjectEmitter_h */
--- a/js/src/jit-test/tests/auto-regress/bug1448582-6.js
+++ b/js/src/jit-test/tests/auto-regress/bug1448582-6.js
@@ -4,24 +4,23 @@
 // - The |o[index]| inner function has a dynamic name from a computed property name.
 // - The |self| inner function uses |Function.prototype.caller| to reinvoke the outer function.
 
 (function(index) {
     var o = {
         [index]: class {
             constructor() {}
 
-            // Prevent adding an inferred name at index = 1 by creating a
-            // static method named "name".
+            // The static method named "name" is added after assigning the
+            // inferred name.
             static [(index === 0 ? "not-name" : "name")]() {}
         }
     }
 
-    // At index = 0 the class will get the inferred name "0".
-    // At index = 1 the class should have no inferred name.
-    assertEq(displayName(o[index]), index === 0 ? "0" : "");
+    // The inferred name matches the current index.
+    assertEq(displayName(o[index]), String(index));
 
     if (index === 0) {
         (function self() {
             self.caller(1);
         })();
     }
 })(0);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/regress/baseline-arm64-chunk-pop.js
@@ -0,0 +1,31 @@
+var bin = wasmTextToBinary(
+    `(module
+       (func (export "f4786") (result i32)
+         (local i32 i64 i64 i64 f32)
+         i32.const 1
+         tee_local 0
+         get_local 0
+         get_local 0
+         get_local 0
+         get_local 0
+         get_local 0
+         get_local 0
+         get_local 0
+         if i32
+           get_local 0
+         else
+           get_local 0
+           tee_local 0
+           get_local 0
+           br_if 1
+         end
+         drop
+         drop
+         drop
+         drop
+         drop
+         drop
+         drop))`);
+var ins = new WebAssembly.Instance(new WebAssembly.Module(bin));
+ins.exports.f4786();
+
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -2193,17 +2193,17 @@ bool BaselineCodeGen<Handler>::emit_JSOP
   masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0);
   frame.push(R0);
   return true;
 }
 
 typedef bool (*SetFunNameFn)(JSContext*, HandleFunction, HandleValue,
                              FunctionPrefixKind);
 static const VMFunction SetFunNameInfo =
-    FunctionInfo<SetFunNameFn>(js::SetFunctionNameIfNoOwnName, "SetFunName");
+    FunctionInfo<SetFunNameFn>(js::SetFunctionName, "SetFunName");
 
 template <typename Handler>
 bool BaselineCodeGen<Handler>::emit_JSOP_SETFUNNAME() {
   frame.popRegsAndSync(2);
 
   frame.push(R0);
   frame.syncStack(0);
 
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -3180,17 +3180,17 @@ void CodeGenerator::emitLambdaInit(Regis
   // the nursery.
   masm.storePtr(ImmGCPtr(info.funUnsafe()->displayAtom()),
                 Address(output, JSFunction::offsetOfAtom()));
 }
 
 typedef bool (*SetFunNameFn)(JSContext*, HandleFunction, HandleValue,
                              FunctionPrefixKind);
 static const VMFunction SetFunNameInfo =
-    FunctionInfo<SetFunNameFn>(js::SetFunctionNameIfNoOwnName, "SetFunName");
+    FunctionInfo<SetFunNameFn>(js::SetFunctionName, "SetFunName");
 
 void CodeGenerator::visitSetFunName(LSetFunName* lir) {
   pushArg(Imm32(lir->mir()->prefixKind()));
   pushArg(ToValue(lir, LSetFunName::NameValue));
   pushArg(ToRegister(lir->fun()));
 
   callVM(SetFunNameInfo, lir);
 }
--- a/js/src/jsapi-tests/testGCAllocator.cpp
+++ b/js/src/jsapi-tests/testGCAllocator.cpp
@@ -32,17 +32,17 @@ BEGIN_TEST(testGCAllocator) {
   }
 #endif
 
   size_t PageSize = js::gc::SystemPageSize();
 
   /* Finish any ongoing background free activity. */
   js::gc::FinishGC(cx);
 
-  bool growUp;
+  bool growUp = false;
   CHECK(addressesGrowUp(&growUp));
 
   if (growUp) {
     return testGCAllocatorUp(PageSize);
   } else {
     return testGCAllocatorDown(PageSize);
   }
 }
--- a/js/src/shell/js.cpp
+++ b/js/src/shell/js.cpp
@@ -10820,29 +10820,29 @@ int main(int argc, char** argv, char** e
    */
   OOM_printAllocationCount = op.getBoolOption('O');
 #endif
 
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
   if (op.getBoolOption("no-sse3")) {
     js::jit::CPUInfo::SetSSE3Disabled();
     if (!sCompilerProcessFlags.append("--no-sse3")) {
-      return EXIT_SUCCESS;
+      return EXIT_FAILURE;
     }
   }
   if (op.getBoolOption("no-sse4")) {
     js::jit::CPUInfo::SetSSE4Disabled();
     if (!sCompilerProcessFlags.append("--no-sse4")) {
-      return EXIT_SUCCESS;
+      return EXIT_FAILURE;
     }
   }
   if (op.getBoolOption("enable-avx")) {
     js::jit::CPUInfo::SetAVXEnabled();
     if (!sCompilerProcessFlags.append("--enable-avx")) {
-      return EXIT_SUCCESS;
+      return EXIT_FAILURE;
     }
   }
 #endif
 
   if (op.getBoolOption("no-threads")) {
     js::DisableExtraThreads();
   }
 
--- a/js/src/tests/non262/Function/function-toString-discard-source-name.js
+++ b/js/src/tests/non262/Function/function-toString-discard-source-name.js
@@ -87,17 +87,17 @@ class classDecl {}
 var classExpr = class C {};
 var classExprAnon = class {};
 
 this.classDecl = classDecl;
 `);
 
 assertFunctionName(classDecl, "classDecl");
 assertFunctionName(classExpr, "C");
-assertFunctionName(classExprAnon, undefined);
+assertFunctionName(classExprAnon, "classExprAnon");
 
 
 // Class declarations and expressions (explicit constructor).
 eval(`
 class classDecl { constructor() {} }
 var classExpr = class C { constructor() {} };
 var classExprAnon = class { constructor() {} };
 
--- a/js/src/vm/Interpreter.cpp
+++ b/js/src/vm/Interpreter.cpp
@@ -3652,17 +3652,17 @@ static MOZ_NEVER_INLINE JS_HAZ_JSNATIVE_
     END_CASE(JSOP_TRYSKIPAWAIT)
 
     CASE(JSOP_SETFUNNAME) {
       MOZ_ASSERT(REGS.stackDepth() >= 2);
       FunctionPrefixKind prefixKind = FunctionPrefixKind(GET_UINT8(REGS.pc));
       ReservedRooted<Value> name(&rootValue0, REGS.sp[-1]);
       ReservedRooted<JSFunction*> fun(&rootFunction0,
                                       &REGS.sp[-2].toObject().as<JSFunction>());
-      if (!SetFunctionNameIfNoOwnName(cx, fun, name, prefixKind)) {
+      if (!SetFunctionName(cx, fun, name, prefixKind)) {
         goto error;
       }
 
       REGS.sp--;
     }
     END_CASE(JSOP_SETFUNNAME)
 
     CASE(JSOP_CALLEE) {
--- a/js/src/vm/JSFunction.cpp
+++ b/js/src/vm/JSFunction.cpp
@@ -2211,18 +2211,18 @@ static inline JSFunction* NewFunctionClo
   if (!clone) {
     return nullptr;
   }
 
   // JSFunction::HAS_INFERRED_NAME can be set at compile-time and at
   // runtime. In the latter case we should actually clear the flag before
   // cloning the function, but since we can't differentiate between both
   // cases here, we'll end up with a momentarily incorrect function name.
-  // This will be fixed up in SetFunctionNameIfNoOwnName(), which should
-  // happen through JSOP_SETFUNNAME directly after JSOP_LAMBDA.
+  // This will be fixed up in SetFunctionName(), which should happen through
+  // JSOP_SETFUNNAME directly after JSOP_LAMBDA.
   constexpr uint16_t NonCloneableFlags = JSFunction::EXTENDED |
                                          JSFunction::RESOLVED_LENGTH |
                                          JSFunction::RESOLVED_NAME;
 
   uint16_t flags = fun->flags() & ~NonCloneableFlags;
   if (allocKind == gc::AllocKind::FUNCTION_EXTENDED) {
     flags |= JSFunction::EXTENDED;
   }
@@ -2469,53 +2469,41 @@ JSAtom* js::IdToFunctionName(
     return SymbolToFunctionName(cx, JSID_TO_SYMBOL(id), prefixKind);
   }
 
   // Step 5.
   RootedValue idv(cx, IdToValue(id));
   return NameToFunctionName(cx, idv, prefixKind);
 }
 
-bool js::SetFunctionNameIfNoOwnName(JSContext* cx, HandleFunction fun,
-                                    HandleValue name,
-                                    FunctionPrefixKind prefixKind) {
+bool js::SetFunctionName(JSContext* cx, HandleFunction fun, HandleValue name,
+                         FunctionPrefixKind prefixKind) {
   MOZ_ASSERT(name.isString() || name.isSymbol() || name.isNumber());
 
-  // An inferred name may already be set if this function is a clone of a
-  // singleton function. Clear the inferred name in all cases, even if we
-  // end up not adding a new inferred name if |fun| is a class constructor.
+  // `fun` is a newly created function, so normally it can't already have an
+  // inferred name. The rare exception is when `fun` was created by cloning
+  // a singleton function; see the comment in NewFunctionClone. In that case,
+  // the inferred name is bogus, so clear it out.
   if (fun->hasInferredName()) {
     MOZ_ASSERT(fun->isSingleton());
     fun->clearInferredName();
   }
 
-  if (fun->isClassConstructor()) {
-    // A class may have static 'name' method or accessor.
-    if (fun->contains(cx, cx->names().name)) {
-      return true;
-    }
-  } else {
-    // Anonymous function shouldn't have own 'name' property at this point.
-    MOZ_ASSERT(!fun->containsPure(cx->names().name));
-  }
+  // Anonymous functions should neither have an own 'name' property nor a
+  // resolved name at this point.
+  MOZ_ASSERT(!fun->containsPure(cx->names().name));
+  MOZ_ASSERT(!fun->hasResolvedName());
 
   JSAtom* funName = name.isSymbol()
                         ? SymbolToFunctionName(cx, name.toSymbol(), prefixKind)
                         : NameToFunctionName(cx, name, prefixKind);
   if (!funName) {
     return false;
   }
 
-  // RESOLVED_NAME shouldn't yet be set, at least as long as we don't
-  // support the "static public fields" or "decorators" proposal.
-  // These two proposals allow to access class constructors before
-  // JSOP_SETFUNNAME is executed, which means user code may have set the
-  // RESOLVED_NAME flag when we reach this point.
-  MOZ_ASSERT(!fun->hasResolvedName());
-
   fun->setInferredName(funName);
 
   return true;
 }
 
 JSFunction* js::DefineFunction(
     JSContext* cx, HandleObject obj, HandleId id, Native native, unsigned nargs,
     unsigned flags, gc::AllocKind allocKind /* = AllocKind::FUNCTION */) {
--- a/js/src/vm/JSFunction.h
+++ b/js/src/vm/JSFunction.h
@@ -67,18 +67,18 @@ class JSFunction : public js::NativeObje
                         nonstandard function-statement) */
     SELF_HOSTED =
         0x0080, /* On an interpreted function, indicates a self-hosted builtin,
                    which must not be decompilable nor constructible. On a native
                    function, indicates an 'intrinsic', intended for use from
                    self-hosted code only. */
     HAS_INFERRED_NAME = 0x0100, /* function had no explicit name, but a name was
                                    set by SetFunctionName at compile time or
-                                   SetFunctionNameIfNoOwnName at runtime. See
-                                   atom_ for more info about this flag. */
+                                   SetFunctionName at runtime. See atom_ for
+                                   more info about this flag. */
     INTERPRETED_LAZY =
         0x0200, /* function is interpreted but doesn't have a script yet */
     RESOLVED_LENGTH =
         0x0400,             /* f.length has been resolved (see fun_resolve). */
     RESOLVED_NAME = 0x0800, /* f.name has been resolved (see fun_resolve). */
     NEW_SCRIPT_CLEARED =
         0x1000, /* For a function used as an interpreted constructor, whether
                    a 'new' type had constructor information cleared. */
@@ -179,18 +179,17 @@ class JSFunction : public js::NativeObje
   //      compile-time, the HAS_INFERRED_NAME is set directly in the
   //      bytecode emitter, when it happens at runtime, the flag is set when
   //      evaluating the JSOP_SETFUNNAME bytecode.
   //   d. HAS_GUESSED_ATOM and HAS_INFERRED_NAME cannot both be set.
   //   e. |atom_| can be null if neither an explicit, nor inferred, nor a
   //      guessed name was set.
   //   f. HAS_INFERRED_NAME can be set for cloned singleton function, even
   //      though the clone shouldn't receive an inferred name. See the
-  //      comments in NewFunctionClone() and SetFunctionNameIfNoOwnName()
-  //      for details.
+  //      comments in NewFunctionClone() and SetFunctionName() for details.
   //
   // 2. If the function is a bound function:
   //   a. To store the initial value of the "name" property.
   //   b. If HAS_BOUND_FUNCTION_NAME_PREFIX is not set, |atom_| doesn't
   //      contain the "bound " prefix which is prepended to the "name"
   //      property of bound functions per ECMAScript.
   //   c. Bound functions can never have an inferred or guessed name.
   //   d. |atom_| is never null for bound functions.
@@ -893,19 +892,18 @@ extern JSFunction* NewScriptedFunction(
     JSContext* cx, unsigned nargs, JSFunction::Flags flags, HandleAtom atom,
     HandleObject proto = nullptr,
     gc::AllocKind allocKind = gc::AllocKind::FUNCTION,
     NewObjectKind newKind = GenericObject, HandleObject enclosingEnv = nullptr);
 extern JSAtom* IdToFunctionName(
     JSContext* cx, HandleId id,
     FunctionPrefixKind prefixKind = FunctionPrefixKind::None);
 
-extern bool SetFunctionNameIfNoOwnName(JSContext* cx, HandleFunction fun,
-                                       HandleValue name,
-                                       FunctionPrefixKind prefixKind);
+extern bool SetFunctionName(JSContext* cx, HandleFunction fun, HandleValue name,
+                            FunctionPrefixKind prefixKind);
 
 extern JSFunction* DefineFunction(
     JSContext* cx, HandleObject obj, HandleId id, JSNative native,
     unsigned nargs, unsigned flags,
     gc::AllocKind allocKind = gc::AllocKind::FUNCTION);
 
 extern bool fun_toString(JSContext* cx, unsigned argc, Value* vp);
 
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -944,56 +944,56 @@ using ScratchEBX = ScratchI32;
 
 // ScratchI8 is a mnemonic device: For some ops we need a register with a
 // byte subregister.
 using ScratchI8 = ScratchI32;
 #endif
 
 // The stack frame.
 //
-// The frame has four parts ("below" means at lower addresses):
+// The stack frame has four parts ("below" means at lower addresses):
 //
-//  - the Header, comprising the Frame and DebugFrame elements;
-//  - the Local area, allocated below the header with various forms of
-//    alignment;
+//  - the Frame element;
+//  - the Local area, including the DebugFrame element; allocated below the
+//    Frame with various forms of alignment;
 //  - the Dynamic area, comprising the temporary storage the compiler uses for
 //    register spilling, allocated below the Local area;
 //  - the Arguments area, comprising memory allocated for outgoing calls,
 //    allocated below the Dynamic area.
 //
 //                 +============================+
 //                 |    Incoming arg            |
 //                 |    ...                     |
 //                 +----------------------------+
 //                 |    unspecified             |
 // --------------  +============================+
-//  ^              |    Frame (fixed size)      |
-// fixedSize       +----------------------------+ <------------------ FP
-//  |              |    DebugFrame (optional)   |               ^^
-//  |    --------  +============================+ ---------     ||
-//  |       ^      |    Local (static size)     |    ^          ||
-//  | localSize    |    ...                     |    |        framePushed
-//  v       v      |    (padding)               |    |          ||
-// --------------  +============================+ stackHeight   ||
-//          ^      |    Dynamic (variable)      |    |          ||
-//   dynamicSize   |    ...                     |    |          ||
-//          v      |    ...                     |    v          ||
-// --------------  |    (free space, sometimes) | ---------     v|
+//                 |    Frame (fixed size)      |
+// --------------  +============================+ <-------------------- FP
+//          ^      |    DebugFrame (optional)   |    ^                ^^
+//          |      +----------------------------+    |                ||
+//    localSize    |    Local (static size)     |    |                ||
+//          |      |    ...                     |    |        framePushed
+//          v      |    (padding)               |    |                ||
+// --------------  +============================+ currentStackHeight  ||
+//          ^      |    Dynamic (variable size) |    |                ||
+//   dynamicSize   |    ...                     |    |                ||
+//          v      |    ...                     |    v                ||
+// --------------  |    (free space, sometimes) | ---------           v|
 //                 +============================+ <----- SP not-during calls
-//                 |    Arguments (sometimes)   |                |
-//                 |    ...                     |                v
+//                 |    Arguments (sometimes)   |                      |
+//                 |    ...                     |                      v
 //                 +============================+ <----- SP during calls
 //
-// The Header is addressed off the stack pointer.  masm.framePushed() is always
+// The Frame is addressed off the stack pointer.  masm.framePushed() is always
 // correct, and masm.getStackPointer() + masm.framePushed() always addresses the
 // Frame, with the DebugFrame optionally below it.
 //
-// The Local area is laid out by BaseLocalIter and is allocated and deallocated
-// by standard prologue and epilogue functions that manipulate the stack
-// pointer, but it is accessed via BaseStackFrame.
+// The Local area (including the DebugFrame) is laid out by BaseLocalIter and is
+// allocated and deallocated by standard prologue and epilogue functions that
+// manipulate the stack pointer, but it is accessed via BaseStackFrame.
 //
 // The Dynamic area is maintained by and accessed via BaseStackFrame.  On some
 // systems (such as ARM64), the Dynamic memory may be allocated in chunks
 // because the SP needs a specific alignment, and in this case there will
 // normally be some free space directly above the SP.  The stack height does not
 // include the free space, it reflects the logically used space only.
 //
 // The Arguments area is allocated and deallocated via BaseStackFrame (see
@@ -1142,21 +1142,24 @@ class BaseStackFrameAllocator {
   // pointer.
   //
   // Good values for ChunkSize are the subject of future empirical analysis;
   // eight words is just an educated guess.
 
   static constexpr uint32_t ChunkSize = 8 * sizeof(void*);
   static constexpr uint32_t InitialChunk = ChunkSize;
 
-  // The current logical height of the frame, ie the sum of space for the
-  // Local and Dynamic areas.  The allocated size of the frame -- provided by
-  // masm.framePushed() -- is usually larger than currentStackHeight_, notably
-  // at the beginning of execution when we've allocated InitialChunk extra
-  // space.
+  // The current logical height of the frame is
+  //   currentStackHeight_ = localSize_ + dynamicSize
+  // where dynamicSize is not accounted for explicitly and localSize_ also
+  // includes size for the DebugFrame.
+  //
+  // The allocated size of the frame, provided by masm.framePushed(), is usually
+  // larger than currentStackHeight_, notably at the beginning of execution when
+  // we've allocated InitialChunk extra space.
 
   uint32_t currentStackHeight_;
 #endif
 
   // Size of the Local area in bytes (stable after BaseCompiler::init() has
   // called BaseStackFrame::setupLocals(), which in turn calls
   // BaseStackFrameAllocator::setLocalSize()), always rounded to the proper
   // stack alignment.  The Local area is then allocated in beginFunction(),
@@ -1200,27 +1203,41 @@ class BaseStackFrameAllocator {
 #ifdef RABALDR_CHUNKY_STACK
     currentStackHeight_ = localSize_;
 #endif
   }
 
  public:
   // The fixed amount of memory, in bytes, allocated on the stack below the
   // Header for purposes such as locals and other fixed values.  Includes all
-  // necessary alignment.
-
-  uint32_t fixedSize() const {
+  // necessary alignment, and on ARM64 also the initial chunk for the working
+  // stack memory.
+
+  uint32_t fixedAllocSize() const {
     MOZ_ASSERT(localSize_ != UINT32_MAX);
 #ifdef RABALDR_CHUNKY_STACK
     return localSize_ + InitialChunk;
 #else
     return localSize_;
 #endif
   }
 
+#ifdef RABALDR_CHUNKY_STACK
+  // The allocated frame size is frequently larger than the logical stack
+  // height; we round up to a chunk boundary, and special case the initial
+  // chunk.
+  uint32_t framePushedForHeight(uint32_t logicalHeight) {
+    if (logicalHeight <= fixedAllocSize()) {
+      return fixedAllocSize();
+    }
+    return fixedAllocSize() + AlignBytes(logicalHeight - fixedAllocSize(),
+                                         ChunkSize);
+  }
+#endif
+
  protected:
   //////////////////////////////////////////////////////////////////////
   //
   // The Dynamic area - the dynamic part of the frame, for spilling and saving
   // intermediate values.
 
   // Offset off of sp_ for the slot at stack area location `offset`.
 
@@ -1235,60 +1252,59 @@ class BaseStackFrameAllocator {
     }
     currentStackHeight_ += bytes;
     checkChunkyInvariants();
   }
 
   void popChunkyBytes(uint32_t bytes) {
     checkChunkyInvariants();
     currentStackHeight_ -= bytes;
-    // Sometimes, popChunkyBytes() is used to pop a larger area, as when we
-    // drop values consumed by a call, and we may need to drop several
-    // chunks.  But never drop the initial chunk.
-    if (masm.framePushed() - currentStackHeight_ >= ChunkSize) {
-      uint32_t target =
-          Max(fixedSize(), AlignBytes(currentStackHeight_, ChunkSize));
-      uint32_t amount = masm.framePushed() - target;
-      if (amount) {
-        masm.freeStack(amount);
-      }
-      MOZ_ASSERT(masm.framePushed() >= fixedSize());
+    // Sometimes, popChunkyBytes() is used to pop a larger area, as when we drop
+    // values consumed by a call, and we may need to drop several chunks.  But
+    // never drop the initial chunk.  Crucially, the amount we drop is always an
+    // integral number of chunks.
+    uint32_t freeSpace = masm.framePushed() - currentStackHeight_;
+    if (freeSpace >= ChunkSize) {
+      uint32_t targetAllocSize = framePushedForHeight(currentStackHeight_);
+      uint32_t amountToFree = masm.framePushed() - targetAllocSize;
+      MOZ_ASSERT(amountToFree % ChunkSize == 0);
+      if (amountToFree) {
+        masm.freeStack(amountToFree);
+      }
     }
     checkChunkyInvariants();
   }
 #endif
 
   uint32_t currentStackHeight() const {
 #ifdef RABALDR_CHUNKY_STACK
     return currentStackHeight_;
 #else
     return masm.framePushed();
 #endif
   }
 
  private:
 #ifdef RABALDR_CHUNKY_STACK
   void checkChunkyInvariants() {
+    MOZ_ASSERT(masm.framePushed() >= fixedAllocSize());
     MOZ_ASSERT(masm.framePushed() >= currentStackHeight_);
-    MOZ_ASSERT(masm.framePushed() == fixedSize() ||
+    MOZ_ASSERT(masm.framePushed() == fixedAllocSize() ||
                masm.framePushed() - currentStackHeight_ < ChunkSize);
+    MOZ_ASSERT((masm.framePushed() - localSize_) % ChunkSize == 0);
   }
 #endif
 
   // For a given stack height, return the appropriate size of the allocated
   // frame.
 
   uint32_t framePushedForHeight(StackHeight stackHeight) {
 #ifdef RABALDR_CHUNKY_STACK
-    // The allocated frame size is frequently larger than the stack height;
-    // we round up to a chunk boundary, and special case the initial chunk.
-    return stackHeight.height <= fixedSize()
-               ? fixedSize()
-               : fixedSize() +
-                     AlignBytes(stackHeight.height - fixedSize(), ChunkSize);
+    // Round the height up to a chunk boundary; see framePushedForHeight(uint32_t).
+    return framePushedForHeight(stackHeight.height);
 #else
     // The allocated frame size equals the stack height.
     return stackHeight.height;
 #endif
   }
 
  public:
   // The current height of the stack area, not necessarily zero-based, in a
@@ -4116,17 +4132,17 @@ class BaseCompiler final : public BaseCo
     if (!smgen_.generateStackmapEntriesForTrapExit(args, extras)) {
       return false;
     }
     if (!createStackMap("stack check", extras, masm.currentOffset(),
                         HasRefTypedDebugFrame::No)) {
       return false;
     }
 
-    size_t reservedBytes = fr.fixedSize() - masm.framePushed();
+    size_t reservedBytes = fr.fixedAllocSize() - masm.framePushed();
     MOZ_ASSERT(0 == (reservedBytes % sizeof(void*)));
 
     masm.reserveStack(reservedBytes);
     fr.onFixedStackAllocated();
     if (!smgen_.mst_.pushNonGCPointers(reservedBytes / sizeof(void*))) {
       return false;
     }
 
@@ -4277,17 +4293,17 @@ class BaseCompiler final : public BaseCo
       }
       insertBreakablePoint(CallSiteDesc::LeaveFrame);
       if (!createStackMap("debug: leave frame", refDebugFrame)) {
         return false;
       }
       restoreResult();
     }
 
-    GenerateFunctionEpilogue(masm, fr.fixedSize(), &offsets_);
+    GenerateFunctionEpilogue(masm, fr.fixedAllocSize(), &offsets_);
 
 #if defined(JS_ION_PERF)
     // FIXME - profiling code missing.  No bug for this.
 
     // Note the end of the inline code and start of the OOL code.
     // gen->perfSpewer().noteEndInlineCode(masm);
 #endif
 
@@ -9550,17 +9566,17 @@ bool BaseCompiler::emitSelect() {
   }
 
   // I32 condition on top, then false, then true.
 
   Label done;
   BranchState b(&done);
   emitBranchSetup(&b);
 
-  switch (NonAnyToValType(type).code()) {
+  switch (NonTVarToValType(type).code()) {
     case ValType::I32: {
       RegI32 r, rs;
       pop2xI32(&r, &rs);
       emitBranchPerform(&b);
       moveI32(rs, r);
       masm.bind(&done);
       freeI32(rs);
       pushI32(r);
@@ -11970,18 +11986,17 @@ bool js::wasm::BaselineCompileFunctions(
     Decoder d(func.begin, func.end, func.lineOrBytecode, error);
 
     // Build the local types vector.
 
     ValTypeVector locals;
     if (!locals.appendAll(env.funcTypes[func.index]->args())) {
       return false;
     }
-    if (!DecodeLocalEntries(d, env.kind, env.types, env.gcTypesEnabled(),
-                            &locals)) {
+    if (!DecodeLocalEntries(d, env.types, env.gcTypesEnabled(), &locals)) {
       return false;
     }
 
     // One-pass baseline compilation.
 
     BaseCompiler f(env, func, locals, trapExitLayout, trapExitLayoutNumWords, d,
                    &alloc, &masm, &code->stackMaps);
     if (!f.init()) {
--- a/js/src/wasm/WasmIonCompile.cpp
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -4030,18 +4030,17 @@ bool wasm::IonCompileFunctions(const Mod
     Decoder d(func.begin, func.end, func.lineOrBytecode, error);
 
     // Build the local types vector.
 
     ValTypeVector locals;
     if (!locals.appendAll(env.funcTypes[func.index]->args())) {
       return false;
     }
-    if (!DecodeLocalEntries(d, env.kind, env.types, env.gcTypesEnabled(),
-                            &locals)) {
+    if (!DecodeLocalEntries(d, env.types, env.gcTypesEnabled(), &locals)) {
       return false;
     }
 
     // Set up for Ion compilation.
 
     const JitCompileOptions options;
     MIRGraph graph(&alloc);
     CompileInfo compileInfo(locals.length());
--- a/js/src/wasm/WasmOpIter.h
+++ b/js/src/wasm/WasmOpIter.h
@@ -14,16 +14,17 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
 #ifndef wasm_op_iter_h
 #define wasm_op_iter_h
 
+#include "mozilla/Pair.h"
 #include "mozilla/Poison.h"
 
 #include "jit/AtomicOp.h"
 #include "js/Printf.h"
 #include "wasm/WasmValidate.h"
 
 namespace js {
 namespace wasm {
@@ -98,17 +99,17 @@ class StackType {
   bool operator==(Code that) const {
     MOZ_ASSERT(that != Code::Ref);
     return code() == that;
   }
 
   bool operator!=(Code that) const { return !(*this == that); }
 };
 
-static inline ValType NonAnyToValType(StackType type) {
+static inline ValType NonTVarToValType(StackType type) {
   MOZ_ASSERT(type != StackType::TVar);
   return ValType(type.packed());
 }
 
 #ifdef DEBUG
 // Families of opcodes that share a signature and validation logic.
 enum class OpKind {
   Block,
@@ -192,117 +193,63 @@ struct LinearMemoryAddress {
 
   LinearMemoryAddress() : offset(0), align(0) {}
   LinearMemoryAddress(Value base, uint32_t offset, uint32_t align)
       : base(base), offset(offset), align(align) {}
 };
 
 template <typename ControlItem>
 class ControlStackEntry {
-  LabelKind kind_;
-  bool polymorphicBase_;
-  ExprType type_;
-  size_t valueStackStart_;
-  ControlItem controlItem_;
-
- public:
-  ControlStackEntry(LabelKind kind, ExprType type, size_t valueStackStart)
-      : kind_(kind),
-        polymorphicBase_(false),
-        type_(type),
-        valueStackStart_(valueStackStart),
-        controlItem_() {
-    MOZ_ASSERT(type != ExprType::Limit);
-  }
-
-  LabelKind kind() const { return kind_; }
-  ExprType resultType() const { return type_; }
-  ExprType branchTargetType() const {
-    return kind_ == LabelKind::Loop ? ExprType::Void : type_;
-  }
-  size_t valueStackStart() const { return valueStackStart_; }
-  ControlItem& controlItem() { return controlItem_; }
-  void setPolymorphicBase() { polymorphicBase_ = true; }
-  bool polymorphicBase() const { return polymorphicBase_; }
-
-  void switchToElse() {
-    MOZ_ASSERT(kind_ == LabelKind::Then);
-    kind_ = LabelKind::Else;
-    polymorphicBase_ = false;
-  }
-};
-
-// Specialization for when there is no additional data needed.
-template <>
-class ControlStackEntry<Nothing> {
-  LabelKind kind_;
+  // Use a Pair to optimize away empty ControlItem.
+  mozilla::Pair<LabelKind, ControlItem> kindAndItem_;
   bool polymorphicBase_;
   ExprType type_;
   size_t valueStackStart_;
 
  public:
   ControlStackEntry(LabelKind kind, ExprType type, size_t valueStackStart)
-      : kind_(kind),
+      : kindAndItem_(kind, ControlItem()),
         polymorphicBase_(false),
         type_(type),
         valueStackStart_(valueStackStart) {
     MOZ_ASSERT(type != ExprType::Limit);
   }
 
-  LabelKind kind() const { return kind_; }
+  LabelKind kind() const { return kindAndItem_.first(); }
   ExprType resultType() const { return type_; }
   ExprType branchTargetType() const {
-    return kind_ == LabelKind::Loop ? ExprType::Void : type_;
+    return kind() == LabelKind::Loop ? ExprType::Void : type_;
   }
   size_t valueStackStart() const { return valueStackStart_; }
-  Nothing controlItem() { return Nothing(); }
+  ControlItem& controlItem() { return kindAndItem_.second(); }
   void setPolymorphicBase() { polymorphicBase_ = true; }
   bool polymorphicBase() const { return polymorphicBase_; }
 
   void switchToElse() {
-    MOZ_ASSERT(kind_ == LabelKind::Then);
-    kind_ = LabelKind::Else;
+    MOZ_ASSERT(kind() == LabelKind::Then);
+    kindAndItem_.first() = LabelKind::Else;
     polymorphicBase_ = false;
   }
 };
 
 template <typename Value>
 class TypeAndValue {
-  StackType type_;
-  Value value_;
+  // Use a Pair to optimize away empty Value.
+  mozilla::Pair<StackType, Value> tv_;
 
  public:
-  TypeAndValue() : type_(StackType::TVar), value_() {}
-  explicit TypeAndValue(StackType type) : type_(type), value_() {}
-  explicit TypeAndValue(ValType type) : type_(StackType(type)), value_() {}
-  TypeAndValue(StackType type, Value value) : type_(type), value_(value) {}
-  TypeAndValue(ValType type, Value value)
-      : type_(StackType(type)), value_(value) {}
-  StackType type() const { return type_; }
-  StackType& typeRef() { return type_; }
-  Value value() const { return value_; }
-  void setValue(Value value) { value_ = value; }
-};
-
-// Specialization for when there is no additional data needed.
-template <>
-class TypeAndValue<Nothing> {
-  StackType type_;
-
- public:
-  TypeAndValue() : type_(StackType::TVar) {}
-  explicit TypeAndValue(StackType type) : type_(type) {}
-  explicit TypeAndValue(ValType type) : type_(StackType(type)) {}
-  TypeAndValue(StackType type, Nothing value) : type_(type) {}
-  TypeAndValue(ValType type, Nothing value) : type_(StackType(type)) {}
-
-  StackType type() const { return type_; }
-  StackType& typeRef() { return type_; }
-  Nothing value() const { return Nothing(); }
-  void setValue(Nothing value) {}
+  TypeAndValue() : tv_(StackType::TVar, Value()) {}
+  explicit TypeAndValue(StackType type) : tv_(type, Value()) {}
+  explicit TypeAndValue(ValType type) : tv_(StackType(type), Value()) {}
+  TypeAndValue(StackType type, Value value) : tv_(type, value) {}
+  TypeAndValue(ValType type, Value value) : tv_(StackType(type), value) {}
+  StackType type() const { return tv_.first(); }
+  StackType& typeRef() { return tv_.first(); }
+  Value value() const { return tv_.second(); }
+  void setValue(Value value) { tv_.second() = value; }
 };
 
 // An iterator over the bytes of a function body. It performs validation
 // and unpacks the data into a usable form.
 //
 // The MOZ_STACK_CLASS attribute here is because of the use of DebugOnly.
 // There's otherwise nothing inherent in this class which would require
 // it to be used on the stack.
@@ -339,22 +286,19 @@ class MOZ_STACK_CLASS OpIter : private P
   MOZ_MUST_USE bool readBlockType(ExprType* expr);
   MOZ_MUST_USE bool readStructTypeIndex(uint32_t* typeIndex);
   MOZ_MUST_USE bool readFieldIndex(uint32_t* fieldIndex,
                                    const StructType& structType);
 
   MOZ_MUST_USE bool popCallArgs(const ValTypeVector& expectedTypes,
                                 Vector<Value, 8, SystemAllocPolicy>* values);
 
-  MOZ_MUST_USE bool popAnyType(StackType* type, Value* value);
-  MOZ_MUST_USE bool typeMismatch(StackType actual, StackType expected);
-  MOZ_MUST_USE bool popWithType(StackType expectedType, Value* value);
-  MOZ_MUST_USE bool popWithType(ValType valType, Value* value) {
-    return popWithType(StackType(valType), value);
-  }
+  MOZ_MUST_USE bool failEmptyStack();
+  MOZ_MUST_USE bool popStackType(StackType* type, Value* value);
+  MOZ_MUST_USE bool popWithType(ValType valType, Value* value);
   MOZ_MUST_USE bool popWithType(ExprType expectedType, Value* value);
   MOZ_MUST_USE bool topWithType(ExprType expectedType, Value* value);
   MOZ_MUST_USE bool topWithType(ValType valType, Value* value);
 
   MOZ_MUST_USE bool pushControl(LabelKind kind, ExprType type);
   MOZ_MUST_USE bool checkStackAtEndOfBlock(ExprType* type, Value* value);
   MOZ_MUST_USE bool getControl(uint32_t relativeDepth,
                                ControlStackEntry<ControlItem>** controlEntry);
@@ -380,17 +324,18 @@ class MOZ_STACK_CLASS OpIter : private P
     valueStack_.infallibleAppend(tv);
   }
 
   void afterUnconditionalBranch() {
     valueStack_.shrinkTo(controlStack_.back().valueStackStart());
     controlStack_.back().setPolymorphicBase();
   }
 
-  inline bool Join(StackType one, StackType two, StackType* result);
+  inline bool Join(StackType one, StackType two, StackType* result) const;
+  inline bool checkIsSubtypeOf(ValType lhs, ValType rhs);
 
  public:
   typedef Vector<Value, 8, SystemAllocPolicy> ValueVector;
 
 #ifdef DEBUG
   explicit OpIter(const ModuleEnvironment& env, Decoder& decoder)
       : d_(decoder),
         env_(env),
@@ -528,16 +473,17 @@ class MOZ_STACK_CLASS OpIter : private P
   MOZ_MUST_USE bool readTableSize(uint32_t* tableIndex);
   MOZ_MUST_USE bool readStructNew(uint32_t* typeIndex, ValueVector* argValues);
   MOZ_MUST_USE bool readStructGet(uint32_t* typeIndex, uint32_t* fieldIndex,
                                   Value* ptr);
   MOZ_MUST_USE bool readStructSet(uint32_t* typeIndex, uint32_t* fieldIndex,
                                   Value* ptr, Value* val);
   MOZ_MUST_USE bool readStructNarrow(ValType* inputType, ValType* outputType,
                                      Value* ptr);
+  MOZ_MUST_USE bool readValType(ValType* type);
   MOZ_MUST_USE bool readReferenceType(ValType* type, const char* const context);
 
   // At a location where readOp is allowed, peek at the next opcode
   // without consuming it or updating any internal state.
   // Never fails: returns uint16_t(Op::Limit) in op->b0 if it can't read.
   void peekOp(OpBytes* op);
 
   // ------------------------------------------------------------------------
@@ -563,52 +509,73 @@ class MOZ_STACK_CLASS OpIter : private P
 
   // Test whether the control-stack is empty, meaning we've consumed the final
   // end of the function body.
   bool controlStackEmpty() const { return controlStack_.empty(); }
 };
 
 template <typename Policy>
 inline bool OpIter<Policy>::Join(StackType one, StackType two,
-                                 StackType* result) {
+                                 StackType* result) const {
   if (MOZ_LIKELY(one == two)) {
     *result = one;
     return true;
   }
 
   if (one == StackType::TVar) {
     *result = two;
     return true;
   }
 
   if (two == StackType::TVar) {
     *result = one;
     return true;
   }
 
-  if (env_.gcTypesEnabled() && one.isReference() && two.isReference()) {
-    if (env_.isRefSubtypeOf(NonAnyToValType(two), NonAnyToValType(one))) {
+  if (one.isReference() && two.isReference()) {
+    if (env_.isRefSubtypeOf(NonTVarToValType(two), NonTVarToValType(one))) {
       *result = one;
       return true;
     }
 
-    if (env_.isRefSubtypeOf(NonAnyToValType(one), NonAnyToValType(two))) {
+    if (env_.isRefSubtypeOf(NonTVarToValType(one), NonTVarToValType(two))) {
       *result = two;
       return true;
     }
 
     // No subtyping relations between the two types.
     *result = StackType::AnyRef;
     return true;
   }
 
   return false;
 }
 
 template <typename Policy>
+inline bool OpIter<Policy>::checkIsSubtypeOf(ValType actual, ValType expected) {
+  if (actual == expected) {
+    return true;
+  }
+
+  if (actual.isReference() && expected.isReference() &&
+      env_.isRefSubtypeOf(actual, expected)) {
+    return true;
+  }
+
+  UniqueChars error(
+      JS_smprintf("type mismatch: expression has type %s but expected %s",
+                  ToCString(actual), ToCString(expected)));
+  if (!error) {
+    return false;
+  }
+
+  return fail(error.get());
+}
+
+template <typename Policy>
 inline bool OpIter<Policy>::unrecognizedOpcode(const OpBytes* expr) {
   UniqueChars error(JS_smprintf("unrecognized opcode: %x %x", expr->b0,
                                 IsPrefixByte(expr->b0) ? expr->b1 : 0));
   if (!error) {
     return false;
   }
 
   return fail(error.get());
@@ -623,101 +590,87 @@ template <typename Policy>
 inline bool OpIter<Policy>::fail_ctx(const char* fmt, const char* context) {
   UniqueChars error(JS_smprintf(fmt, context));
   if (!error) {
     return false;
   }
   return fail(error.get());
 }
 
-// This function pops exactly one value from the stack, yielding Any types in
+template <typename Policy>
+inline bool OpIter<Policy>::failEmptyStack() {
+  return valueStack_.empty() ? fail("popping value from empty stack")
+                             : fail("popping value from outside block");
+}
+
+// This function pops exactly one value from the stack, yielding TVar types in
 // various cases and therefore making it the caller's responsibility to do the
 // right thing for StackType::TVar. Prefer (pop|top)WithType.
 template <typename Policy>
-inline bool OpIter<Policy>::popAnyType(StackType* type, Value* value) {
+inline bool OpIter<Policy>::popStackType(StackType* type, Value* value) {
   ControlStackEntry<ControlItem>& block = controlStack_.back();
 
   MOZ_ASSERT(valueStack_.length() >= block.valueStackStart());
   if (MOZ_UNLIKELY(valueStack_.length() == block.valueStackStart())) {
     // If the base of this block's stack is polymorphic, then we can pop a
     // dummy value of any type; it won't be used since we're in unreachable
     // code.
     if (block.polymorphicBase()) {
       *type = StackType::TVar;
       *value = Value();
 
       // Maintain the invariant that, after a pop, there is always memory
       // reserved to push a value infallibly.
       return valueStack_.reserve(valueStack_.length() + 1);
     }
 
-    if (valueStack_.empty()) {
-      return fail("popping value from empty stack");
-    }
-    return fail("popping value from outside block");
+    return failEmptyStack();
   }
 
   TypeAndValue<Value>& tv = valueStack_.back();
   *type = tv.type();
   *value = tv.value();
   valueStack_.popBack();
   return true;
 }
 
-template <typename Policy>
-inline bool OpIter<Policy>::typeMismatch(StackType actual, StackType expected) {
-  UniqueChars error(
-      JS_smprintf("type mismatch: expression has type %s but expected %s",
-                  ToCString(NonAnyToValType(actual)),
-                  ToCString(NonAnyToValType(expected))));
-  if (!error) {
-    return false;
-  }
-
-  return fail(error.get());
-}
-
 // This function pops exactly one value from the stack, checking that it has the
 // expected type which can either be a specific value type or a type variable.
 template <typename Policy>
-inline bool OpIter<Policy>::popWithType(StackType expectedType, Value* value) {
+inline bool OpIter<Policy>::popWithType(ValType expectedType, Value* value) {
   ControlStackEntry<ControlItem>& block = controlStack_.back();
 
   MOZ_ASSERT(valueStack_.length() >= block.valueStackStart());
   if (MOZ_UNLIKELY(valueStack_.length() == block.valueStackStart())) {
     // If the base of this block's stack is polymorphic, then we can pop a
     // dummy value of any expected type; it won't be used since we're in
     // unreachable code.
     if (block.polymorphicBase()) {
       *value = Value();
 
       // Maintain the invariant that, after a pop, there is always memory
       // reserved to push a value infallibly.
       return valueStack_.reserve(valueStack_.length() + 1);
     }
 
-    if (valueStack_.empty()) {
-      return fail("popping value from empty stack");
-    }
-    return fail("popping value from outside block");
+    return failEmptyStack();
   }
 
-  TypeAndValue<Value> tv = valueStack_.popCopy();
-
-  StackType observedType = tv.type();
-  if (!(MOZ_LIKELY(observedType == expectedType) ||
-        observedType == StackType::TVar || expectedType == StackType::TVar ||
-        (env_.gcTypesEnabled() && observedType.isReference() &&
-         expectedType.isReference() &&
-         env_.isRefSubtypeOf(NonAnyToValType(observedType),
-                             NonAnyToValType(expectedType))))) {
-    return typeMismatch(observedType, expectedType);
+  TypeAndValue<Value> observed = valueStack_.popCopy();
+
+  if (observed.type() == StackType::TVar) {
+    *value = Value();
+    return true;
   }
 
-  *value = tv.value();
+  if (!checkIsSubtypeOf(NonTVarToValType(observed.type()), expectedType)) {
+    return false;
+  }
+
+  *value = observed.value();
   return true;
 }
 
 // This function pops as many types from the stack as determined by the given
 // signature. Currently, all signatures are limited to 0 or 1 types, with
 // ExprType::Void meaning 0 and all other ValTypes meaning 1, but this will be
 // generalized in the future.
 template <typename Policy>
@@ -731,54 +684,47 @@ inline bool OpIter<Policy>::popWithType(
 }
 
 // This function is just an optimization of popWithType + push.
 template <typename Policy>
 inline bool OpIter<Policy>::topWithType(ValType expectedType, Value* value) {
   ControlStackEntry<ControlItem>& block = controlStack_.back();
 
   MOZ_ASSERT(valueStack_.length() >= block.valueStackStart());
-  if (MOZ_UNLIKELY(valueStack_.length() == block.valueStackStart())) {
+  if (valueStack_.length() == block.valueStackStart()) {
     // If the base of this block's stack is polymorphic, then we can just
     // pull out a dummy value of the expected type; it won't be used since
     // we're in unreachable code. We must however push this value onto the
     // stack since it is now fixed to a specific type by this type
     // constraint.
     if (block.polymorphicBase()) {
       if (!valueStack_.emplaceBack(expectedType, Value())) {
         return false;
       }
 
       *value = Value();
       return true;
     }
 
-    if (valueStack_.empty()) {
-      return fail("reading value from empty stack");
-    }
-    return fail("reading value from outside block");
+    return failEmptyStack();
   }
 
-  TypeAndValue<Value>& tv = valueStack_.back();
-
-  StackType observed = tv.type();
-  StackType expected = StackType(expectedType);
-
-  if (!MOZ_UNLIKELY(observed == expected)) {
-    if (observed == StackType::TVar ||
-        (env_.gcTypesEnabled() && observed.isReference() &&
-         expected.isReference() &&
-         env_.isRefSubtypeOf(NonAnyToValType(observed), expectedType))) {
-      tv.typeRef() = expected;
-    } else {
-      return typeMismatch(observed, expected);
-    }
+  TypeAndValue<Value>& observed = valueStack_.back();
+
+  if (observed.type() == StackType::TVar) {
+    observed.typeRef() = StackType(expectedType);
+    *value = Value();
+    return true;
   }
 
-  *value = tv.value();
+  if (!checkIsSubtypeOf(NonTVarToValType(observed.type()), expectedType)) {
+    return false;
+  }
+
+  *value = observed.value();
   return true;
 }
 
 template <typename Policy>
 inline bool OpIter<Policy>::topWithType(ExprType expectedType, Value* value) {
   if (IsVoid(expectedType)) {
     *value = Value();
     return true;
@@ -1149,17 +1095,17 @@ inline bool OpIter<Policy>::readUnreacha
   return true;
 }
 
 template <typename Policy>
 inline bool OpIter<Policy>::readDrop() {
   MOZ_ASSERT(Classify(op_) == OpKind::Drop);
   StackType type;
   Value value;
-  return popAnyType(&type, &value);
+  return popStackType(&type, &value);
 }
 
 template <typename Policy>
 inline bool OpIter<Policy>::readUnary(ValType operandType, Value* input) {
   MOZ_ASSERT(Classify(op_) == OpKind::Unary);
 
   if (!popWithType(operandType, input)) {
     return false;
@@ -1396,22 +1342,22 @@ inline bool OpIter<Policy>::readSelect(S
                                        Value* falseValue, Value* condition) {
   MOZ_ASSERT(Classify(op_) == OpKind::Select);
 
   if (!popWithType(ValType::I32, condition)) {
     return false;
   }
 
   StackType falseType;
-  if (!popAnyType(&falseType, falseValue)) {
+  if (!popStackType(&falseType, falseValue)) {
     return false;
   }
 
   StackType trueType;
-  if (!popAnyType(&trueType, trueValue)) {
+  if (!popStackType(&trueType, trueValue)) {
     return false;
   }
 
   if (!Join(falseType, trueType, type)) {
     return fail("select operand types must match");
   }
 
   infalliblePush(*type);
@@ -1550,38 +1496,27 @@ inline bool OpIter<Policy>::readF64Const
 template <typename Policy>
 inline bool OpIter<Policy>::readRefNull() {
   MOZ_ASSERT(Classify(op_) == OpKind::RefNull);
 
   return push(StackType(ValType::NullRef));
 }
 
 template <typename Policy>
+inline bool OpIter<Policy>::readValType(ValType* type) {
+  return d_.readValType(env_.types, env_.gcTypesEnabled(), type);
+}
+
+template <typename Policy>
 inline bool OpIter<Policy>::readReferenceType(ValType* type,
                                               const char* context) {
-  uint8_t code;
-  uint32_t refTypeIndex;
-
-  if (!d_.readValType(&code, &refTypeIndex)) {
+  if (!readValType(type) || !type->isReference()) {
     return fail_ctx("invalid reference type for %s", context);
   }
 
-  if (code == uint8_t(ValType::Code::Ref)) {
-    if (refTypeIndex >= env_.types.length()) {
-      return fail_ctx("invalid reference type for %s", context);
-    }
-    if (!env_.types[refTypeIndex].isStructType()) {
-      return fail_ctx("reference to struct required for %s", context);
-    }
-  } else if (code != uint8_t(ValType::Code::AnyRef)) {
-    return fail_ctx("invalid reference type for %s", context);
-  }
-
-  *type = ValType(ValType::Code(code), refTypeIndex);
-
   return true;
 }
 
 template <typename Policy>
 inline bool OpIter<Policy>::popCallArgs(const ValTypeVector& expectedTypes,
                                         ValueVector* values) {
   // Iterate through the argument types backward so that pops occur in the
   // right order.
--- a/js/src/wasm/WasmTypes.h
+++ b/js/src/wasm/WasmTypes.h
@@ -218,16 +218,17 @@ static inline PackedTypeCode PackTypeCod
   MOZ_ASSERT(tc != TypeCode::Ref);
   return PackedTypeCode((NoRefTypeIndex << 8) | uint32_t(tc));
 }
 
 static inline PackedTypeCode PackTypeCode(TypeCode tc, uint32_t refTypeIndex) {
   MOZ_ASSERT(uint32_t(tc) <= 0xFF);
   MOZ_ASSERT_IF(tc != TypeCode::Ref, refTypeIndex == NoRefTypeIndex);
   MOZ_ASSERT_IF(tc == TypeCode::Ref, refTypeIndex <= MaxTypes);
+  static_assert(MaxTypes < (1 << (32 - 8)), "enough bits");
   return PackedTypeCode((refTypeIndex << 8) | uint32_t(tc));
 }
 
 static inline PackedTypeCode PackedTypeCodeFromBits(uint32_t bits) {
   return PackTypeCode(TypeCode(bits & 255), bits >> 8);
 }
 
 static inline bool IsValid(PackedTypeCode ptc) {
--- a/js/src/wasm/WasmValidate.cpp
+++ b/js/src/wasm/WasmValidate.cpp
@@ -389,111 +389,54 @@ bool wasm::EncodeLocalEntries(Encoder& e
     if (!e.writeValType(prev)) {
       return false;
     }
   }
 
   return true;
 }
 
-static bool DecodeValType(Decoder& d, ModuleKind kind, uint32_t numTypes,
-                          bool gcTypesEnabled, ValType* type) {
-  uint8_t uncheckedCode;
-  uint32_t uncheckedRefTypeIndex;
-  if (!d.readValType(&uncheckedCode, &uncheckedRefTypeIndex)) {
-    return false;
-  }
-
-  switch (uncheckedCode) {
-    case uint8_t(ValType::I32):
-    case uint8_t(ValType::F32):
-    case uint8_t(ValType::F64):
-    case uint8_t(ValType::I64):
-      *type = ValType(ValType::Code(uncheckedCode));
-      return true;
-    case uint8_t(ValType::AnyRef):
-      if (!gcTypesEnabled) {
-        return d.fail("reference types not enabled");
-      }
-      *type = ValType(ValType::Code(uncheckedCode));
-      return true;
-    case uint8_t(ValType::Ref): {
-      if (!gcTypesEnabled) {
-        return d.fail("reference types not enabled");
-      }
-      if (uncheckedRefTypeIndex >= numTypes) {
-        return d.fail("ref index out of range");
-      }
-      // We further validate ref types in the caller.
-      *type = ValType(ValType::Code(uncheckedCode), uncheckedRefTypeIndex);
-      return true;
-    }
-    default:
-      break;
-  }
-  return d.fail("bad type");
-}
-
-static bool ValidateRefType(Decoder& d, const TypeDefVector& types,
-                            ValType type) {
-  if (type.isRef() && !types[type.refTypeIndex()].isStructType()) {
-    return d.fail("ref does not reference a struct type");
-  }
-  return true;
-}
-
-bool wasm::DecodeLocalEntries(Decoder& d, ModuleKind kind,
-                              const TypeDefVector& types, bool gcTypesEnabled,
-                              ValTypeVector* locals) {
+bool wasm::DecodeLocalEntries(Decoder& d, const TypeDefVector& types,
+                              bool gcTypesEnabled, ValTypeVector* locals) {
   uint32_t numLocalEntries;
   if (!d.readVarU32(&numLocalEntries)) {
     return d.fail("failed to read number of local entries");
   }
 
   for (uint32_t i = 0; i < numLocalEntries; i++) {
     uint32_t count;
     if (!d.readVarU32(&count)) {
       return d.fail("failed to read local entry count");
     }
 
     if (MaxLocals - locals->length() < count) {
       return d.fail("too many locals");
     }
 
     ValType type;
-    if (!DecodeValType(d, kind, types.length(), gcTypesEnabled, &type)) {
-      return false;
-    }
-    if (!ValidateRefType(d, types, type)) {
+    if (!d.readValType(types, gcTypesEnabled, &type)) {
       return false;
     }
 
     if (!locals->appendN(type, count)) {
       return false;
     }
   }
 
   return true;
 }
 
 bool wasm::DecodeValidatedLocalEntries(Decoder& d, ValTypeVector* locals) {
   uint32_t numLocalEntries;
   MOZ_ALWAYS_TRUE(d.readVarU32(&numLocalEntries));
 
   for (uint32_t i = 0; i < numLocalEntries; i++) {
-    uint32_t count;
-    MOZ_ALWAYS_TRUE(d.readVarU32(&count));
+    uint32_t count = d.uncheckedReadVarU32();
     MOZ_ASSERT(MaxLocals - locals->length() >= count);
-
-    uint8_t uncheckedCode;
-    uint32_t uncheckedRefTypeIndex;
-    MOZ_ALWAYS_TRUE(d.readValType(&uncheckedCode, &uncheckedRefTypeIndex));
-
-    ValType type = ValType(ValType::Code(uncheckedCode), uncheckedRefTypeIndex);
-    if (!locals->appendN(type, count)) {
+    if (!locals->appendN(d.uncheckedReadValType(), count)) {
       return false;
     }
   }
 
   return true;
 }
 
 // Function body validation.
@@ -1188,18 +1131,17 @@ bool wasm::ValidateFunctionBody(const Mo
 
   ValTypeVector locals;
   if (!locals.appendAll(funcType.args())) {
     return false;
   }
 
   const uint8_t* bodyBegin = d.currentPosition();
 
-  if (!DecodeLocalEntries(d, ModuleKind::Wasm, env.types, env.gcTypesEnabled(),
-                          &locals)) {
+  if (!DecodeLocalEntries(d, env.types, env.gcTypesEnabled(), &locals)) {
     return false;
   }
 
   if (!DecodeFunctionBodyExprs(env, funcType, locals, bodyBegin + bodySize,
                                &d)) {
     return false;
   }
 
@@ -1226,18 +1168,18 @@ static bool DecodePreamble(Decoder& d) {
 
   return true;
 }
 
 enum class TypeState { None, Struct, ForwardStruct, Func };
 
 typedef Vector<TypeState, 0, SystemAllocPolicy> TypeStateVector;
 
-static bool ValidateRefType(Decoder& d, TypeStateVector* typeState,
-                            ValType type) {
+static bool ValidateTypeState(Decoder& d, TypeStateVector* typeState,
+                              ValType type) {
   if (!type.isRef()) {
     return true;
   }
 
   uint32_t refTypeIndex = type.refTypeIndex();
   switch ((*typeState)[refTypeIndex]) {
     case TypeState::None:
       (*typeState)[refTypeIndex] = TypeState::ForwardStruct;
@@ -1272,21 +1214,20 @@ static bool DecodeFuncType(Decoder& d, M
   }
 
   ValTypeVector args;
   if (!args.resize(numArgs)) {
     return false;
   }
 
   for (uint32_t i = 0; i < numArgs; i++) {
-    if (!DecodeValType(d, ModuleKind::Wasm, env->types.length(),
-                       env->gcTypesEnabled(), &args[i])) {
+    if (!d.readValType(env->types.length(), env->gcTypesEnabled(), &args[i])) {
       return false;
     }
-    if (!ValidateRefType(d, typeState, args[i])) {
+    if (!ValidateTypeState(d, typeState, args[i])) {
       return false;
     }
   }
 
   uint32_t numRets;
   if (!d.readVarU32(&numRets)) {
     return d.fail("bad number of function returns");
   }
@@ -1294,21 +1235,20 @@ static bool DecodeFuncType(Decoder& d, M
   if (numRets > 1) {
     return d.fail("too many returns in signature");
   }
 
   ExprType result = ExprType::Void;
 
   if (numRets == 1) {
     ValType type;
-    if (!DecodeValType(d, ModuleKind::Wasm, env->types.length(),
-                       env->gcTypesEnabled(), &type)) {
+    if (!d.readValType(env->types.length(), env->gcTypesEnabled(), &type)) {
       return false;
     }
-    if (!ValidateRefType(d, typeState, type)) {
+    if (!ValidateTypeState(d, typeState, type)) {
       return false;
     }
 
     result = ExprType(type);
   }
 
   if ((*typeState)[typeIndex] != TypeState::None) {
     return d.fail("function type entry referenced as struct");
@@ -1345,21 +1285,21 @@ static bool DecodeStructType(Decoder& d,
     uint8_t flags;
     if (!d.readFixedU8(&flags)) {
       return d.fail("expected flag");
     }
     if ((flags & ~uint8_t(FieldFlags::AllowedMask)) != 0) {
       return d.fail("garbage flag bits");
     }
     fields[i].isMutable = flags & uint8_t(FieldFlags::Mutable);
-    if (!DecodeValType(d, ModuleKind::Wasm, env->types.length(),
-                       env->gcTypesEnabled(), &fields[i].type)) {
+    if (!d.readValType(env->types.length(), env->gcTypesEnabled(),
+                       &fields[i].type)) {
       return false;
     }
-    if (!ValidateRefType(d, typeState, fields[i].type)) {
+    if (!ValidateTypeState(d, typeState, fields[i].type)) {
       return false;
     }
 
     CheckedInt32 offset;
     switch (fields[i].type.code()) {
       case ValType::I32:
         offset = layout.addScalar(Scalar::Int32);
         break;
@@ -1667,21 +1607,17 @@ static bool GlobalIsJSCompatible(Decoder
   }
 
   return true;
 }
 
 static bool DecodeGlobalType(Decoder& d, const TypeDefVector& types,
                              bool gcTypesEnabled, ValType* type,
                              bool* isMutable) {
-  if (!DecodeValType(d, ModuleKind::Wasm, types.length(), gcTypesEnabled,
-                     type)) {
-    return false;
-  }
-  if (!ValidateRefType(d, types, *type)) {
+  if (!d.readValType(types, gcTypesEnabled, type)) {
     return false;
   }
 
   uint8_t flags;
   if (!d.readFixedU8(&flags)) {
     return d.fail("expected global flags");
   }
 
--- a/js/src/wasm/WasmValidate.h
+++ b/js/src/wasm/WasmValidate.h
@@ -578,30 +578,71 @@ class Decoder {
   MOZ_MUST_USE bool readVarU32(uint32_t* out) {
     return readVarU<uint32_t>(out);
   }
   MOZ_MUST_USE bool readVarS32(int32_t* out) { return readVarS<int32_t>(out); }
   MOZ_MUST_USE bool readVarU64(uint64_t* out) {
     return readVarU<uint64_t>(out);
   }
   MOZ_MUST_USE bool readVarS64(int64_t* out) { return readVarS<int64_t>(out); }
-  MOZ_MUST_USE bool readValType(uint8_t* code, uint32_t* refTypeIndex) {
+
+  MOZ_MUST_USE ValType uncheckedReadValType() {
+    uint8_t code = uncheckedReadFixedU8();
+    switch (code) {
+      case uint8_t(ValType::Ref):
+        return ValType(ValType::Code(code), uncheckedReadVarU32());
+      default:
+        return ValType::Code(code);
+    }
+  }
+  MOZ_MUST_USE bool readValType(uint32_t numTypes, bool gcTypesEnabled,
+                                ValType* type) {
     static_assert(uint8_t(TypeCode::Limit) <= UINT8_MAX, "fits");
-    if (!readFixedU8(code)) {
+    uint8_t code;
+    if (!readFixedU8(&code)) {
       return false;
     }
-    if (*code == uint8_t(TypeCode::Ref)) {
-      if (!readVarU32(refTypeIndex)) {
-        return false;
+    switch (code) {
+      case uint8_t(ValType::I32):
+      case uint8_t(ValType::F32):
+      case uint8_t(ValType::F64):
+      case uint8_t(ValType::I64):
+        *type = ValType::Code(code);
+        return true;
+      case uint8_t(ValType::AnyRef):
+        if (!gcTypesEnabled) {
+          return fail("reference types not enabled");
+        }
+        *type = ValType::Code(code);
+        return true;
+      case uint8_t(ValType::Ref): {
+        if (!gcTypesEnabled) {
+          return fail("reference types not enabled");
+        }
+        uint32_t typeIndex;
+        if (!readVarU32(&typeIndex)) {
+          return false;
+        }
+        if (typeIndex >= numTypes) {
+          return fail("ref index out of range");
+        }
+        *type = ValType(ValType::Code(code), typeIndex);
+        return true;
       }
-      if (*refTypeIndex > MaxTypes) {
-        return false;
-      }
-    } else {
-      *refTypeIndex = NoRefTypeIndex;
+      default:
+        return fail("bad type");
+    }
+  }
+  MOZ_MUST_USE bool readValType(const TypeDefVector& types, bool gcTypesEnabled,
+                                ValType* type) {
+    if (!readValType(types.length(), gcTypesEnabled, type)) {
+      return false;
+    }
+    if (type->isRef() && !types[type->refTypeIndex()].isStructType()) {
+      return fail("ref does not reference a struct type");
     }
     return true;
   }
   MOZ_MUST_USE bool readBlockType(uint8_t* code, uint32_t* refTypeIndex) {
     static_assert(size_t(TypeCode::Limit) <= UINT8_MAX, "fits");
     if (!readFixedU8(code)) {
       return false;
     }
@@ -738,18 +779,17 @@ MOZ_MUST_USE bool EncodeLocalEntries(Enc
 // This performs no validation; the local entries must already have been
 // validated by an earlier pass.
 
 MOZ_MUST_USE bool DecodeValidatedLocalEntries(Decoder& d,
                                               ValTypeVector* locals);
 
 // This validates the entries.
 
-MOZ_MUST_USE bool DecodeLocalEntries(Decoder& d, ModuleKind kind,
-                                     const TypeDefVector& types,
+MOZ_MUST_USE bool DecodeLocalEntries(Decoder& d, const TypeDefVector& types,
                                      bool gcTypesEnabled,
                                      ValTypeVector* locals);
 
 // Returns whether the given [begin, end) prefix of a module's bytecode starts a
 // code section and, if so, returns the SectionRange of that code section.
 // Note that, even if this function returns 'false', [begin, end) may actually
 // be a valid module in the special case when there are no function defs and the
 // code section is not present. Such modules can be valid so the caller must
--- a/layout/base/nsRefreshDriver.cpp
+++ b/layout/base/nsRefreshDriver.cpp
@@ -1743,16 +1743,17 @@ void nsRefreshDriver::Tick(VsyncId aId, 
   if ((aNowTime <= mMostRecentRefresh) && !mTestControllingRefreshes) {
     return;
   }
 
   if (IsWaitingForPaint(aNowTime)) {
     // We're currently suspended waiting for earlier Tick's to
     // be completed (on the Compositor). Mark that we missed the paint
     // and keep waiting.
+    PROFILER_ADD_MARKER("nsRefreshDriver::Tick waiting for paint", LAYOUT);
     return;
   }
 
   TimeStamp previousRefresh = mMostRecentRefresh;
   mMostRecentRefresh = aNowTime;
 
   if (mRootRefresh) {
     mRootRefresh->RemoveRefreshObserver(this, FlushType::Style);
--- a/netwerk/base/nsStandardURL.cpp
+++ b/netwerk/base/nsStandardURL.cpp
@@ -64,17 +64,22 @@ bool nsStandardURL::gPunycodeHost = true
 // so we do not need to "optimize" TestForInvalidHostCharacters.
 //
 constexpr bool TestForInvalidHostCharacters(char c) {
   // Testing for these:
   // CONTROL_CHARACTERS " #/:?@[\\]*<>|\"";
   return (c > 0 && c < 32) ||  // The control characters are [1, 31]
          c == ' ' || c == '#' || c == '/' || c == ':' || c == '?' || c == '@' ||
          c == '[' || c == '\\' || c == ']' || c == '*' || c == '<' ||
+#if defined(MOZ_THUNDERBIRD) || defined(MOZ_SUITE)
+         // Mailnews %-escapes file paths into URLs.
+         c == '>' || c == '|' || c == '"';
+#else
          c == '>' || c == '|' || c == '"' || c == '%';
+#endif
 }
 constexpr ASCIIMaskArray sInvalidHostChars =
     CreateASCIIMask(TestForInvalidHostCharacters);
 
 //----------------------------------------------------------------------------
 // nsStandardURL::nsSegmentEncoder
 //----------------------------------------------------------------------------
 
--- a/netwerk/test/unit/test_standardurl.js
+++ b/netwerk/test/unit/test_standardurl.js
@@ -1,11 +1,12 @@
 "use strict";
 
-ChromeUtils.import('resource://gre/modules/Services.jsm');
+var {Services} = ChromeUtils.import("resource://gre/modules/Services.jsm");
+var {AppConstants} = ChromeUtils.import("resource://gre/modules/AppConstants.jsm");
 const gPrefs = Cc["@mozilla.org/preferences-service;1"].getService(Ci.nsIPrefBranch);
 
 function symmetricEquality(expect, a, b)
 {
   /* Use if/else instead of |do_check_eq(expect, a.spec == b.spec)| so
      that we get the specs output on the console if the check fails.
    */
   if (expect) {
@@ -303,17 +304,18 @@ add_test(function test_accentEncoding()
   Assert.equal(url.query, "hello=`");
 
   url = stringToURL("http://example.com/?hello=%2C");
   Assert.equal(url.spec, "http://example.com/?hello=%2C");
   Assert.equal(url.query, "hello=%2C");
   run_next_test();
 });
 
-add_test(function test_percentDecoding()
+add_test({ skip_if: () => AppConstants.MOZ_APP_NAME == "thunderbird" },
+         function test_percentDecoding()
 {
   var url = stringToURL("http://%70%61%73%74%65%62%69%6E.com");
   Assert.equal(url.spec, "http://pastebin.com/");
 
   // Disallowed hostname characters are rejected even when percent encoded
   Assert.throws(() => { url = stringToURL("http://example.com%0a%23.google.com/"); },
                 /NS_ERROR_MALFORMED_URI/, "invalid characters are not allowed");
   run_next_test();
@@ -693,17 +695,18 @@ add_test(function test_idna_host() {
 
   url = stringToURL("http://user:password@www.ält.com:8080/path?query#etc");
   url = url.mutate().setRef("").finalize();
   equal(url.spec, "http://user:password@www.xn--lt-uia.com:8080/path?query");
 
   run_next_test();
 });
 
-add_test(function test_bug1517025() {
+add_test({ skip_if: () => AppConstants.MOZ_APP_NAME == "thunderbird" },
+         function test_bug1517025() {
   Assert.throws(() => { let other = stringToURL("https://b%9a/"); },
                 /NS_ERROR_UNEXPECTED/, "bad URI");
 
   Assert.throws(() => { let other = stringToURL("https://b%9ª/"); },
                 /NS_ERROR_MALFORMED_URI/, "bad URI");
 
   let base = stringToURL("https://bug1517025.bmoattachments.org/attachment.cgi?id=9033787");
   Assert.throws(() => { let uri = Services.io.newURI("/\\b%9ª", "windows-1252", base); },
--- a/netwerk/test/unit/xpcshell.ini
+++ b/netwerk/test/unit/xpcshell.ini
@@ -377,17 +377,19 @@ skip-if = os == "android"
 [test_suspend_channel_on_modified.js]
 [test_inhibit_caching.js]
 [test_dns_disable_ipv4.js]
 [test_dns_disable_ipv6.js]
 [test_bug1195415.js]
 [test_cookie_blacklist.js]
 [test_getHost.js]
 [test_bug412457.js]
+skip-if = appname == "thunderbird"
 [test_bug464591.js]
+skip-if = appname == "thunderbird"
 [test_alt-data_simple.js]
 [test_alt-data_stream.js]
 [test_alt-data_too_big.js]
 [test_alt-data_overwrite.js]
 [test_cache-control_request.js]
 [test_bug1279246.js]
 [test_throttlequeue.js]
 [test_throttlechannel.js]
--- a/python/mozbuild/mozbuild/artifacts.py
+++ b/python/mozbuild/mozbuild/artifacts.py
@@ -587,16 +587,23 @@ JOB_DETAILS = {
                                    r'public/build/target\.common\.tests\.(zip|tar\.gz)')),
     'win64-pgo': (WinArtifactJob, (r'public/build/firefox-(.*)\.win64\.(zip|tar\.gz)|'
                                    r'public/build/target\.(zip|tar\.gz)',
                                    r'public/build/firefox-(.*)\.common\.tests\.(zip|tar\.gz)|'
                                    r'public/build/target\.common\.tests\.(zip|tar\.gz)')),
     'win64-debug': (WinArtifactJob, (r'public/build/firefox-(.*)\.win64\.(zip|tar\.gz)|public/build/target\.(zip|tar\.gz)',
                                      r'public/build/firefox-(.*)\.common\.tests\.(zip|tar\.gz)|'
                                      r'public/build/target\.common\.tests\.(zip|tar\.gz)')),
+    'win64-aarch64-opt': (WinArtifactJob, (r'public/build/firefox-(.*)\.win64\.(zip|tar\.gz)|'
+                                           r'public/build/target\.(zip|tar\.gz)',
+                                           r'public/build/firefox-(.*)\.common\.tests\.(zip|tar\.gz)|'
+                                           r'public/build/target\.common\.tests\.(zip|tar\.gz)')),
+    'win64-aarch64-debug': (WinArtifactJob, (r'public/build/firefox-(.*)\.win64\.(zip|tar\.gz)|public/build/target\.(zip|tar\.gz)',
+                                             r'public/build/firefox-(.*)\.common\.tests\.(zip|tar\.gz)|'
+                                             r'public/build/target\.common\.tests\.(zip|tar\.gz)')),
 }
 
 
 
 def get_job_details(job, log=None,
                     download_symbols=False,
                     download_host_bins=False,
                     substs=None):
@@ -1018,16 +1025,18 @@ class Artifacts(object):
 
         target_64bit = False
         if self._substs['target_cpu'] == 'x86_64':
             target_64bit = True
 
         if self._defines.get('XP_LINUX', False):
             return ('linux64' if target_64bit else 'linux') + target_suffix
         if self._defines.get('XP_WIN', False):
+            if self._substs['target_cpu'] == 'aarch64':
+                return 'win64-aarch64' + target_suffix
             return ('win64' if target_64bit else 'win32') + target_suffix
         if self._defines.get('XP_MACOSX', False):
             # We only produce unified builds in automation, so the target_cpu
             # check is not relevant.
             return 'macosx64' + target_suffix
         raise Exception('Cannot determine default job for |mach artifact|!')
 
     def _pushheads_from_rev(self, rev, count):
--- a/taskcluster/ci/build/windows.yml
+++ b/taskcluster/ci/build/windows.yml
@@ -1104,16 +1104,17 @@ win64-aarch64/opt:
             PERFHERDER_EXTRA_OPTIONS: aarch64
     run:
         actions: [get-secrets, build]
         options: [append-env-variables-from-configs]
         script: mozharness/scripts/fx_desktop_build.py
         config:
             - builds/releng_base_firefox.py
             - builds/taskcluster_base_windows.py
+            - builds/taskcluster_base_win64.py
         extra-config:
             stage_platform: win64-aarch64
             mozconfig_platform: win64-aarch64
     toolchains:
         - win64-clang-cl
         - win64-aarch64-rust
         - win64-cbindgen
         - win64-sccache
--- a/taskcluster/ci/test/test-sets.yml
+++ b/taskcluster/ci/test/test-sets.yml
@@ -369,17 +369,17 @@ android-common-tests:
 android-opt-tests:
     # Robocop tests often fail on Debug builds
     - robocop
 
 android-x86_64-opt-tests:
     # crashtests failing on debug; bug 1524493
     - crashtest
     # geckoview-junit perma-fail on opt and debug; bug 1521195
-    - geckoview-junit
+    # - geckoview-junit
 
 android-x86_64-tests:
     - jsreftest
     # various mochitest (plain) failures; bug 1460411
     # - mochitest
     - mochitest-clipboard
     - mochitest-gpu
     # various reftest (plain) failures; bug 1501582
--- a/testing/marionette/harness/marionette_harness/tests/unit/test_capabilities.py
+++ b/testing/marionette/harness/marionette_harness/tests/unit/test_capabilities.py
@@ -42,20 +42,24 @@ class TestCapabilities(MarionetteTestCas
             # universally available (missing from sdk 18).
             # Attempt to resolve the most common symlink cases by using
             # ls -l to determine if the root of the path (like /sdcard)
             # is a symlink.
             import posixpath
             import re
             device = self.marionette.instance.runner.device.app_ctx.device
             root = posixpath.sep.join(profile.split(posixpath.sep)[0:2])
-            ls_out = device.shell_output("ls -l %s" % root)
-            match = re.match(r'.*->\s(.*)', ls_out)
-            if match:
-                new_root = match.group(1)
+            new_root = root
+            match = True
+            while match:
+                ls_out = device.shell_output("ls -l %s" % new_root)
+                match = re.match(r'.*->\s(.*)', ls_out)
+                if match:
+                    new_root = match.group(1)
+            if new_root != root:
                 profile = profile.replace(root, new_root)
         return profile
 
     def test_mandated_capabilities(self):
         self.assertIn("browserName", self.caps)
         self.assertIn("browserVersion", self.caps)
         self.assertIn("platformName", self.caps)
         self.assertIn("platformVersion", self.caps)
--- a/testing/marionette/harness/marionette_harness/tests/unit/test_cookies.py
+++ b/testing/marionette/harness/marionette_harness/tests/unit/test_cookies.py
@@ -29,17 +29,19 @@ class CookieTest(MarionetteTestCase):
 
     def test_add_cookie(self):
         self.marionette.add_cookie(self.COOKIE_A)
         cookie_returned = str(self.marionette.execute_script("return document.cookie"))
         self.assertTrue(self.COOKIE_A["name"] in cookie_returned)
 
     def test_adding_a_cookie_that_expired_in_the_past(self):
         cookie = self.COOKIE_A.copy()
-        cookie["expiry"] = calendar.timegm(time.gmtime()) - 1
+        # for android, the browser is running on a remote system; allow for
+        # differences between local and remote clocks
+        cookie["expiry"] = calendar.timegm(time.gmtime()) - 60
         self.marionette.add_cookie(cookie)
         cookies = self.marionette.get_cookies()
         self.assertEquals(0, len(cookies))
 
     def test_chrome_error(self):
         with self.marionette.using_context("chrome"):
             self.assertRaises(UnsupportedOperationException,
                               self.marionette.add_cookie, self.COOKIE_A)
--- a/testing/marionette/harness/marionette_harness/tests/unit/test_execute_sandboxes.py
+++ b/testing/marionette/harness/marionette_harness/tests/unit/test_execute_sandboxes.py
@@ -1,17 +1,17 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import
 
 from marionette_driver.errors import JavascriptException
 
-from marionette_harness import MarionetteTestCase
+from marionette_harness import MarionetteTestCase, skip_if_mobile
 
 
 class TestExecuteSandboxes(MarionetteTestCase):
     def setUp(self):
         super(TestExecuteSandboxes, self).setUp()
 
     def test_execute_system_sandbox(self):
         # Test that "system" sandbox has elevated privileges in execute_script
@@ -36,16 +36,17 @@ class TestExecuteSandboxes(MarionetteTes
         self.marionette.execute_script("foo = 2", sandbox="2")
         foo = self.marionette.execute_script(
             "return foo", sandbox="1", new_sandbox=False)
         self.assertEqual(foo, 1)
         foo = self.marionette.execute_script(
             "return foo", sandbox="2", new_sandbox=False)
         self.assertEqual(foo, 2)
 
+    @skip_if_mobile("Intermittent on Android - bug 1526914")
     def test_execute_new_sandbox(self):
         # test that clearing a sandbox does not affect other sandboxes
         self.marionette.execute_script("foo = 1", sandbox="1")
         self.marionette.execute_script("foo = 2", sandbox="2")
 
         # deprecate sandbox 1 by asking explicitly for a fresh one
         with self.assertRaises(JavascriptException):
             self.marionette.execute_script("return foo",
--- a/toolkit/components/antitracking/AntiTrackingCommon.cpp
+++ b/toolkit/components/antitracking/AntiTrackingCommon.cpp
@@ -141,17 +141,18 @@ void CreatePermissionKey(const nsCString
   if (aTrackingOrigin == aGrantedOrigin) {
     CreatePermissionKey(aTrackingOrigin, aPermissionKey);
     return;
   }
 
   static const nsLiteralCString prefix =
       NS_LITERAL_CSTRING(ANTITRACKING_PERM_KEY "^");
 
-  aPermissionKey.SetCapacity(prefix.Length() + 1 + aTrackingOrigin.Length());
+  aPermissionKey.SetCapacity(prefix.Length() + 1 + aTrackingOrigin.Length() +
+                             aGrantedOrigin.Length());
   aPermissionKey.Append(prefix);
   aPermissionKey.Append(aTrackingOrigin);
   aPermissionKey.AppendLiteral("^");
   aPermissionKey.Append(aGrantedOrigin);
 }
 
 // This internal method returns ACCESS_DENY if the access is denied,
 // ACCESS_DEFAULT if unknown, some other access code if granted.
--- a/toolkit/content/widgets/tree.js
+++ b/toolkit/content/widgets/tree.js
@@ -205,19 +205,19 @@
         <menupopup anonid="popup">
           <menuseparator anonid="menuseparator"></menuseparator>
           <menuitem anonid="menuitem" label="&restoreColumnOrder.label;"></menuitem>
         </menupopup>
       `, ["chrome://global/locale/tree.dtd"]));
     }
 
     buildPopup(aPopup) {
-      // We no longer cache the picker content, remove the old content.
-      while (aPopup.childNodes.length > 2)
-        aPopup.firstChild.remove();
+      // We no longer cache the picker content, remove the old content related to
+      // the cols - menuitem and separator should stay.
+      aPopup.querySelectorAll("[colindex]").forEach((e) => { e.remove(); });
 
       var refChild = aPopup.firstChild;
 
       var tree = this.parentNode.parentNode;
       for (var currCol = tree.columns.getFirstColumn(); currCol; currCol = currCol.getNext()) {
         // Construct an entry for each column in the row, unless
         // it is not being shown.
         var currElement = currCol.element;
@@ -232,21 +232,17 @@
             popupChild.setAttribute("checked", "true");
           if (currCol.primary)
             popupChild.setAttribute("disabled", "true");
           aPopup.insertBefore(popupChild, refChild);
         }
       }
 
       var hidden = !tree.enableColumnDrag;
-      const anonids = ["menuseparator", "menuitem"];
-      for (var i = 0; i < anonids.length; i++) {
-        var element = this.querySelector(`[anonid=\"${anonids[i]}\"]`);
-        element.hidden = hidden;
-      }
+      aPopup.querySelectorAll(":not([colindex])").forEach((e) => { e.hidden = hidden; });
     }
   }
 
   customElements.define("treecolpicker", MozTreecolPicker);
 
   class MozTreecol extends MozElements.BaseControl {
     static get observedAttributes() {
       return [
--- a/toolkit/xre/nsAppRunner.cpp
+++ b/toolkit/xre/nsAppRunner.cpp
@@ -1523,19 +1523,23 @@ static void DumpHelp() {
 
   // this works, but only after the components have registered.  so if you drop
   // in a new command line handler, --help won't not until the second run. out
   // of the bug, because we ship a component.reg file, it works correctly.
   DumpArbitraryHelp();
 }
 
 static inline void DumpVersion() {
-  if (gAppData->vendor) printf("%s ", (const char*)gAppData->vendor);
+  if (gAppData->vendor) {
+    printf("%s ", (const char*)gAppData->vendor);
+  }
   printf("%s %s", (const char*)gAppData->name, (const char*)gAppData->version);
-  if (gAppData->copyright) printf(", %s", (const char*)gAppData->copyright);
+  if (gAppData->copyright) {
+    printf(", %s", (const char*)gAppData->copyright);
+  }
   printf("\n");
 }
 
 #if defined(MOZ_WIDGET_GTK)
 static RemoteResult ParseRemoteCommandLine(nsCString& program,
                                            const char** profile,
                                            const char** username) {
   ArgResult ar;