Merge inbound to mozilla-central r=merge a=merge
author: Noemi Erli <nerli@mozilla.com>
date: Sat, 02 Dec 2017 23:41:02 +0200
changeset 446354 de1f7a92e8726bdd365d4bbc5e65eaa369fbc20a
parent 446339 44fad51c239833e710307b286b2b40953771fb96 (current diff)
parent 446353 799ed2f23d7d72c2b444ff67848a4cf03277d1d8 (diff)
child 446356 06e34f070bf91f989261c974ad2246c734979e2d
child 446507 8877e768af74b7ff7ddc50d723f240ceebfbefe6
push id: 8527
push user: Callek@gmail.com
push date: Thu, 11 Jan 2018 21:05:50 +0000
treeherder: mozilla-beta@95342d212a7a
reviewers: merge, merge
milestone: 59.0a1
--- a/build/moz.configure/old.configure
+++ b/build/moz.configure/old.configure
@@ -186,19 +186,17 @@ def old_configure_options(*options):
     '--enable-faststripe',
     '--enable-feeds',
     '--enable-gamepad',
     '--enable-gconf',
     '--enable-gczeal',
     '--enable-hardware-aec-ns',
     '--enable-icf',
     '--enable-install-strip',
-    '--enable-ion',
     '--enable-ios-target',
-    '--enable-jitspew',
     '--enable-libjpeg-turbo',
     '--enable-libproxy',
     '--enable-llvm-hacks',
     '--enable-logrefcnt',
     '--enable-maintenance-service',
     '--enable-memory-sanitizer',
     '--enable-mobile-optimize',
     '--enable-necko-wifi',
@@ -212,30 +210,28 @@ def old_configure_options(*options):
     '--enable-posix-nspr-emulation',
     '--enable-pref-extensions',
     '--enable-pulseaudio',
     '--enable-raw',
     '--enable-readline',
     '--enable-reflow-perf',
     '--enable-sandbox',
     '--enable-signmar',
-    '--enable-simulator',
     '--enable-small-chunk-size',
     '--enable-startup-notification',
     '--enable-startupcache',
     '--enable-stdcxx-compat',
     '--enable-strip',
     '--enable-synth-pico',
     '--enable-system-cairo',
     '--enable-system-extension-dirs',
     '--enable-system-pixman',
     '--enable-system-sqlite',
     '--enable-tasktracer',
     '--enable-thread-sanitizer',
-    '--enable-trace-logging',
     '--enable-ubsan-int-overflow',
     '--enable-ui-locale',
     '--enable-universalchardet',
     '--enable-updater',
     '--enable-valgrind',
     '--enable-verify-mar',
     '--enable-xul',
     '--enable-zipwriter',
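
(Note: the four options removed above — --enable-ion, --enable-jitspew, --enable-simulator, and --enable-trace-logging — are not being dropped. They are ported from old.configure to the new python configure in js/moz.configure further down in this changeset, where they gain per-target defaults.)
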
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -957,17 +957,17 @@ MediaStreamGraphImpl::PlayAudio(MediaStr
       }
       t = end;
     }
     audioOutput.mLastTickWritten = offset;
 
     // Need unique id for stream & track - and we want it to match the inserter
     output.WriteTo(LATENCY_STREAM_ID(aStream, track->GetID()),
                                      mMixer,
-                                     CurrentDriver()->AsAudioCallbackDriver()->OutputChannelCount(),
+                                     AudioChannelCount(),
                                      mSampleRate);
   }
   return ticksWritten;
 }
 
 void
 MediaStreamGraphImpl::OpenAudioInputImpl(int aID,
                                          AudioDataListener *aListener)
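
The MediaStreamGraph change above swaps a call chain that assumed the graph's current driver is an AudioCallbackDriver for the graph-level AudioChannelCount(). A minimal sketch of the hazard, with hypothetical stand-in types (the real classes live in Gecko's GraphDriver machinery): AsAudioCallbackDriver() yields null for non-audio drivers such as a system-clock driver, so the old chain could dereference null.

```cpp
#include <cstdint>

// Hypothetical stand-ins for GraphDriver / AudioCallbackDriver.
struct AudioCallbackDriverSketch;

struct GraphDriverSketch {
  virtual ~GraphDriverSketch() = default;
  // Null unless this driver really is an audio callback driver.
  virtual AudioCallbackDriverSketch* AsAudioCallbackDriver() { return nullptr; }
};

struct AudioCallbackDriverSketch : GraphDriverSketch {
  AudioCallbackDriverSketch* AsAudioCallbackDriver() override { return this; }
  uint32_t OutputChannelCount() const { return 2; }
};

struct GraphSketch {
  GraphDriverSketch* mDriver = nullptr;  // may be a system-clock driver
  uint32_t mAudioChannelCount = 2;

  // Safe: independent of the concrete driver type.
  uint32_t AudioChannelCount() const { return mAudioChannelCount; }

  // Unsafe: null-dereferences when the current driver is not audio-backed.
  uint32_t ChannelCountViaDriver() const {
    return mDriver->AsAudioCallbackDriver()->OutputChannelCount();
  }
};
```
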
--- a/dom/media/webaudio/test/test_mediaElementAudioSourceNodeFidelity.html
+++ b/dom/media/webaudio/test/test_mediaElementAudioSourceNodeFidelity.html
@@ -89,17 +89,17 @@ var loopCount = 0;
 audioElement.onplaying = function() {
   audioElement.ontimeupdate = function() {
     // We don't run the analysis when close to loop point or at the
     // beginning, since looping is not seamless, there could be an
     // unpredictable amount of silence
     var rv = checkFrequency(an);
     info("currentTime: " + audioElement.currentTime);
     if (audioElement.currentTime < 4 ||
-        audioElement.currentTIme > 8){
+        audioElement.currentTime > 8){
       return;
     }
     if (!rv) {
       ok(false, "Found unexpected noise during analysis.");
       audioElement.ontimeupdate = null;
       audioElement.onplaying = null;
       ac.close();
       audioElement.src = '';
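
The one-character fix above matters more than it looks: in JavaScript, reading the misspelled property currentTIme simply yields undefined, and undefined > 8 evaluates to false, so the guard's upper bound never fired and the frequency analysis could run inside the unreliable silence window near the loop point. The fix restores the intended "only analyze between 4 and 8 seconds" behavior.
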
--- a/image/VectorImage.cpp
+++ b/image/VectorImage.cpp
@@ -323,16 +323,46 @@ SVGDrawingCallback::operator()(gfxContex
 
   presShell->RenderDocument(svgRect, renderDocFlags,
                             NS_RGBA(0, 0, 0, 0), // transparent
                             aContext);
 
   return true;
 }
 
+class MOZ_STACK_CLASS AutoRestoreSVGState final {
+public:
+  AutoRestoreSVGState(const SVGDrawingParameters& aParams,
+                      SVGDocumentWrapper* aSVGDocumentWrapper,
+                      bool& aIsDrawing,
+                      bool aContextPaint)
+    : mIsDrawing(aIsDrawing)
+    // Apply any 'preserveAspectRatio' override (if specified) to the root
+    // element:
+    , mPAR(aParams.svgContext, aSVGDocumentWrapper->GetRootSVGElem())
+    // Set the animation time:
+    , mTime(aSVGDocumentWrapper->GetRootSVGElem(), aParams.animationTime)
+  {
+    MOZ_ASSERT(!aIsDrawing);
+    aIsDrawing = true;
+
+    // Set context paint (if specified) on the document:
+    if (aContextPaint) {
+      mContextPaint.emplace(aParams.svgContext->GetContextPaint(),
+                            aSVGDocumentWrapper->GetDocument());
+    }
+  }
+
+private:
+  AutoRestore<bool> mIsDrawing;
+  AutoPreserveAspectRatioOverride mPAR;
+  AutoSVGTimeSetRestore mTime;
+  Maybe<AutoSetRestoreSVGContextPaint> mContextPaint;
+};
+
 // Implement VectorImage's nsISupports-inherited methods
 NS_IMPL_ISUPPORTS(VectorImage,
                   imgIContainer,
                   nsIStreamListener,
                   nsIRequestObserver)
 
 //------------------------------------------------------------------------------
 // Constructor / Destructor
@@ -736,16 +766,20 @@ VectorImage::GetFrame(uint32_t aWhichFra
   return GetFrameAtSize(imageIntSize, aWhichFrame, aFlags);
 }
 
 NS_IMETHODIMP_(already_AddRefed<SourceSurface>)
 VectorImage::GetFrameAtSize(const IntSize& aSize,
                             uint32_t aWhichFrame,
                             uint32_t aFlags)
 {
+#ifdef DEBUG
+  NotifyDrawingObservers();
+#endif
+
   auto result = GetFrameInternal(aSize, Nothing(), aWhichFrame, aFlags);
   RefPtr<SourceSurface> surf = Get<2>(result).forget();
 
   // If we are here, it suggests the image is embedded in a canvas or some
   // other path besides layers, and we won't need the file handle.
   MarkSurfaceShared(surf);
   return surf.forget();
 }
@@ -780,45 +814,42 @@ VectorImage::GetFrameInternal(const IntS
   }
 
   if (mIsDrawing) {
     NS_WARNING("Refusing to make re-entrant call to VectorImage::Draw");
     return MakeTuple(DrawResult::TEMPORARY_ERROR, aSize,
                      RefPtr<SourceSurface>());
   }
 
-  // Make our surface the size of what will ultimately be drawn to it.
-  // (either the full image size, or the restricted region)
-  RefPtr<DrawTarget> dt = gfxPlatform::GetPlatform()->
-    CreateOffscreenContentDrawTarget(aSize, SurfaceFormat::B8G8R8A8);
-  if (!dt || !dt->IsValid()) {
-    NS_ERROR("Could not create a DrawTarget");
+  // By using a null gfxContext, we ensure that we will always attempt to
+  // create a surface, even if we aren't capable of caching it (e.g. due to our
+  // flags, having an animation, etc). Otherwise CreateSurface will assume that
+  // the caller is capable of drawing directly to its own draw target if we
+  // cannot cache.
+  SVGDrawingParameters params(nullptr, aSize, ImageRegion::Create(aSize),
+                              SamplingFilter::POINT, aSVGContext,
+                              mSVGDocumentWrapper->GetCurrentTime(),
+                              aFlags, 1.0);
+
+  bool didCache; // Was the surface put into the cache?
+  bool contextPaint = aSVGContext && aSVGContext->GetContextPaint();
+
+  AutoRestoreSVGState autoRestore(params, mSVGDocumentWrapper,
+                                  mIsDrawing, contextPaint);
+
+  RefPtr<gfxDrawable> svgDrawable = CreateSVGDrawable(params);
+  RefPtr<SourceSurface> surface =
+    CreateSurface(params, svgDrawable, didCache);
+  if (!surface) {
+    MOZ_ASSERT(!didCache);
     return MakeTuple(DrawResult::TEMPORARY_ERROR, aSize,
                      RefPtr<SourceSurface>());
   }
 
-  RefPtr<gfxContext> context = gfxContext::CreateOrNull(dt);
-  MOZ_ASSERT(context); // already checked the draw target above
-
-  SVGDrawingParameters params(context, aSize, ImageRegion::Create(aSize),
-                              SamplingFilter::POINT, aSVGContext,
-                              mSVGDocumentWrapper->GetCurrentTime(),
-                              aFlags, 1.0);
-
-  // DrawInternal may return a surface which is stored in the cache. It is
-  // important to prefer this result over the snapshot because it may be a
-  // different surface type (e.g. SourceSurfaceSharedData for WebRender). If
-  // we did not put anything in the cache, we will need to fallback to the
-  // snapshot surface.
-  bool contextPaint = aSVGContext && aSVGContext->GetContextPaint();
-  RefPtr<SourceSurface> surface = DrawInternal(params, contextPaint);
-  if (!surface) {
-    surface = dt->Snapshot();
-  }
-
+  SendFrameComplete(didCache, params.flags);
   return MakeTuple(DrawResult::SUCCESS, aSize, Move(surface));
 }
 
 //******************************************************************************
 IntSize
 VectorImage::GetImageContainerSize(LayerManager* aManager,
                                    const IntSize& aSize,
                                    uint32_t aFlags)
@@ -990,52 +1021,51 @@ VectorImage::Draw(gfxContext* aContext,
 
   // else, we need to paint the image:
 
   if (mIsDrawing) {
     NS_WARNING("Refusing to make re-entrant call to VectorImage::Draw");
     return DrawResult::TEMPORARY_ERROR;
   }
 
-  RefPtr<SourceSurface> surface = DrawInternal(params, contextPaint);
+  AutoRestoreSVGState autoRestore(params, mSVGDocumentWrapper,
+                                  mIsDrawing, contextPaint);
+
+  bool didCache; // Was the surface put into the cache?
+  RefPtr<gfxDrawable> svgDrawable = CreateSVGDrawable(params);
+  sourceSurface = CreateSurface(params, svgDrawable, didCache);
+  if (!sourceSurface) {
+    MOZ_ASSERT(!didCache);
+    Show(svgDrawable, params);
+    return DrawResult::SUCCESS;
+  }
+
+  RefPtr<gfxDrawable> drawable =
+    new gfxSurfaceDrawable(sourceSurface, params.size);
+  Show(drawable, params);
+  SendFrameComplete(didCache, params.flags);
 
   // Image got put into a painted layer, it will not be shared with another
   // process.
-  MarkSurfaceShared(surface);
+  MarkSurfaceShared(sourceSurface);
   return DrawResult::SUCCESS;
 }
 
-already_AddRefed<SourceSurface>
-VectorImage::DrawInternal(const SVGDrawingParameters& aParams,
-                          bool aContextPaint)
+already_AddRefed<gfxDrawable>
+VectorImage::CreateSVGDrawable(const SVGDrawingParameters& aParams)
 {
-  MOZ_ASSERT(!mIsDrawing);
-
-  AutoRestore<bool> autoRestoreIsDrawing(mIsDrawing);
-  mIsDrawing = true;
-
-  // Apply any 'preserveAspectRatio' override (if specified) to the root
-  // element:
-  AutoPreserveAspectRatioOverride autoPAR(aParams.svgContext,
-                                          mSVGDocumentWrapper->GetRootSVGElem());
+  RefPtr<gfxDrawingCallback> cb =
+    new SVGDrawingCallback(mSVGDocumentWrapper,
+                           aParams.viewportSize,
+                           aParams.size,
+                           aParams.flags);
 
-  // Set the animation time:
-  AutoSVGTimeSetRestore autoSVGTime(mSVGDocumentWrapper->GetRootSVGElem(),
-                                    aParams.animationTime);
-
-  // Set context paint (if specified) on the document:
-  Maybe<AutoSetRestoreSVGContextPaint> autoContextPaint;
-  if (aContextPaint) {
-    autoContextPaint.emplace(aParams.svgContext->GetContextPaint(),
-                             mSVGDocumentWrapper->GetDocument());
-  }
-
-  // We didn't get a hit in the surface cache, so we'll need to rerasterize.
-  BackendType backend = aParams.context->GetDrawTarget()->GetBackendType();
-  return CreateSurfaceAndShow(aParams, backend);
+  RefPtr<gfxDrawable> svgDrawable =
+    new gfxCallbackDrawable(cb, aParams.size);
+  return svgDrawable.forget();
 }
 
 already_AddRefed<SourceSurface>
 VectorImage::LookupCachedSurface(const IntSize& aSize,
                                  const Maybe<SVGImageContext>& aSVGContext,
                                  uint32_t aFlags)
 {
   // If we're not allowed to use a cached surface, don't attempt a lookup.
@@ -1065,104 +1095,121 @@ VectorImage::LookupCachedSurface(const I
     RecoverFromLossOfSurfaces();
     return nullptr;
   }
 
   return sourceSurface.forget();
 }
 
 already_AddRefed<SourceSurface>
-VectorImage::CreateSurfaceAndShow(const SVGDrawingParameters& aParams, BackendType aBackend)
+VectorImage::CreateSurface(const SVGDrawingParameters& aParams,
+                           gfxDrawable* aSVGDrawable,
+                           bool& aWillCache)
 {
+  MOZ_ASSERT(mIsDrawing);
+
   mSVGDocumentWrapper->UpdateViewportBounds(aParams.viewportSize);
   mSVGDocumentWrapper->FlushImageTransformInvalidation();
 
-  RefPtr<gfxDrawingCallback> cb =
-    new SVGDrawingCallback(mSVGDocumentWrapper,
-                           aParams.viewportSize,
-                           aParams.size,
-                           aParams.flags);
-
-  RefPtr<gfxDrawable> svgDrawable =
-    new gfxCallbackDrawable(cb, aParams.size);
+  // Determine whether or not we should put the surface to be created into
+  // the cache. If we fail, we need to reset this to false to let the caller
+  // know nothing was put in the cache.
+  aWillCache = !(aParams.flags & FLAG_BYPASS_SURFACE_CACHE) &&
+               // Refuse to cache animated images:
+               // XXX(seth): We may remove this restriction in bug 922893.
+               !mHaveAnimations &&
+               // The image is too big to fit in the cache:
+               SurfaceCache::CanHold(aParams.size);
 
-  bool bypassCache = bool(aParams.flags & FLAG_BYPASS_SURFACE_CACHE) ||
-                     // Refuse to cache animated images:
-                     // XXX(seth): We may remove this restriction in bug 922893.
-                     mHaveAnimations ||
-                     // The image is too big to fit in the cache:
-                     !SurfaceCache::CanHold(aParams.size);
-  if (bypassCache) {
-    Show(svgDrawable, aParams);
+  // If we weren't given a context, then we know we just want the rasterized
+  // surface. We will create the frame below but only insert it into the cache
+  // if we actually need to.
+  if (!aWillCache && aParams.context) {
     return nullptr;
   }
 
   // We're about to rerasterize, which may mean that some of the previous
   // surfaces we've rasterized aren't useful anymore. We can allow them to
   // expire from the cache by unlocking them here, and then sending out an
   // invalidation. If this image is locked, any surfaces that are still useful
   // will become locked again when Draw touches them, and the remainder will
   // eventually expire.
-  SurfaceCache::UnlockEntries(ImageKey(this));
+  if (aWillCache) {
+    SurfaceCache::UnlockEntries(ImageKey(this));
+  }
+
+  // If there is no context, the default backend is fine.
+  BackendType backend =
+    aParams.context ? aParams.context->GetDrawTarget()->GetBackendType()
+                    : gfxPlatform::GetPlatform()->GetDefaultContentBackend();
 
   // Try to create an imgFrame, initializing the surface it contains by drawing
   // our gfxDrawable into it. (We use FILTER_NEAREST since we never scale here.)
   auto frame = MakeNotNull<RefPtr<imgFrame>>();
   nsresult rv =
-    frame->InitWithDrawable(svgDrawable, aParams.size,
+    frame->InitWithDrawable(aSVGDrawable, aParams.size,
                             SurfaceFormat::B8G8R8A8,
                             SamplingFilter::POINT, aParams.flags,
-                            aBackend);
+                            backend);
 
   // If we couldn't create the frame, it was probably because it would end
   // up way too big. Generally it also wouldn't fit in the cache, but the prefs
   // could be set such that the cache isn't the limiting factor.
   if (NS_FAILED(rv)) {
-    Show(svgDrawable, aParams);
+    aWillCache = false;
     return nullptr;
   }
 
   // Take a strong reference to the frame's surface and make sure it hasn't
   // already been purged by the operating system.
   RefPtr<SourceSurface> surface = frame->GetSourceSurface();
   if (!surface) {
-    Show(svgDrawable, aParams);
+    aWillCache = false;
     return nullptr;
   }
 
+  // We created the frame, but only because we had no context to draw to
+  // directly. All the caller wants is the surface in this case.
+  if (!aWillCache) {
+    return surface.forget();
+  }
+
   // Attempt to cache the frame.
   SurfaceKey surfaceKey = VectorSurfaceKey(aParams.size, aParams.svgContext);
   NotNull<RefPtr<ISurfaceProvider>> provider =
     MakeNotNull<SimpleSurfaceProvider*>(ImageKey(this), surfaceKey, frame);
   SurfaceCache::Insert(provider);
+  return surface.forget();
+}
 
-  // Draw.
-  RefPtr<gfxDrawable> drawable =
-    new gfxSurfaceDrawable(surface, aParams.size);
-  Show(drawable, aParams);
+void
+VectorImage::SendFrameComplete(bool aDidCache, uint32_t aFlags)
+{
+  // If the cache was not updated, we have nothing to do.
+  if (!aDidCache) {
+    return;
+  }
 
   // Send out an invalidation so that surfaces that are still in use get
   // re-locked. See the discussion of the UnlockSurfaces call above.
-  if (!(aParams.flags & FLAG_ASYNC_NOTIFY)) {
+  if (!(aFlags & FLAG_ASYNC_NOTIFY)) {
     mProgressTracker->SyncNotifyProgress(FLAG_FRAME_COMPLETE,
                                          GetMaxSizedIntRect());
   } else {
     NotNull<RefPtr<VectorImage>> image = WrapNotNull(this);
     NS_DispatchToMainThread(NS_NewRunnableFunction(
                               "ProgressTracker::SyncNotifyProgress",
                               [=]() -> void {
       RefPtr<ProgressTracker> tracker = image->GetProgressTracker();
       if (tracker) {
         tracker->SyncNotifyProgress(FLAG_FRAME_COMPLETE,
                                     GetMaxSizedIntRect());
       }
     }));
   }
-
-  return surface.forget();
 }
 
 
 void
 VectorImage::Show(gfxDrawable* aDrawable, const SVGDrawingParameters& aParams)
 {
   MOZ_ASSERT(aDrawable, "Should have a gfxDrawable by now");
   gfxUtils::DrawPixelSnapped(aParams.context, aDrawable,
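
The refactoring above replaces DrawInternal with three helpers (CreateSVGDrawable, CreateSurface, SendFrameComplete) and hoists the four scope guards into the AutoRestoreSVGState stack class, so every early return in GetFrameInternal and Draw unwinds mIsDrawing and the SVG document state automatically. A minimal sketch of the underlying save/restore pattern (names hypothetical; the real helper is mozilla::AutoRestore):

```cpp
// Save-on-construction, restore-on-destruction: the core of the pattern
// AutoRestoreSVGState is built from.
template <typename T>
class AutoRestoreSketch {
 public:
  explicit AutoRestoreSketch(T& aValue) : mRef(aValue), mSaved(aValue) {}
  ~AutoRestoreSketch() { mRef = mSaved; }

 private:
  T& mRef;
  T mSaved;
};

static bool sIsDrawing = false;

void DrawOnceSketch() {
  AutoRestoreSketch<bool> guard(sIsDrawing);  // restored on every exit path
  sIsDrawing = true;
  // ... create drawable, rasterize, possibly return early: the guard
  // guarantees sIsDrawing goes back to false regardless ...
}
```
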
--- a/image/VectorImage.h
+++ b/image/VectorImage.h
@@ -96,22 +96,30 @@ private:
     LookupCachedSurface(const IntSize& aSize,
                         const Maybe<SVGImageContext>& aSVGContext,
                         uint32_t aFlags);
 
   bool MaybeRestrictSVGContext(Maybe<SVGImageContext>& aNewSVGContext,
                                const Maybe<SVGImageContext>& aSVGContext,
                                uint32_t aFlags);
 
-  already_AddRefed<SourceSurface>
-    DrawInternal(const SVGDrawingParameters& aParams, bool aContextPaint);
+  /// Create a gfxDrawable which callbacks into the SVG document.
+  already_AddRefed<gfxDrawable>
+    CreateSVGDrawable(const SVGDrawingParameters& aParams);
 
+  /// Rasterize the SVG into a surface. aWillCache will be set to whether or
+  /// not the new surface was put into the cache.
   already_AddRefed<SourceSurface>
-    CreateSurfaceAndShow(const SVGDrawingParameters& aParams,
-                         gfx::BackendType aBackend);
+    CreateSurface(const SVGDrawingParameters& aParams,
+                  gfxDrawable* aSVGDrawable,
+                  bool& aWillCache);
+
+  /// Send a frame complete notification if appropriate. Must be called only
+  /// after all drawing has been completed.
+  void SendFrameComplete(bool aDidCache, uint32_t aFlags);
 
   void Show(gfxDrawable* aDrawable, const SVGDrawingParameters& aParams);
 
   nsresult Init(const char* aMimeType, uint32_t aFlags);
 
   /**
    * In catastrophic circumstances like a GPU driver crash, we may lose our
    * surfaces even if they're locked. RecoverFromLossOfSurfaces discards all
--- a/js/moz.configure
+++ b/js/moz.configure
@@ -31,16 +31,20 @@ js_option('--disable-js-shell', default=
 @depends('--disable-js-shell')
 def js_disable_shell(value):
     if not value:
         return True
 
 set_config('JS_DISABLE_SHELL', js_disable_shell)
 
 
+set_define('JS_PUNBOX64', depends(target)(lambda t: t.bitness == 64 or None))
+set_define('JS_NUNBOX32', depends(target)(lambda t: t.bitness == 32 or None))
+
+
 # SpiderMonkey as a shared library, and how its symbols are exported
 # ==================================================================
 js_option('--disable-shared-js', default=building_js,
           help='Do not create a shared library')
 
 js_option('--disable-export-js', default=building_js,
           help='Do not mark JS symbols as DLL exported/visible')
 
@@ -93,16 +97,108 @@ def disable_export_js(value):
         suggestion = '--disable-export-js'
     else:
         suggestion = '--enable-export-js'
 
     die('Setting %s is deprecated, use %s instead.',
         value.format('DISABLE_EXPORT_JS'), suggestion)
 
 
+# JIT support
+# =======================================================
+@depends(target)
+def ion_default(target):
+    if target.cpu in ('x86', 'x86_64', 'arm', 'aarch64', 'mips32', 'mips64'):
+        return True
+
+js_option('--enable-ion',
+          default=ion_default,
+          help='Enable use of the IonMonkey JIT')
+
+set_config('ENABLE_ION', depends_if('--enable-ion')(lambda x: True))
+
+# JIT code simulator for cross compiles
+# =======================================================
+js_option('--enable-simulator', choices=('arm', 'arm64', 'mips32', 'mips64'),
+          nargs=1,
+          help='Enable a JIT code simulator for the specified architecture')
+
+@depends('--enable-ion', '--enable-simulator', target)
+def simulator(ion_enabled, simulator_enabled, target):
+    if not ion_enabled or not simulator_enabled:
+        return
+
+    sim_cpu = simulator_enabled[0]
+
+    if sim_cpu in ('arm', 'mips32'):
+        if target.cpu != 'x86':
+            die('The %s simulator only works on x86.' % sim_cpu)
+
+    if sim_cpu in ('arm64', 'mips64'):
+        if target.cpu != 'x86_64':
+            die('The %s simulator only works on x86-64.' % sim_cpu)
+
+    return namespace(**{sim_cpu: True})
+
+set_config('JS_SIMULATOR', depends_if(simulator)(lambda x: True))
+set_config('JS_SIMULATOR_ARM', simulator.arm)
+set_config('JS_SIMULATOR_ARM64', simulator.arm64)
+set_config('JS_SIMULATOR_MIPS32', simulator.mips32)
+set_config('JS_SIMULATOR_MIPS64', simulator.mips64)
+set_define('JS_SIMULATOR', depends_if(simulator)(lambda x: True))
+set_define('JS_SIMULATOR_ARM', simulator.arm)
+set_define('JS_SIMULATOR_ARM64', simulator.arm64)
+set_define('JS_SIMULATOR_MIPS32', simulator.mips32)
+set_define('JS_SIMULATOR_MIPS64', simulator.mips64)
+
+@depends('--enable-ion', simulator, target)
+def jit_codegen(ion_enabled, simulator, target):
+    if not ion_enabled:
+        return namespace(none=True)
+
+    if simulator:
+        return simulator
+
+    if target.cpu == 'aarch64':
+        return namespace(arm64=True)
+    elif target.cpu == 'x86_64':
+        return namespace(x64=True)
+
+    return namespace(**{str(target.cpu): True})
+
+set_config('JS_CODEGEN_NONE', jit_codegen.none)
+set_config('JS_CODEGEN_ARM', jit_codegen.arm)
+set_config('JS_CODEGEN_ARM64', jit_codegen.arm64)
+set_config('JS_CODEGEN_MIPS32', jit_codegen.mips32)
+set_config('JS_CODEGEN_MIPS64', jit_codegen.mips64)
+set_config('JS_CODEGEN_X86', jit_codegen.x86)
+set_config('JS_CODEGEN_X64', jit_codegen.x64)
+set_define('JS_CODEGEN_NONE', jit_codegen.none)
+set_define('JS_CODEGEN_ARM', jit_codegen.arm)
+set_define('JS_CODEGEN_ARM64', jit_codegen.arm64)
+set_define('JS_CODEGEN_MIPS32', jit_codegen.mips32)
+set_define('JS_CODEGEN_MIPS64', jit_codegen.mips64)
+set_define('JS_CODEGEN_X86', jit_codegen.x86)
+set_define('JS_CODEGEN_X64', jit_codegen.x64)
+
+@depends('--enable-ion', simulator, target, moz_debug)
+def jit_disasm_arm(ion_enabled, simulator, target, debug):
+    if not ion_enabled:
+        return
+
+    if simulator:
+        if getattr(simulator, 'arm', None):
+            return True
+
+    if target.cpu == 'arm' and debug:
+        return True
+
+set_config('JS_DISASM_ARM', jit_disasm_arm)
+set_define('JS_DISASM_ARM', jit_disasm_arm)
+
 # Profiling
 # =======================================================
 js_option('--enable-instruments', env='MOZ_INSTRUMENTS',
           help='Enable instruments remote profiling')
 
 @depends('--enable-instruments', target)
 def instruments(value, target):
     if value and target.os != 'OSX':
@@ -173,27 +269,49 @@ js_option('--enable-gc-trace', env='JS_G
 @depends('--enable-gc-trace')
 def gc_trace(value):
     if value:
         return True
 
 set_define('JS_GC_TRACE', gc_trace)
 
 
+# Trace logging.
+# =======================================================
+js_option('--enable-trace-logging',
+          default=depends(when=moz_debug)(lambda: True),
+          help='Enable trace logging')
+
+set_config('ENABLE_TRACE_LOGGING',
+           depends_if('--enable-trace-logging')(lambda x: True))
+set_define('JS_TRACE_LOGGING',
+           depends_if('--enable-trace-logging')(lambda x: True))
+
+
 js_option('--enable-perf', env='JS_ION_PERF',
           help='Enable Linux perf integration')
 
 @depends('--enable-perf')
 def ion_perf(value):
     if value:
         return True
 
 set_define('JS_ION_PERF', ion_perf)
 
 
+js_option('--enable-jitspew',
+          default=depends(when=moz_debug)(lambda: True),
+          help='Enable the Jit spew and IONFLAGS environment variable.')
+
+set_define('JS_JITSPEW',
+           depends_if('--enable-jitspew')(lambda _: True))
+set_config('JS_JITSPEW',
+           depends_if('--enable-jitspew')(lambda _: True))
+
+
 js_option('--enable-more-deterministic', env='JS_MORE_DETERMINISTIC',
           help='Enable changes that make the shell more deterministic')
 
 @depends('--enable-more-deterministic')
 def more_deterministic(value):
     if value:
         return True
 
--- a/js/src/builtin/AtomicsObject.cpp
+++ b/js/src/builtin/AtomicsObject.cpp
@@ -511,173 +511,16 @@ js::atomics_isLockFree(JSContext* cx, un
             args.rval().setBoolean(false);
             return true;
         }
     }
     args.rval().setBoolean(jit::AtomicOperations::isLockfreeJS(size));
     return true;
 }
 
-// asm.js callouts for platforms that do not have non-word-sized
-// atomics where we don't want to inline the logic for the atomics.
-//
-// Memory will always be shared since the callouts are only called from
-// code that checks that the memory is shared.
-//
-// To test this, either run on eg Raspberry Pi Model 1, or invoke the ARM
-// simulator build with ARMHWCAP=vfp set.  Do not set any other flags; other
-// vfp/neon flags force ARMv7 to be set.
-
-int32_t
-js::atomics_add_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
-{
-    if (size_t(offset) >= instance->memory()->volatileMemoryLength())
-        return 0;
-
-    SharedMem<void*> heap = instance->memoryBase().cast<void*>();
-    switch (Scalar::Type(vt)) {
-      case Scalar::Int8:
-        return PerformAdd::operate(heap.cast<int8_t*>() + offset, value);
-      case Scalar::Uint8:
-        return PerformAdd::operate(heap.cast<uint8_t*>() + offset, value);
-      case Scalar::Int16:
-        return PerformAdd::operate(heap.cast<int16_t*>() + (offset >> 1), value);
-      case Scalar::Uint16:
-        return PerformAdd::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
-      default:
-        MOZ_CRASH("Invalid size");
-    }
-}
-
-int32_t
-js::atomics_sub_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
-{
-    if (size_t(offset) >= instance->memory()->volatileMemoryLength())
-        return 0;
-
-    SharedMem<void*> heap = instance->memoryBase().cast<void*>();
-    switch (Scalar::Type(vt)) {
-      case Scalar::Int8:
-        return PerformSub::operate(heap.cast<int8_t*>() + offset, value);
-      case Scalar::Uint8:
-        return PerformSub::operate(heap.cast<uint8_t*>() + offset, value);
-      case Scalar::Int16:
-        return PerformSub::operate(heap.cast<int16_t*>() + (offset >> 1), value);
-      case Scalar::Uint16:
-        return PerformSub::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
-      default:
-        MOZ_CRASH("Invalid size");
-    }
-}
-
-int32_t
-js::atomics_and_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
-{
-    if (size_t(offset) >= instance->memory()->volatileMemoryLength())
-        return 0;
-
-    SharedMem<void*> heap = instance->memoryBase().cast<void*>();
-    switch (Scalar::Type(vt)) {
-      case Scalar::Int8:
-        return PerformAnd::operate(heap.cast<int8_t*>() + offset, value);
-      case Scalar::Uint8:
-        return PerformAnd::operate(heap.cast<uint8_t*>() + offset, value);
-      case Scalar::Int16:
-        return PerformAnd::operate(heap.cast<int16_t*>() + (offset >> 1), value);
-      case Scalar::Uint16:
-        return PerformAnd::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
-      default:
-        MOZ_CRASH("Invalid size");
-    }
-}
-
-int32_t
-js::atomics_or_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
-{
-    if (size_t(offset) >= instance->memory()->volatileMemoryLength())
-        return 0;
-
-    SharedMem<void*> heap = instance->memoryBase().cast<void*>();
-    switch (Scalar::Type(vt)) {
-      case Scalar::Int8:
-        return PerformOr::operate(heap.cast<int8_t*>() + offset, value);
-      case Scalar::Uint8:
-        return PerformOr::operate(heap.cast<uint8_t*>() + offset, value);
-      case Scalar::Int16:
-        return PerformOr::operate(heap.cast<int16_t*>() + (offset >> 1), value);
-      case Scalar::Uint16:
-        return PerformOr::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
-      default:
-        MOZ_CRASH("Invalid size");
-    }
-}
-
-int32_t
-js::atomics_xor_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
-{
-    if (size_t(offset) >= instance->memory()->volatileMemoryLength())
-        return 0;
-
-    SharedMem<void*> heap = instance->memoryBase().cast<void*>();
-    switch (Scalar::Type(vt)) {
-      case Scalar::Int8:
-        return PerformXor::operate(heap.cast<int8_t*>() + offset, value);
-      case Scalar::Uint8:
-        return PerformXor::operate(heap.cast<uint8_t*>() + offset, value);
-      case Scalar::Int16:
-        return PerformXor::operate(heap.cast<int16_t*>() + (offset >> 1), value);
-      case Scalar::Uint16:
-        return PerformXor::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
-      default:
-        MOZ_CRASH("Invalid size");
-    }
-}
-
-int32_t
-js::atomics_xchg_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
-{
-    if (size_t(offset) >= instance->memory()->volatileMemoryLength())
-        return 0;
-
-    SharedMem<void*> heap = instance->memoryBase().cast<void*>();
-    switch (Scalar::Type(vt)) {
-      case Scalar::Int8:
-        return ExchangeOrStore<DoExchange>(Scalar::Int8, value, heap, offset);
-      case Scalar::Uint8:
-        return ExchangeOrStore<DoExchange>(Scalar::Uint8, value, heap, offset);
-      case Scalar::Int16:
-        return ExchangeOrStore<DoExchange>(Scalar::Int16, value, heap, offset>>1);
-      case Scalar::Uint16:
-        return ExchangeOrStore<DoExchange>(Scalar::Uint16, value, heap, offset>>1);
-      default:
-        MOZ_CRASH("Invalid size");
-    }
-}
-
-int32_t
-js::atomics_cmpxchg_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t oldval, int32_t newval)
-{
-    if (size_t(offset) >= instance->memory()->volatileMemoryLength())
-        return 0;
-
-    SharedMem<void*> heap = instance->memoryBase().cast<void*>();
-    switch (Scalar::Type(vt)) {
-      case Scalar::Int8:
-        return CompareExchange(Scalar::Int8, oldval, newval, heap, offset);
-      case Scalar::Uint8:
-        return CompareExchange(Scalar::Uint8, oldval, newval, heap, offset);
-      case Scalar::Int16:
-        return CompareExchange(Scalar::Int16, oldval, newval, heap, offset>>1);
-      case Scalar::Uint16:
-        return CompareExchange(Scalar::Uint16, oldval, newval, heap, offset>>1);
-      default:
-        MOZ_CRASH("Invalid size");
-    }
-}
-
 namespace js {
 
 // Represents one waiting worker.
 //
 // The type is declared opaque in SharedArrayObject.h.  Instances of
 // js::FutexWaiter are stack-allocated and linked onto a list across a
 // call to FutexThread::wait().
 //
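
All seven asm.js atomics callouts are deleted above because, per the Lowering-arm.cpp change later in this patch, the JIT now requires hardware sub-word exclusives, making the out-of-line path unreachable. Each removed callout followed the same shape; a hedged sketch using std::atomic in place of the PerformAdd helper, for the 16-bit case:

```cpp
#include <atomic>
#include <cstddef>
#include <cstdint>

// Sketch of the removed atomics_add_asm_callout logic: bounds-check the
// byte offset, convert it to an element index (offset >> 1 for 16-bit
// views), then do the read-modify-write, returning the previous value.
int32_t AddCalloutSketch(std::atomic<int16_t>* heap, size_t heapBytes,
                         int32_t byteOffset, int32_t value) {
  if (size_t(byteOffset) >= heapBytes)
    return 0;  // out-of-bounds accesses returned 0 in the removed code
  return heap[byteOffset >> 1].fetch_add(int16_t(value));
}
```
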
--- a/js/src/builtin/AtomicsObject.h
+++ b/js/src/builtin/AtomicsObject.h
@@ -33,26 +33,16 @@ MOZ_MUST_USE bool atomics_add(JSContext*
 MOZ_MUST_USE bool atomics_sub(JSContext* cx, unsigned argc, Value* vp);
 MOZ_MUST_USE bool atomics_and(JSContext* cx, unsigned argc, Value* vp);
 MOZ_MUST_USE bool atomics_or(JSContext* cx, unsigned argc, Value* vp);
 MOZ_MUST_USE bool atomics_xor(JSContext* cx, unsigned argc, Value* vp);
 MOZ_MUST_USE bool atomics_isLockFree(JSContext* cx, unsigned argc, Value* vp);
 MOZ_MUST_USE bool atomics_wait(JSContext* cx, unsigned argc, Value* vp);
 MOZ_MUST_USE bool atomics_wake(JSContext* cx, unsigned argc, Value* vp);
 
-/* asm.js callouts */
-namespace wasm { class Instance; }
-int32_t atomics_add_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
-int32_t atomics_sub_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
-int32_t atomics_and_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
-int32_t atomics_or_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
-int32_t atomics_xor_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
-int32_t atomics_cmpxchg_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t oldval, int32_t newval);
-int32_t atomics_xchg_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
-
 class FutexThread
 {
     friend class AutoLockFutexAPI;
 
 public:
     static MOZ_MUST_USE bool initialize();
     static void destroy();
 
--- a/js/src/jit/AtomicOperations.h
+++ b/js/src/jit/AtomicOperations.h
@@ -9,18 +9,16 @@
 
 #include "mozilla/Types.h"
 
 #include "vm/SharedMem.h"
 
 namespace js {
 namespace jit {
 
-class RegionLock;
-
 /*
  * The atomic operations layer defines types and functions for
  * JIT-compatible atomic operation.
  *
  * The fundamental constraints on the functions are:
  *
  * - That their realization here MUST be compatible with code the JIT
  *   generates for its Atomics operations, so that an atomic access
@@ -80,19 +78,16 @@ class RegionLock;
  * Trusting the compiler not to generate code that blows up on a
  * race definitely won't work in the presence of TSan, or even of
  * optimizing compilers in seemingly-"innocuous" conditions.  (See
  * https://www.usenix.org/legacy/event/hotpar11/tech/final_files/Boehm.pdf
  * for details.)
  */
 class AtomicOperations
 {
-    friend class RegionLock;
-
-  private:
     // The following functions are defined for T = int8_t, uint8_t,
     // int16_t, uint16_t, int32_t, uint32_t, int64_t, and uint64_t.
 
     // Atomically read *addr.
     template<typename T>
     static inline T loadSeqCst(T* addr);
 
     // Atomically store val in *addr.
@@ -284,44 +279,16 @@ class AtomicOperations
     tier1Constraints(const T* addr) {
         static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
         return (sizeof(T) < 8 || (hasAtomic8() && isLockfree8())) &&
                !(uintptr_t(addr) & (sizeof(T) - 1));
     }
 #endif
 };
 
-/* A data type representing a lock on some region of a SharedArrayRawBuffer's
- * memory, to be used only when the hardware does not provide necessary
- * atomicity.
- */
-class RegionLock
-{
-  public:
-    RegionLock() : spinlock(0) {}
-
-    /* Addr is the address to be locked, nbytes the number of bytes we
-     * need to lock.  The lock that is taken may cover a larger range
-     * of bytes, indeed it may cover all of memory.
-     */
-    template<size_t nbytes>
-    void acquire(void* addr);
-
-    /* Addr is the address to be unlocked, nbytes the number of bytes
-     * we need to unlock.  The lock must be held by the calling thread,
-     * at the given address and for the number of bytes.
-     */
-    template<size_t nbytes>
-    void release(void* addr);
-
-  private:
-    /* For now, a simple spinlock that covers the entire buffer. */
-    uint32_t spinlock;
-};
-
 inline bool
 AtomicOperations::isLockfreeJS(int32_t size)
 {
     // Keep this in sync with visitAtomicIsLockFree() in jit/CodeGenerator.cpp.
 
     switch (size) {
       case 1:
         return true;
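
RegionLock, removed above, was the software fallback for hardware without small atomics: a single spinlock word guarding a memory region. A sketch of its semantics using std::atomic in place of the __atomic builtins the per-architecture files used (orderings chosen to match the removed ARM implementation):

```cpp
#include <atomic>
#include <cstdint>

class RegionLockSketch {
 public:
  // Spin until we swing the word from 0 to 1.
  void acquire() {
    uint32_t zero = 0;
    while (!spinlock_.compare_exchange_weak(zero, 1,
                                            std::memory_order_acquire)) {
      zero = 0;  // compare_exchange wrote back the observed value
    }
  }

  // The removed code released with a sequentially consistent store.
  void release() { spinlock_.store(0, std::memory_order_seq_cst); }

 private:
  std::atomic<uint32_t> spinlock_{0};
};
```
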
--- a/js/src/jit/arm/AtomicOperations-arm.h
+++ b/js/src/jit/arm/AtomicOperations-arm.h
@@ -239,30 +239,9 @@ js::jit::AtomicOperations::memcpySafeWhe
 }
 
 inline void
 js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes)
 {
     memmove(dest, src, nbytes);
 }
 
-template<size_t nbytes>
-inline void
-js::jit::RegionLock::acquire(void* addr)
-{
-    uint32_t zero = 0;
-    uint32_t one = 1;
-    while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
-        zero = 0;
-        continue;
-    }
-}
-
-template<size_t nbytes>
-inline void
-js::jit::RegionLock::release(void* addr)
-{
-    MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
-    uint32_t zero = 0;
-    __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
-}
-
 #endif // jit_arm_AtomicOperations_arm_h
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -2544,83 +2544,31 @@ CodeGeneratorARM::visitWasmCompareExchan
     Register newval = ToRegister(ins->newValue());
 
     masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                         srcAddr, oldval, newval, InvalidReg,
                                         ToAnyRegister(ins->output()));
 }
 
 void
-CodeGeneratorARM::visitWasmCompareExchangeCallout(LWasmCompareExchangeCallout* ins)
-{
-    const MWasmCompareExchangeHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->access().offset() == 0);
-
-    Register ptr = ToRegister(ins->ptr());
-    Register oldval = ToRegister(ins->oldval());
-    Register newval = ToRegister(ins->newval());
-    Register tls = ToRegister(ins->tls());
-    Register instance = ToRegister(ins->getTemp(0));
-    Register viewType = ToRegister(ins->getTemp(1));
-
-    MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg);
-
-    masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
-    masm.ma_mov(Imm32(mir->access().type()), viewType);
-
-    masm.setupWasmABICall();
-    masm.passABIArg(instance);
-    masm.passABIArg(viewType);
-    masm.passABIArg(ptr);
-    masm.passABIArg(oldval);
-    masm.passABIArg(newval);
-    masm.callWithABI(mir->bytecodeOffset(), wasm::SymbolicAddress::AtomicCmpXchg);
-}
-
-void
 CodeGeneratorARM::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins)
 {
     MWasmAtomicExchangeHeap* mir = ins->mir();
 
     Scalar::Type vt = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
     Register value = ToRegister(ins->value());
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        srcAddr, value, InvalidReg, ToAnyRegister(ins->output()));
 }
 
 void
-CodeGeneratorARM::visitWasmAtomicExchangeCallout(LWasmAtomicExchangeCallout* ins)
-{
-    const MWasmAtomicExchangeHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->access().offset() == 0);
-
-    Register ptr = ToRegister(ins->ptr());
-    Register value = ToRegister(ins->value());
-    Register tls = ToRegister(ins->tls());
-    Register instance = ToRegister(ins->getTemp(0));
-    Register viewType = ToRegister(ins->getTemp(1));
-
-    MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg);
-
-    masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
-    masm.ma_mov(Imm32(mir->access().type()), viewType);
-
-    masm.setupWasmABICall();
-    masm.passABIArg(instance);
-    masm.passABIArg(viewType);
-    masm.passABIArg(ptr);
-    masm.passABIArg(value);
-    masm.callWithABI(mir->bytecodeOffset(), wasm::SymbolicAddress::AtomicXchg);
-}
-
-void
 CodeGeneratorARM::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins)
 {
     MWasmAtomicBinopHeap* mir = ins->mir();
     MOZ_ASSERT(mir->hasUses());
 
     Scalar::Type vt = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
     Register flagTemp = ToRegister(ins->flagTemp());
@@ -2658,59 +2606,16 @@ CodeGeneratorARM::visitWasmAtomicBinopHe
 
     if (value->isConstant())
         atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr, flagTemp);
     else
         atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp);
 }
 
 void
-CodeGeneratorARM::visitWasmAtomicBinopCallout(LWasmAtomicBinopCallout* ins)
-{
-    const MWasmAtomicBinopHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->access().offset() == 0);
-
-    Register ptr = ToRegister(ins->ptr());
-    Register value = ToRegister(ins->value());
-    Register tls = ToRegister(ins->tls());
-    Register instance = ToRegister(ins->getTemp(0));
-    Register viewType = ToRegister(ins->getTemp(1));
-
-    masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
-    masm.move32(Imm32(mir->access().type()), viewType);
-
-    masm.setupWasmABICall();
-    masm.passABIArg(instance);
-    masm.passABIArg(viewType);
-    masm.passABIArg(ptr);
-    masm.passABIArg(value);
-
-    wasm::BytecodeOffset bytecodeOffset = mir->bytecodeOffset();
-    switch (mir->operation()) {
-      case AtomicFetchAddOp:
-        masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::AtomicFetchAdd);
-        break;
-      case AtomicFetchSubOp:
-        masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::AtomicFetchSub);
-        break;
-      case AtomicFetchAndOp:
-        masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::AtomicFetchAnd);
-        break;
-      case AtomicFetchOrOp:
-        masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::AtomicFetchOr);
-        break;
-      case AtomicFetchXorOp:
-        masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::AtomicFetchXor);
-        break;
-      default:
-        MOZ_CRASH("Unknown op");
-    }
-}
-
-void
 CodeGeneratorARM::visitWasmStackArg(LWasmStackArg* ins)
 {
     const MWasmStackArg* mir = ins->mir();
     Address dst(StackPointer, mir->spOffset());
     ScratchRegisterScope scratch(masm);
     SecondScratchRegisterScope scratch2(masm);
 
     if (ins->arg()->isConstant()) {
--- a/js/src/jit/arm/CodeGenerator-arm.h
+++ b/js/src/jit/arm/CodeGenerator-arm.h
@@ -244,22 +244,19 @@ class CodeGeneratorARM : public CodeGene
     void visitWasmAddOffset(LWasmAddOffset* ins);
     void visitWasmStore(LWasmStore* ins);
     void visitWasmStoreI64(LWasmStoreI64* ins);
     void visitWasmUnalignedStore(LWasmUnalignedStore* ins);
     void visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* ins);
     void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
     void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
     void visitWasmCompareExchangeHeap(LWasmCompareExchangeHeap* ins);
-    void visitWasmCompareExchangeCallout(LWasmCompareExchangeCallout* ins);
     void visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins);
-    void visitWasmAtomicExchangeCallout(LWasmAtomicExchangeCallout* ins);
     void visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins);
     void visitWasmAtomicBinopHeapForEffect(LWasmAtomicBinopHeapForEffect* ins);
-    void visitWasmAtomicBinopCallout(LWasmAtomicBinopCallout* ins);
     void visitWasmStackArg(LWasmStackArg* ins);
     void visitWasmTruncateToInt32(LWasmTruncateToInt32* ins);
     void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
     void visitCopySignD(LCopySignD* ins);
     void visitCopySignF(LCopySignF* ins);
 
     void visitMemoryBarrier(LMemoryBarrier* ins);
 
--- a/js/src/jit/arm/LIR-arm.h
+++ b/js/src/jit/arm/LIR-arm.h
@@ -457,108 +457,16 @@ class LSoftUDivOrMod : public LBinaryCal
         setOperand(1, rhs);
     }
 
     MInstruction* mir() {
         return mir_->toInstruction();
     }
 };
 
-class LWasmCompareExchangeCallout : public LCallInstructionHelper<1, 4, 2>
-{
-  public:
-    LIR_HEADER(WasmCompareExchangeCallout)
-    LWasmCompareExchangeCallout(const LAllocation& ptr, const LAllocation& oldval,
-                                const LAllocation& newval, const LAllocation& tls,
-                                const LDefinition& temp1, const LDefinition& temp2)
-    {
-        setOperand(0, ptr);
-        setOperand(1, oldval);
-        setOperand(2, newval);
-        setOperand(3, tls);
-        setTemp(0, temp1);
-        setTemp(1, temp2);
-    }
-    const LAllocation* ptr() {
-        return getOperand(0);
-    }
-    const LAllocation* oldval() {
-        return getOperand(1);
-    }
-    const LAllocation* newval() {
-        return getOperand(2);
-    }
-    const LAllocation* tls() {
-        return getOperand(3);
-    }
-
-    const MWasmCompareExchangeHeap* mir() const {
-        return mir_->toWasmCompareExchangeHeap();
-    }
-};
-
-class LWasmAtomicExchangeCallout : public LCallInstructionHelper<1, 3, 2>
-{
-  public:
-    LIR_HEADER(WasmAtomicExchangeCallout)
-
-    LWasmAtomicExchangeCallout(const LAllocation& ptr, const LAllocation& value,
-                               const LAllocation& tls, const LDefinition& temp1,
-                               const LDefinition& temp2)
-    {
-        setOperand(0, ptr);
-        setOperand(1, value);
-        setOperand(2, tls);
-        setTemp(0, temp1);
-        setTemp(1, temp2);
-    }
-    const LAllocation* ptr() {
-        return getOperand(0);
-    }
-    const LAllocation* value() {
-        return getOperand(1);
-    }
-    const LAllocation* tls() {
-        return getOperand(2);
-    }
-
-    const MWasmAtomicExchangeHeap* mir() const {
-        return mir_->toWasmAtomicExchangeHeap();
-    }
-};
-
-class LWasmAtomicBinopCallout : public LCallInstructionHelper<1, 3, 2>
-{
-  public:
-    LIR_HEADER(WasmAtomicBinopCallout)
-    LWasmAtomicBinopCallout(const LAllocation& ptr, const LAllocation& value,
-                            const LAllocation& tls, const LDefinition& temp1,
-                            const LDefinition& temp2)
-    {
-        setOperand(0, ptr);
-        setOperand(1, value);
-        setOperand(2, tls);
-        setTemp(0, temp1);
-        setTemp(1, temp2);
-    }
-    const LAllocation* ptr() {
-        return getOperand(0);
-    }
-    const LAllocation* value() {
-        return getOperand(1);
-    }
-    const LAllocation* tls() {
-        return getOperand(2);
-    }
-
-    const MWasmAtomicBinopHeap* mir() const {
-        return mir_->toWasmAtomicBinopHeap();
-    }
-};
-
 class LWasmTruncateToInt64 : public LCallInstructionHelper<INT64_PIECES, 1, 0>
 {
   public:
     LIR_HEADER(WasmTruncateToInt64);
 
     LWasmTruncateToInt64(const LAllocation& in) {
         setOperand(0, in);
     }
--- a/js/src/jit/arm/LOpcodes-arm.h
+++ b/js/src/jit/arm/LOpcodes-arm.h
@@ -12,19 +12,16 @@
 #define LIR_CPU_OPCODE_LIST(_)     \
     _(BoxFloatingPoint)            \
     _(SoftDivI)                    \
     _(SoftModI)                    \
     _(ModMaskI)                    \
     _(UDiv)                        \
     _(UMod)                        \
     _(SoftUDivOrMod)               \
-    _(WasmCompareExchangeCallout)  \
-    _(WasmAtomicExchangeCallout)   \
-    _(WasmAtomicBinopCallout)      \
     _(DivOrModI64)                 \
     _(UDivOrModI64)                \
     _(WasmTruncateToInt64)         \
     _(WasmAtomicLoadI64)           \
     _(WasmAtomicStoreI64)          \
     _(WasmCompareExchangeI64)      \
     _(WasmAtomicBinopI64)          \
     _(WasmAtomicExchangeI64)       \
--- a/js/src/jit/arm/Lowering-arm.cpp
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -927,29 +927,17 @@ LIRGeneratorARM::visitWasmCompareExchang
                                                                        Register64(IntArgReg3,
                                                                                   IntArgReg2)));
         defineInt64Fixed(lir, ins, LInt64Allocation(LAllocation(AnyRegister(IntArgReg1)),
                                                     LAllocation(AnyRegister(IntArgReg0))));
         return;
     }
 
     MOZ_ASSERT(ins->access().type() < Scalar::Float32);
-
-    if (byteSize(ins->access().type()) != 4 && !HasLDSTREXBHD()) {
-        MOZ_ASSERT(ins->access().offset() == 0);
-        LWasmCompareExchangeCallout* lir =
-            new(alloc()) LWasmCompareExchangeCallout(useFixedAtStart(base, IntArgReg2),
-                                                     useFixedAtStart(ins->oldValue(), IntArgReg3),
-                                                     useFixedAtStart(ins->newValue(), CallTempReg0),
-                                                     useFixedAtStart(ins->tls(), WasmTlsReg),
-                                                     tempFixed(IntArgReg0),
-                                                     tempFixed(IntArgReg1));
-        defineReturn(lir, ins);
-        return;
-    }
+    MOZ_ASSERT(HasLDSTREXBHD(), "by HasCompilerSupport() constraints");
 
     LWasmCompareExchangeHeap* lir =
         new(alloc()) LWasmCompareExchangeHeap(useRegister(base),
                                               useRegister(ins->oldValue()),
                                               useRegister(ins->newValue()));
 
     define(lir, ins);
 }
@@ -966,27 +954,17 @@ LIRGeneratorARM::visitWasmAtomicExchange
                                                                                  IntArgReg2)),
                                                         ins->access());
         defineInt64Fixed(lir, ins, LInt64Allocation(LAllocation(AnyRegister(IntArgReg1)),
                                                     LAllocation(AnyRegister(IntArgReg0))));
         return;
     }
 
     MOZ_ASSERT(ins->access().type() < Scalar::Float32);
-
-    if (byteSize(ins->access().type()) < 4 && !HasLDSTREXBHD()) {
-        MOZ_ASSERT(ins->access().offset() == 0);
-        // Call out on ARMv6.
-        defineReturn(new(alloc()) LWasmAtomicExchangeCallout(useFixedAtStart(ins->base(), IntArgReg2),
-                                                             useFixedAtStart(ins->value(), IntArgReg3),
-                                                             useFixedAtStart(ins->tls(), WasmTlsReg),
-                                                             tempFixed(IntArgReg0),
-                                                             tempFixed(IntArgReg1)), ins);
-        return;
-    }
+    MOZ_ASSERT(HasLDSTREXBHD(), "by HasCompilerSupport() constraints");
 
     const LAllocation base = useRegister(ins->base());
     const LAllocation value = useRegister(ins->value());
     define(new(alloc()) LWasmAtomicExchangeHeap(base, value), ins);
 }
 
 void
 LIRGeneratorARM::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins)
@@ -999,32 +977,21 @@ LIRGeneratorARM::visitWasmAtomicBinopHea
                                                      ins->access(),
                                                      ins->operation());
         defineInt64Fixed(lir, ins, LInt64Allocation(LAllocation(AnyRegister(IntArgReg1)),
                                                     LAllocation(AnyRegister(IntArgReg0))));
         return;
     }
 
     MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+    MOZ_ASSERT(HasLDSTREXBHD(), "by HasCompilerSupport() constraints");
 
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
-    if (byteSize(ins->access().type()) != 4 && !HasLDSTREXBHD()) {
-        MOZ_ASSERT(ins->access().offset() == 0);
-        LWasmAtomicBinopCallout* lir =
-            new(alloc()) LWasmAtomicBinopCallout(useFixedAtStart(base, IntArgReg2),
-                                                 useFixedAtStart(ins->value(), IntArgReg3),
-                                                 useFixedAtStart(ins->tls(), WasmTlsReg),
-                                                 tempFixed(IntArgReg0),
-                                                 tempFixed(IntArgReg1));
-        defineReturn(lir, ins);
-        return;
-    }
-
     if (!ins->hasUses()) {
         LWasmAtomicBinopHeapForEffect* lir =
             new(alloc()) LWasmAtomicBinopHeapForEffect(useRegister(base),
                                                        useRegister(ins->value()),
                                                        /* flagTemp= */ temp());
         add(lir, ins);
         return;
     }
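
The MOZ_ASSERT(HasLDSTREXBHD(), "by HasCompilerSupport() constraints") assertions substituted for the callout paths above rely on an up-front capability gate: rather than lowering slow C++ callouts on CPUs lacking byte/halfword LDREX/STREX (pre-ARMv6K), the compiler-support check refuses such hardware entirely, so the lowering can assume the feature. A hypothetical sketch of that gating shape:

```cpp
// Hypothetical names; the real check sits behind HasCompilerSupport().
struct ArmFeatureSketch {
  bool ldstrexbhd;  // byte/halfword exclusive load/store (ARMv6K and later)
};

bool HasCompilerSupportSketch(const ArmFeatureSketch& cpu) {
  // Other requirements elided: the point is that lowering never runs on
  // hardware without sub-word exclusives, so the asserts above hold.
  return cpu.ldstrexbhd;
}
```
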
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -4188,32 +4188,16 @@ MacroAssemblerARMCompat::computePointer<
         return src.base;
     ma_add(src.base, Imm32(src.offset), r, scratch);
     return r;
 }
 
 } // namespace jit
 } // namespace js
 
-template<typename T>
-void
-MacroAssemblerARMCompat::compareExchange(int nbytes, bool signExtend, const T& mem,
-                                         Register oldval, Register newval, Register output)
-{
-    // If LDREXB/H and STREXB/H are not available we use the
-    // word-width operations with read-modify-add.  That does not
-    // abstract well, so fork.
-    //
-    // Bug 1077321: We may further optimize for ARMv8 (AArch32) here.
-    if (nbytes < 4 && !HasLDSTREXBHD())
-        compareExchangeARMv6(nbytes, signExtend, mem, oldval, newval, output);
-    else
-        compareExchangeARMv7(nbytes, signExtend, mem, oldval, newval, output);
-}
-
 // General algorithm:
 //
 //     ...    ptr, <addr>         ; compute address of item
 //     dmb
 // L0  ldrex* output, [ptr]
 //     sxt*   output, output, 0   ; sign-extend if applicable
 //     *xt*   tmp, oldval, 0      ; sign-extend or zero-extend if applicable
 //     cmp    output, tmp
@@ -4227,18 +4211,18 @@ MacroAssemblerARMCompat::compareExchange
 // However note that that discussion uses 'isb' as the trailing fence.
 // I've not quite figured out why, and I've gone with dmb here which
 // is safe.  Also see the LLVM source, which uses 'dmb ish' generally.
 // (Apple's Swift CPU apparently handles ish in a non-default, faster
 // way.)
 
 template<typename T>
 void
-MacroAssemblerARMCompat::compareExchangeARMv7(int nbytes, bool signExtend, const T& mem,
-                                              Register oldval, Register newval, Register output)
+MacroAssemblerARMCompat::compareExchange(int nbytes, bool signExtend, const T& mem,
+                                         Register oldval, Register newval, Register output)
 {
     Label again;
     Label done;
 
     asMasm().memoryBarrier(MembarFull);
 
     SecondScratchRegisterScope scratch2(asMasm());
     Register ptr = computePointer(mem, scratch2);
@@ -4288,56 +4272,31 @@ MacroAssemblerARMCompat::compareExchange
     }
     as_cmp(scratch, Imm8(1));
     as_b(&again, Equal);
     bind(&done);
 
     asMasm().memoryBarrier(MembarFull);
 }
 
-template<typename T>
-void
-MacroAssemblerARMCompat::compareExchangeARMv6(int nbytes, bool signExtend, const T& mem,
-                                              Register oldval, Register newval, Register output)
-{
-    // Bug 1077318: Must use read-modify-write with LDREX / STREX.
-    MOZ_ASSERT(nbytes == 1 || nbytes == 2);
-    MOZ_CRASH("NYI");
-}
-
 template void
 js::jit::MacroAssemblerARMCompat::compareExchange(int nbytes, bool signExtend,
                                                   const Address& address, Register oldval,
                                                   Register newval, Register output);
 template void
 js::jit::MacroAssemblerARMCompat::compareExchange(int nbytes, bool signExtend,
                                                   const BaseIndex& address, Register oldval,
                                                   Register newval, Register output);
 
 template<typename T>
 void
 MacroAssemblerARMCompat::atomicExchange(int nbytes, bool signExtend, const T& mem,
                                         Register value, Register output)
 {
-    // If LDREXB/H and STREXB/H are not available we use the
-    // word-width operations with read-modify-add.  That does not
-    // abstract well, so fork.
-    //
     // Bug 1077321: We may further optimize for ARMv8 (AArch32) here.
-    if (nbytes < 4 && !HasLDSTREXBHD())
-        atomicExchangeARMv6(nbytes, signExtend, mem, value, output);
-    else
-        atomicExchangeARMv7(nbytes, signExtend, mem, value, output);
-}
-
-template<typename T>
-void
-MacroAssemblerARMCompat::atomicExchangeARMv7(int nbytes, bool signExtend, const T& mem,
-                                             Register value, Register output)
-{
     Label again;
     Label done;
 
     asMasm().memoryBarrier(MembarFull);
 
     SecondScratchRegisterScope scratch2(asMasm());
     Register ptr = computePointer(mem, scratch2);
 
@@ -4367,26 +4326,16 @@ MacroAssemblerARMCompat::atomicExchangeA
     }
     as_cmp(scratch, Imm8(1));
     as_b(&again, Equal);
     bind(&done);
 
     asMasm().memoryBarrier(MembarFull);
 }
 
-template<typename T>
-void
-MacroAssemblerARMCompat::atomicExchangeARMv6(int nbytes, bool signExtend, const T& mem,
-                                             Register value, Register output)
-{
-    // Bug 1077318: Must use read-modify-write with LDREX / STREX.
-    MOZ_ASSERT(nbytes == 1 || nbytes == 2);
-    MOZ_CRASH("NYI");
-}
-
 template void
 js::jit::MacroAssemblerARMCompat::atomicExchange(int nbytes, bool signExtend,
                                                  const Address& address, Register value,
                                                  Register output);
 template void
 js::jit::MacroAssemblerARMCompat::atomicExchange(int nbytes, bool signExtend,
                                                  const BaseIndex& address, Register value,
                                                  Register output);
@@ -4424,31 +4373,16 @@ MacroAssemblerARMCompat::atomicFetchOp(i
 // output nor the bits stored are affected by OP.
 
 template<typename T>
 void
 MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op,
                                        const Register& value, const T& mem, Register flagTemp,
                                        Register output)
 {
-    // Fork for non-word operations on ARMv6.
-    //
-    // Bug 1077321: We may further optimize for ARMv8 (AArch32) here.
-    if (nbytes < 4 && !HasLDSTREXBHD())
-        atomicFetchOpARMv6(nbytes, signExtend, op, value, mem, flagTemp, output);
-    else
-        atomicFetchOpARMv7(nbytes, signExtend, op, value, mem, flagTemp, output);
-}
-
-template<typename T>
-void
-MacroAssemblerARMCompat::atomicFetchOpARMv7(int nbytes, bool signExtend, AtomicOp op,
-                                            const Register& value, const T& mem, Register flagTemp,
-                                            Register output)
-{
     MOZ_ASSERT(flagTemp != InvalidReg);
     MOZ_ASSERT(output != value);
 
     Label again;
 
     SecondScratchRegisterScope scratch2(asMasm());
     Register ptr = computePointer(mem, scratch2);
 
@@ -4503,69 +4437,32 @@ MacroAssemblerARMCompat::atomicFetchOpAR
         break;
     }
     as_cmp(flagTemp, Imm8(1));
     as_b(&again, Equal);
 
     asMasm().memoryBarrier(MembarFull);
 }
 
-template<typename T>
-void
-MacroAssemblerARMCompat::atomicFetchOpARMv6(int nbytes, bool signExtend, AtomicOp op,
-                                            const Register& value, const T& mem, Register flagTemp,
-                                            Register output)
-{
-    // Bug 1077318: Must use read-modify-write with LDREX / STREX.
-    MOZ_ASSERT(nbytes == 1 || nbytes == 2);
-    MOZ_CRASH("NYI");
-}
-template<typename T>
-void
-MacroAssemblerARMCompat::atomicEffectOp(int nbytes, AtomicOp op, const Register& value,
-                                        const T& mem, Register flagTemp)
-{
-    // Fork for non-word operations on ARMv6.
-    //
-    // Bug 1077321: We may further optimize for ARMv8 (AArch32) here.
-    if (nbytes < 4 && !HasLDSTREXBHD())
-        atomicEffectOpARMv6(nbytes, op, value, mem, flagTemp);
-    else
-        atomicEffectOpARMv7(nbytes, op, value, mem, flagTemp);
-}
-
-template<typename T>
-void
-MacroAssemblerARMCompat::atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value,
-                                        const T& mem, Register flagTemp)
-{
-    // The Imm32 case is not needed yet because lowering always forces
-    // the value into a register at present (bug 1077317).
-    //
-    // This would be useful for immediates small enough to fit into
-    // add/sub/and/or/xor.
-    MOZ_CRASH("NYI");
-}
-
 // Uses both scratch registers, one for the address and one for a temp,
 // but needs two temps for strex:
 //
 //     ...    ptr, <addr>         ; compute address of item
 //     dmb
 // L0  ldrex* temp, [ptr]
 //     OP     temp, temp, value   ; compute value to store
 //     strex* temp2, temp, [ptr]
 //     cmp    temp2, 1
 //     beq    L0                  ; failed - location is dirty, retry
 //     dmb                        ; ordering barrier required
 
 template<typename T>
 void
-MacroAssemblerARMCompat::atomicEffectOpARMv7(int nbytes, AtomicOp op, const Register& value,
-                                             const T& mem, Register flagTemp)
+MacroAssemblerARMCompat::atomicEffectOp(int nbytes, AtomicOp op, const Register& value,
+                                        const T& mem, Register flagTemp)
 {
     MOZ_ASSERT(flagTemp != InvalidReg);
 
     Label again;
 
     SecondScratchRegisterScope scratch2(asMasm());
     Register ptr = computePointer(mem, scratch2);
 
@@ -4617,21 +4514,24 @@ MacroAssemblerARMCompat::atomicEffectOpA
     as_cmp(flagTemp, Imm8(1));
     as_b(&again, Equal);
 
     asMasm().memoryBarrier(MembarFull);
 }
 
 template<typename T>
 void
-MacroAssemblerARMCompat::atomicEffectOpARMv6(int nbytes, AtomicOp op, const Register& value,
-                                             const T& mem, Register flagTemp)
-{
-    // Bug 1077318: Must use read-modify-write with LDREX / STREX.
-    MOZ_ASSERT(nbytes == 1 || nbytes == 2);
+MacroAssemblerARMCompat::atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value,
+                                        const T& mem, Register flagTemp)
+{
+    // The Imm32 case is not needed yet because lowering always forces
+    // the value into a register at present (bug 1077317).
+    //
+    // This would be useful for immediates small enough to fit into
+    // add/sub/and/or/xor.
     MOZ_CRASH("NYI");
 }
 
 template void
 js::jit::MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op,
                                                 const Imm32& value, const Address& mem,
                                                 Register flagTemp, Register output);
 template void
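
Before the header changes below, a note on the loops consolidated above: each is a load-linked/store-conditional (ldrex/strex) retry loop bracketed by dmb barriers, now emitted unconditionally since the ARMv6 forks are gone. A minimal sketch of the same semantics, assuming a GCC-compatible compiler (the builtin expands to essentially this loop on ARM):

    #include <stdint.h>

    // Hedged sketch of what the ldrex/strex compareExchange loop computes:
    // store newval iff *addr == oldval, and return the value observed at addr.
    uint32_t compareExchangeSeqCst(uint32_t* addr, uint32_t oldval, uint32_t newval)
    {
        uint32_t expected = oldval;
        __atomic_compare_exchange_n(addr, &expected, newval, /* weak= */ false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return expected;   // plays the role of the 'output' register above
    }
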
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -1107,64 +1107,32 @@ class MacroAssemblerARMCompat : public M
         ma_vmov(src, dest, cc);
     }
 
   private:
     template<typename T>
     Register computePointer(const T& src, Register r);
 
     template<typename T>
-    void compareExchangeARMv6(int nbytes, bool signExtend, const T& mem, Register oldval,
-                              Register newval, Register output);
-
-    template<typename T>
-    void compareExchangeARMv7(int nbytes, bool signExtend, const T& mem, Register oldval,
-                              Register newval, Register output);
-
-    template<typename T>
     void compareExchange(int nbytes, bool signExtend, const T& address, Register oldval,
                          Register newval, Register output);
 
     template<typename T>
-    void atomicExchangeARMv6(int nbytes, bool signExtend, const T& mem, Register value,
-                             Register output);
-
-    template<typename T>
-    void atomicExchangeARMv7(int nbytes, bool signExtend, const T& mem, Register value,
-                             Register output);
-
-    template<typename T>
     void atomicExchange(int nbytes, bool signExtend, const T& address, Register value,
                         Register output);
 
     template<typename T>
-    void atomicFetchOpARMv6(int nbytes, bool signExtend, AtomicOp op, const Register& value,
-                            const T& mem, Register flagTemp, Register output);
-
-    template<typename T>
-    void atomicFetchOpARMv7(int nbytes, bool signExtend, AtomicOp op, const Register& value,
-                            const T& mem, Register flagTemp, Register output);
-
-    template<typename T>
     void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
                        const T& address, Register flagTemp, Register output);
 
     template<typename T>
     void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
                        const T& address, Register flagTemp, Register output);
 
     template<typename T>
-    void atomicEffectOpARMv6(int nbytes, AtomicOp op, const Register& value, const T& address,
-                             Register flagTemp);
-
-    template<typename T>
-    void atomicEffectOpARMv7(int nbytes, AtomicOp op, const Register& value, const T& address,
-                             Register flagTemp);
-
-    template<typename T>
     void atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value, const T& address,
                              Register flagTemp);
 
     template<typename T>
     void atomicEffectOp(int nbytes, AtomicOp op, const Register& value, const T& address,
                              Register flagTemp);
 
     template<typename T>
--- a/js/src/jit/arm/Simulator-arm.h
+++ b/js/src/jit/arm/Simulator-arm.h
@@ -107,21 +107,23 @@ class Simulator
     static Simulator* Create(JSContext* cx);
 
     static void Destroy(Simulator* simulator);
 
     // Constructor/destructor are for internal use only; use the static methods above.
     explicit Simulator(JSContext* cx);
     ~Simulator();
 
+    static bool supportsAtomics() { return HasLDSTREXBHD(); }
+
     // The currently executing Simulator instance. Potentially there can be one
     // for each native thread.
     static Simulator* Current();
 
-    static inline uintptr_t StackLimit() {
+    static uintptr_t StackLimit() {
         return Simulator::Current()->stackLimit();
     }
 
     // Disassemble some instructions starting at instr and print them
     // on stdout.  Useful for working within GDB after a MOZ_CRASH(),
     // among other things.
     //
     // Typical use within a crashed instruction decoding method is simply:
--- a/js/src/jit/arm64/AtomicOperations-arm64.h
+++ b/js/src/jit/arm64/AtomicOperations-arm64.h
@@ -171,32 +171,9 @@ js::jit::AtomicOperations::memcpySafeWhe
 
 inline void
 js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src,
                                                size_t nbytes)
 {
     memmove(dest, src, nbytes);
 }
 
-template<size_t nbytes>
-inline void
-js::jit::RegionLock::acquire(void* addr)
-{
-    uint32_t zero = 0;
-    uint32_t one = 1;
-    while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE,
-                                      __ATOMIC_ACQUIRE))
-    {
-        zero = 0;
-        continue;
-    }
-}
-
-template<size_t nbytes>
-inline void
-js::jit::RegionLock::release(void* addr)
-{
-    MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
-    uint32_t zero = 0;
-    __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
-}
-
 #endif // jit_arm64_AtomicOperations_arm64_h
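
The RegionLock removal above repeats in every per-platform AtomicOperations header below (mips-shared, feeling-lucky, x86-shared gcc and msvc). For reference, the deleted bodies all implemented a plain test-and-set spinlock; an equivalent portable sketch with std::atomic in place of the per-compiler intrinsics (SpinLock is an illustrative name, not the real class):

    #include <atomic>
    #include <cstdint>

    // Hedged sketch of the deleted RegionLock protocol: CAS 0 -> 1 with
    // acquire ordering until it succeeds, then store 0 to release.
    struct SpinLock {
        std::atomic<uint32_t> word{0};
        void acquire() {
            uint32_t zero = 0;
            while (!word.compare_exchange_weak(zero, 1, std::memory_order_acquire))
                zero = 0;   // CAS failure overwrites 'zero'; reset and retry
        }
        void release() {
            word.store(0, std::memory_order_seq_cst);
        }
    };
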
--- a/js/src/jit/arm64/vixl/Simulator-vixl.h
+++ b/js/src/jit/arm64/vixl/Simulator-vixl.h
@@ -711,19 +711,22 @@ class Simulator : public DecoderVisitor 
   bool overRecursedWithExtra(uint32_t extra) const;
   int64_t call(uint8_t* entry, int argument_count, ...);
   static void* RedirectNativeFunction(void* nativeFunction, js::jit::ABIFunctionType type);
   void setGPR32Result(int32_t result);
   void setGPR64Result(int64_t result);
   void setFP32Result(float result);
   void setFP64Result(double result);
   void VisitCallRedirection(const Instruction* instr);
-  static inline uintptr_t StackLimit() {
+  static uintptr_t StackLimit() {
     return Simulator::Current()->stackLimit();
   }
+  static bool supportsAtomics() {
+    return true;
+  }
 
   void ResetState();
 
   // Run the simulator.
   virtual void Run();
   void RunFrom(const Instruction* first);
 
   // Simulation helpers.
--- a/js/src/jit/mips-shared/AtomicOperations-mips-shared.h
+++ b/js/src/jit/mips-shared/AtomicOperations-mips-shared.h
@@ -217,31 +217,9 @@ inline T
 js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
 {
     static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
     T v;
     __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
     return v;
 }
 
-template<size_t nbytes>
-inline void
-js::jit::RegionLock::acquire(void* addr)
-{
-    uint32_t zero = 0;
-    uint32_t one = 1;
-    while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
-        zero = 0;
-        continue;
-    }
-}
-
-template<size_t nbytes>
-inline void
-js::jit::RegionLock::release(void* addr)
-{
-    MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
-    uint32_t zero = 0;
-    __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
-}
-
-
 #endif // jit_mips_shared_AtomicOperations_mips_shared_h
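
As a usage note for the exchangeSeqCst kept in context above (a sequentially consistent swap that returns the prior value), a hedged sketch, assuming the surrounding jit headers are available; testAndSet is an illustrative name:

    #include <stdint.h>

    // Hedged sketch: exchangeSeqCst as a one-shot latch. If *flag starts at 0,
    // exactly one caller observes 0; every later caller sees a prior store.
    uint32_t testAndSet(uint32_t* flag)
    {
        return js::jit::AtomicOperations::exchangeSeqCst(flag, uint32_t(1));
    }
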
--- a/js/src/jit/none/AtomicOperations-feeling-lucky.h
+++ b/js/src/jit/none/AtomicOperations-feeling-lucky.h
@@ -444,46 +444,16 @@ template<>
 inline uint64_t
 AtomicOperations::exchangeSeqCst(uint64_t* addr, uint64_t val) {
     MOZ_CRASH("No 64-bit atomics");
 }
 
 } }
 #endif
 
-template<size_t nbytes>
-inline void
-js::jit::RegionLock::acquire(void* addr)
-{
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
-    while (!__sync_bool_compare_and_swap(&spinlock, 0, 1))
-        ;
-# else
-    uint32_t zero = 0;
-    uint32_t one = 1;
-    while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
-        zero = 0;
-        continue;
-    }
-# endif
-}
-
-template<size_t nbytes>
-inline void
-js::jit::RegionLock::release(void* addr)
-{
-    MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
-# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
-    __sync_sub_and_fetch(&spinlock, 1);
-# else
-    uint32_t zero = 0;
-    __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
-# endif
-}
-
 #elif defined(ENABLE_SHARED_ARRAY_BUFFER)
 
 # error "Either disable JS shared memory, use GCC or Clang, or add code here"
 
 #endif
 
 #undef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
 #undef GNUC_COMPATIBLE
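
The deleted feeling-lucky block switched on ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS between two GCC intrinsic families: the legacy full-barrier __sync builtins and the newer __atomic builtins with explicit memory orders. A side-by-side sketch of the same 0 -> 1 compare-and-swap in both spellings:

    #include <stdint.h>

    // Hedged sketch: legacy family, implies a full barrier.
    bool casLegacy(uint32_t* p)
    {
        return __sync_bool_compare_and_swap(p, 0u, 1u);
    }

    // Hedged sketch: modern family, ordering is explicit per operation.
    bool casModern(uint32_t* p)
    {
        uint32_t expected = 0;
        return __atomic_compare_exchange_n(p, &expected, 1u, /* weak= */ false,
                                           __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
    }
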
--- a/js/src/jit/x86-shared/AtomicOperations-x86-shared-gcc.h
+++ b/js/src/jit/x86-shared/AtomicOperations-x86-shared-gcc.h
@@ -255,31 +255,9 @@ js::jit::AtomicOperations::memcpySafeWhe
 }
 
 inline void
 js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes)
 {
     ::memmove(dest, src, nbytes);
 }
 
-template<size_t nbytes>
-inline void
-js::jit::RegionLock::acquire(void* addr)
-{
-    uint32_t zero = 0;
-    uint32_t one = 1;
-    while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE,
-                                      __ATOMIC_ACQUIRE))
-    {
-        zero = 0;
-    }
-}
-
-template<size_t nbytes>
-inline void
-js::jit::RegionLock::release(void* addr)
-{
-    MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
-    uint32_t zero = 0;
-    __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
-}
-
 #endif // jit_shared_AtomicOperations_x86_shared_gcc_h
--- a/js/src/jit/x86-shared/AtomicOperations-x86-shared-msvc.h
+++ b/js/src/jit/x86-shared/AtomicOperations-x86-shared-msvc.h
@@ -364,25 +364,9 @@ js::jit::AtomicOperations::memcpySafeWhe
 }
 
 inline void
 js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes)
 {
     ::memmove(dest, src, nbytes);
 }
 
-template<size_t nbytes>
-inline void
-js::jit::RegionLock::acquire(void* addr)
-{
-    while (_InterlockedCompareExchange((long*)&spinlock, /*newval=*/1, /*oldval=*/0) == 1)
-        continue;
-}
-
-template<size_t nbytes>
-inline void
-js::jit::RegionLock::release(void* addr)
-{
-    MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
-    _InterlockedExchange((long*)&spinlock, 0);
-}
-
 #endif // jit_shared_AtomicOperations_x86_shared_msvc_h
--- a/js/src/old-configure.in
+++ b/js/src/old-configure.in
@@ -887,36 +887,16 @@ dnl Only one oddball right now (QNX), bu
 dnl if any other platforms need to override this in the future.
 AC_DEFINE_UNQUOTED(D_INO,$DIRENT_INO)
 
 if test -z "$COMPILE_ENVIRONMENT"; then
     SKIP_COMPILER_CHECKS=1
     SKIP_LIBRARY_CHECKS=1
 fi
 
-dnl Configure JIT support
-
-case "$CPU_ARCH" in
-x86|x86_64|arm|aarch64|mips*)
-    ENABLE_ION=1
-    ;;
-esac
-
-if test "$HAVE_64BIT_BUILD" ; then
-    AC_DEFINE(JS_PUNBOX64)
-else
-    AC_DEFINE(JS_NUNBOX32)
-fi
-
-MOZ_ARG_DISABLE_BOOL(ion,
-[  --disable-ion      Disable use of the IonMonkey JIT],
-  ENABLE_ION= )
-
-AC_SUBST(ENABLE_ION)
-
 if test -n "$COMPILE_ENVIRONMENT"; then
     MOZ_COMPILER_OPTS
 fi
 
 if test -z "$SKIP_COMPILER_CHECKS"; then
 dnl Checks for typedefs, structures, and compiler characteristics.
 dnl ========================================================
 AC_C_CONST
@@ -1487,34 +1467,16 @@ fi
 fi # COMPILE_ENVIRONMENT
 
 AC_SUBST_LIST(MOZ_FRAMEPTR_FLAGS)
 AC_SUBST_LIST(MOZ_OPTIMIZE_FLAGS)
 AC_SUBST_LIST(MOZ_OPTIMIZE_LDFLAGS)
 AC_SUBST_LIST(MOZ_PGO_OPTIMIZE_FLAGS)
 
 dnl ========================================================
-dnl = Trace logging. Enabled by default in DEBUG builds.
-dnl ========================================================
-MOZ_ARG_ENABLE_BOOL(trace-logging,
-[  --enable-trace-logging   Enable trace logging],
-    ENABLE_TRACE_LOGGING=1,
-    ENABLE_TRACE_LOGGING= )
-
-if test -n "$MOZ_DEBUG"; then
-    ENABLE_TRACE_LOGGING=1
-fi
-
-AC_SUBST(ENABLE_TRACE_LOGGING)
-
-if test -n "$ENABLE_TRACE_LOGGING"; then
-    AC_DEFINE(JS_TRACE_LOGGING)
-fi
-
-dnl ========================================================
 dnl = Disable treating compiler warnings as errors
 dnl ========================================================
 if test -z "$MOZ_ENABLE_WARNINGS_AS_ERRORS"; then
    WARNINGS_AS_ERRORS=''
 fi
 
 dnl ========================================================
 dnl = Enable jemalloc
@@ -1573,138 +1535,23 @@ if test -n "$MOZ_VALGRIND"; then
         AC_MSG_ERROR(
             [--enable-valgrind specified but Valgrind is not installed]))
     AC_DEFINE(MOZ_VALGRIND)
     MOZ_VALGRIND=1
 fi
 AC_SUBST(MOZ_VALGRIND)
 
 dnl ========================================================
-dnl = Use a JIT code simulator for a foreign architecture.
-dnl ========================================================
-MOZ_ARG_ENABLE_STRING(simulator,
-[  --enable-simulator=ARCH
-                          Enable a JIT code simulator for the specified arch.
-                          (arm, arm64, mips32, mips64).],
-    JS_SIMULATOR="$enableval")
-
-if test -n "$JS_SIMULATOR"; then
-    case "$JS_SIMULATOR" in
-        arm|arm64|mips32|mips64) ;;
-        no)
-            JS_SIMULATOR=
-            ;;
-        *) AC_MSG_ERROR([Invalid simulator. Valid simulators are: arm, arm64, mips32, mips64.]) ;;
-    esac
-fi
-
-if test -z "$ENABLE_ION"; then
-    AC_DEFINE(JS_CODEGEN_NONE)
-    JS_CODEGEN_NONE=1
-elif test "$JS_SIMULATOR" = arm; then
-    if test "$CPU_ARCH" != "x86"; then
-        AC_MSG_ERROR([The ARM simulator only works on x86.])
-    fi
-    AC_DEFINE(JS_SIMULATOR)
-    AC_DEFINE(JS_SIMULATOR_ARM)
-    AC_DEFINE(JS_DISASM_ARM)
-    AC_DEFINE(JS_CODEGEN_ARM)
-    JS_SIMULATOR_ARM=1
-    JS_DISASM_ARM=1
-    JS_CODEGEN_ARM=1
-elif test "$JS_SIMULATOR" = arm64; then
-    if test "$CPU_ARCH" != "x86_64"; then
-        AC_MSG_ERROR([The ARM64 simulator only works on x86_64.])
-    fi
-    AC_DEFINE(JS_SIMULATOR)
-    AC_DEFINE(JS_SIMULATOR_ARM64)
-    AC_DEFINE(JS_CODEGEN_ARM64)
-    JS_SIMULATOR_ARM64=1
-    JS_CODEGEN_ARM64=1
-elif test "$JS_SIMULATOR" = mips32; then
-    if test "$CPU_ARCH" != "x86"; then
-        AC_MSG_ERROR([The MIPS32 simulator only works on x86.])
-    fi
-    AC_DEFINE(JS_SIMULATOR)
-    AC_DEFINE(JS_SIMULATOR_MIPS32)
-    AC_DEFINE(JS_CODEGEN_MIPS32)
-    JS_SIMULATOR_MIPS32=1
-    JS_CODEGEN_MIPS32=1
-elif test "$JS_SIMULATOR" = mips64; then
-    if test "$CPU_ARCH" != "x86_64"; then
-        AC_MSG_ERROR([The MIPS64 simulator only works on x86_64.])
-    fi
-    AC_DEFINE(JS_SIMULATOR)
-    AC_DEFINE(JS_SIMULATOR_MIPS64)
-    AC_DEFINE(JS_CODEGEN_MIPS64)
-    JS_SIMULATOR_MIPS64=1
-    JS_CODEGEN_MIPS64=1
-elif test "$CPU_ARCH" = "x86"; then
-    AC_DEFINE(JS_CODEGEN_X86)
-    JS_CODEGEN_X86=1
-elif test "$CPU_ARCH" = "x86_64"; then
-    AC_DEFINE(JS_CODEGEN_X64)
-    JS_CODEGEN_X64=1
-elif test "$CPU_ARCH" = "arm"; then
-    AC_DEFINE(JS_CODEGEN_ARM)
-    JS_CODEGEN_ARM=1
-    if test -n "$MOZ_DEBUG"; then
-        AC_DEFINE(JS_DISASM_ARM)
-        JS_DISASM_ARM=1
-    fi
-
-    dnl ARM platforms may trap on unaligned accesses; catch the signal and
-    dnl recover.
-elif test "$CPU_ARCH" = "aarch64"; then
-    AC_DEFINE(JS_CODEGEN_ARM64)
-    JS_CODEGEN_ARM64=1
-elif test "$CPU_ARCH" = "mips32"; then
-    AC_DEFINE(JS_CODEGEN_MIPS32)
-    JS_CODEGEN_MIPS32=1
-elif test "$CPU_ARCH" = "mips64"; then
-    AC_DEFINE(JS_CODEGEN_MIPS64)
-    JS_CODEGEN_MIPS64=1
-fi
-
-AC_SUBST(JS_SIMULATOR)
-AC_SUBST(JS_SIMULATOR_ARM)
-AC_SUBST(JS_SIMULATOR_ARM64)
-AC_SUBST(JS_SIMULATOR_MIPS32)
-AC_SUBST(JS_SIMULATOR_MIPS64)
-AC_SUBST(JS_CODEGEN_ARM)
-AC_SUBST(JS_CODEGEN_ARM64)
-AC_SUBST(JS_CODEGEN_MIPS32)
-AC_SUBST(JS_CODEGEN_MIPS64)
-AC_SUBST(JS_CODEGEN_X86)
-AC_SUBST(JS_CODEGEN_X64)
-AC_SUBST(JS_CODEGEN_NONE)
-AC_SUBST(JS_DISASM_ARM)
-
-dnl ========================================================
 dnl instruments
 dnl ========================================================
 if test -n "$MOZ_INSTRUMENTS"; then
     LIBS="$LIBS -framework CoreFoundation"
 fi
 
 dnl ========================================================
-dnl JitSpew.  Enabled by default in debug builds.
-dnl ========================================================
-MOZ_ARG_ENABLE_BOOL(jitspew,
-[  --enable-jitspew        Enable the Jit spew and IONFLAGS.],
-    JS_JITSPEW=1,
-    JS_JITSPEW= )
-
-if test -n "$JS_JITSPEW" -o -n "$MOZ_DEBUG"; then
-    AC_DEFINE(JS_JITSPEW)
-fi
-
-AC_SUBST(JS_JITSPEW)
-
-dnl ========================================================
 dnl Debug (see Bug 939505)
 dnl ========================================================
 if test -n "$MOZ_DEBUG"; then
     AC_DEFINE(JS_DEBUG)
 fi
 
 dnl ========================================================
 dnl Zealous JavaScript GC
--- a/js/src/wasm/WasmBuiltins.cpp
+++ b/js/src/wasm/WasmBuiltins.cpp
@@ -501,37 +501,16 @@ AddressOf(SymbolicAddress imm, ABIFuncti
         return FuncCast(Int64ToFloat32, *abiType);
 #if defined(JS_CODEGEN_ARM)
       case SymbolicAddress::aeabi_idivmod:
         *abiType = Args_General2;
         return FuncCast(__aeabi_idivmod, *abiType);
       case SymbolicAddress::aeabi_uidivmod:
         *abiType = Args_General2;
         return FuncCast(__aeabi_uidivmod, *abiType);
-      case SymbolicAddress::AtomicCmpXchg:
-        *abiType = Args_General5;
-        return FuncCast(atomics_cmpxchg_asm_callout, *abiType);
-      case SymbolicAddress::AtomicXchg:
-        *abiType = Args_General4;
-        return FuncCast(atomics_xchg_asm_callout, *abiType);
-      case SymbolicAddress::AtomicFetchAdd:
-        *abiType = Args_General4;
-        return FuncCast(atomics_add_asm_callout, *abiType);
-      case SymbolicAddress::AtomicFetchSub:
-        *abiType = Args_General4;
-        return FuncCast(atomics_sub_asm_callout, *abiType);
-      case SymbolicAddress::AtomicFetchAnd:
-        *abiType = Args_General4;
-        return FuncCast(atomics_and_asm_callout, *abiType);
-      case SymbolicAddress::AtomicFetchOr:
-        *abiType = Args_General4;
-        return FuncCast(atomics_or_asm_callout, *abiType);
-      case SymbolicAddress::AtomicFetchXor:
-        *abiType = Args_General4;
-        return FuncCast(atomics_xor_asm_callout, *abiType);
 #endif
       case SymbolicAddress::ModD:
         *abiType = Args_Double_DoubleDouble;
         return FuncCast(NumberMod, *abiType);
       case SymbolicAddress::SinD:
         *abiType = Args_Double_Double;
         return FuncCast<double (double)>(sin, *abiType);
       case SymbolicAddress::CosD:
@@ -635,23 +614,16 @@ wasm::NeedsBuiltinThunk(SymbolicAddress 
       case SymbolicAddress::TruncateDoubleToInt64:
       case SymbolicAddress::Uint64ToDouble:
       case SymbolicAddress::Uint64ToFloat32:
       case SymbolicAddress::Int64ToDouble:
       case SymbolicAddress::Int64ToFloat32:
 #if defined(JS_CODEGEN_ARM)
       case SymbolicAddress::aeabi_idivmod:
       case SymbolicAddress::aeabi_uidivmod:
-      case SymbolicAddress::AtomicCmpXchg:
-      case SymbolicAddress::AtomicXchg:
-      case SymbolicAddress::AtomicFetchAdd:
-      case SymbolicAddress::AtomicFetchSub:
-      case SymbolicAddress::AtomicFetchAnd:
-      case SymbolicAddress::AtomicFetchOr:
-      case SymbolicAddress::AtomicFetchXor:
 #endif
       case SymbolicAddress::ModD:
       case SymbolicAddress::SinD:
       case SymbolicAddress::CosD:
       case SymbolicAddress::TanD:
       case SymbolicAddress::ASinD:
       case SymbolicAddress::ACosD:
       case SymbolicAddress::ATanD:
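
The AddressOf hunk above shrinks a switch that resolves each SymbolicAddress to a native entry point and reports the callee's ABI through *abiType, using FuncCast to select the right overload. A minimal standalone sketch of that dispatch shape; Builtin, UnaryDoubleFn, and addressOf are hypothetical stand-ins for the real wasm types:

    #include <cmath>

    enum class Builtin { SinD, CosD };
    using UnaryDoubleFn = double (*)(double);

    // Hedged sketch of the AddressOf pattern: the static_cast picks the
    // double(double) overload the same way FuncCast<double (double)>(sin, ...)
    // does in the hunk above.
    UnaryDoubleFn addressOf(Builtin b, int* abiType)
    {
        *abiType = 0;   // stand-in for Args_Double_Double
        switch (b) {
          case Builtin::SinD: return static_cast<UnaryDoubleFn>(std::sin);
          case Builtin::CosD: return static_cast<UnaryDoubleFn>(std::cos);
        }
        return nullptr;
    }
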
--- a/js/src/wasm/WasmFrameIter.cpp
+++ b/js/src/wasm/WasmFrameIter.cpp
@@ -978,30 +978,16 @@ ThunkedNativeToDescription(SymbolicAddre
         return "call to native f64.convert_s/i64 (in wasm)";
       case SymbolicAddress::Int64ToFloat32:
         return "call to native f32.convert_s/i64 (in wasm)";
 #if defined(JS_CODEGEN_ARM)
       case SymbolicAddress::aeabi_idivmod:
         return "call to native i32.div_s (in wasm)";
       case SymbolicAddress::aeabi_uidivmod:
         return "call to native i32.div_u (in wasm)";
-      case SymbolicAddress::AtomicCmpXchg:
-        return "call to native atomic compare exchange (in wasm)";
-      case SymbolicAddress::AtomicXchg:
-        return "call to native atomic exchange (in wasm)";
-      case SymbolicAddress::AtomicFetchAdd:
-        return "call to native atomic fetch add (in wasm)";
-      case SymbolicAddress::AtomicFetchSub:
-        return "call to native atomic fetch sub (in wasm)";
-      case SymbolicAddress::AtomicFetchAnd:
-        return "call to native atomic fetch and (in wasm)";
-      case SymbolicAddress::AtomicFetchOr:
-        return "call to native atomic fetch or (in wasm)";
-      case SymbolicAddress::AtomicFetchXor:
-        return "call to native atomic fetch xor (in wasm)";
 #endif
       case SymbolicAddress::ModD:
         return "call to asm.js native f64 % (mod)";
       case SymbolicAddress::SinD:
         return "call to asm.js native f64 Math.sin";
       case SymbolicAddress::CosD:
         return "call to asm.js native f64 Math.cos";
       case SymbolicAddress::TanD:
--- a/js/src/wasm/WasmIonCompile.cpp
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -796,19 +796,17 @@ class FunctionCompiler
 
     void checkOffsetAndAlignmentAndBounds(MemoryAccessDesc* access, MDefinition** base)
     {
         MOZ_ASSERT(!inDeadCode());
 
         // Fold a constant base into the offset (so the base is 0 in which case
         // the codegen is optimized), if it doesn't wrap or trigger an
         // MWasmAddOffset.
-        if (!access->isAtomic() && !env_.isAsmJS() && // TODO bug 1421244
-            (*base)->isConstant())
-        {
+        if ((*base)->isConstant()) {
             uint32_t basePtr = (*base)->toConstant()->toInt32();
             uint32_t offset = access->offset();
 
             static_assert(OffsetGuardLimit < UINT32_MAX,
                           "checking for overflow against OffsetGuardLimit is enough.");
 
             if (offset < OffsetGuardLimit && basePtr < OffsetGuardLimit - offset) {
                 auto* ins = MConstant::New(alloc(), Int32Value(0), MIRType::Int32);
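
One reasoning step in the hunk above is worth spelling out: folding the constant base into the 32-bit offset is only safe if the sum stays under OffsetGuardLimit, and the guard is arranged so the check itself cannot wrap. Because offset < OffsetGuardLimit is tested first, OffsetGuardLimit - offset cannot underflow, making the second comparison an overflow-free test for basePtr + offset < OffsetGuardLimit. A hedged sketch (function name illustrative):

    #include <stdint.h>

    // Hedged sketch of the fold guard: true iff basePtr + offset < limit,
    // computed without any possibility of 32-bit wraparound.
    bool canFoldBaseIntoOffset(uint32_t basePtr, uint32_t offset, uint32_t limit)
    {
        return offset < limit && basePtr < limit - offset;
    }
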
--- a/js/src/wasm/WasmJS.cpp
+++ b/js/src/wasm/WasmJS.cpp
@@ -69,16 +69,21 @@ wasm::HasCompilerSupport(JSContext* cx)
         return false;
 
 #ifdef ENABLE_WASM_THREAD_OPS
     // Wasm threads require 8-byte lock-free atomics.
     if (!jit::AtomicOperations::isLockfree8())
         return false;
 #endif
 
+#ifdef JS_SIMULATOR
+    if (!Simulator::supportsAtomics())
+        return false;
+#endif
+
 #if defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_ARM64)
     return false;
 #else
     return true;
 #endif
 }
 
 bool
--- a/js/src/wasm/WasmTypes.h
+++ b/js/src/wasm/WasmTypes.h
@@ -1307,23 +1307,16 @@ typedef Vector<CallSiteTarget, 0, System
 // during static linking.
 
 enum class SymbolicAddress
 {
     ToInt32,
 #if defined(JS_CODEGEN_ARM)
     aeabi_idivmod,
     aeabi_uidivmod,
-    AtomicCmpXchg,
-    AtomicXchg,
-    AtomicFetchAdd,
-    AtomicFetchSub,
-    AtomicFetchAnd,
-    AtomicFetchOr,
-    AtomicFetchXor,
 #endif
     ModD,
     SinD,
     CosD,
     TanD,
     ASinD,
     ACosD,
     ATanD,
--- a/toolkit/content/aboutNetworking.xhtml
+++ b/toolkit/content/aboutNetworking.xhtml
@@ -232,17 +232,17 @@
             <br/>
             <div>
               &aboutNetworking.currentLogFile; <div id="current-log-file"></div><br/>
               <input type="text" name="log-file" id="log-file"></input>
               <button id="set-log-file-button"> &aboutNetworking.setLogFile; </button>
             </div>
             <div>
               &aboutNetworking.currentLogModules; <div id="current-log-modules"></div><br/>
-              <input type="text" name="log-modules" id="log-modules" value="timestamp,sync,nsHttp:5,nsSocketTransport:5,nsStreamPump:5,nsHostResolver:5"></input>
+              <input type="text" name="log-modules" id="log-modules" value="timestamp,sync,nsHttp:5,cache2:5,nsSocketTransport:5,nsHostResolver:5"></input>
               <button id="set-log-modules-button"> &aboutNetworking.setLogModules; </button>
             </div>
           </div>
 
         </div>
     </body>
 </html>
 
--- a/widget/nsGUIEventIPC.h
+++ b/widget/nsGUIEventIPC.h
@@ -16,33 +16,21 @@
 #include "mozilla/TouchEvents.h"
 #include "mozilla/dom/Selection.h"
 #include "InputData.h"
 
 namespace IPC
 {
 
 template<>
-struct ParamTraits<mozilla::EventMessage>
-{
-  typedef mozilla::EventMessage paramType;
-
-  static void Write(Message* aMsg, const paramType& aParam)
-  {
-    WriteParam(aMsg, static_cast<const mozilla::EventMessageType&>(aParam));
-  }
-
-  static bool Read(const Message* aMsg, PickleIterator* aIter, paramType* aResult)
-  {
-    mozilla::EventMessageType eventMessage = 0;
-    bool ret = ReadParam(aMsg, aIter, &eventMessage);
-    *aResult = static_cast<paramType>(eventMessage);
-    return ret;
-  }
-};
+struct ParamTraits<mozilla::EventMessage> :
+  public ContiguousEnumSerializer<mozilla::EventMessage,
+                                  mozilla::EventMessage(0),
+                                  mozilla::EventMessage::eEventMessage_MaxValue>
+{};
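// Hedged note on the replacement above: ContiguousEnumSerializer<E, lo, hi>
// (from IPCMessageUtils) still writes the enum's integral value, but its Read
// validates lo <= value < hi before casting, unlike the deleted specialization,
// which cast unchecked. Roughly (names approximate):
//
//   mozilla::EventMessageType raw = 0;
//   if (!ReadParam(aMsg, aIter, &raw) ||
//       raw >= mozilla::EventMessageType(mozilla::eEventMessage_MaxValue))
//       return false;        // reject out-of-range values from a hostile child
//   *aResult = static_cast<mozilla::EventMessage>(raw);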
 
 template<>
 struct ParamTraits<mozilla::BaseEventFlags>
 {
   typedef mozilla::BaseEventFlags paramType;
 
   static void Write(Message* aMsg, const paramType& aParam)
   {