Merge inbound to mozilla-central. a=merge
author       Bogdan Tara <btara@mozilla.com>
date         Mon, 09 Apr 2018 00:43:58 +0300
changeset    468395 b4bc6b2401738b78fd47127a4c716bb9178e1a09
parent       468386 56e8e0fb75d71e5eb59cabac55fd3762b428534a (current diff)
parent       468394 525e7c6d62dead02bfd4887aee6ae9bc983939a0 (diff)
child        468396 e13061bb26d744e3cf689e5bfda2cd1bbf31bc03
child        468405 65cf64ac028053071b5b7156eccc78b645cf0d52
push id      1728
push user    jlund@mozilla.com
push date    Mon, 18 Jun 2018 21:12:27 +0000
treeherder   mozilla-release@c296fde26f5f
reviewers    merge
milestone    61.0a1
js/src/jit-test/tests/ion/iloop-nosignaling.js
--- a/browser/base/content/browser-tabsintitlebar.js
+++ b/browser/base/content/browser-tabsintitlebar.js
@@ -293,17 +293,16 @@ var TabsInTitlebar = {
   },
 
   _sizePlaceholder(type, width) {
     Array.forEach(document.querySelectorAll(".titlebar-placeholder[type='" + type + "']"),
                   function(node) { node.style.width = width + "px"; });
   },
 
   uninit() {
-    removeEventListener("resolutionchange", this);
     Services.prefs.removeObserver(this._prefName, this);
     this._menuObserver.disconnect();
     CustomizableUI.removeListener(this);
     gDragSpaceObserver.uninit();
   }
 };
 
 function onTitlebarMaxClick() {
new file mode 100644
--- /dev/null
+++ b/dom/security/test/general/file_same_site_cookies_subrequest.sjs
@@ -0,0 +1,75 @@
+// Custom *.sjs file specifically for the needs of Bug 1286861
+
+// small red image
+const IMG_BYTES = atob(
+  "iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12" +
+  "P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==");
+
+const FRAME = `
+  <!DOCTYPE html>
+  <html>
+  <head>
+    <title>Bug 1286861 - Add support for same site cookies</title>
+  </head>
+  <body>
+    <img src = "http://mochi.test:8888/tests/dom/security/test/general/file_same_site_cookies_subrequest.sjs?checkCookie">
+  </body>
+  </html>`;
+
+function handleRequest(request, response)
+{
+  // avoid confusing cache behaviors
+  response.setHeader("Cache-Control", "no-cache", false);
+
+  if (request.queryString === "setStrictSameSiteCookie") {
+    response.setHeader("Set-Cookie", "myKey=strictSameSiteCookie; samesite=strict", true);
+    response.setHeader("Content-Type", "image/png");
+    response.write(IMG_BYTES);
+    return;
+  }
+
+  if (request.queryString === "setLaxSameSiteCookie") {
+    response.setHeader("Set-Cookie", "myKey=laxSameSiteCookie; samesite=lax", true);
+    response.setHeader("Content-Type", "image/png");
+    response.write(IMG_BYTES);
+    return;
+  }
+
+  // Save the response of the initial request as object state; it is answered
+  // asynchronously once the server has processed the img request.
+  if (request.queryString === "queryresult") {
+    response.processAsync();
+    setObjectState("queryResult", response);
+    return;
+  }
+
+  if (request.queryString === "loadFrame") {
+    response.write(FRAME);
+    return;
+  }
+
+  if (request.queryString === "checkCookie") {
+    var cookie = "unitialized";
+    if (request.hasHeader("Cookie")) {
+      cookie = request.getHeader("Cookie");
+    }
+    else {
+      cookie = "myKey=noCookie";
+    }
+    response.setHeader("Content-Type", "image/png");
+    response.write(IMG_BYTES);
+
+    // return the result
+    getObjectState("queryResult", function(queryResponse) {
+      if (!queryResponse) {
+        return;
+      }
+      queryResponse.write(cookie);
+      queryResponse.finish();
+    });
+    return;
+  }
+
+  // we should never get here, but just in case return something unexpected
+  response.write("D'oh");
+}
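
For context, the handler above relies on the mochitest .sjs parked-response pattern: response.processAsync() keeps the "queryresult" response open after handleRequest returns, setObjectState() stores it under a key, and a later request looks it up with getObjectState(), writes the observed cookie into it, and calls finish(). The following minimal sketch isolates that pattern; the "wait"/"signal" query strings and the "pending" key are illustrative only and not part of this patch.

// Minimal sketch of the parked-response pattern (illustrative, not part of the patch).
// A "wait" request is held open until a later "signal" request supplies its answer.
function handleRequest(request, response) {
  response.setHeader("Cache-Control", "no-cache", false);

  if (request.queryString === "wait") {
    // Keep this response alive after handleRequest returns and park it under a key.
    response.processAsync();
    setObjectState("pending", response);
    return;
  }

  if (request.queryString === "signal") {
    // Find the parked response and complete it with the payload.
    getObjectState("pending", function(parked) {
      if (parked) {
        parked.write("result for the waiting client");
        parked.finish();
      }
    });
    response.write("ok");
    return;
  }

  response.write("unknown query");
}
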
new file mode 100644
--- /dev/null
+++ b/dom/security/test/general/file_same_site_cookies_toplevel_nav.sjs
@@ -0,0 +1,87 @@
+// Custom *.sjs file specifically for the needs of Bug 1286861
+
+// small red image
+const IMG_BYTES = atob(
+  "iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12" +
+  "P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==");
+
+const FRAME = `
+  <!DOCTYPE html>
+  <html>
+  <head>
+    <title>Bug 1286861 - Add support for same site cookies</title>
+  </head>
+  <body>
+    <script type="application/javascript">
+      let myWin = window.open("http://mochi.test:8888/tests/dom/security/test/general/file_same_site_cookies_toplevel_nav.sjs?loadWin");
+      myWin.onload = function() {
+        myWin.close();
+      }
+    </script>
+  </body>
+  </html>`;
+
+const WIN = `
+  <!DOCTYPE html>
+  <html>
+  <body>
+    just a dummy window
+  </body>
+  </html>`;
+
+function handleRequest(request, response)
+{
+  // avoid confusing cache behaviors
+  response.setHeader("Cache-Control", "no-cache", false);
+
+  if (request.queryString === "setStrictSameSiteCookie") {
+    response.setHeader("Set-Cookie", "myKey=strictSameSiteCookie; samesite=strict", true);
+    response.setHeader("Content-Type", "image/png");
+    response.write(IMG_BYTES);
+    return;
+  }
+
+  if (request.queryString === "setLaxSameSiteCookie") {
+    response.setHeader("Set-Cookie", "myKey=laxSameSiteCookie; samesite=lax", true);
+    response.setHeader("Content-Type", "image/png");
+    response.write(IMG_BYTES);
+    return;
+  }
+
+  // Save the response of the initial request as object state; it is answered
+  // asynchronously once the server has processed the top-level window request.
+  if (request.queryString === "queryresult") {
+    response.processAsync();
+    setObjectState("queryResult", response);
+    return;
+  }
+
+  if (request.queryString === "loadFrame") {
+    response.write(FRAME);
+    return;
+  }
+
+  if (request.queryString === "loadWin") {
+    var cookie = "unitialized";
+    if (request.hasHeader("Cookie")) {
+      cookie = request.getHeader("Cookie");
+    }
+    else {
+      cookie = "myKey=noCookie";
+    }
+    response.write(WIN);
+
+    // return the result
+    getObjectState("queryResult", function(queryResponse) {
+      if (!queryResponse) {
+        return;
+      }
+      queryResponse.write(cookie);
+      queryResponse.finish();
+    });
+    return;
+  }
+
+  // we should never get here, but just in case return something unexpected
+  response.write("D'oh");
+}
--- a/dom/security/test/general/mochitest.ini
+++ b/dom/security/test/general/mochitest.ini
@@ -3,21 +3,25 @@ support-files =
   file_contentpolicytype_targeted_link_iframe.sjs
   file_nosniff_testserver.sjs
   file_block_script_wrong_mime_server.sjs
   file_block_toplevel_data_navigation.html
   file_block_toplevel_data_navigation2.html
   file_block_toplevel_data_navigation3.html
   file_block_toplevel_data_redirect.sjs
   file_block_subresource_redir_to_data.sjs
+  file_same_site_cookies_subrequest.sjs
+  file_same_site_cookies_toplevel_nav.sjs
 
 [test_contentpolicytype_targeted_link_iframe.html]
 [test_nosniff.html]
 [test_block_script_wrong_mime.html]
 [test_block_toplevel_data_navigation.html]
 skip-if = toolkit == 'android' || webrender # intermittent failure; bug 1424752 for webrender
 [test_block_toplevel_data_img_navigation.html]
 skip-if = toolkit == 'android' # intermittent failure
 [test_allow_opening_data_pdf.html]
 skip-if = toolkit == 'android'
 [test_allow_opening_data_json.html]
 skip-if = toolkit == 'android'
 [test_block_subresource_redir_to_data.html]
+[test_same_site_cookies_subrequest.html]
+[test_same_site_cookies_toplevel_nav.html]
new file mode 100644
--- /dev/null
+++ b/dom/security/test/general/test_same_site_cookies_subrequest.html
@@ -0,0 +1,113 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Bug 1286861 - Test same site cookies on subrequests</title>
+  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<img id="cookieImage">
+<iframe id="testframe"></iframe>
+
+<script class="testbody" type="text/javascript">
+
+/*
+ * Description of the test:
+ * 1) We load an image from http://mochi.test which sets a same site cookie
+ * 2) We load an iframe from:
+ *    * http://mochi.test which loads another image from http://mochi.test
+ *    * http://example.com which loads another image from http://mochi.test
+ * 3) We observe that the same site cookie is sent in the same origin case,
+ *    but not in the cross origin case.
+ *
+ * In detail:
+ * We perform an XHR request to the *.sjs file, which is processed asynchronously
+ * on the server and held until the image request has been handled.
+ * Once the image request has been processed, the server responds to the initial
+ * XHR request with the expected result (the cookie value).
+ */
+
+SimpleTest.waitForExplicitFinish();
+
+const SAME_ORIGIN = "http://mochi.test:8888/";
+const CROSS_ORIGIN = "http://example.com/";
+const PATH = "tests/dom/security/test/general/file_same_site_cookies_subrequest.sjs";
+
+let curTest = 0;
+
+var tests = [
+  {
+    description: "same origin site using cookie policy 'samesite=strict'",
+    imgSRC: SAME_ORIGIN + PATH + "?setStrictSameSiteCookie",
+    frameSRC: SAME_ORIGIN + PATH + "?loadFrame",
+    result: "myKey=strictSameSiteCookie",
+  },
+  {
+    description: "cross origin site using cookie policy 'samesite=strict'",
+    imgSRC: SAME_ORIGIN + PATH + "?setStrictSameSiteCookie",
+    frameSRC: CROSS_ORIGIN + PATH + "?loadFrame",
+    result: "myKey=noCookie",
+  },
+  {
+    description: "same origin site using cookie policy 'samesite=lax'",
+    imgSRC: SAME_ORIGIN + PATH + "?setLaxSameSiteCookie",
+    frameSRC: SAME_ORIGIN + PATH + "?loadFrame",
+    result: "myKey=laxSameSiteCookie",
+  },
+  {
+    description: "cross origin site using cookie policy 'samesite=lax'",
+    imgSRC: SAME_ORIGIN + PATH + "?setLaxSameSiteCookie",
+    frameSRC: CROSS_ORIGIN + PATH + "?loadFrame",
+    result: "myKey=noCookie",
+  },
+];
+
+function checkResult(aCookieVal) {
+  is(aCookieVal, tests[curTest].result, tests[curTest].description);
+  curTest += 1;
+
+  // let's see if we ran all the tests
+  if (curTest == tests.length) {
+    SimpleTest.finish();
+    return;
+  }
+  // otherwise it's time to run the next test
+  setCookieAndInitTest();
+}
+
+function setupQueryResultAndRunTest() {
+  var myXHR = new XMLHttpRequest();
+  myXHR.open("GET", "file_same_site_cookies_subrequest.sjs?queryresult");
+  myXHR.onload = function(e) {
+    checkResult(myXHR.responseText);
+  }
+  myXHR.onerror = function(e) {
+    ok(false, "could not query results from server (" + e.message + ")");
+  }
+  myXHR.send();
+
+  // give it some time and load the test frame
+  SimpleTest.executeSoon(function() {
+    let testframe = document.getElementById("testframe");
+    testframe.src = tests[curTest].frameSRC;
+  });
+}
+
+function setCookieAndInitTest() {
+  var cookieImage = document.getElementById("cookieImage");
+  cookieImage.onload = function() {
+    ok(true, "set cookie for test (" + tests[curTest].description + ")");
+    setupQueryResultAndRunTest();
+  }
+  cookieImage.onerror = function() {
+    ok(false, "could not set cookie for test (" + tests[curTest].description + ")");
+  }
+  cookieImage.src = tests[curTest].imgSRC;
+}
+
+// fire up the test
+setCookieAndInitTest();
+
+</script>
+</body>
+</html>
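
To summarize the tests array above: for a subresource load (an <img> inside an iframe), the server only observes the same-site cookie when the framing document is same-origin; both samesite=strict and samesite=lax cookies are withheld when the frame is cross-origin. A compact restatement of those expectations (a summary only, not additional test code):

// Expected Cookie header seen by the server in the subrequest test.
const subrequestExpectations = {
  "samesite=strict": { sameOriginFrame: "myKey=strictSameSiteCookie", crossOriginFrame: "myKey=noCookie" },
  "samesite=lax":    { sameOriginFrame: "myKey=laxSameSiteCookie",    crossOriginFrame: "myKey=noCookie" },
};
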
new file mode 100644
--- /dev/null
+++ b/dom/security/test/general/test_same_site_cookies_toplevel_nav.html
@@ -0,0 +1,114 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Bug 1286861 - Test same site cookies on top-level navigations</title>
+  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<img id="cookieImage">
+<iframe id="testframe"></iframe>
+
+<script class="testbody" type="text/javascript">
+
+/*
+ * Description of the test:
+ * 1) We load an image from http://mochi.test which sets a same site cookie
+ * 2) We open a new window to
+ *    * a same origin location
+ *    * a cross origin location
+ * 3) We observe that the same site cookie is sent in the same origin case,
+ *    but not in the cross origin case, unless the policy is 'lax', in which
+ *    case the cookie is also sent for the cross-origin top-level navigation.
+ *
+ * In detail:
+ * We perform an XHR request to the *.sjs file, which is processed asynchronously
+ * on the server and held until the top-level navigation request has been handled.
+ * Once that request has been processed, the server responds to the initial
+ * XHR request with the expected result (the cookie value).
+ */
+
+SimpleTest.waitForExplicitFinish();
+
+const SAME_ORIGIN = "http://mochi.test:8888/";
+const CROSS_ORIGIN = "http://example.com/";
+const PATH = "tests/dom/security/test/general/file_same_site_cookies_toplevel_nav.sjs";
+
+let curTest = 0;
+
+var tests = [
+  {
+    description: "same origin navigation using cookie policy 'samesite=strict'",
+    imgSRC: SAME_ORIGIN + PATH + "?setStrictSameSiteCookie",
+    frameSRC: SAME_ORIGIN + PATH + "?loadFrame",
+    result: "myKey=strictSameSiteCookie",
+  },
+  {
+    description: "cross origin navigation using cookie policy 'samesite=strict'",
+    imgSRC: SAME_ORIGIN + PATH + "?setStrictSameSiteCookie",
+    frameSRC: CROSS_ORIGIN + PATH + "?loadFrame",
+    result: "myKey=noCookie",
+  },
+  {
+    description: "same origin navigation using cookie policy 'samesite=lax'",
+    imgSRC: SAME_ORIGIN + PATH + "?setLaxSameSiteCookie",
+    frameSRC: SAME_ORIGIN + PATH + "?loadFrame",
+    result: "myKey=laxSameSiteCookie",
+  },
+  {
+    description: "cross origin navigation using cookie policy 'samesite=lax'",
+    imgSRC: SAME_ORIGIN + PATH + "?setLaxSameSiteCookie",
+    frameSRC: CROSS_ORIGIN + PATH + "?loadFrame",
+    result: "myKey=laxSameSiteCookie",
+  },
+];
+
+function checkResult(aCookieVal) {
+  is(aCookieVal, tests[curTest].result, tests[curTest].description);
+  curTest += 1;
+
+  // let's see if we ran all the tests
+  if (curTest == tests.length) {
+    SimpleTest.finish();
+    return;
+  }
+  // otherwise it's time to run the next test
+  setCookieAndInitTest();
+}
+
+function setupQueryResultAndRunTest() {
+  var myXHR = new XMLHttpRequest();
+  myXHR.open("GET", "file_same_site_cookies_toplevel_nav.sjs?queryresult");
+  myXHR.onload = function(e) {
+    checkResult(myXHR.responseText);
+  }
+  myXHR.onerror = function(e) {
+    ok(false, "could not query results from server (" + e.message + ")");
+  }
+  myXHR.send();
+
+  // give it some time and load the test window
+  SimpleTest.executeSoon(function() {
+    let testframe = document.getElementById("testframe");
+    testframe.src = tests[curTest].frameSRC;
+  });
+}
+
+function setCookieAndInitTest() {
+  var cookieImage = document.getElementById("cookieImage");
+  cookieImage.onload = function() {
+    ok(true, "set cookie for test (" + tests[curTest].description + ")");
+    setupQueryResultAndRunTest();
+  }
+  cookieImage.onerror = function() {
+    ok(false, "could not set cookie for test (" + tests[curTest].description + ")");
+  }
+  cookieImage.src = tests[curTest].imgSRC;
+}
+
+// fire up the test
+setCookieAndInitTest();
+
+</script>
+</body>
+</html>
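
The top-level navigation test differs from the subrequest test in exactly one expectation: a samesite=lax cookie is still sent on a cross-origin top-level navigation (the window.open() performed by the loaded frame), whereas a samesite=strict cookie is not. Restated compactly (a summary of the tests array above, not additional test code):

// Expected Cookie header seen by the server in the top-level navigation test.
const topLevelNavExpectations = {
  "samesite=strict": { sameOriginNav: "myKey=strictSameSiteCookie", crossOriginNav: "myKey=noCookie" },
  "samesite=lax":    { sameOriginNav: "myKey=laxSameSiteCookie",    crossOriginNav: "myKey=laxSameSiteCookie" },
};
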
--- a/js/src/gc/GC.cpp
+++ b/js/src/gc/GC.cpp
@@ -6535,20 +6535,18 @@ GCRuntime::endSweepPhase(bool destroying
          * Sweep script filenames after sweeping functions in the generic loop
          * above. In this way when a scripted function's finalizer destroys the
          * script and calls rt->destroyScriptHook, the hook can still access the
          * script's filename. See bug 323267.
          */
         SweepScriptData(rt);
 
         /* Clear out any small pools that we're hanging on to. */
-        if (rt->hasJitRuntime()) {
+        if (rt->hasJitRuntime())
             rt->jitRuntime()->execAlloc().purge();
-            rt->jitRuntime()->backedgeExecAlloc().purge();
-        }
     }
 
     {
         gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::FINALIZE_END);
         callFinalizeCallbacks(&fop, JSFINALIZE_COLLECTION_END);
 
         if (allCCVisibleZonesWereCollected())
             grayBitsValid = true;
@@ -7866,21 +7864,16 @@ js::NewCompartment(JSContext* cx, JSPrin
     if (!group) {
         MOZ_ASSERT(!zone);
         group = cx->new_<ZoneGroup>(rt);
         if (!group)
             return nullptr;
 
         groupHolder.reset(group);
 
-        if (!group->init()) {
-            ReportOutOfMemory(cx);
-            return nullptr;
-        }
-
         if (cx->generationalDisabled)
             group->nursery().disable();
     }
 
     if (!zone) {
         zone = cx->new_<Zone>(cx->runtime(), group);
         if (!zone)
             return nullptr;
--- a/js/src/gc/ZoneGroup.cpp
+++ b/js/src/gc/ZoneGroup.cpp
@@ -18,47 +18,32 @@ namespace js {
 ZoneGroup::ZoneGroup(JSRuntime* runtime)
   : runtime(runtime),
     helperThreadOwnerContext_(nullptr),
     zones_(this),
     helperThreadUse(HelperThreadUse::None),
 #ifdef DEBUG
     ionBailAfter_(this, 0),
 #endif
-    jitZoneGroup(this, nullptr),
     debuggerList_(this),
     numFinishedBuilders(0),
     ionLazyLinkListSize_(0)
 {}
 
-bool
-ZoneGroup::init()
-{
-    AutoLockGC lock(runtime);
-
-    jitZoneGroup = js_new<jit::JitZoneGroup>(this);
-    if (!jitZoneGroup)
-        return false;
-
-    return true;
-}
-
 ZoneGroup::~ZoneGroup()
 {
 #ifdef DEBUG
     MOZ_ASSERT(helperThreadUse == HelperThreadUse::None);
     {
         AutoLockHelperThreadState lock;
         MOZ_ASSERT(ionLazyLinkListSize_ == 0);
         MOZ_ASSERT(ionLazyLinkList().isEmpty());
     }
 #endif
 
-    js_delete(jitZoneGroup.ref());
-
     if (this == runtime->gc.systemZoneGroup)
         runtime->gc.systemZoneGroup = nullptr;
 }
 
 void
 ZoneGroup::setHelperThreadOwnerContext(JSContext* cx)
 {
     MOZ_ASSERT_IF(cx, TlsContext.get() == cx);
--- a/js/src/gc/ZoneGroup.h
+++ b/js/src/gc/ZoneGroup.h
@@ -77,18 +77,16 @@ class ZoneGroup
     void clearUsedByHelperThread() {
         MOZ_ASSERT(helperThreadUse != HelperThreadUse::None);
         helperThreadUse = HelperThreadUse::None;
     }
 
     explicit ZoneGroup(JSRuntime* runtime);
     ~ZoneGroup();
 
-    bool init();
-
     inline Nursery& nursery();
     inline gc::StoreBuffer& storeBuffer();
 
     inline bool isCollecting();
     inline bool isGCScheduled();
 
     // Delete an empty zone after its contents have been merged.
     void deleteEmptyZone(Zone* zone);
@@ -104,18 +102,16 @@ class ZoneGroup
 
     // Set after how many bailing places we should forcefully bail.
     // Zero disables this feature.
     void setIonBailAfter(uint32_t after) {
         ionBailAfter_ = after;
     }
 #endif
 
-    ZoneGroupData<jit::JitZoneGroup*> jitZoneGroup;
-
   private:
     /* Linked list of all Debugger objects in the group. */
     ZoneGroupData<mozilla::LinkedList<js::Debugger>> debuggerList_;
   public:
     mozilla::LinkedList<js::Debugger>& debuggerList() { return debuggerList_.ref(); }
 
     // Number of Ion compilations which were finished off thread and are
     // waiting to be lazily linked. This is only set while holding the helper
--- a/js/src/gdb/tests/test-ExecutableAllocator.cpp
+++ b/js/src/gdb/tests/test-ExecutableAllocator.cpp
@@ -1,40 +1,40 @@
 #include "gdb-tests.h"
 #include "jsapi.h"
 
 #include "jit/ExecutableAllocator.h"
 #include "vm/JSContext.h"
 
 FRAGMENT(ExecutableAllocator, empty) {
     using namespace js::jit;
-    ExecutableAllocator execAlloc(cx->runtime());
+    ExecutableAllocator execAlloc;
 
     breakpoint();
 
     use(execAlloc);
 }
 
 FRAGMENT(ExecutableAllocator, onepool) {
     using namespace js::jit;
     ExecutablePool* pool = nullptr;
-    ExecutableAllocator execAlloc(cx->runtime());
+    ExecutableAllocator execAlloc;
     execAlloc.alloc(cx, 16 * 1024, &pool, CodeKind::Baseline);
 
     breakpoint();
 
     use(pool);
     use(execAlloc);
 }
 
 FRAGMENT(ExecutableAllocator, twopools) {
     using namespace js::jit;
     ExecutablePool* init = nullptr;
     ExecutablePool* pool = nullptr;
-    ExecutableAllocator execAlloc(cx->runtime());
+    ExecutableAllocator execAlloc;
 
     execAlloc.alloc(cx, 16 * 1024, &init, CodeKind::Baseline);
 
     do { // Keep allocating until we get a second pool.
         execAlloc.alloc(cx, 32 * 1024, &pool, CodeKind::Ion);
     } while (pool == init);
 
     breakpoint();
deleted file mode 100644
--- a/js/src/jit-test/tests/ion/iloop-nosignaling.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// |jit-test| exitstatus: 6;
-
-setJitCompilerOption('ion.interrupt-without-signals', 1);
-timeout(1);
-for(;;);
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -3054,68 +3054,21 @@ CodeGenerator::visitPhi(LPhi* lir)
 }
 
 void
 CodeGenerator::visitGoto(LGoto* lir)
 {
     jumpToBlock(lir->target());
 }
 
-// Out-of-line path to execute any move groups between the start of a loop
-// header and its interrupt check, then invoke the interrupt handler.
-class OutOfLineInterruptCheckImplicit : public OutOfLineCodeBase<CodeGenerator>
-{
-  public:
-    LBlock* block;
-    LInterruptCheck* lir;
-
-    OutOfLineInterruptCheckImplicit(LBlock* block, LInterruptCheck* lir)
-      : block(block), lir(lir)
-    { }
-
-    void accept(CodeGenerator* codegen) override {
-        codegen->visitOutOfLineInterruptCheckImplicit(this);
-    }
-};
-
 typedef bool (*InterruptCheckFn)(JSContext*);
 static const VMFunction InterruptCheckInfo =
     FunctionInfo<InterruptCheckFn>(InterruptCheck, "InterruptCheck");
 
 void
-CodeGenerator::visitOutOfLineInterruptCheckImplicit(OutOfLineInterruptCheckImplicit* ool)
-{
-#ifdef CHECK_OSIPOINT_REGISTERS
-    // This is path is entered from the patched back-edge of the loop. This
-    // means that the JitAtivation flags used for checking the validity of the
-    // OSI points are not reseted by the path generated by generateBody, so we
-    // have to reset it here.
-    resetOsiPointRegs(ool->lir->safepoint());
-#endif
-
-    LInstructionIterator iter = ool->block->begin();
-    for (; iter != ool->block->end(); iter++) {
-        if (iter->isMoveGroup()) {
-            // Replay this move group that preceds the interrupt check at the
-            // start of the loop header. Any incoming jumps here will be from
-            // the backedge and will skip over the move group emitted inline.
-            visitMoveGroup(iter->toMoveGroup());
-        } else {
-            break;
-        }
-    }
-    MOZ_ASSERT(*iter == ool->lir);
-
-    saveLive(ool->lir);
-    callVM(InterruptCheckInfo, ool->lir);
-    restoreLive(ool->lir);
-    masm.jump(ool->rejoin());
-}
-
-void
 CodeGenerator::visitTableSwitch(LTableSwitch* ins)
 {
     MTableSwitch* mir = ins->mir();
     Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
     const LAllocation* temp;
 
     if (mir->getOperand(0)->type() != MIRType::Int32) {
         temp = ins->tempInt()->output();
@@ -10320,29 +10273,29 @@ CodeGenerator::link(JSContext* cx, Compi
 
     IonScript* ionScript =
         IonScript::New(cx, compilationId,
                        graph.totalSlotCount(), argumentSlots, scriptFrameSize,
                        snapshots_.listSize(), snapshots_.RVATableSize(),
                        recovers_.size(), bailouts_.length(), graph.numConstants(),
                        safepointIndices_.length(), osiIndices_.length(),
                        icList_.length(), runtimeData_.length(),
-                       safepoints_.size(), patchableBackedges_.length(),
-                       sharedStubs_.length(), optimizationLevel);
+                       safepoints_.size(), sharedStubs_.length(),
+                       optimizationLevel);
     if (!ionScript)
         return false;
     auto guardIonScript = mozilla::MakeScopeExit([&ionScript] {
-        // Use js_free instead of IonScript::Destroy: the cache list and
-        // backedge list are still uninitialized.
+        // Use js_free instead of IonScript::Destroy: the cache list is still
+        // uninitialized.
         js_free(ionScript);
     });
 
     Linker linker(masm);
     AutoFlushICache afc("IonLink");
-    JitCode* code = linker.newCode(cx, CodeKind::Ion, !patchableBackedges_.empty());
+    JitCode* code = linker.newCode(cx, CodeKind::Ion);
     if (!code)
         return false;
 
     // Encode native to bytecode map if profiling is enabled.
     if (isProfilerInstrumentationEnabled()) {
         // Generate native-to-bytecode main table.
         if (!generateCompactNativeToBytecodeMap(cx, code))
             return false;
@@ -10538,18 +10491,16 @@ CodeGenerator::link(JSContext* cx, Compi
         for (size_t i = 0; i < graph.numConstants(); i++) {
             const Value& v = vp[i];
             if ((v.isObject() || v.isString()) && IsInsideNursery(v.toGCThing())) {
                 cx->zone()->group()->storeBuffer().putWholeCell(script);
                 break;
             }
         }
     }
-    if (patchableBackedges_.length() > 0)
-        ionScript->copyPatchableBackedges(cx, code, patchableBackedges_.begin(), masm);
 
     // Attach any generated script counts to the script.
     if (IonScriptCounts* counts = extractScriptCounts())
         script->addIonCounts(counts);
 
     guardIonScript.release();
     return true;
 }
@@ -12893,25 +12844,16 @@ CodeGenerator::visitAssertRangeV(LAssert
 
     masm.assumeUnreachable("Incorrect range for Value.");
     masm.bind(&done);
 }
 
 void
 CodeGenerator::visitInterruptCheck(LInterruptCheck* lir)
 {
-    if (lir->implicit()) {
-        OutOfLineInterruptCheckImplicit* ool = new(alloc()) OutOfLineInterruptCheckImplicit(current, lir);
-        addOutOfLineCode(ool, lir->mir());
-
-        lir->setOolEntry(ool->entry());
-        masm.bind(ool->rejoin());
-        return;
-    }
-
     OutOfLineCode* ool = oolCallVM(InterruptCheckInfo, lir, ArgList(), StoreNothing());
 
     const void* interruptAddr = gen->runtime->addressOfInterrupt();
     masm.branch32(Assembler::NotEqual, AbsoluteAddress(interruptAddr), Imm32(0), ool->entry());
     masm.bind(ool->rejoin());
 }
 
 void
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -38,17 +38,16 @@ enum class SwitchTableType {
     OutOfLine
 };
 
 template <SwitchTableType tableType> class OutOfLineSwitch;
 class OutOfLineTestObject;
 class OutOfLineNewArray;
 class OutOfLineNewObject;
 class CheckOverRecursedFailure;
-class OutOfLineInterruptCheckImplicit;
 class OutOfLineUnboxFloatingPoint;
 class OutOfLineStoreElementHole;
 class OutOfLineTypeOfV;
 class OutOfLineUpdateCache;
 class OutOfLineICFallback;
 class OutOfLineCallPostWriteBarrier;
 class OutOfLineCallPostWriteElementBarrier;
 class OutOfLineIsCallable;
@@ -102,18 +101,16 @@ class CodeGenerator final : public CodeG
 
     void visitCheckOverRecursedFailure(CheckOverRecursedFailure* ool);
 
     void visitOutOfLineUnboxFloatingPoint(OutOfLineUnboxFloatingPoint* ool);
     void visitOutOfLineStoreElementHole(OutOfLineStoreElementHole* ool);
 
     void visitOutOfLineICFallback(OutOfLineICFallback* ool);
 
-    void visitOutOfLineInterruptCheckImplicit(OutOfLineInterruptCheckImplicit* ins);
-
     void visitOutOfLineCallPostWriteBarrier(OutOfLineCallPostWriteBarrier* ool);
     void visitOutOfLineCallPostWriteElementBarrier(OutOfLineCallPostWriteElementBarrier* ool);
 
     void visitOutOfLineNewArray(OutOfLineNewArray* ool);
     void visitOutOfLineNewObject(OutOfLineNewObject* ool);
 
   private:
     void emitSharedStub(ICStub::Kind kind, LInstruction* lir);
--- a/js/src/jit/ExecutableAllocator.cpp
+++ b/js/src/jit/ExecutableAllocator.cpp
@@ -88,29 +88,24 @@ ExecutablePool::alloc(size_t n, CodeKind
 
 size_t
 ExecutablePool::available() const
 {
     MOZ_ASSERT(m_end >= m_freePtr);
     return m_end - m_freePtr;
 }
 
-ExecutableAllocator::ExecutableAllocator(JSRuntime* rt)
-  : rt_(rt)
-{
-    MOZ_ASSERT(m_smallPools.empty());
-}
-
 ExecutableAllocator::~ExecutableAllocator()
 {
     for (size_t i = 0; i < m_smallPools.length(); i++)
         m_smallPools[i]->release(/* willDestroy = */true);
 
     // If this asserts we have a pool leak.
-    MOZ_ASSERT_IF(m_pools.initialized() && rt_->gc.shutdownCollectedEverything(),
+    MOZ_ASSERT_IF((m_pools.initialized() &&
+                   TlsContext.get()->runtime()->gc.shutdownCollectedEverything()),
                   m_pools.empty());
 }
 
 ExecutablePool*
 ExecutableAllocator::poolForSize(size_t n)
 {
     // Try to fit in an existing small allocator.  Use the pool with the
     // least available space that is big enough (best-fit).  This is the
@@ -176,18 +171,16 @@ ExecutableAllocator::roundUpAllocationSi
     size = size & ~(granularity - 1);
     MOZ_ASSERT(size >= request);
     return size;
 }
 
 ExecutablePool*
 ExecutableAllocator::createPool(size_t n)
 {
-    MOZ_ASSERT(rt_->jitRuntime()->preventBackedgePatching());
-
     size_t allocSize = roundUpAllocationSize(n, ExecutableCodePageSize);
     if (allocSize == OVERSIZE_ALLOCATION)
         return nullptr;
 
     if (!m_pools.initialized() && !m_pools.init())
         return nullptr;
 
     ExecutablePool::Allocation a = systemAlloc(allocSize);
@@ -207,19 +200,16 @@ ExecutableAllocator::createPool(size_t n
     }
 
     return pool;
 }
 
 void*
 ExecutableAllocator::alloc(JSContext* cx, size_t n, ExecutablePool** poolp, CodeKind type)
 {
-    // Don't race with reprotectAll called from the signal handler.
-    JitRuntime::AutoPreventBackedgePatching apbp(rt_);
-
     // Caller must ensure 'n' is word-size aligned. If all allocations are
     // of word sized quantities, then all subsequent allocations will be
     // aligned.
     MOZ_ASSERT(roundUpAllocationSize(n, sizeof(void*)) == n);
 
     if (n == OVERSIZE_ALLOCATION) {
         *poolp = nullptr;
         return nullptr;
@@ -237,35 +227,29 @@ ExecutableAllocator::alloc(JSContext* cx
     cx->zone()->updateJitCodeMallocBytes(n);
 
     return result;
 }
 
 void
 ExecutableAllocator::releasePoolPages(ExecutablePool* pool)
 {
-    // Don't race with reprotectAll called from the signal handler.
-    JitRuntime::AutoPreventBackedgePatching apbp(rt_);
-
     MOZ_ASSERT(pool->m_allocation.pages);
     systemRelease(pool->m_allocation);
 
     MOZ_ASSERT(m_pools.initialized());
 
     // Pool may not be present in m_pools if we hit OOM during creation.
     if (auto ptr = m_pools.lookup(pool))
         m_pools.remove(ptr);
 }
 
 void
 ExecutableAllocator::purge()
 {
-    // Don't race with reprotectAll called from the signal handler.
-    JitRuntime::AutoPreventBackedgePatching apbp(rt_);
-
     for (size_t i = 0; i < m_smallPools.length(); ) {
         ExecutablePool* pool = m_smallPools[i];
         if (pool->m_refCount > 1) {
             // Releasing this pool is not going to deallocate it, so we might as
             // well hold on to it and reuse it for future allocations.
             i++;
             continue;
         }
@@ -286,46 +270,29 @@ ExecutableAllocator::addSizeOfCode(JS::C
             sizes->baseline += pool->m_codeBytes[CodeKind::Baseline];
             sizes->regexp   += pool->m_codeBytes[CodeKind::RegExp];
             sizes->other    += pool->m_codeBytes[CodeKind::Other];
             sizes->unused   += pool->m_allocation.size - pool->usedCodeBytes();
         }
     }
 }
 
-void
-ExecutableAllocator::reprotectAll(ProtectionSetting protection)
-{
-    if (!m_pools.initialized())
-        return;
-
-    for (ExecPoolHashSet::Range r = m_pools.all(); !r.empty(); r.popFront())
-        reprotectPool(rt_, r.front(), protection);
-}
-
 /* static */ void
 ExecutableAllocator::reprotectPool(JSRuntime* rt, ExecutablePool* pool, ProtectionSetting protection)
 {
-    // Don't race with reprotectAll called from the signal handler.
-    MOZ_ASSERT(rt->jitRuntime()->preventBackedgePatching() ||
-               rt->mainContextFromAnyThread()->handlingJitInterrupt());
-
     char* start = pool->m_allocation.pages;
     if (!ReprotectRegion(start, pool->m_freePtr - start, protection))
         MOZ_CRASH();
 }
 
 /* static */ void
 ExecutableAllocator::poisonCode(JSRuntime* rt, JitPoisonRangeVector& ranges)
 {
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
 
-    // Don't race with reprotectAll called from the signal handler.
-    JitRuntime::AutoPreventBackedgePatching apbp(rt);
-
 #ifdef DEBUG
     // Make sure no pools have the mark bit set.
     for (size_t i = 0; i < ranges.length(); i++)
         MOZ_ASSERT(!ranges[i].pool->isMarked());
 #endif
 
     for (size_t i = 0; i < ranges.length(); i++) {
         ExecutablePool* pool = ranges[i].pool;
--- a/js/src/jit/ExecutableAllocator.h
+++ b/js/src/jit/ExecutableAllocator.h
@@ -175,20 +175,18 @@ struct JitPoisonRange
       : pool(pool), start(start), size(size)
     {}
 };
 
 typedef Vector<JitPoisonRange, 0, SystemAllocPolicy> JitPoisonRangeVector;
 
 class ExecutableAllocator
 {
-    JSRuntime* rt_;
-
   public:
-    explicit ExecutableAllocator(JSRuntime* rt);
+    ExecutableAllocator() = default;
     ~ExecutableAllocator();
 
     void purge();
 
     // alloc() returns a pointer to some memory, and also (by reference) a
     // pointer to reference-counted pool. The caller owns a reference to the
     // pool; i.e. alloc() increments the count before returning the object.
     void* alloc(JSContext* cx, size_t n, ExecutablePool** poolp, CodeKind type);
@@ -219,23 +217,16 @@ class ExecutableAllocator
     }
 
     MOZ_MUST_USE
     static bool makeExecutable(void* start, size_t size)
     {
         return ReprotectRegion(start, size, ProtectionSetting::Executable);
     }
 
-    void makeAllWritable() {
-        reprotectAll(ProtectionSetting::Writable);
-    }
-    void makeAllExecutable() {
-        reprotectAll(ProtectionSetting::Executable);
-    }
-
     static void poisonCode(JSRuntime* rt, JitPoisonRangeVector& ranges);
 
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || defined(JS_SIMULATOR_ARM64) || defined(JS_CODEGEN_NONE)
     static void cacheFlush(void*, size_t)
     {
     }
 #elif defined(JS_SIMULATOR_ARM) || defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
     static void cacheFlush(void* code, size_t size)
@@ -322,18 +313,16 @@ class ExecutableAllocator
         sync_instruction_memory((caddr_t)code, size);
     }
 #endif
 
   private:
     ExecutableAllocator(const ExecutableAllocator&) = delete;
     void operator=(const ExecutableAllocator&) = delete;
 
-    void reprotectAll(ProtectionSetting);
-
     // These are strong references;  they keep pools alive.
     static const size_t maxSmallPools = 4;
     typedef js::Vector<ExecutablePool*, maxSmallPools, js::SystemAllocPolicy> SmallExecPoolVector;
     SmallExecPoolVector m_smallPools;
 
     // All live pools are recorded here, just for stats purposes.  These are
     // weak references;  they don't keep pools alive.  When a pool is destroyed
     // its reference is removed from m_pools.
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -191,35 +191,33 @@ jit::InitializeIon()
 
 #if defined(JS_CODEGEN_ARM)
     InitARMFlags();
 #endif
     CheckPerf();
     return true;
 }
 
-JitRuntime::JitRuntime(JSRuntime* rt)
-  : execAlloc_(rt),
-    backedgeExecAlloc_(rt),
+JitRuntime::JitRuntime()
+  : execAlloc_(),
     nextCompilationId_(0),
     exceptionTailOffset_(0),
     bailoutTailOffset_(0),
     profilerExitFrameTailOffset_(0),
     enterJITOffset_(0),
     bailoutHandlerOffset_(0),
     argumentsRectifierOffset_(0),
     argumentsRectifierReturnOffset_(0),
     invalidatorOffset_(0),
     lazyLinkStubOffset_(0),
     interpreterStubOffset_(0),
     debugTrapHandler_(nullptr),
     baselineDebugModeOSRHandler_(nullptr),
     trampolineCode_(nullptr),
     functionWrappers_(nullptr),
-    preventBackedgePatching_(false),
     jitcodeGlobalTable_(nullptr)
 {
 }
 
 JitRuntime::~JitRuntime()
 {
     js_delete(functionWrappers_.ref());
 
@@ -384,61 +382,16 @@ JSContext::allocateOsrTempData(size_t si
 
 void
 JSContext::freeOsrTempData()
 {
     js_free(osrTempData_);
     osrTempData_ = nullptr;
 }
 
-void
-JitZoneGroup::patchIonBackedges(JSContext* cx, BackedgeTarget target)
-{
-    if (target == BackedgeLoopHeader) {
-        // We must be on the active thread. The caller must use
-        // AutoPreventBackedgePatching to ensure we don't reenter.
-        MOZ_ASSERT(cx->runtime()->jitRuntime()->preventBackedgePatching());
-        MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
-    } else {
-        // We must be called from jit::InterruptRunningCode, or a signal handler
-        // triggered there. rt->handlingJitInterrupt() ensures we can't reenter
-        // this code.
-        MOZ_ASSERT(!cx->runtime()->jitRuntime()->preventBackedgePatching());
-        MOZ_ASSERT(cx->handlingJitInterrupt());
-    }
-
-    // Do nothing if we know all backedges are already jumping to `target`.
-    if (backedgeTarget_ == target)
-        return;
-
-    backedgeTarget_ = target;
-
-    cx->runtime()->jitRuntime()->backedgeExecAlloc().makeAllWritable();
-
-    // Patch all loop backedges in Ion code so that they either jump to the
-    // normal loop header or to an interrupt handler each time they run.
-    for (InlineListIterator<PatchableBackedge> iter(backedgeList().begin());
-         iter != backedgeList().end();
-         iter++)
-    {
-        PatchableBackedge* patchableBackedge = *iter;
-        if (target == BackedgeLoopHeader)
-            PatchBackedge(patchableBackedge->backedge, patchableBackedge->loopHeader, target);
-        else
-            PatchBackedge(patchableBackedge->backedge, patchableBackedge->interruptCheck, target);
-    }
-
-    cx->runtime()->jitRuntime()->backedgeExecAlloc().makeAllExecutable();
-}
-
-JitZoneGroup::JitZoneGroup(ZoneGroup* group)
-  : backedgeTarget_(group, BackedgeLoopHeader),
-    backedgeList_(group)
-{}
-
 JitCompartment::JitCompartment()
   : stubCodes_(nullptr)
 {
 }
 
 JitCompartment::~JitCompartment()
 {
     js_delete(stubCodes_);
@@ -893,35 +846,32 @@ IonScript::IonScript(IonCompilationId co
     bailoutEntries_(0),
     osiIndexOffset_(0),
     osiIndexEntries_(0),
     snapshots_(0),
     snapshotsListSize_(0),
     snapshotsRVATableSize_(0),
     constantTable_(0),
     constantEntries_(0),
-    backedgeList_(0),
-    backedgeEntries_(0),
     invalidationCount_(0),
     compilationId_(compilationId),
     osrPcMismatchCounter_(0),
     fallbackStubSpace_()
 {
 }
 
 IonScript*
 IonScript::New(JSContext* cx, IonCompilationId compilationId,
                uint32_t frameSlots, uint32_t argumentSlots, uint32_t frameSize,
                size_t snapshotsListSize, size_t snapshotsRVATableSize,
                size_t recoversSize, size_t bailoutEntries,
                size_t constants, size_t safepointIndices,
                size_t osiIndices, size_t icEntries,
                size_t runtimeSize,  size_t safepointsSize,
-               size_t backedgeEntries, size_t sharedStubEntries,
-               OptimizationLevel optimizationLevel)
+               size_t sharedStubEntries, OptimizationLevel optimizationLevel)
 {
     constexpr size_t DataAlignment = sizeof(void*);
 
     if (snapshotsListSize >= MAX_BUFFER_SIZE ||
         (bailoutEntries >= MAX_BUFFER_SIZE / sizeof(uint32_t)))
     {
         ReportOutOfMemory(cx);
         return nullptr;
@@ -934,29 +884,27 @@ IonScript::New(JSContext* cx, IonCompila
     size_t paddedRecoversSize = AlignBytes(recoversSize, DataAlignment);
     size_t paddedBailoutSize = AlignBytes(bailoutEntries * sizeof(uint32_t), DataAlignment);
     size_t paddedConstantsSize = AlignBytes(constants * sizeof(Value), DataAlignment);
     size_t paddedSafepointIndicesSize = AlignBytes(safepointIndices * sizeof(SafepointIndex), DataAlignment);
     size_t paddedOsiIndicesSize = AlignBytes(osiIndices * sizeof(OsiIndex), DataAlignment);
     size_t paddedICEntriesSize = AlignBytes(icEntries * sizeof(uint32_t), DataAlignment);
     size_t paddedRuntimeSize = AlignBytes(runtimeSize, DataAlignment);
     size_t paddedSafepointSize = AlignBytes(safepointsSize, DataAlignment);
-    size_t paddedBackedgeSize = AlignBytes(backedgeEntries * sizeof(PatchableBackedge), DataAlignment);
     size_t paddedSharedStubSize = AlignBytes(sharedStubEntries * sizeof(IonICEntry), DataAlignment);
 
     size_t bytes = paddedSnapshotsSize +
                    paddedRecoversSize +
                    paddedBailoutSize +
                    paddedConstantsSize +
                    paddedSafepointIndicesSize +
                    paddedOsiIndicesSize +
                    paddedICEntriesSize +
                    paddedRuntimeSize +
                    paddedSafepointSize +
-                   paddedBackedgeSize +
                    paddedSharedStubSize;
     IonScript* script = cx->zone()->pod_malloc_with_extra<IonScript, uint8_t>(bytes);
     if (!script)
         return nullptr;
     new (script) IonScript(compilationId);
 
     uint32_t offsetCursor = sizeof(IonScript);
 
@@ -992,20 +940,16 @@ IonScript::New(JSContext* cx, IonCompila
     script->recovers_ = offsetCursor;
     script->recoversSize_ = recoversSize;
     offsetCursor += paddedRecoversSize;
 
     script->constantTable_ = offsetCursor;
     script->constantEntries_ = constants;
     offsetCursor += paddedConstantsSize;
 
-    script->backedgeList_ = offsetCursor;
-    script->backedgeEntries_ = backedgeEntries;
-    offsetCursor += paddedBackedgeSize;
-
     script->sharedStubList_ = offsetCursor;
     script->sharedStubEntries_ = sharedStubEntries;
     offsetCursor += paddedSharedStubSize;
 
     script->frameSlots_ = frameSlots;
     script->argumentSlots_ = argumentSlots;
 
     script->frameSize_ = frameSize;
@@ -1085,45 +1029,16 @@ IonScript::copyBailoutTable(const Snapsh
 void
 IonScript::copyConstants(const Value* vp)
 {
     for (size_t i = 0; i < constantEntries_; i++)
         constants()[i].init(vp[i]);
 }
 
 void
-IonScript::copyPatchableBackedges(JSContext* cx, JitCode* code,
-                                  PatchableBackedgeInfo* backedges,
-                                  MacroAssembler& masm)
-{
-    JitZoneGroup* jzg = cx->zone()->group()->jitZoneGroup;
-    JitRuntime::AutoPreventBackedgePatching apbp(cx->runtime());
-
-    for (size_t i = 0; i < backedgeEntries_; i++) {
-        PatchableBackedgeInfo& info = backedges[i];
-        PatchableBackedge* patchableBackedge = &backedgeList()[i];
-
-        info.backedge.fixup(&masm);
-        CodeLocationJump backedge(code, info.backedge);
-        CodeLocationLabel loopHeader(code, CodeOffset(info.loopHeader->offset()));
-        CodeLocationLabel interruptCheck(code, CodeOffset(info.interruptCheck->offset()));
-        new(patchableBackedge) PatchableBackedge(backedge, loopHeader, interruptCheck);
-
-        // Point the backedge to either of its possible targets, matching the
-        // other backedges in the runtime.
-        if (jzg->backedgeTarget() == JitZoneGroup::BackedgeInterruptCheck)
-            PatchBackedge(backedge, interruptCheck, JitZoneGroup::BackedgeInterruptCheck);
-        else
-            PatchBackedge(backedge, loopHeader, JitZoneGroup::BackedgeLoopHeader);
-
-        jzg->addPatchableBackedge(cx->runtime()->jitRuntime(), patchableBackedge);
-    }
-}
-
-void
 IonScript::copySafepointIndices(const SafepointIndex* si)
 {
     // Jumps in the caches reflect the offset of those jumps in the compiled
     // code, not the absolute positions of the jumps. Update according to the
     // final code address now.
     SafepointIndex* table = safepointIndices();
     memcpy(table, si, safepointIndexEntries_ * sizeof(SafepointIndex));
 }
@@ -1229,18 +1144,16 @@ IonScript::Trace(JSTracer* trc, IonScrip
 {
     if (script != ION_DISABLED_SCRIPT)
         script->trace(trc);
 }
 
 void
 IonScript::Destroy(FreeOp* fop, IonScript* script)
 {
-    script->unlinkFromRuntime(fop);
-
     /*
      * When the script contains pointers to nursery things, the store buffer can
      * contain entries that point into the fallback stub space. Since we can
      * destroy scripts outside the context of a GC, this situation could result
      * in us trying to mark invalid store buffer entries.
      *
      * Defer freeing any allocated blocks until after the next minor GC.
      */
@@ -1313,33 +1226,16 @@ IonScript::purgeOptimizedStubs(Zone* zon
 
 void
 IonScript::purgeICs(Zone* zone)
 {
     for (size_t i = 0; i < numICs(); i++)
         getICFromIndex(i).reset(zone);
 }
 
-void
-IonScript::unlinkFromRuntime(FreeOp* fop)
-{
-    // The writes to the executable buffer below may clobber backedge jumps, so
-    // make sure that those backedges are unlinked from the runtime and not
-    // reclobbered with garbage if an interrupt is requested.
-    JitZoneGroup* jzg = method()->zone()->group()->jitZoneGroup;
-    JitRuntime::AutoPreventBackedgePatching apbp(fop->runtime());
-    for (size_t i = 0; i < backedgeEntries_; i++)
-        jzg->removePatchableBackedge(fop->runtime()->jitRuntime(), &backedgeList()[i]);
-
-    // Clear the list of backedges, so that this method is idempotent. It is
-    // called during destruction, and may be additionally called when the
-    // script is invalidated.
-    backedgeEntries_ = 0;
-}
-
 namespace js {
 namespace jit {
 
 static void
 OptimizeSinCos(MIRGraph &graph)
 {
     // Now, we are looking for:
     // var y = sin(x);
@@ -2861,20 +2757,16 @@ InvalidateActivation(FreeOp* fop, const 
         IonScript* ionScript = script->ionScript();
 
         // Purge ICs before we mark this script as invalidated. This will
         // prevent lastJump_ from appearing to be a bogus pointer, just
         // in case anyone tries to read it.
         ionScript->purgeICs(script->zone());
         ionScript->purgeOptimizedStubs(script->zone());
 
-        // Clean up any pointers from elsewhere in the runtime to this IonScript
-        // which is about to become disconnected from its JSScript.
-        ionScript->unlinkFromRuntime(fop);
-
         // This frame needs to be invalidated. We do the following:
         //
         // 1. Increment the reference counter to keep the ionScript alive
         //    for the invalidation bailout or for the exception handler.
         // 2. Determine safepoint that corresponds to the current call.
         // 3. From safepoint, get distance to the OSI-patchable offset.
         // 4. From the IonScript, determine the distance between the
         //    call-patchable offset and the invalidation epilogue.
@@ -3333,173 +3225,8 @@ jit::JitSupportsAtomics()
 #else
     return true;
 #endif
 }
 
 // If you change these, please also change the comment in TempAllocator.
 /* static */ const size_t TempAllocator::BallastSize            = 16 * 1024;
 /* static */ const size_t TempAllocator::PreferredLifoChunkSize = 32 * 1024;
-
-static void
-RedirectIonBackedgesToInterruptCheck(JSContext* cx)
-{
-    // Jitcode may only be modified on the runtime's main thread.
-    MOZ_ASSERT(cx == cx->runtime()->mainContextFromAnyThread());
-
-    // The faulting thread is suspended so we can access cx fields that can
-    // normally only be accessed by the cx's active thread.
-    AutoNoteSingleThreadedRegion anstr;
-
-    Zone* zone = cx->zoneRaw();
-    if (zone && !zone->isAtomsZone()) {
-        jit::JitRuntime* jitRuntime = cx->runtime()->jitRuntime();
-        if (!jitRuntime)
-            return;
-
-        // If the backedge list is being mutated, the pc must be in C++ code and
-        // thus not in a JIT iloop. We assume that the interrupt flag will be
-        // checked at least once before entering JIT code (if not, no big deal;
-        // the browser will just request another interrupt in a second).
-        if (!jitRuntime->preventBackedgePatching()) {
-            jit::JitZoneGroup* jzg = zone->group()->jitZoneGroup;
-            jzg->patchIonBackedges(cx, jit::JitZoneGroup::BackedgeInterruptCheck);
-        }
-    }
-}
-
-#if !defined(XP_WIN)
-// For the interrupt signal, pick a signal number that:
-//  - is not otherwise used by mozilla or standard libraries
-//  - defaults to nostop and noprint on gdb/lldb so that noone is bothered
-// SIGVTALRM a relative of SIGALRM, so intended for user code, but, unlike
-// SIGALRM, not used anywhere else in Mozilla.
-static const int sJitAsyncInterruptSignal = SIGVTALRM;
-
-static void
-JitAsyncInterruptHandler(int signum, siginfo_t*, void*)
-{
-    MOZ_RELEASE_ASSERT(signum == sJitAsyncInterruptSignal);
-
-    JSContext* cx = TlsContext.get();
-    if (!cx)
-        return;
-
-#if defined(JS_SIMULATOR_ARM) || defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
-    SimulatorProcess::ICacheCheckingDisableCount++;
-#endif
-
-    RedirectIonBackedgesToInterruptCheck(cx);
-
-#if defined(JS_SIMULATOR_ARM) || defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
-    SimulatorProcess::cacheInvalidatedBySignalHandler_ = true;
-    SimulatorProcess::ICacheCheckingDisableCount--;
-#endif
-
-    cx->finishHandlingJitInterrupt();
-}
-#endif
-
-static bool sTriedInstallAsyncInterrupt = false;
-static bool sHaveAsyncInterrupt = false;
-
-void
-jit::EnsureAsyncInterrupt(JSContext* cx)
-{
-    // We assume that there are no races creating the first JSRuntime of the process.
-    if (sTriedInstallAsyncInterrupt)
-        return;
-    sTriedInstallAsyncInterrupt = true;
-
-#if defined(ANDROID) && !defined(__aarch64__)
-    // Before Android 4.4 (SDK version 19), there is a bug
-    //   https://android-review.googlesource.com/#/c/52333
-    // in Bionic's pthread_join which causes pthread_join to return early when
-    // pthread_kill is used (on any thread). Nobody expects the pthread_cond_wait
-    // EINTRquisition.
-    char version_string[PROP_VALUE_MAX];
-    mozilla::PodArrayZero(version_string);
-    if (__system_property_get("ro.build.version.sdk", version_string) > 0) {
-        if (atol(version_string) < 19)
-            return;
-    }
-#endif
-
-#if defined(XP_WIN)
-    // Windows uses SuspendThread to stop the active thread from another thread.
-#else
-    struct sigaction interruptHandler;
-    interruptHandler.sa_flags = SA_SIGINFO;
-    interruptHandler.sa_sigaction = &JitAsyncInterruptHandler;
-    sigemptyset(&interruptHandler.sa_mask);
-    struct sigaction prev;
-    if (sigaction(sJitAsyncInterruptSignal, &interruptHandler, &prev))
-        MOZ_CRASH("unable to install interrupt handler");
-
-    // There shouldn't be any other handlers installed for
-    // sJitAsyncInterruptSignal. If there are, we could always forward, but we
-    // need to understand what we're doing to avoid problematic interference.
-    if ((prev.sa_flags & SA_SIGINFO && prev.sa_sigaction) ||
-        (prev.sa_handler != SIG_DFL && prev.sa_handler != SIG_IGN))
-    {
-        MOZ_CRASH("contention for interrupt signal");
-    }
-#endif // defined(XP_WIN)
-
-    sHaveAsyncInterrupt = true;
-}
-
-bool
-jit::HaveAsyncInterrupt()
-{
-    MOZ_ASSERT(sTriedInstallAsyncInterrupt);
-    return sHaveAsyncInterrupt;
-}
-
-// JSRuntime::requestInterrupt sets interrupt_ (which is checked frequently by
-// C++ code at every Baseline JIT loop backedge) and jitStackLimit_ (which is
-// checked at every Baseline and Ion JIT function prologue). The remaining
-// sources of potential iloops (Ion loop backedges) are handled by this
-// function: Ion loop backedges are patched to instead point to a stub that
-// handles the interrupt;
-void
-jit::InterruptRunningCode(JSContext* cx)
-{
-    // If signal handlers weren't installed, then Ion emit normal interrupt
-    // checks and don't need asynchronous interruption.
-    MOZ_ASSERT(sTriedInstallAsyncInterrupt);
-    if (!sHaveAsyncInterrupt)
-        return;
-
-    // Do nothing if we're already handling an interrupt here, to avoid races
-    // below and in JitRuntime::patchIonBackedges.
-    if (!cx->startHandlingJitInterrupt())
-        return;
-
-    // If we are on context's thread, then we can patch Ion backedges without
-    // any special synchronization.
-    if (cx == TlsContext.get()) {
-        RedirectIonBackedgesToInterruptCheck(cx);
-        cx->finishHandlingJitInterrupt();
-        return;
-    }
-
-    // We are not on the runtime's active thread, so we need to halt the
-    // runtime's active thread first.
-#if defined(XP_WIN)
-    // On Windows, we can simply suspend the active thread. SuspendThread can
-    // sporadically fail if the thread is in the middle of a syscall. Rather
-    // than retrying in a loop, just wait for the next request for interrupt.
-    HANDLE thread = (HANDLE)cx->threadNative();
-    if (SuspendThread(thread) != (DWORD)-1) {
-        RedirectIonBackedgesToInterruptCheck(cx);
-        ResumeThread(thread);
-    }
-    cx->finishHandlingJitInterrupt();
-#else
-    // On Unix, we instead deliver an async signal to the active thread which
-    // halts the thread and callers our JitAsyncInterruptHandler (which has
-    // already been installed by EnsureSignalHandlersInstalled).
-    pthread_t thread = (pthread_t)cx->threadNative();
-    pthread_kill(thread, sJitAsyncInterruptSignal);
-#endif
-}
-
--- a/js/src/jit/IonCode.h
+++ b/js/src/jit/IonCode.h
@@ -21,17 +21,16 @@
 #include "js/UbiNode.h"
 #include "vm/TraceLogging.h"
 #include "vm/TypeInference.h"
 
 namespace js {
 namespace jit {
 
 class MacroAssembler;
-class PatchableBackedge;
 class IonBuilder;
 class IonICEntry;
 class JitCode;
 
 typedef Vector<JSObject*, 4, JitAllocPolicy> ObjectVector;
 typedef Vector<TraceLoggerEvent, 0, SystemAllocPolicy> TraceLoggerEventVector;
 
 // Header at start of raw code buffer
@@ -128,18 +127,16 @@ class JitCode : public gc::TenuredCell
     void setInvalidated() {
         invalidated_ = true;
     }
 
     void setHasBytecodeMap() {
         hasBytecodeMap_ = true;
     }
 
-    void togglePreBarriers(bool enabled, ReprotectCode reprotect);
-
     // If this JitCode object has been, effectively, corrupted due to
     // invalidation patching, then we have to remember this so we don't try and
     // trace relocation entries that may now be corrupt.
     bool invalidated() const {
         return !!invalidated_;
     }
 
     template <typename T> T as() const {
@@ -174,17 +171,16 @@ class JitCode : public gc::TenuredCell
 };
 
 class SnapshotWriter;
 class RecoverWriter;
 class SafepointWriter;
 class SafepointIndex;
 class OsiIndex;
 class IonIC;
-struct PatchableBackedgeInfo;
 
 // An IonScript attaches Ion-generated information to a JSScript.
 struct IonScript
 {
   private:
     // Code pointer containing the actual method.
     PreBarrieredJitCode method_;
 
@@ -261,20 +257,16 @@ struct IonScript
     // List of instructions needed to recover stack frames.
     uint32_t recovers_;
     uint32_t recoversSize_;
 
     // Constant table for constants stored in snapshots.
     uint32_t constantTable_;
     uint32_t constantEntries_;
 
-    // List of patchable backedges which are threaded into the runtime's list.
-    uint32_t backedgeList_;
-    uint32_t backedgeEntries_;
-
     // List of entries to the shared stub.
     uint32_t sharedStubList_;
     uint32_t sharedStubEntries_;
 
     // Number of references from invalidation records.
     uint32_t invalidationCount_;
 
     // Identifier of the compilation which produced this code.
@@ -322,19 +314,16 @@ struct IonScript
         return (OsiIndex*) &bottomBuffer()[osiIndexOffset_];
     }
     uint32_t* icIndex() {
         return (uint32_t*) &bottomBuffer()[icIndex_];
     }
     uint8_t* runtimeData() {
         return  &bottomBuffer()[runtimeData_];
     }
-    PatchableBackedge* backedgeList() {
-        return (PatchableBackedge*) &bottomBuffer()[backedgeList_];
-    }
 
   private:
     void trace(JSTracer* trc);
 
   public:
     // Do not call directly, use IonScript::New. This is public for cx->new_.
     explicit IonScript(IonCompilationId compilationId);
 
@@ -346,18 +335,17 @@ struct IonScript
 
     static IonScript* New(JSContext* cx, IonCompilationId compilationId,
                           uint32_t frameSlots, uint32_t argumentSlots, uint32_t frameSize,
                           size_t snapshotsListSize, size_t snapshotsRVATableSize,
                           size_t recoversSize, size_t bailoutEntries,
                           size_t constants, size_t safepointIndexEntries,
                           size_t osiIndexEntries, size_t icEntries,
                           size_t runtimeSize, size_t safepointsSize,
-                          size_t backedgeEntries, size_t sharedStubEntries,
-                          OptimizationLevel optimizationLevel);
+                          size_t sharedStubEntries, OptimizationLevel optimizationLevel);
     static void Trace(JSTracer* trc, IonScript* script);
     static void Destroy(FreeOp* fop, IonScript* script);
 
     static inline size_t offsetOfMethod() {
         return offsetof(IonScript, method_);
     }
     static inline size_t offsetOfOsrEntryOffset() {
         return offsetof(IonScript, osrEntryOffset_);
@@ -512,29 +500,25 @@ struct IonScript
     }
     size_t numSharedStubs() const {
         return sharedStubEntries_;
     }
     size_t runtimeSize() const {
         return runtimeSize_;
     }
     void purgeICs(Zone* zone);
-    void unlinkFromRuntime(FreeOp* fop);
     void copySnapshots(const SnapshotWriter* writer);
     void copyRecovers(const RecoverWriter* writer);
     void copyBailoutTable(const SnapshotOffset* table);
     void copyConstants(const Value* vp);
     void copySafepointIndices(const SafepointIndex* firstSafepointIndex);
     void copyOsiIndices(const OsiIndex* firstOsiIndex);
     void copyRuntimeData(const uint8_t* data);
     void copyICEntries(const uint32_t* caches);
     void copySafepoints(const SafepointWriter* writer);
-    void copyPatchableBackedges(JSContext* cx, JitCode* code,
-                                PatchableBackedgeInfo* backedges,
-                                MacroAssembler& masm);
 
     bool invalidated() const {
         return invalidationCount_ != 0;
     }
 
     // Invalidate the current compilation.
     void invalidate(JSContext* cx, JSScript* script, bool resetUses, const char* reason);
 
--- a/js/src/jit/JitCompartment.h
+++ b/js/src/jit/JitCompartment.h
@@ -51,47 +51,24 @@ struct EnterJitData
 };
 
 typedef void (*EnterJitCode)(void* code, unsigned argc, Value* argv, InterpreterFrame* fp,
                              CalleeToken calleeToken, JSObject* envChain,
                              size_t numStackValues, Value* vp);
 
 class JitcodeGlobalTable;
 
-// Information about a loop backedge in the runtime, which can be set to
-// point to either the loop header or to an OOL interrupt checking stub,
-// if signal handlers are being used to implement interrupts.
-class PatchableBackedge : public InlineListNode<PatchableBackedge>
-{
-    friend class JitZoneGroup;
-
-    CodeLocationJump backedge;
-    CodeLocationLabel loopHeader;
-    CodeLocationLabel interruptCheck;
-
-  public:
-    PatchableBackedge(CodeLocationJump backedge,
-                      CodeLocationLabel loopHeader,
-                      CodeLocationLabel interruptCheck)
-      : backedge(backedge), loopHeader(loopHeader), interruptCheck(interruptCheck)
-    {}
-};
-
 class JitRuntime
 {
   private:
     friend class JitCompartment;
 
-    // Executable allocator for all code except wasm code and Ion code with
-    // patchable backedges (see below).
+    // Executable allocator for all code except wasm code.
     ActiveThreadData<ExecutableAllocator> execAlloc_;
 
-    // Executable allocator for Ion scripts with patchable backedges.
-    ActiveThreadData<ExecutableAllocator> backedgeExecAlloc_;
-
     ActiveThreadData<uint64_t> nextCompilationId_;
 #ifdef DEBUG
     ActiveThreadData<mozilla::Maybe<IonCompilationId>> currentCompilationId_;
 #endif
 
     // Shared exception-handler tail.
     ExclusiveAccessLockWriteOnceData<uint32_t> exceptionTailOffset_;
 
@@ -153,20 +130,16 @@ class JitRuntime
     // Code for trampolines and VMFunction wrappers.
     ExclusiveAccessLockWriteOnceData<JitCode*> trampolineCode_;
 
     // Map VMFunction addresses to the offset of the wrapper in
     // trampolineCode_.
     using VMWrapperMap = HashMap<const VMFunction*, uint32_t, VMFunction>;
     ExclusiveAccessLockWriteOnceData<VMWrapperMap*> functionWrappers_;
 
-    // If true, the signal handler to interrupt Ion code should not attempt to
-    // patch backedges, as some thread is busy modifying data structures.
-    mozilla::Atomic<bool> preventBackedgePatching_;
-
     // Global table of jitcode native address => bytecode address mappings.
     UnprotectedData<JitcodeGlobalTable*> jitcodeGlobalTable_;
 
   private:
     void generateLazyLinkStub(MacroAssembler& masm);
     void generateInterpreterStub(MacroAssembler& masm);
     void generateProfilerExitFrameTailStub(MacroAssembler& masm, Label* profilerExitTail);
     void generateExceptionTailStub(MacroAssembler& masm, void* handler, Label* profilerExitTail);
@@ -196,76 +169,38 @@ class JitRuntime
 
     TrampolinePtr trampolineCode(uint32_t offset) const {
         MOZ_ASSERT(offset > 0);
         MOZ_ASSERT(offset < trampolineCode_->instructionsSize());
         return TrampolinePtr(trampolineCode_->raw() + offset);
     }
 
   public:
-    explicit JitRuntime(JSRuntime* rt);
+    JitRuntime();
     ~JitRuntime();
     MOZ_MUST_USE bool initialize(JSContext* cx, js::AutoLockForExclusiveAccess& lock);
 
     static void Trace(JSTracer* trc, js::AutoLockForExclusiveAccess& lock);
     static void TraceJitcodeGlobalTableForMinorGC(JSTracer* trc);
     static MOZ_MUST_USE bool MarkJitcodeGlobalTableIteratively(GCMarker* marker);
     static void SweepJitcodeGlobalTable(JSRuntime* rt);
 
     ExecutableAllocator& execAlloc() {
         return execAlloc_.ref();
     }
-    ExecutableAllocator& backedgeExecAlloc() {
-        return backedgeExecAlloc_.ref();
-    }
 
     IonCompilationId nextCompilationId() {
         return IonCompilationId(nextCompilationId_++);
     }
 #ifdef DEBUG
     mozilla::Maybe<IonCompilationId>& currentCompilationId() {
         return currentCompilationId_.ref();
     }
 #endif
 
-    class AutoPreventBackedgePatching
-    {
-        mozilla::DebugOnly<JSRuntime*> rt_;
-        JitRuntime* jrt_;
-        bool prev_;
-
-      public:
-        // This two-arg constructor is provided for JSRuntime::createJitRuntime,
-        // where we have a JitRuntime but didn't set rt->jitRuntime_ yet.
-        AutoPreventBackedgePatching(JSRuntime* rt, JitRuntime* jrt)
-          : rt_(rt),
-            jrt_(jrt),
-            prev_(false)  // silence GCC warning
-        {
-            if (jrt_) {
-                prev_ = jrt_->preventBackedgePatching_;
-                jrt_->preventBackedgePatching_ = true;
-            }
-        }
-        explicit AutoPreventBackedgePatching(JSRuntime* rt)
-          : AutoPreventBackedgePatching(rt, rt->jitRuntime())
-        {}
-        ~AutoPreventBackedgePatching() {
-            MOZ_ASSERT(jrt_ == rt_->jitRuntime());
-            if (jrt_) {
-                MOZ_ASSERT(jrt_->preventBackedgePatching_);
-                jrt_->preventBackedgePatching_ = prev_;
-            }
-        }
-    };
-
-    bool preventBackedgePatching() const {
-        return preventBackedgePatching_;
-    }
-
     TrampolinePtr getVMWrapper(const VMFunction& f) const;
     JitCode* debugTrapHandler(JSContext* cx);
     JitCode* getBaselineDebugModeOSRHandler(JSContext* cx);
     void* getBaselineDebugModeOSRHandlerAddress(JSContext* cx, bool popFrameReg);
 
     TrampolinePtr getGenericBailoutHandler() const {
         return trampolineCode(bailoutHandlerOffset_);
     }
@@ -345,54 +280,16 @@ class JitRuntime
         return rt->geckoProfiler().enabled();
     }
 
     bool isOptimizationTrackingEnabled(ZoneGroup* group) {
         return isProfilerInstrumentationEnabled(group->runtime);
     }
 };
 
-class JitZoneGroup
-{
-  public:
-    enum BackedgeTarget {
-        BackedgeLoopHeader,
-        BackedgeInterruptCheck
-    };
-
-  private:
-    // Whether patchable backedges currently jump to the loop header or the
-    // interrupt check.
-    ZoneGroupData<BackedgeTarget> backedgeTarget_;
-
-    // List of all backedges in all Ion code. The backedge list is accessed
-    // asynchronously when the active thread is paused and preventBackedgePatching_
-    // is false. Thus, the list must only be mutated while preventBackedgePatching_
-    // is true.
-    ZoneGroupData<InlineList<PatchableBackedge>> backedgeList_;
-    InlineList<PatchableBackedge>& backedgeList() { return backedgeList_.ref(); }
-
-  public:
-    explicit JitZoneGroup(ZoneGroup* group);
-
-    BackedgeTarget backedgeTarget() const {
-        return backedgeTarget_;
-    }
-    void addPatchableBackedge(JitRuntime* jrt, PatchableBackedge* backedge) {
-        MOZ_ASSERT(jrt->preventBackedgePatching());
-        backedgeList().pushFront(backedge);
-    }
-    void removePatchableBackedge(JitRuntime* jrt, PatchableBackedge* backedge) {
-        MOZ_ASSERT(jrt->preventBackedgePatching());
-        backedgeList().remove(backedge);
-    }
-
-    void patchIonBackedges(JSContext* cx, BackedgeTarget target);
-};
-
 enum class CacheKind : uint8_t;
 class CacheIRStubInfo;
 
 enum class ICStubEngine : uint8_t {
     // Baseline IC, see SharedIC.h and BaselineIC.h.
     Baseline = 0,
 
     // Ion IC that reuses Baseline IC code, see SharedIC.h.
@@ -702,26 +599,23 @@ void FinishInvalidation(FreeOp* fop, JSS
 const unsigned WINDOWS_BIG_FRAME_TOUCH_INCREMENT = 4096 - 1;
 #endif
 
 // If NON_WRITABLE_JIT_CODE is enabled, this class will ensure
 // JIT code is writable (has RW permissions) in its scope.
 // Otherwise it's a no-op.
 class MOZ_STACK_CLASS AutoWritableJitCode
 {
-    // Backedge patching from the signal handler will change memory protection
-    // flags, so don't allow it in an AutoWritableJitCode scope.
-    JitRuntime::AutoPreventBackedgePatching preventPatching_;
     JSRuntime* rt_;
     void* addr_;
     size_t size_;
 
   public:
     AutoWritableJitCode(JSRuntime* rt, void* addr, size_t size)
-      : preventPatching_(rt), rt_(rt), addr_(addr), size_(size)
+      : rt_(rt), addr_(addr), size_(size)
     {
         rt_->toggleAutoWritableJitCodeActive(true);
         if (!ExecutableAllocator::makeWritable(addr_, size_))
             MOZ_CRASH();
     }
     AutoWritableJitCode(void* addr, size_t size)
       : AutoWritableJitCode(TlsContext.get()->runtime(), addr, size)
     {}
@@ -745,26 +639,12 @@ class MOZ_STACK_CLASS MaybeAutoWritableJ
             awjc_.emplace(addr, size);
     }
     MaybeAutoWritableJitCode(JitCode* code, ReprotectCode reprotect) {
         if (reprotect)
             awjc_.emplace(code);
     }
 };
 
-// Ensure the given JSRuntime is set up to use async interrupts. Failure to
-// enable signal handlers indicates some catastrophic failure and creation of
-// the runtime must fail.
-void
-EnsureAsyncInterrupt(JSContext* cx);
-
-// Return whether the async interrupt can be used to interrupt Ion code.
-bool
-HaveAsyncInterrupt();
-
-// Force any currently-executing JIT code to call HandleExecutionInterrupt.
-extern void
-InterruptRunningCode(JSContext* cx);
-
 } // namespace jit
 } // namespace js
 
 #endif /* jit_JitCompartment_h */
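
The AutoWritableJitCode helper kept above follows a common RAII pattern: flip a code region to read/write on entry and restore it on exit. A generic, hedged sketch of that idea using raw POSIX mprotect() follows; the page rounding and PROT_* choices are assumptions, and the real class goes through ExecutableAllocator and MOZ_CRASHes on failure rather than calling abort().

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdint>
    #include <cstdlib>

    class ScopedWritableCode
    {
        void* pageStart_;
        size_t length_;

      public:
        ScopedWritableCode(void* addr, size_t size) {
            // Round down to a page boundary, since mprotect() works on pages.
            size_t pageSize = size_t(sysconf(_SC_PAGESIZE));
            uintptr_t start = uintptr_t(addr) & ~(pageSize - 1);
            pageStart_ = reinterpret_cast<void*>(start);
            length_ = (uintptr_t(addr) + size) - start;
            if (mprotect(pageStart_, length_, PROT_READ | PROT_WRITE) != 0)
                abort();  // mirrors the MOZ_CRASH() on failure above
        }
        ~ScopedWritableCode() {
            // Restore execute permissions once patching is done.
            if (mprotect(pageStart_, length_, PROT_READ | PROT_EXEC) != 0)
                abort();
        }
    };
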
--- a/js/src/jit/JitOptions.cpp
+++ b/js/src/jit/JitOptions.cpp
@@ -253,20 +253,16 @@ DefaultJitOptions::DefaultJitOptions()
     // as well as the transition from one tier to the other.
     SET_DEFAULT(wasmDelayTier2, false);
 
     // Until which wasm bytecode size should we accumulate functions, in order
     // to compile efficiently on helper threads. Baseline code compiles much
     // faster than Ion code so use scaled thresholds (see also bug 1320374).
     SET_DEFAULT(wasmBatchBaselineThreshold, 10000);
     SET_DEFAULT(wasmBatchIonThreshold, 1100);
-
-    // Determines whether we suppress using signal handlers
-    // for interrupting jit-ed code. This is used only for testing.
-    SET_DEFAULT(ionInterruptWithoutSignals, false);
 }
 
 bool
 DefaultJitOptions::isSmallFunction(JSScript* script) const
 {
     return script->length() <= smallFunctionMaxBytecodeLength_;
 }
 
--- a/js/src/jit/JitOptions.h
+++ b/js/src/jit/JitOptions.h
@@ -70,17 +70,16 @@ struct DefaultJitOptions
     bool eagerCompilation;
     bool forceInlineCaches;
     bool fullDebugChecks;
     bool limitScriptSize;
     bool osr;
     bool asmJSAtomicsEnable;
     bool wasmFoldOffsets;
     bool wasmDelayTier2;
-    bool ionInterruptWithoutSignals;
     uint32_t baselineWarmUpThreshold;
     uint32_t exceptionBailoutThreshold;
     uint32_t frequentBailoutThreshold;
     uint32_t maxStackArgs;
     uint32_t osrPcMismatchesBeforeRecompile;
     uint32_t smallFunctionMaxBytecodeLength_;
     uint32_t jumpThreshold;
     uint32_t branchPruningHitCountFactor;
--- a/js/src/jit/Linker.cpp
+++ b/js/src/jit/Linker.cpp
@@ -9,20 +9,18 @@
 #include "gc/GC.h"
 
 #include "gc/StoreBuffer-inl.h"
 
 namespace js {
 namespace jit {
 
 JitCode*
-Linker::newCode(JSContext* cx, CodeKind kind, bool hasPatchableBackedges /* = false */)
+Linker::newCode(JSContext* cx, CodeKind kind)
 {
-    MOZ_ASSERT_IF(hasPatchableBackedges, kind == CodeKind::Ion);
-
     JS::AutoAssertNoGC nogc(cx);
     if (masm.oom())
         return fail(cx);
 
     masm.performPendingReadBarriers();
 
     static const size_t ExecutableAllocatorAlignment = sizeof(void*);
     static_assert(CodeAlignment >= ExecutableAllocatorAlignment,
@@ -33,22 +31,19 @@ Linker::newCode(JSContext* cx, CodeKind 
                          sizeof(JitCodeHeader) +
                          (CodeAlignment - ExecutableAllocatorAlignment);
     if (bytesNeeded >= MAX_BUFFER_SIZE)
         return fail(cx);
 
     // ExecutableAllocator requires bytesNeeded to be aligned.
     bytesNeeded = AlignBytes(bytesNeeded, ExecutableAllocatorAlignment);
 
-    ExecutableAllocator& execAlloc = hasPatchableBackedges
-                                     ? cx->runtime()->jitRuntime()->backedgeExecAlloc()
-                                     : cx->runtime()->jitRuntime()->execAlloc();
-
     ExecutablePool* pool;
-    uint8_t* result = (uint8_t*)execAlloc.alloc(cx, bytesNeeded, &pool, kind);
+    uint8_t* result =
+        (uint8_t*)cx->runtime()->jitRuntime()->execAlloc().alloc(cx, bytesNeeded, &pool, kind);
     if (!result)
         return fail(cx);
 
     // The JitCodeHeader will be stored right before the code buffer.
     uint8_t* codeStart = result + sizeof(JitCodeHeader);
 
     // Bump the code up to a nice alignment.
     codeStart = (uint8_t*)AlignBytes((uintptr_t)codeStart, CodeAlignment);
--- a/js/src/jit/Linker.h
+++ b/js/src/jit/Linker.h
@@ -34,15 +34,15 @@ class Linker
     {
         masm.finish();
     }
 
     // Create a new JitCode object and populate it with the contents of the
     // macro assembler buffer.
     //
     // This method cannot GC. Errors are reported to the context.
-    JitCode* newCode(JSContext* cx, CodeKind kind, bool hasPatchableBackedges = false);
+    JitCode* newCode(JSContext* cx, CodeKind kind);
 };
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_Linker_h */
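
With the backedge allocator gone, every caller of newCode() takes the same path through the runtime's single ExecutableAllocator. A minimal usage sketch of the simplified interface, assuming an Ion compilation context where |masm| has already been populated (error handling beyond the null check elided):

    Linker linker(masm);
    JitCode* code = linker.newCode(cx, CodeKind::Ion);
    if (!code)
        return nullptr;  // the error has already been reported to cx
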
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -86,62 +86,19 @@ LIRGenerator::visitCallee(MCallee* ins)
 }
 
 void
 LIRGenerator::visitIsConstructing(MIsConstructing* ins)
 {
     define(new(alloc()) LIsConstructing(), ins);
 }
 
-static void
-TryToUseImplicitInterruptCheck(MIRGraph& graph, MBasicBlock* backedge)
-{
-    // Implicit interrupt checks require JIT async interrupt support.
-    if (!jit::HaveAsyncInterrupt() || JitOptions.ionInterruptWithoutSignals)
-        return;
-
-    // To avoid triggering expensive interrupts (backedge patching) in
-    // requestMajorGC and requestMinorGC, use an implicit interrupt check only
-    // if the loop body can not trigger GC or affect GC state like the store
-    // buffer. We do this by checking there are no safepoints attached to LIR
-    // instructions inside the loop.
-
-    MBasicBlockIterator block = graph.begin(backedge->loopHeaderOfBackedge());
-    LInterruptCheck* check = nullptr;
-    while (true) {
-        LBlock* lir = block->lir();
-        for (LInstructionIterator iter = lir->begin(); iter != lir->end(); iter++) {
-            if (iter->isInterruptCheck()) {
-                if (!check) {
-                    MOZ_ASSERT(*block == backedge->loopHeaderOfBackedge());
-                    check = iter->toInterruptCheck();
-                }
-                continue;
-            }
-
-            MOZ_ASSERT_IF(iter->isPostWriteBarrierO() || iter->isPostWriteBarrierV() || iter->isPostWriteBarrierS(),
-                          iter->safepoint());
-
-            if (iter->safepoint())
-                return;
-        }
-        if (*block == backedge)
-            break;
-        block++;
-    }
-
-    check->setImplicit();
-}
-
 void
 LIRGenerator::visitGoto(MGoto* ins)
 {
-    if (!gen->compilingWasm() && ins->block()->isLoopBackedge())
-        TryToUseImplicitInterruptCheck(graph, ins->block());
-
     add(new(alloc()) LGoto(ins->target()));
 }
 
 void
 LIRGenerator::visitTableSwitch(MTableSwitch* tableswitch)
 {
     MDefinition* opd = tableswitch->getOperand(0);
 
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -993,21 +993,16 @@ class MacroAssembler : public MacroAssem
 
     inline void branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs, Label* label)
         DEFINED_ON(arm, arm64, mips_shared, x86, x64);
 
     // Given a pointer to a GC Cell, retrieve the StoreBuffer pointer from its
     // chunk trailer, or nullptr if it is in the tenured heap.
     void loadStoreBuffer(Register ptr, Register buffer) PER_ARCH;
 
-    template <typename T>
-    inline CodeOffsetJump branchPtrWithPatch(Condition cond, Register lhs, T rhs, RepatchLabel* label) PER_SHARED_ARCH;
-    template <typename T>
-    inline CodeOffsetJump branchPtrWithPatch(Condition cond, Address lhs, T rhs, RepatchLabel* label) PER_SHARED_ARCH;
-
     void branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp, Label* label)
         DEFINED_ON(arm, arm64, mips_shared, x86, x64);
     void branchPtrInNurseryChunk(Condition cond, const Address& address, Register temp, Label* label)
         DEFINED_ON(x86);
     void branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp, Label* label) PER_ARCH;
     void branchValueIsNurseryCell(Condition cond, const Address& address, Register temp, Label* label) PER_ARCH;
     void branchValueIsNurseryCell(Condition cond, ValueOperand value, Register temp, Label* label) PER_ARCH;
 
--- a/js/src/jit/VMFunctions.cpp
+++ b/js/src/jit/VMFunctions.cpp
@@ -553,22 +553,16 @@ SetProperty(JSContext* cx, HandleObject 
     return result.checkStrictErrorOrWarning(cx, obj, id, strict);
 }
 
 bool
 InterruptCheck(JSContext* cx)
 {
     gc::MaybeVerifyBarriers(cx);
 
-    {
-        JSRuntime* rt = cx->runtime();
-        JitRuntime::AutoPreventBackedgePatching apbp(rt);
-        cx->zone()->group()->jitZoneGroup->patchIonBackedges(cx, JitZoneGroup::BackedgeLoopHeader);
-    }
-
     return CheckForInterrupt(cx);
 }
 
 void*
 MallocWrapper(JS::Zone* zone, size_t nbytes)
 {
     AutoUnsafeCallWithABI unsafe;
     return zone->pod_malloc<uint8_t>(nbytes);
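
After this change, Ion loops rely solely on explicit interrupt checks like the one above: compiled code polls a flag and calls into the VM when it is set. A hedged model of that flow is sketched below; the names (interruptRequested_, handlePendingInterrupt) are illustrative and not the real SpiderMonkey API.

    #include <atomic>

    struct InterruptModel
    {
        std::atomic<bool> interruptRequested_{false};

        // Called from any thread (GC request, watchdog, debugger).
        void requestInterrupt() {
            interruptRequested_.store(true, std::memory_order_release);
        }

        // Called from the explicit check emitted in compiled loops.
        bool checkForInterrupt() {
            if (interruptRequested_.exchange(false, std::memory_order_acq_rel))
                return handlePendingInterrupt();
            return true;  // keep running
        }

        bool handlePendingInterrupt() {
            // Run interrupt callbacks, start a requested GC, and so on.
            return true;
        }
    };
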
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -616,41 +616,34 @@ Imm16::Imm16(uint32_t imm)
     MOZ_ASSERT(decode() == imm);
 }
 
 Imm16::Imm16()
   : invalid_(0xfff)
 { }
 
 void
-jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
+jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label)
 {
     // We need to determine if this jump can fit into the standard 24+2 bit
     // address or if we need a larger branch (or just need to use our pool
     // entry).
     Instruction* jump = (Instruction*)jump_.raw();
     // jumpWithPatch() returns the offset of the jump and never a pool or nop.
     Assembler::Condition c = jump->extractCond();
     MOZ_ASSERT(jump->is<InstBranchImm>() || jump->is<InstLDR>());
 
     int jumpOffset = label.raw() - jump_.raw();
     if (BOffImm::IsInRange(jumpOffset)) {
         // This instruction started off as a branch, and will remain one.
-        MaybeAutoWritableJitCode awjc(jump, sizeof(Instruction), reprotect);
         Assembler::RetargetNearBranch(jump, jumpOffset, c);
     } else {
         // This instruction started off as a branch, but now needs to be demoted
         // to an ldr.
         uint8_t** slot = reinterpret_cast<uint8_t**>(jump_.jumpTableEntry());
-
-        // Ensure both the branch and the slot are writable.
-        MOZ_ASSERT(uintptr_t(slot) > uintptr_t(jump));
-        size_t size = uintptr_t(slot) - uintptr_t(jump) + sizeof(void*);
-        MaybeAutoWritableJitCode awjc(jump, size, reprotect);
-
         Assembler::RetargetFarBranch(jump, slot, label.raw(), c);
     }
 }
 
 void
 Assembler::finish()
 {
     flush();
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -1129,24 +1129,17 @@ Imm64::firstHalf() const
 
 inline Imm32
 Imm64::secondHalf() const
 {
     return hi();
 }
 
 void
-PatchJump(CodeLocationJump& jump_, CodeLocationLabel label,
-          ReprotectCode reprotect = DontReprotect);
-
-static inline void
-PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitZoneGroup::BackedgeTarget target)
-{
-    PatchJump(jump_, label);
-}
+PatchJump(CodeLocationJump& jump_, CodeLocationLabel label);
 
 class InstructionIterator
 {
   private:
     Instruction* inst_;
 
   public:
     explicit InstructionIterator(Instruction* inst)
--- a/js/src/jit/arm/MacroAssembler-arm-inl.h
+++ b/js/src/jit/arm/MacroAssembler-arm-inl.h
@@ -1508,37 +1508,16 @@ MacroAssembler::branchPtr(Condition cond
 }
 
 void
 MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs, ImmWord rhs, Label* label)
 {
     branch32(cond, lhs, Imm32(rhs.value), label);
 }
 
-template <typename T>
-inline CodeOffsetJump
-MacroAssembler::branchPtrWithPatch(Condition cond, Register lhs, T rhs, RepatchLabel* label)
-{
-    cmpPtr(lhs, rhs);
-    return jumpWithPatch(label, cond);
-}
-
-template <typename T>
-inline CodeOffsetJump
-MacroAssembler::branchPtrWithPatch(Condition cond, Address lhs, T rhs, RepatchLabel* label)
-{
-    SecondScratchRegisterScope scratch2(*this);
-    {
-        ScratchRegisterScope scratch(*this);
-        ma_ldr(lhs, scratch2, scratch);
-    }
-    cmpPtr(scratch2, rhs);
-    return jumpWithPatch(label, cond);
-}
-
 void
 MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label)
 {
     branchPtr(cond, lhs, rhs, label);
 }
 
 void
 MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -4103,20 +4103,20 @@ MacroAssemblerARMCompat::roundf(FloatReg
     // which means the result is actually -0.0 which also requires special
     // handling.
     ma_b(bail, NotSigned);
 
     bind(&fin);
 }
 
 CodeOffsetJump
-MacroAssemblerARMCompat::jumpWithPatch(RepatchLabel* label, Condition cond, Label* documentation)
+MacroAssemblerARMCompat::jumpWithPatch(RepatchLabel* label)
 {
     ARMBuffer::PoolEntry pe;
-    BufferOffset bo = as_BranchPool(0xdeadbeef, label, refLabel(documentation), &pe, cond);
+    BufferOffset bo = as_BranchPool(0xdeadbeef, label, LabelDoc(), &pe);
     // Fill in a new CodeOffset with both the load and the pool entry that the
     // instruction loads from.
     CodeOffsetJump ret(bo.getOffset(), pe.index());
     return ret;
 }
 
 void
 MacroAssemblerARMCompat::profilerEnterFrame(Register framePtr, Register scratch)
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -861,21 +861,17 @@ class MacroAssemblerARMCompat : public M
     Condition testBooleanTruthy(bool truthy, const ValueOperand& operand);
     Condition testDoubleTruthy(bool truthy, FloatRegister reg);
     Condition testStringTruthy(bool truthy, const ValueOperand& value);
 
     void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest);
     void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest);
     void loadConstantFloat32(float f, FloatRegister dest);
 
-    CodeOffsetJump jumpWithPatch(RepatchLabel* label, Condition cond = Always,
-                                 Label* documentation = nullptr);
-    CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation) {
-        return jumpWithPatch(label, Always, documentation);
-    }
+    CodeOffsetJump jumpWithPatch(RepatchLabel* label);
 
     void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
         if (dest.isFloat()) {
             loadInt32OrDouble(address, dest.fpu());
         } else {
             ScratchRegisterScope scratch(asMasm());
             ma_ldr(address, dest.gpr(), scratch);
         }
--- a/js/src/jit/arm/Simulator-arm.cpp
+++ b/js/src/jit/arm/Simulator-arm.cpp
@@ -401,18 +401,16 @@ class AutoLockSimulatorCache : public Lo
   public:
     explicit AutoLockSimulatorCache()
       : Base(SimulatorProcess::singleton_->cacheLock_)
     {}
 };
 
 mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
     SimulatorProcess::ICacheCheckingDisableCount(1); // Checking is disabled by default.
-mozilla::Atomic<bool, mozilla::ReleaseAcquire>
-    SimulatorProcess::cacheInvalidatedBySignalHandler_(false);
 SimulatorProcess* SimulatorProcess::singleton_ = nullptr;
 
 int64_t Simulator::StopSimAt = -1L;
 
 Simulator*
 Simulator::Create(JSContext* cx)
 {
     Simulator* sim = js_new<Simulator>(cx);
@@ -1088,35 +1086,21 @@ SimulatorProcess::checkICacheLocked(SimI
     void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
     void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
     int offset = (address & CachePage::kPageMask);
     CachePage* cache_page = GetCachePageLocked(icache(), page);
     char* cache_valid_byte = cache_page->validityByte(offset);
     bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
     char* cached_line = cache_page->cachedData(offset & ~CachePage::kLineMask);
 
-    // Read all state before considering signal handler effects.
-    int cmpret = 0;
     if (cache_hit) {
         // Check that the data in memory matches the contents of the I-cache.
-        cmpret = memcmp(reinterpret_cast<void*>(instr),
-                        cache_page->cachedData(offset),
-                        SimInstruction::kInstrSize);
-    }
-
-    // Check for signal handler interruption between reading state and asserting.
-    // It is safe for the signal to arrive during the !cache_hit path, since it
-    // will be cleared the next time this function is called.
-    if (cacheInvalidatedBySignalHandler_) {
-        icache().clear();
-        cacheInvalidatedBySignalHandler_ = false;
-        return;
-    }
-
-    if (cache_hit) {
+        int cmpret = memcmp(reinterpret_cast<void*>(instr),
+                            cache_page->cachedData(offset),
+                            SimInstruction::kInstrSize);
         MOZ_ASSERT(cmpret == 0);
     } else {
         // Cache miss. Load memory into the cache.
         memcpy(cached_line, line, CachePage::kLineLength);
         *cache_valid_byte = CachePage::LINE_VALID;
     }
 }
 
--- a/js/src/jit/arm/Simulator-arm.h
+++ b/js/src/jit/arm/Simulator-arm.h
@@ -478,22 +478,16 @@ class SimulatorProcess
     };
 
   public:
     typedef HashMap<void*, CachePage*, ICacheHasher, SystemAllocPolicy> ICacheMap;
 
     static mozilla::Atomic<size_t, mozilla::ReleaseAcquire> ICacheCheckingDisableCount;
     static void FlushICache(void* start, size_t size);
 
-    // Jitcode may be rewritten from a signal handler, but is prevented from
-    // calling FlushICache() because the signal may arrive within the critical
-    // area of an AutoLockSimulatorCache. This flag instructs the Simulator
-    // to remove all cache entries the next time it checks, avoiding false negatives.
-    static mozilla::Atomic<bool, mozilla::ReleaseAcquire> cacheInvalidatedBySignalHandler_;
-
     static void checkICacheLocked(SimInstruction* instr);
 
     static bool initialize() {
         singleton_ = js_new<SimulatorProcess>();
         return singleton_ && singleton_->init();
     }
     static void destroy() {
         js_delete(singleton_);
--- a/js/src/jit/arm64/Assembler-arm64.cpp
+++ b/js/src/jit/arm64/Assembler-arm64.cpp
@@ -354,17 +354,17 @@ Assembler::addPatchableJump(BufferOffset
         addJumpRelocation(src, reloc);
 
     size_t extendedTableIndex = pendingJumps_.length();
     enoughMemory_ &= pendingJumps_.append(RelativePatch(src, nullptr, reloc));
     return extendedTableIndex;
 }
 
 void
-PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
+PatchJump(CodeLocationJump& jump_, CodeLocationLabel label)
 {
     MOZ_CRASH("PatchJump");
 }
 
 void
 Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                    PatchedImmPtr expected)
 {
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -510,24 +510,17 @@ Imm64::firstHalf() const
 }
 
 inline Imm32
 Imm64::secondHalf() const
 {
     return hi();
 }
 
-void PatchJump(CodeLocationJump& jump_, CodeLocationLabel label,
-               ReprotectCode reprotect = DontReprotect);
-
-static inline void
-PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitZoneGroup::BackedgeTarget target)
-{
-    PatchJump(jump_, label);
-}
+void PatchJump(CodeLocationJump& jump_, CodeLocationLabel label);
 
 // Forbids pool generation during a specified interval. Not nestable.
 class AutoForbidPools
 {
     Assembler* asm_;
 
   public:
     AutoForbidPools(Assembler* asm_, size_t maxInst)
--- a/js/src/jit/arm64/MacroAssembler-arm64-inl.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64-inl.h
@@ -1180,39 +1180,16 @@ MacroAssembler::branchPtr(Condition cond
     vixl::UseScratchRegisterScope temps(this);
     const Register scratch = temps.AcquireX().asUnsized();
     MOZ_ASSERT(scratch != lhs.base);
     MOZ_ASSERT(scratch != lhs.index);
     loadPtr(lhs, scratch);
     branchPtr(cond, scratch, rhs, label);
 }
 
-template <typename T>
-CodeOffsetJump
-MacroAssembler::branchPtrWithPatch(Condition cond, Register lhs, T rhs, RepatchLabel* label)
-{
-    cmpPtr(lhs, rhs);
-    return jumpWithPatch(label, cond);
-}
-
-template <typename T>
-CodeOffsetJump
-MacroAssembler::branchPtrWithPatch(Condition cond, Address lhs, T rhs, RepatchLabel* label)
-{
-    // The scratch register is unused after the condition codes are set.
-    {
-        vixl::UseScratchRegisterScope temps(this);
-        const Register scratch = temps.AcquireX().asUnsized();
-        MOZ_ASSERT(scratch != lhs.base);
-        loadPtr(lhs, scratch);
-        cmpPtr(scratch, rhs);
-    }
-    return jumpWithPatch(label, cond);
-}
-
 void
 MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label)
 {
     vixl::UseScratchRegisterScope temps(this);
     const Register scratch = temps.AcquireX().asUnsized();
     if (rhs != scratch)
         movePtr(rhs, scratch);
     // Instead of unboxing lhs, box rhs and do direct comparison with lhs.
--- a/js/src/jit/arm64/MacroAssembler-arm64.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64.h
@@ -1270,50 +1270,36 @@ class MacroAssemblerCompat : public vixl
         B(label, cond);
     }
     void branch(JitCode* target) {
         syncStackPtr();
         BufferOffset loc = b(-1, LabelDoc()); // The jump target will be patched by executableCopy().
         addPendingJump(loc, ImmPtr(target->raw()), Relocation::JITCODE);
     }
 
-    CodeOffsetJump jumpWithPatch(RepatchLabel* label, Condition cond = Always, Label* documentation = nullptr)
+    CodeOffsetJump jumpWithPatch(RepatchLabel* label)
     {
-#ifdef JS_DISASM_ARM64
-        LabelDoc doc = spew_.refLabel(documentation);
-#else
-        LabelDoc doc;
-#endif
         ARMBuffer::PoolEntry pe;
         BufferOffset load_bo;
-        BufferOffset branch_bo;
 
         // Does not overwrite condition codes from the caller.
         {
             vixl::UseScratchRegisterScope temps(this);
             const ARMRegister scratch64 = temps.AcquireX();
             load_bo = immPool64(scratch64, (uint64_t)label, &pe);
         }
 
         MOZ_ASSERT(!label->bound());
-        if (cond != Always) {
-            Label notTaken;
-            B(&notTaken, Assembler::InvertCondition(cond));
-            branch_bo = b(-1, doc);
-            bind(&notTaken);
-        } else {
-            nop();
-            branch_bo = b(-1, doc);
-        }
+
+        nop();
+        BufferOffset branch_bo = b(-1, LabelDoc());
         label->use(branch_bo.getOffset());
+
         return CodeOffsetJump(load_bo.getOffset(), pe.index());
     }
-    CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation) {
-        return jumpWithPatch(label, Always, documentation);
-    }
 
     void compareDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs) {
         Fcmp(ARMFPRegister(lhs, 64), ARMFPRegister(rhs, 64));
     }
 
     void compareFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs) {
         Fcmp(ARMFPRegister(lhs, 32), ARMFPRegister(rhs, 32));
     }
--- a/js/src/jit/mips-shared/Assembler-mips-shared.h
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -714,21 +714,17 @@ Imm64::firstHalf() const
 
 inline Imm32
 Imm64::secondHalf() const
 {
     return hi();
 }
 
 void
-PatchJump(CodeLocationJump& jump_, CodeLocationLabel label,
-          ReprotectCode reprotect = DontReprotect);
-
-void
-PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitZoneGroup::BackedgeTarget target);
+PatchJump(CodeLocationJump& jump_, CodeLocationLabel label);
 
 static constexpr int32_t SliceSize = 1024;
 typedef js::jit::AssemblerBuffer<SliceSize, Instruction> MIPSBuffer;
 
 class MIPSBufferWithExecutableCopy : public MIPSBuffer
 {
   public:
     void executableCopy(uint8_t* buffer) {
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -77,43 +77,21 @@ CodeGeneratorMIPSShared::ToOperandOrRegi
 }
 #endif
 
 void
 CodeGeneratorMIPSShared::branchToBlock(Assembler::FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
                                        MBasicBlock* mir, Assembler::DoubleCondition cond)
 {
     // Skip past trivial blocks.
-    mir = skipTrivialBlocks(mir);
-
-    Label* label = mir->lir()->label();
-    if (Label* oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
-        // Note: the backedge is initially a jump to the next instruction.
-        // It will be patched to the target block's label during link().
-        RepatchLabel rejoin;
-
-        CodeOffsetJump backedge;
-        Label skip;
-        if (fmt == Assembler::DoubleFloat)
-            masm.ma_bc1d(lhs, rhs, &skip, Assembler::InvertCondition(cond), ShortJump);
-        else
-            masm.ma_bc1s(lhs, rhs, &skip, Assembler::InvertCondition(cond), ShortJump);
-
-        backedge = masm.backedgeJump(&rejoin);
-        masm.bind(&rejoin);
-        masm.bind(&skip);
-
-        if (!patchableBackedges_.append(PatchableBackedgeInfo(backedge, label, oolEntry)))
-            MOZ_CRASH();
-    } else {
-        if (fmt == Assembler::DoubleFloat)
-            masm.branchDouble(cond, lhs, rhs, mir->lir()->label());
-        else
-            masm.branchFloat(cond, lhs, rhs, mir->lir()->label());
-    }
+    Label* label = skipTrivialBlocks(mir)->lir()->label();
+    if (fmt == Assembler::DoubleFloat)
+        masm.branchDouble(cond, lhs, rhs, label);
+    else
+        masm.branchFloat(cond, lhs, rhs, label);
 }
 
 FrameSizeClass
 FrameSizeClass::FromDepth(uint32_t frameDepth)
 {
     return FrameSizeClass::None();
 }
 
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
@@ -71,36 +71,17 @@ class CodeGeneratorMIPSShared : public C
     void bailoutFrom(Label* label, LSnapshot* snapshot);
     void bailout(LSnapshot* snapshot);
 
     bool generateOutOfLineCode();
 
     template <typename T>
     void branchToBlock(Register lhs, T rhs, MBasicBlock* mir, Assembler::Condition cond)
     {
-        mir = skipTrivialBlocks(mir);
-
-        Label* label = mir->lir()->label();
-        if (Label* oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
-            // Note: the backedge is initially a jump to the next instruction.
-            // It will be patched to the target block's label during link().
-            RepatchLabel rejoin;
-            CodeOffsetJump backedge;
-            Label skip;
-
-            masm.ma_b(lhs, rhs, &skip, Assembler::InvertCondition(cond), ShortJump);
-            backedge = masm.backedgeJump(&rejoin);
-            masm.bind(&rejoin);
-            masm.bind(&skip);
-
-            if (!patchableBackedges_.append(PatchableBackedgeInfo(backedge, label, oolEntry)))
-                MOZ_CRASH();
-        } else {
-            masm.ma_b(lhs, rhs, label, cond);
-        }
+        masm.ma_b(lhs, rhs, skipTrivialBlocks(mir)->lir()->label(), cond);
     }
     void branchToBlock(Assembler::FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
                        MBasicBlock* mir, Assembler::DoubleCondition cond);
 
     // Emits a branch that directs control flow to the true block if |cond| is
     // true, and the false block if |cond| is false.
     template <typename T>
     void emitBranch(Register lhs, T rhs, Assembler::Condition cond,
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
@@ -573,41 +573,16 @@ MacroAssembler::branchPtr(Condition cond
 
 void
 MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs, ImmWord rhs, Label* label)
 {
     loadPtr(lhs, SecondScratchReg);
     branchPtr(cond, SecondScratchReg, rhs, label);
 }
 
-template <typename T>
-CodeOffsetJump
-MacroAssembler::branchPtrWithPatch(Condition cond, Register lhs, T rhs, RepatchLabel* label)
-{
-    movePtr(rhs, ScratchRegister);
-    Label skipJump;
-    ma_b(lhs, ScratchRegister, &skipJump, InvertCondition(cond), ShortJump);
-    CodeOffsetJump off = jumpWithPatch(label);
-    bind(&skipJump);
-    return off;
-}
-
-template <typename T>
-CodeOffsetJump
-MacroAssembler::branchPtrWithPatch(Condition cond, Address lhs, T rhs, RepatchLabel* label)
-{
-    loadPtr(lhs, SecondScratchReg);
-    movePtr(rhs, ScratchRegister);
-    Label skipJump;
-    ma_b(SecondScratchReg, ScratchRegister, &skipJump, InvertCondition(cond), ShortJump);
-    CodeOffsetJump off = jumpWithPatch(label);
-    bind(&skipJump);
-    return off;
-}
-
 void
 MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
                             Label* label)
 {
     ma_bc1s(lhs, rhs, label, cond);
 }
 
 void
--- a/js/src/jit/mips32/Assembler-mips32.cpp
+++ b/js/src/jit/mips32/Assembler-mips32.cpp
@@ -113,55 +113,26 @@ uint32_t
 js::jit::SA(FloatRegister r)
 {
     MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
     return r.id() << SAShift;
 }
 
 // Used to patch jumps created by MacroAssemblerMIPSCompat::jumpWithPatch.
 void
-jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
+jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label)
 {
     Instruction* inst1 = (Instruction*)jump_.raw();
     Instruction* inst2 = inst1->next();
 
-    MaybeAutoWritableJitCode awjc(inst1, 8, reprotect);
     AssemblerMIPSShared::UpdateLuiOriValue(inst1, inst2, (uint32_t)label.raw());
 
     AutoFlushICache::flush(uintptr_t(inst1), 8);
 }
 
-// For more information about backedges look at comment in
-// MacroAssemblerMIPSCompat::backedgeJump()
-void
-jit::PatchBackedge(CodeLocationJump& jump, CodeLocationLabel label,
-                   JitZoneGroup::BackedgeTarget target)
-{
-    uint32_t sourceAddr = (uint32_t)jump.raw();
-    uint32_t targetAddr = (uint32_t)label.raw();
-    InstImm* branch = (InstImm*)jump.raw();
-
-    MOZ_ASSERT(branch->extractOpcode() == (uint32_t(op_beq) >> OpcodeShift));
-
-    if (BOffImm16::IsInRange(targetAddr - sourceAddr)) {
-        branch->setBOffImm16(BOffImm16(targetAddr - sourceAddr));
-    } else {
-        if (target == JitZoneGroup::BackedgeLoopHeader) {
-            Instruction* lui = &branch[1];
-            AssemblerMIPSShared::UpdateLuiOriValue(lui, lui->next(), targetAddr);
-            // Jump to ori. The lui will be executed in delay slot.
-            branch->setBOffImm16(BOffImm16(2 * sizeof(uint32_t)));
-        } else {
-            Instruction* lui = &branch[4];
-            AssemblerMIPSShared::UpdateLuiOriValue(lui, lui->next(), targetAddr);
-            branch->setBOffImm16(BOffImm16(4 * sizeof(uint32_t)));
-        }
-    }
-}
-
 void
 Assembler::executableCopy(uint8_t* buffer, bool flushICache)
 {
     MOZ_ASSERT(isFinished);
     m_buffer.executableCopy(buffer);
 
     if (flushICache)
         AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -1631,69 +1631,18 @@ void
 MacroAssemblerMIPSCompat::moveData(const Value& val, Register data)
 {
     if (val.isGCThing())
         ma_li(data, ImmGCPtr(val.toGCThing()));
     else
         ma_li(data, Imm32(val.toNunboxPayload()));
 }
 
-/* There are three paths through the backedge jump. They are listed here in the order
- * in which instructions are executed.
- *  - The short jump is simple:
- *     b offset            # Jumps directly to target.
- *     lui at, addr1_hi    # In delay slot. Don't care about 'at' here.
- *
- *  - The long jump to loop header:
- *      b label1
- *      lui at, addr1_hi   # In delay slot. We use the value in 'at' later.
- *    label1:
- *      ori at, addr1_lo
- *      jr at
- *      lui at, addr2_hi   # In delay slot. Don't care about 'at' here.
- *
- *  - The long jump to interrupt loop:
- *      b label2
- *      lui at, addr1_hi   # In delay slot. Don't care about 'at' here.
- *    label2:
- *      lui at, addr2_hi
- *      ori at, addr2_lo
- *      jr at
- *      nop                # In delay slot.
- *
- * The backedge is done this way to avoid patching lui+ori pair while it is
- * being executed. Look also at jit::PatchBackedge().
- */
 CodeOffsetJump
-MacroAssemblerMIPSCompat::backedgeJump(RepatchLabel* label, Label* documentation)
-{
-    // Only one branch per label.
-    MOZ_ASSERT(!label->used());
-
-    BufferOffset bo = nextOffset();
-    label->use(bo.getOffset());
-
-    // Backedges are short jumps when bound, but can become long when patched.
-    m_buffer.ensureSpace(8 * sizeof(uint32_t));
-    // Jump to "label1" by default to jump to the loop header.
-    as_b(BOffImm16(2 * sizeof(uint32_t)));
-    // No need for nop here. We can safely put next instruction in delay slot.
-    ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET));
-    MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 3 * sizeof(uint32_t));
-    as_jr(ScratchRegister);
-    // No need for nop here. We can safely put next instruction in delay slot.
-    ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET));
-    as_jr(ScratchRegister);
-    as_nop();
-    MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 8 * sizeof(uint32_t));
-    return CodeOffsetJump(bo.getOffset());
-}
-
-CodeOffsetJump
-MacroAssemblerMIPSCompat::jumpWithPatch(RepatchLabel* label, Label* documentation)
+MacroAssemblerMIPSCompat::jumpWithPatch(RepatchLabel* label)
 {
     // Only one branch per label.
     MOZ_ASSERT(!label->used());
 
     BufferOffset bo = nextOffset();
     label->use(bo.getOffset());
     ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET));
     as_jr(ScratchRegister);
--- a/js/src/jit/mips32/MacroAssembler-mips32.h
+++ b/js/src/jit/mips32/MacroAssembler-mips32.h
@@ -449,18 +449,17 @@ class MacroAssemblerMIPSCompat : public 
     }
 
     uint32_t getType(const Value& val);
     void moveData(const Value& val, Register data);
 
   public:
     void moveValue(const Value& val, Register type, Register data);
 
-    CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr);
-    CodeOffsetJump jumpWithPatch(RepatchLabel* label, Label* documentation = nullptr);
+    CodeOffsetJump jumpWithPatch(RepatchLabel* label);
 
     void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
         if (dest.isFloat())
             loadInt32OrDouble(address, dest.fpu());
         else
             ma_lw(dest.gpr(), ToPayload(address));
     }
 
--- a/js/src/jit/mips32/Simulator-mips32.cpp
+++ b/js/src/jit/mips32/Simulator-mips32.cpp
@@ -511,18 +511,16 @@ class AutoLockSimulatorCache : public Lo
   public:
     AutoLockSimulatorCache()
       : Base(SimulatorProcess::singleton_->cacheLock_)
     {}
 };
 
 mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
     SimulatorProcess::ICacheCheckingDisableCount(1); // Checking is disabled by default.
-mozilla::Atomic<bool, mozilla::ReleaseAcquire>
-    SimulatorProcess::cacheInvalidatedBySignalHandler_(false);
 SimulatorProcess* SimulatorProcess::singleton_ = nullptr;
 
 int Simulator::StopSimAt = -1;
 
 Simulator*
 Simulator::Create(JSContext* cx)
 {
     Simulator* sim = js_new<Simulator>();
@@ -1201,35 +1199,21 @@ SimulatorProcess::checkICacheLocked(SimI
     void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
     void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
     int offset = (address & CachePage::kPageMask);
     CachePage* cache_page = GetCachePageLocked(icache(), page);
     char* cache_valid_byte = cache_page->validityByte(offset);
     bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
     char* cached_line = cache_page->cachedData(offset & ~CachePage::kLineMask);
 
-    // Read all state before considering signal handler effects.
-    int cmpret = 0;
     if (cache_hit) {
         // Check that the data in memory matches the contents of the I-cache.
-        cmpret = memcmp(reinterpret_cast<void*>(instr),
-                        cache_page->cachedData(offset),
-                        SimInstruction::kInstrSize);
-    }
-
-    // Check for signal handler interruption between reading state and asserting.
-    // It is safe for the signal to arrive during the !cache_hit path, since it
-    // will be cleared the next time this function is called.
-    if (cacheInvalidatedBySignalHandler_) {
-        icache().clear();
-        cacheInvalidatedBySignalHandler_ = false;
-        return;
-    }
-
-    if (cache_hit) {
+        int cmpret = memcmp(reinterpret_cast<void*>(instr),
+                            cache_page->cachedData(offset),
+                            SimInstruction::kInstrSize);
         MOZ_ASSERT(cmpret == 0);
     } else {
         // Cache miss.  Load memory into the cache.
         memcpy(cached_line, line, CachePage::kLineLength);
         *cache_valid_byte = CachePage::LINE_VALID;
     }
 }
 
--- a/js/src/jit/mips32/Simulator-mips32.h
+++ b/js/src/jit/mips32/Simulator-mips32.h
@@ -398,22 +398,16 @@ class SimulatorProcess
     };
 
   public:
     typedef HashMap<void*, CachePage*, ICacheHasher, SystemAllocPolicy> ICacheMap;
 
     static mozilla::Atomic<size_t, mozilla::ReleaseAcquire> ICacheCheckingDisableCount;
     static void FlushICache(void* start, size_t size);
 
-    // Jitcode may be rewritten from a signal handler, but is prevented from
-    // calling FlushICache() because the signal may arrive within the critical
-    // area of an AutoLockSimulatorCache. This flag instructs the Simulator
-    // to remove all cache entries the next time it checks, avoiding false negatives.
-    static mozilla::Atomic<bool, mozilla::ReleaseAcquire> cacheInvalidatedBySignalHandler_;
-
     static void checkICacheLocked(SimInstruction* instr);
 
     static bool initialize() {
         singleton_ = js_new<SimulatorProcess>();
         return singleton_ && singleton_->init();
     }
     static void destroy() {
         js_delete(singleton_);
--- a/js/src/jit/mips64/Assembler-mips64.cpp
+++ b/js/src/jit/mips64/Assembler-mips64.cpp
@@ -80,56 +80,26 @@ uint32_t
 js::jit::SA(FloatRegister r)
 {
     MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
     return r.id() << SAShift;
 }
 
 // Used to patch jumps created by MacroAssemblerMIPS64Compat::jumpWithPatch.
 void
-jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
+jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label)
 {
     Instruction* inst = (Instruction*)jump_.raw();
 
-    // Six instructions used in load 64-bit imm.
-    MaybeAutoWritableJitCode awjc(inst, 6 * sizeof(uint32_t), reprotect);
     Assembler::UpdateLoad64Value(inst, (uint64_t)label.raw());
 
+    // Six instructions are used to load the 64-bit imm.
     AutoFlushICache::flush(uintptr_t(inst), 6 * sizeof(uint32_t));
 }
 
-// For more information about backedges look at comment in
-// MacroAssemblerMIPS64Compat::backedgeJump()
-void
-jit::PatchBackedge(CodeLocationJump& jump, CodeLocationLabel label,
-                   JitZoneGroup::BackedgeTarget target)
-{
-    uintptr_t sourceAddr = (uintptr_t)jump.raw();
-    uintptr_t targetAddr = (uintptr_t)label.raw();
-    InstImm* branch = (InstImm*)jump.raw();
-
-    MOZ_ASSERT(branch->extractOpcode() == (uint32_t(op_beq) >> OpcodeShift));
-
-    if (BOffImm16::IsInRange(targetAddr - sourceAddr)) {
-        branch->setBOffImm16(BOffImm16(targetAddr - sourceAddr));
-    } else {
-        if (target == JitZoneGroup::BackedgeLoopHeader) {
-            Instruction* inst = &branch[1];
-            Assembler::UpdateLoad64Value(inst, targetAddr);
-            // Jump to first ori. The lui will be executed in delay slot.
-            branch->setBOffImm16(BOffImm16(2 * sizeof(uint32_t)));
-        } else {
-            Instruction* inst = &branch[6];
-            Assembler::UpdateLoad64Value(inst, targetAddr);
-            // Jump to first ori of interrupt loop.
-            branch->setBOffImm16(BOffImm16(6 * sizeof(uint32_t)));
-        }
-    }
-}
-
 void
 Assembler::executableCopy(uint8_t* buffer, bool flushICache)
 {
     MOZ_ASSERT(isFinished);
     m_buffer.executableCopy(buffer);
 
     if (flushICache)
         AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -1575,74 +1575,18 @@ MacroAssemblerMIPS64Compat::extractTag(c
 
 Register
 MacroAssemblerMIPS64Compat::extractTag(const BaseIndex& address, Register scratch)
 {
     computeScaledAddress(address, scratch);
     return extractTag(Address(scratch, address.offset), scratch);
 }
 
-/* There are three paths through the backedge jump. They are listed here in the order
- * in which instructions are executed.
- *  - The short jump is simple:
- *     b offset            # Jumps directly to target.
- *     lui at, addr1_hl    # In delay slot. Don't care about 'at' here.
- *
- *  - The long jump to loop header:
- *      b label1
- *      lui at, addr1_hl   # In delay slot. We use the value in 'at' later.
- *    label1:
- *      ori at, addr1_lh
- *      drotr32 at, at, 48
- *      ori at, addr1_ll
- *      jr at
- *      lui at, addr2_hl   # In delay slot. Don't care about 'at' here.
- *
- *  - The long jump to interrupt loop:
- *      b label2
- *      ...
- *      jr at
- *    label2:
- *      lui at, addr2_hl   # In delay slot. Don't care about 'at' here.
- *      ori at, addr2_lh
- *      drotr32 at, at, 48
- *      ori at, addr2_ll
- *      jr at
- *      nop                # In delay slot.
- *
- * The backedge is done this way to avoid patching lui+ori pair while it is
- * being executed. Look also at jit::PatchBackedge().
- */
 CodeOffsetJump
-MacroAssemblerMIPS64Compat::backedgeJump(RepatchLabel* label, Label* documentation)
-{
-    // Only one branch per label.
-    MOZ_ASSERT(!label->used());
-
-    BufferOffset bo = nextOffset();
-    label->use(bo.getOffset());
-
-    // Backedges are short jumps when bound, but can become long when patched.
-    m_buffer.ensureSpace(16 * sizeof(uint32_t));
-    // Jump to "label1" by default to jump to the loop header.
-    as_b(BOffImm16(2 * sizeof(uint32_t)));
-    // No need for nop here. We can safely put next instruction in delay slot.
-    ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET));
-    MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 5 * sizeof(uint32_t));
-    as_jr(ScratchRegister);
-    // No need for nop here. We can safely put next instruction in delay slot.
-    ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET));
-    as_jr(ScratchRegister);
-    as_nop();
-    MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 12 * sizeof(uint32_t));
-    return CodeOffsetJump(bo.getOffset());
-}
-
-CodeOffsetJump
-MacroAssemblerMIPS64Compat::jumpWithPatch(RepatchLabel* label, Label* documentation)
+MacroAssemblerMIPS64Compat::jumpWithPatch(RepatchLabel* label)
 {
     // Only one branch per label.
     MOZ_ASSERT(!label->used());
 
     BufferOffset bo = nextOffset();
     label->use(bo.getOffset());
     ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET));
     as_jr(ScratchRegister);
--- a/js/src/jit/mips64/MacroAssembler-mips64.h
+++ b/js/src/jit/mips64/MacroAssembler-mips64.h
@@ -497,18 +497,17 @@ class MacroAssemblerMIPS64Compat : publi
 
     void testUndefinedSet(Condition cond, const ValueOperand& value, Register dest);
 
     // higher level tag testing code
     Address ToPayload(Address value) {
         return value;
     }
 
-    CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr);
-    CodeOffsetJump jumpWithPatch(RepatchLabel* label, Label* documentation = nullptr);
+    CodeOffsetJump jumpWithPatch(RepatchLabel* label);
 
     template <typename T>
     void loadUnboxedValue(const T& address, MIRType type, AnyRegister dest) {
         if (dest.isFloat())
             loadInt32OrDouble(address, dest.fpu());
         else if (type == MIRType::ObjectOrNull)
             unboxObjectOrNull(address, dest.gpr());
         else
--- a/js/src/jit/mips64/Simulator-mips64.cpp
+++ b/js/src/jit/mips64/Simulator-mips64.cpp
@@ -545,18 +545,16 @@ class AutoLockSimulatorCache : public Lo
   public:
     explicit AutoLockSimulatorCache()
       : Base(SimulatorProcess::singleton_->cacheLock_)
     {}
 };
 
 mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
     SimulatorProcess::ICacheCheckingDisableCount(1);  // Checking is disabled by default.
-mozilla::Atomic<bool, mozilla::ReleaseAcquire>
-    SimulatorProcess::cacheInvalidatedBySignalHandler_(false);
 SimulatorProcess* SimulatorProcess::singleton_ = nullptr;
 
 int64_t Simulator::StopSimAt = -1;
 
 Simulator *
 Simulator::Create(JSContext* cx)
 {
     Simulator* sim = js_new<Simulator>();
@@ -1210,35 +1208,21 @@ SimulatorProcess::checkICacheLocked(SimI
     void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
     void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
     int offset = (address & CachePage::kPageMask);
     CachePage* cache_page = GetCachePageLocked(icache(), page);
     char* cache_valid_byte = cache_page->validityByte(offset);
     bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
     char* cached_line = cache_page->cachedData(offset & ~CachePage::kLineMask);
 
-    // Read all state before considering signal handler effects.
-    int cmpret = 0;
     if (cache_hit) {
         // Check that the data in memory matches the contents of the I-cache.
-        cmpret = memcmp(reinterpret_cast<void*>(instr),
-                        cache_page->cachedData(offset),
-                        SimInstruction::kInstrSize);
-    }
-
-    // Check for signal handler interruption between reading state and asserting.
-    // It is safe for the signal to arrive during the !cache_hit path, since it
-    // will be cleared the next time this function is called.
-    if (cacheInvalidatedBySignalHandler_) {
-        icache().clear();
-        cacheInvalidatedBySignalHandler_ = false;
-        return;
-    }
-
-    if (cache_hit) {
+        int cmpret = memcmp(reinterpret_cast<void*>(instr),
+                            cache_page->cachedData(offset),
+                            SimInstruction::kInstrSize);
         MOZ_ASSERT(cmpret == 0);
     } else {
         // Cache miss.  Load memory into the cache.
         memcpy(cached_line, line, CachePage::kLineLength);
         *cache_valid_byte = CachePage::LINE_VALID;
     }
 }
 
--- a/js/src/jit/mips64/Simulator-mips64.h
+++ b/js/src/jit/mips64/Simulator-mips64.h
@@ -416,22 +416,16 @@ class SimulatorProcess
     };
 
   public:
     typedef HashMap<void*, CachePage*, ICacheHasher, SystemAllocPolicy> ICacheMap;
 
     static mozilla::Atomic<size_t, mozilla::ReleaseAcquire> ICacheCheckingDisableCount;
     static void FlushICache(void* start, size_t size);
 
-    // Jitcode may be rewritten from a signal handler, but is prevented from
-    // calling FlushICache() because the signal may arrive within the critical
-    // area of an AutoLockSimulatorCache. This flag instructs the Simulator
-    // to remove all cache entries the next time it checks, avoiding false negatives.
-    static mozilla::Atomic<bool, mozilla::ReleaseAcquire> cacheInvalidatedBySignalHandler_;
-
     static void checkICacheLocked(SimInstruction* instr);
 
     static bool initialize() {
         singleton_ = js_new<SimulatorProcess>();
         return singleton_ && singleton_->init();
     }
     static void destroy() {
         js_delete(singleton_);
--- a/js/src/jit/none/MacroAssembler-none.h
+++ b/js/src/jit/none/MacroAssembler-none.h
@@ -250,19 +250,17 @@ class MacroAssemblerNone : public Assemb
     void tagValue(JSValueType, Register, ValueOperand) { MOZ_CRASH(); }
     void retn(Imm32 n) { MOZ_CRASH(); }
     template <typename T> void push(T) { MOZ_CRASH(); }
     template <typename T> void Push(T) { MOZ_CRASH(); }
     template <typename T> void pop(T) { MOZ_CRASH(); }
     template <typename T> void Pop(T) { MOZ_CRASH(); }
     template <typename T> CodeOffset pushWithPatch(T) { MOZ_CRASH(); }
 
-    CodeOffsetJump jumpWithPatch(RepatchLabel*, Label* doc = nullptr) { MOZ_CRASH(); }
-    CodeOffsetJump jumpWithPatch(RepatchLabel*, Condition, Label* doc = nullptr) { MOZ_CRASH(); }
-    CodeOffsetJump backedgeJump(RepatchLabel* label, Label* doc = nullptr) { MOZ_CRASH(); }
+    CodeOffsetJump jumpWithPatch(RepatchLabel*) { MOZ_CRASH(); }
 
     void testNullSet(Condition, ValueOperand, Register) { MOZ_CRASH(); }
     void testObjectSet(Condition, ValueOperand, Register) { MOZ_CRASH(); }
     void testUndefinedSet(Condition, ValueOperand, Register) { MOZ_CRASH(); }
 
     template <typename T, typename S> void cmpPtrSet(Condition, T, S, Register) { MOZ_CRASH(); }
     template <typename T, typename S> void cmp32Set(Condition, T, S, Register) { MOZ_CRASH(); }
 
@@ -399,25 +397,19 @@ class ABIArgGenerator
   public:
     ABIArgGenerator() { MOZ_CRASH(); }
     ABIArg next(MIRType) { MOZ_CRASH(); }
     ABIArg& current() { MOZ_CRASH(); }
     uint32_t stackBytesConsumedSoFar() const { MOZ_CRASH(); }
 };
 
 static inline void
-PatchJump(CodeLocationJump&, CodeLocationLabel, ReprotectCode reprotect = DontReprotect)
+PatchJump(CodeLocationJump&, CodeLocationLabel)
 {
     MOZ_CRASH();
 }
 
 static inline bool GetTempRegForIntArg(uint32_t, uint32_t, Register*) { MOZ_CRASH(); }
 
-static inline
-void PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitZoneGroup::BackedgeTarget target)
-{
-    MOZ_CRASH();
-}
-
 } // namespace jit
 } // namespace js
 
 #endif /* jit_none_MacroAssembler_none_h */
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -1508,107 +1508,43 @@ CodeGeneratorShared::emitPreBarrier(Regi
 }
 
 void
 CodeGeneratorShared::emitPreBarrier(Address address)
 {
     masm.guardedCallPreBarrier(address, MIRType::Value);
 }
 
-Label*
-CodeGeneratorShared::labelForBackedgeWithImplicitCheck(MBasicBlock* mir)
-{
-    // If this is a loop backedge to a loop header with an implicit interrupt
-    // check, use a patchable jump. Skip this search if compiling without a
-    // script for wasm, as there will be no interrupt check instruction.
-    // Due to critical edge unsplitting there may no longer be unique loop
-    // backedges, so just look for any edge going to an earlier block in RPO.
-    if (!gen->compilingWasm() && mir->isLoopHeader() && mir->id() <= current->mir()->id()) {
-        for (LInstructionIterator iter = mir->lir()->begin(); iter != mir->lir()->end(); iter++) {
-            if (iter->isMoveGroup()) {
-                // Continue searching for an interrupt check.
-            } else {
-                // The interrupt check should be the first instruction in the
-                // loop header other than move groups.
-                MOZ_ASSERT(iter->isInterruptCheck());
-                if (iter->toInterruptCheck()->implicit())
-                    return iter->toInterruptCheck()->oolEntry();
-                return nullptr;
-            }
-        }
-    }
-
-    return nullptr;
-}
-
 void
 CodeGeneratorShared::jumpToBlock(MBasicBlock* mir)
 {
     // Skip past trivial blocks.
     mir = skipTrivialBlocks(mir);
 
     // No jump necessary if we can fall through to the next block.
     if (isNextBlock(mir->lir()))
         return;
 
-    if (Label* oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
-        // Note: the backedge is initially a jump to the next instruction.
-        // It will be patched to the target block's label during link().
-        RepatchLabel rejoin;
-        CodeOffsetJump backedge = masm.backedgeJump(&rejoin, mir->lir()->label());
-        masm.bind(&rejoin);
-
-        masm.propagateOOM(patchableBackedges_.append(PatchableBackedgeInfo(backedge, mir->lir()->label(), oolEntry)));
-    } else {
-        masm.jump(mir->lir()->label());
-    }
+    masm.jump(mir->lir()->label());
 }
 
 Label*
 CodeGeneratorShared::getJumpLabelForBranch(MBasicBlock* block)
 {
     // Skip past trivial blocks.
-    block = skipTrivialBlocks(block);
-
-    if (!labelForBackedgeWithImplicitCheck(block))
-        return block->lir()->label();
-
-    // We need to use a patchable jump for this backedge, but want to treat
-    // this as a normal label target to simplify codegen. Efficiency isn't so
-    // important here as these tests are extremely unlikely to be used in loop
-    // backedges, so emit inline code for the patchable jump. Heap allocating
-    // the label allows it to be used by out of line blocks.
-    Label* res = alloc().lifoAlloc()->newInfallible<Label>();
-    Label after;
-    masm.jump(&after);
-    masm.bind(res);
-    jumpToBlock(block);
-    masm.bind(&after);
-    return res;
+    return skipTrivialBlocks(block)->lir()->label();
 }
 
 // This function is not used for MIPS/MIPS64. MIPS has branchToBlock.
 #if !defined(JS_CODEGEN_MIPS32) && !defined(JS_CODEGEN_MIPS64)
 void
 CodeGeneratorShared::jumpToBlock(MBasicBlock* mir, Assembler::Condition cond)
 {
     // Skip past trivial blocks.
-    mir = skipTrivialBlocks(mir);
-
-    if (Label* oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
-        // Note: the backedge is initially a jump to the next instruction.
-        // It will be patched to the target block's label during link().
-        RepatchLabel rejoin;
-        CodeOffsetJump backedge = masm.jumpWithPatch(&rejoin, cond, mir->lir()->label());
-        masm.bind(&rejoin);
-
-        masm.propagateOOM(patchableBackedges_.append(PatchableBackedgeInfo(backedge, mir->lir()->label(), oolEntry)));
-    } else {
-        masm.j(cond, mir->lir()->label());
-    }
+    masm.j(cond, skipTrivialBlocks(mir)->lir()->label());
 }
 #endif
 
 ReciprocalMulConstants
 CodeGeneratorShared::computeDivisionConstants(uint32_t d, int maxLog) {
     MOZ_ASSERT(maxLog >= 2 && maxLog <= 32);
     // In what follows, 0 < d < 2^maxLog and d is not a power of 2.
     MOZ_ASSERT(d < (uint64_t(1) << maxLog) && (d & (d - 1)) != 0);
--- a/js/src/jit/shared/CodeGenerator-shared.h
+++ b/js/src/jit/shared/CodeGenerator-shared.h
@@ -29,27 +29,16 @@ class CodeGenerator;
 class MacroAssembler;
 class IonIC;
 
 template <class ArgSeq, class StoreOutputTo>
 class OutOfLineCallVM;
 
 class OutOfLineTruncateSlow;
 
-struct PatchableBackedgeInfo
-{
-    CodeOffsetJump backedge;
-    Label* loopHeader;
-    Label* interruptCheck;
-
-    PatchableBackedgeInfo(CodeOffsetJump backedge, Label* loopHeader, Label* interruptCheck)
-      : backedge(backedge), loopHeader(loopHeader), interruptCheck(interruptCheck)
-    {}
-};
-
 struct ReciprocalMulConstants {
     int64_t multiplier;
     int32_t shiftAmount;
 };
 
 // This should be nested in CodeGeneratorShared, but it is used in
 // optimization tracking implementation and nested classes cannot be
 // forward-declared.
@@ -105,19 +94,16 @@ class CodeGeneratorShared : public LElem
 
     // IC data we need at compile-time. Discarded after creating the IonScript.
     struct CompileTimeICInfo {
         CodeOffset icOffsetForJump;
         CodeOffset icOffsetForPush;
     };
     js::Vector<CompileTimeICInfo, 0, SystemAllocPolicy> icInfo_;
 
-    // Patchable backedges generated for loops.
-    Vector<PatchableBackedgeInfo, 0, SystemAllocPolicy> patchableBackedges_;
-
 #ifdef JS_TRACE_LOGGING
     struct PatchableTLEvent {
         CodeOffset offset;
         const char* event;
         PatchableTLEvent(CodeOffset offset, const char* event)
             : offset(offset), event(event)
         {}
     };
@@ -479,26 +465,21 @@ class CodeGeneratorShared : public LElem
     bool generateEpilogue();
 
     void addOutOfLineCode(OutOfLineCode* code, const MInstruction* mir);
     void addOutOfLineCode(OutOfLineCode* code, const BytecodeSite* site);
     bool generateOutOfLineCode();
 
     Label* getJumpLabelForBranch(MBasicBlock* block);
 
-    // Generate a jump to the start of the specified block, adding information
-    // if this is a loop backedge. Use this in place of jumping directly to
-    // mir->lir()->label(), or use getJumpLabelForBranch() if a label to use
-    // directly is needed.
+    // Generate a jump to the start of the specified block. Use this in place of
+    // jumping directly to mir->lir()->label(), or use getJumpLabelForBranch()
+    // if a label to use directly is needed.
     void jumpToBlock(MBasicBlock* mir);
 
-    // Get a label for the start of block which can be used for jumping, in
-    // place of jumpToBlock.
-    Label* labelForBackedgeWithImplicitCheck(MBasicBlock* mir);
-
 // This function is not used for MIPS. MIPS has branchToBlock.
 #if !defined(JS_CODEGEN_MIPS32) && !defined(JS_CODEGEN_MIPS64)
     void jumpToBlock(MBasicBlock* mir, Assembler::Condition cond);
 #endif
 
   private:
     void generateInvalidateEpilogue();
 
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -1608,51 +1608,25 @@ class LRotateI64 : public details::Rotat
 
     const LInt64Allocation input() { return getInt64Operand(Input); }
     const LDefinition* temp() { return getTemp(0); }
     LAllocation* count() { return getOperand(Count); }
 };
 
 class LInterruptCheck : public LInstructionHelper<0, 0, 0>
 {
-    Label* oolEntry_;
-
-    // Whether this is an implicit interrupt check. Implicit interrupt checks
-    // use a patchable backedge and signal handlers instead of an explicit
-    // cx->interrupt check.
-    bool implicit_;
-
   public:
     LIR_HEADER(InterruptCheck)
 
     LInterruptCheck()
-      : LInstructionHelper(classOpcode),
-        oolEntry_(nullptr),
-        implicit_(false)
+      : LInstructionHelper(classOpcode)
     {}
-
-    Label* oolEntry() {
-        MOZ_ASSERT(implicit_);
-        return oolEntry_;
-    }
-
-    void setOolEntry(Label* oolEntry) {
-        MOZ_ASSERT(implicit_);
-        oolEntry_ = oolEntry;
-    }
     MInterruptCheck* mir() const {
         return mir_->toInterruptCheck();
     }
-
-    void setImplicit() {
-        implicit_ = true;
-    }
-    bool implicit() const {
-        return implicit_;
-    }
 };
 
 class LWasmInterruptCheck : public LInstructionHelper<0, 1, 0>
 {
   public:
     LIR_HEADER(WasmInterruptCheck)
 
     explicit LWasmInterruptCheck(const LAllocation& tlsData)
--- a/js/src/jit/x64/Assembler-x64.cpp
+++ b/js/src/jit/x64/Assembler-x64.cpp
@@ -167,20 +167,19 @@ Assembler::PatchableJumpAddress(JitCode*
     jumpOffset += index * SizeOfJumpTableEntry;
 
     MOZ_ASSERT(jumpOffset + SizeOfExtendedJump <= code->instructionsSize());
     return code->raw() + jumpOffset;
 }
 
 /* static */
 void
-Assembler::PatchJumpEntry(uint8_t* entry, uint8_t* target, ReprotectCode reprotect)
+Assembler::PatchJumpEntry(uint8_t* entry, uint8_t* target)
 {
     uint8_t** index = (uint8_t**) (entry + SizeOfExtendedJump - sizeof(void*));
-    MaybeAutoWritableJitCode awjc(index, sizeof(void*), reprotect);
     *index = target;
 }
 
 void
 Assembler::finish()
 {
     if (oom())
         return;
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -295,17 +295,17 @@ class Assembler : public AssemblerX86Sha
   public:
     using AssemblerX86Shared::j;
     using AssemblerX86Shared::jmp;
     using AssemblerX86Shared::push;
     using AssemblerX86Shared::pop;
     using AssemblerX86Shared::vmovq;
 
     static uint8_t* PatchableJumpAddress(JitCode* code, size_t index);
-    static void PatchJumpEntry(uint8_t* entry, uint8_t* target, ReprotectCode reprotect);
+    static void PatchJumpEntry(uint8_t* entry, uint8_t* target);
 
     Assembler()
       : extendedJumpTable_(0)
     {
     }
 
     static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
 
@@ -1114,36 +1114,26 @@ class Assembler : public AssemblerX86Sha
         masm.vcvtsq2sd_rr(src1.encoding(), src0.encoding(), dest.encoding());
     }
     void vcvtsq2ss(Register src1, FloatRegister src0, FloatRegister dest) {
         masm.vcvtsq2ss_rr(src1.encoding(), src0.encoding(), dest.encoding());
     }
 };
 
 static inline void
-PatchJump(CodeLocationJump jump, CodeLocationLabel label, ReprotectCode reprotect = DontReprotect)
+PatchJump(CodeLocationJump jump, CodeLocationLabel label)
 {
     if (X86Encoding::CanRelinkJump(jump.raw(), label.raw())) {
-        MaybeAutoWritableJitCode awjc(jump.raw() - 8, 8, reprotect);
         X86Encoding::SetRel32(jump.raw(), label.raw());
     } else {
-        {
-            MaybeAutoWritableJitCode awjc(jump.raw() - 8, 8, reprotect);
-            X86Encoding::SetRel32(jump.raw(), jump.jumpTableEntry());
-        }
-        Assembler::PatchJumpEntry(jump.jumpTableEntry(), label.raw(), reprotect);
+        X86Encoding::SetRel32(jump.raw(), jump.jumpTableEntry());
+        Assembler::PatchJumpEntry(jump.jumpTableEntry(), label.raw());
     }
 }
 
-static inline void
-PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitZoneGroup::BackedgeTarget target)
-{
-    PatchJump(jump_, label);
-}
-
 static inline bool
 GetIntArgReg(uint32_t intArg, uint32_t floatArg, Register* out)
 {
 #if defined(_WIN64)
     uint32_t arg = intArg + floatArg;
 #else
     uint32_t arg = intArg;
 #endif
--- a/js/src/jit/x64/MacroAssembler-x64.h
+++ b/js/src/jit/x64/MacroAssembler-x64.h
@@ -539,32 +539,21 @@ class MacroAssemblerX64 : public MacroAs
     void testPtr(const Operand& lhs, Imm32 rhs) {
         testq(rhs, lhs);
     }
 
     /////////////////////////////////////////////////////////////////
     // Common interface.
     /////////////////////////////////////////////////////////////////
 
-    CodeOffsetJump jumpWithPatch(RepatchLabel* label, Label* documentation = nullptr) {
+    CodeOffsetJump jumpWithPatch(RepatchLabel* label) {
         JmpSrc src = jmpSrc(label);
         return CodeOffsetJump(size(), addPatchableJump(src, Relocation::HARDCODED));
     }
 
-    CodeOffsetJump jumpWithPatch(RepatchLabel* label, Condition cond,
-                                 Label* documentation = nullptr)
-    {
-        JmpSrc src = jSrc(cond, label);
-        return CodeOffsetJump(size(), addPatchableJump(src, Relocation::HARDCODED));
-    }
-
-    CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr) {
-        return jumpWithPatch(label);
-    }
-
     void movePtr(Register src, Register dest) {
         movq(src, dest);
     }
     void movePtr(Register src, const Operand& dest) {
         movq(src, dest);
     }
     void movePtr(ImmWord imm, Register dest) {
         mov(imm, dest);
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
@@ -591,32 +591,16 @@ MacroAssembler::branchPtr(Condition cond
 template <typename T, typename S, typename L>
 void
 MacroAssembler::branchPtrImpl(Condition cond, const T& lhs, const S& rhs, L label)
 {
     cmpPtr(Operand(lhs), rhs);
     j(cond, label);
 }
 
-template <typename T>
-CodeOffsetJump
-MacroAssembler::branchPtrWithPatch(Condition cond, Register lhs, T rhs, RepatchLabel* label)
-{
-    cmpPtr(lhs, rhs);
-    return jumpWithPatch(label, cond);
-}
-
-template <typename T>
-CodeOffsetJump
-MacroAssembler::branchPtrWithPatch(Condition cond, Address lhs, T rhs, RepatchLabel* label)
-{
-    cmpPtr(lhs, rhs);
-    return jumpWithPatch(label, cond);
-}
-
 void
 MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
                             Label* label)
 {
     compareFloat(cond, lhs, rhs);
 
     if (cond == DoubleEqual) {
         Label unordered;
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -176,34 +176,28 @@ static const Scale ScalePointer = TimesF
 } // namespace js
 
 #include "jit/x86-shared/Assembler-x86-shared.h"
 
 namespace js {
 namespace jit {
 
 static inline void
-PatchJump(CodeLocationJump jump, CodeLocationLabel label, ReprotectCode reprotect = DontReprotect)
+PatchJump(CodeLocationJump jump, CodeLocationLabel label)
 {
 #ifdef DEBUG
     // Assert that we're overwriting a jump instruction, either:
     //   0F 80+cc <imm32>, or
     //   E9 <imm32>
     unsigned char* x = (unsigned char*)jump.raw() - 5;
     MOZ_ASSERT(((*x >= 0x80 && *x <= 0x8F) && *(x - 1) == 0x0F) ||
                (*x == 0xE9));
 #endif
-    MaybeAutoWritableJitCode awjc(jump.raw() - 8, 8, reprotect);
     X86Encoding::SetRel32(jump.raw(), label.raw());
 }
-static inline void
-PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitZoneGroup::BackedgeTarget target)
-{
-    PatchJump(jump_, label);
-}
 
 static inline Operand
 LowWord(const Operand& op) {
     switch (op.kind()) {
       case Operand::MEM_REG_DISP: return Operand(LowWord(op.toAddress()));
       case Operand::MEM_SCALE:    return Operand(LowWord(op.toBaseIndex()));
       default:                    MOZ_CRASH("Invalid operand type");
     }
--- a/js/src/jit/x86/MacroAssembler-x86.h
+++ b/js/src/jit/x86/MacroAssembler-x86.h
@@ -574,32 +574,21 @@ class MacroAssemblerX86 : public MacroAs
     /////////////////////////////////////////////////////////////////
 
     template <typename T, typename S>
     void branchPtr(Condition cond, T lhs, S ptr, RepatchLabel* label) {
         cmpPtr(Operand(lhs), ptr);
         j(cond, label);
     }
 
-    CodeOffsetJump jumpWithPatch(RepatchLabel* label, Label* documentation = nullptr) {
+    CodeOffsetJump jumpWithPatch(RepatchLabel* label) {
         jump(label);
         return CodeOffsetJump(size());
     }
 
-    CodeOffsetJump jumpWithPatch(RepatchLabel* label, Assembler::Condition cond,
-                                 Label* documentation = nullptr)
-    {
-        j(cond, label);
-        return CodeOffsetJump(size());
-    }
-
-    CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr) {
-        return jumpWithPatch(label);
-    }
-
     void branchPtr(Condition cond, Register lhs, Register rhs, RepatchLabel* label) {
         cmpPtr(lhs, rhs);
         j(cond, label);
     }
 
     void movePtr(ImmWord imm, Register dest) {
         movl(Imm32(imm.value), dest);
     }
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -7277,19 +7277,16 @@ JS_SetGlobalJitCompilerOption(JSContext*
         jit::JitOptions.asmJSAtomicsEnable = !!value;
         break;
       case JSJITCOMPILER_WASM_FOLD_OFFSETS:
         jit::JitOptions.wasmFoldOffsets = !!value;
         break;
       case JSJITCOMPILER_WASM_DELAY_TIER2:
         jit::JitOptions.wasmDelayTier2 = !!value;
         break;
-      case JSJITCOMPILER_ION_INTERRUPT_WITHOUT_SIGNAL:
-        jit::JitOptions.ionInterruptWithoutSignals = !!value;
-        break;
 #ifdef DEBUG
       case JSJITCOMPILER_FULL_DEBUG_CHECKS:
         jit::JitOptions.fullDebugChecks = !!value;
         break;
 #endif
       default:
         break;
     }
@@ -7325,19 +7322,16 @@ JS_GetGlobalJitCompilerOption(JSContext*
         *valueOut = rt->canUseOffthreadIonCompilation();
         break;
       case JSJITCOMPILER_ASMJS_ATOMICS_ENABLE:
         *valueOut = jit::JitOptions.asmJSAtomicsEnable ? 1 : 0;
         break;
       case JSJITCOMPILER_WASM_FOLD_OFFSETS:
         *valueOut = jit::JitOptions.wasmFoldOffsets ? 1 : 0;
         break;
-      case JSJITCOMPILER_ION_INTERRUPT_WITHOUT_SIGNAL:
-        *valueOut = jit::JitOptions.ionInterruptWithoutSignals ? 1 : 0;
-        break;
 #ifdef DEBUG
       case JSJITCOMPILER_FULL_DEBUG_CHECKS:
         *valueOut = jit::JitOptions.fullDebugChecks ? 1 : 0;
         break;
 #endif
       default:
         return false;
     }
--- a/js/src/jsapi.h
+++ b/js/src/jsapi.h
@@ -5814,17 +5814,16 @@ extern JS_PUBLIC_API(void)
 JS_SetOffthreadIonCompilationEnabled(JSContext* cx, bool enabled);
 
 #define JIT_COMPILER_OPTIONS(Register)                                      \
     Register(BASELINE_WARMUP_TRIGGER, "baseline.warmup.trigger")            \
     Register(ION_WARMUP_TRIGGER, "ion.warmup.trigger")                      \
     Register(ION_GVN_ENABLE, "ion.gvn.enable")                              \
     Register(ION_FORCE_IC, "ion.forceinlineCaches")                         \
     Register(ION_ENABLE, "ion.enable")                                      \
-    Register(ION_INTERRUPT_WITHOUT_SIGNAL, "ion.interrupt-without-signals") \
     Register(ION_CHECK_RANGE_ANALYSIS, "ion.check-range-analysis")          \
     Register(BASELINE_ENABLE, "baseline.enable")                            \
     Register(OFFTHREAD_COMPILATION_ENABLE, "offthread-compilation.enable")  \
     Register(FULL_DEBUG_CHECKS, "jit.full-debug-checks")                    \
     Register(JUMP_THRESHOLD, "jump-threshold")                              \
     Register(TRACK_OPTIMIZATIONS, "jit.track-optimizations")                \
     Register(SIMULATOR_ALWAYS_INTERRUPT, "simulator.always-interrupt")      \
     Register(SPECTRE_INDEX_MASKING, "spectre.index-masking")                \
--- a/js/src/threading/ProtectedData.cpp
+++ b/js/src/threading/ProtectedData.cpp
@@ -47,27 +47,21 @@ CheckThreadLocal::check() const
     else
         MOZ_ASSERT(id == ThisThread::GetId());
 }
 
 template <AllowedHelperThread Helper>
 void
 CheckActiveThread<Helper>::check() const
 {
-    // When interrupting a thread on Windows, changes are made to the runtime
-    // and active thread's state from another thread while the active thread is
-    // suspended. We need a way to mark these accesses as being tantamount to
-    // accesses by the active thread. See bug 1323066.
-#ifndef XP_WIN
     if (OnHelperThread<Helper>())
         return;
 
     JSContext* cx = TlsContext.get();
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
-#endif // XP_WIN
 }
 
 template class CheckActiveThread<AllowedHelperThread::None>;
 template class CheckActiveThread<AllowedHelperThread::GCTask>;
 template class CheckActiveThread<AllowedHelperThread::IonCompile>;
 
 template <AllowedHelperThread Helper>
 void
@@ -76,30 +70,26 @@ CheckZoneGroup<Helper>::check() const
     if (OnHelperThread<Helper>())
         return;
 
     JSContext* cx = TlsContext.get();
     if (group) {
         if (group->usedByHelperThread()) {
             MOZ_ASSERT(group->ownedByCurrentHelperThread());
         } else {
-            // This check is disabled on windows for the same reason as in
-            // CheckActiveThread.
-#ifndef XP_WIN
             // In a cooperatively scheduled runtime the active thread is
             // permitted access to all zone groups --- even those it has not
             // entered --- for GC and similar purposes. Since all other
             // cooperative threads are suspended, these accesses are threadsafe
             // if the zone group is not in use by a helper thread.
             //
             // A corollary to this is that suspended cooperative threads may
             // not access anything in a zone group, even zone groups they own,
             // because they're not allowed to interact with the JS API.
             MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
-#endif
         }
     } else {
         // |group| will be null for data in the atoms zone. This is protected
         // by the exclusive access lock.
         MOZ_ASSERT(cx->runtime()->currentThreadHasExclusiveAccess());
     }
 }
 
--- a/js/src/vm/JSCompartment.cpp
+++ b/js/src/vm/JSCompartment.cpp
@@ -169,24 +169,22 @@ JSRuntime::createJitRuntime(JSContext* c
     MOZ_ASSERT(!jitRuntime_);
 
     if (!CanLikelyAllocateMoreExecutableMemory()) {
         // Report OOM instead of potentially hitting the MOZ_CRASH below.
         ReportOutOfMemory(cx);
         return nullptr;
     }
 
-    jit::JitRuntime* jrt = cx->new_<jit::JitRuntime>(cx->runtime());
+    jit::JitRuntime* jrt = cx->new_<jit::JitRuntime>();
     if (!jrt)
         return nullptr;
 
-    // Protect jitRuntime_ from being observed (by jit::InterruptRunningCode)
-    // while it is being initialized. Unfortunately, initialization depends on
-    // jitRuntime_ being non-null, so we can't just wait to assign jitRuntime_.
-    JitRuntime::AutoPreventBackedgePatching apbp(cx->runtime(), jrt);
+    // Unfortunately, initialization depends on jitRuntime_ being non-null, so
+    // we can't just wait to assign jitRuntime_.
     jitRuntime_ = jrt;
 
     AutoEnterOOMUnsafeRegion noOOM;
     if (!jitRuntime_->initialize(cx, atomsLock)) {
         // Handling OOM here is complicated: if we delete jitRuntime_ now, we
         // will destroy the ExecutableAllocator, even though there may still be
         // JitCode instances holding references to ExecutablePools.
         noOOM.crash("OOM in createJitRuntime");
--- a/js/src/vm/JSContext.cpp
+++ b/js/src/vm/JSContext.cpp
@@ -96,43 +96,28 @@ js::AutoCycleDetector::~AutoCycleDetecto
     }
 }
 
 bool
 JSContext::init(ContextKind kind)
 {
     // Skip most of the initialization if this thread will not be running JS.
     if (kind == ContextKind::Cooperative) {
-        // Get a platform-native handle for this thread, used by jit::InterruptRunningCode.
-#ifdef XP_WIN
-        size_t openFlags = THREAD_GET_CONTEXT | THREAD_SET_CONTEXT | THREAD_SUSPEND_RESUME |
-                           THREAD_QUERY_INFORMATION;
-        HANDLE self = OpenThread(openFlags, false, GetCurrentThreadId());
-        if (!self)
-        return false;
-        static_assert(sizeof(HANDLE) <= sizeof(threadNative_), "need bigger field");
-        threadNative_ = (size_t)self;
-#else
-        static_assert(sizeof(pthread_t) <= sizeof(threadNative_), "need bigger field");
-        threadNative_ = (size_t)pthread_self();
-#endif
-
         if (!regexpStack.ref().init())
             return false;
 
         if (!fx.initInstance())
             return false;
 
 #ifdef JS_SIMULATOR
         simulator_ = jit::Simulator::Create(this);
         if (!simulator_)
             return false;
 #endif
 
-        jit::EnsureAsyncInterrupt(this);
         if (!wasm::EnsureSignalHandlers(this))
             return false;
     }
 
     // Set the ContextKind last, so that ProtectedData checks will allow us to
     // initialize this context before it becomes the runtime's active context.
     kind_ = kind;
 
@@ -1233,17 +1218,16 @@ JSContext::alreadyReportedError()
         MOZ_ASSERT(isExceptionPending());
 #endif
     return mozilla::Err(reportedError);
 }
 
 JSContext::JSContext(JSRuntime* runtime, const JS::ContextOptions& options)
   : runtime_(runtime),
     kind_(ContextKind::Background),
-    threadNative_(0),
     helperThread_(nullptr),
     options_(options),
     arenas_(nullptr),
     enterCompartmentDepth_(0),
     jitActivation(nullptr),
     activation_(nullptr),
     profilingActivation_(nullptr),
     nativeStackBase(GetNativeStackBase()),
@@ -1304,17 +1288,16 @@ JSContext::JSContext(JSRuntime* runtime,
     data(nullptr),
     outstandingRequests(0),
     jitIsBroken(false),
     asyncCauseForNewActivations(nullptr),
     asyncCallIsExplicit(false),
     interruptCallbackDisabled(false),
     interrupt_(false),
     interruptRegExpJit_(false),
-    handlingJitInterrupt_(false),
     osrTempData_(nullptr),
     ionReturnOverride_(MagicValue(JS_ARG_POISON)),
     jitStackLimit(UINTPTR_MAX),
     jitStackLimitNoInterrupt(UINTPTR_MAX),
     getIncumbentGlobalCallback(nullptr),
     enqueuePromiseJobCallback(nullptr),
     enqueuePromiseJobCallbackData(nullptr),
     jobQueue(nullptr),
@@ -1334,21 +1317,16 @@ JSContext::JSContext(JSRuntime* runtime,
 }
 
 JSContext::~JSContext()
 {
     // Clear the ContextKind first, so that ProtectedData checks will allow us to
     // destroy this context even if the runtime is already gone.
     kind_ = ContextKind::Background;
 
-#ifdef XP_WIN
-    if (threadNative_)
-        CloseHandle((HANDLE)threadNative_.ref());
-#endif
-
     /* Free the stuff hanging off of cx. */
     MOZ_ASSERT(!resolvingList);
 
     js_delete(ionPcScriptCache.ref());
 
     if (dtoaState)
         DestroyDtoaState(dtoaState);
 
--- a/js/src/vm/JSContext.h
+++ b/js/src/vm/JSContext.h
@@ -102,36 +102,32 @@ struct JSContext : public JS::RootingCon
     ~JSContext();
 
     bool init(js::ContextKind kind);
 
   private:
     js::UnprotectedData<JSRuntime*> runtime_;
     js::WriteOnceData<js::ContextKind> kind_;
 
-    // System handle for the thread this context is associated with.
-    js::WriteOnceData<size_t> threadNative_;
-
     // The thread on which this context is running if this is not the main thread.
     js::ThreadLocalData<js::HelperThread*> helperThread_;
 
     friend class js::gc::AutoSuppressNurseryCellAlloc;
     js::ThreadLocalData<size_t> nurserySuppressions_;
 
     js::ThreadLocalData<JS::ContextOptions> options_;
 
     js::ThreadLocalData<js::gc::ArenaLists*> arenas_;
 
   public:
     // This is used by helper threads to change the runtime their context is
     // currently operating on.
     void setRuntime(JSRuntime* rt);
 
     bool isCooperativelyScheduled() const { return kind_ == js::ContextKind::Cooperative; }
-    size_t threadNative() const { return threadNative_; }
 
     inline js::gc::ArenaLists* arenas() const { return arenas_; }
 
     template <typename T>
     bool isInsideCurrentZone(T thing) const {
         return thing->zoneFromAnyThread() == zone_;
     }
 
@@ -836,35 +832,17 @@ struct JSContext : public JS::RootingCon
     void requestInterrupt(InterruptMode mode);
     bool handleInterrupt();
 
     MOZ_ALWAYS_INLINE bool hasPendingInterrupt() const {
         static_assert(sizeof(interrupt_) == sizeof(uint32_t), "Assumed by JIT callers");
         return interrupt_;
     }
 
-  private:
-    // Set when we're handling an interrupt of JIT/wasm code in
-    // InterruptRunningJitCode.
-    mozilla::Atomic<bool> handlingJitInterrupt_;
-
   public:
-    bool startHandlingJitInterrupt() {
-        // Return true if we changed handlingJitInterrupt_ from
-        // false to true.
-        return handlingJitInterrupt_.compareExchange(false, true);
-    }
-    void finishHandlingJitInterrupt() {
-        MOZ_ASSERT(handlingJitInterrupt_);
-        handlingJitInterrupt_ = false;
-    }
-    bool handlingJitInterrupt() const {
-        return handlingJitInterrupt_;
-    }
-
     void* addressOfInterrupt() {
         return &interrupt_;
     }
     void* addressOfInterruptRegExpJit() {
         return &interruptRegExpJit_;
     }
     void* addressOfJitStackLimit() {
         return &jitStackLimit;
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -413,20 +413,18 @@ JSRuntime::addSizeOfIncludingThis(mozill
 
     {
         AutoLockScriptData lock(this);
         rtSizes->scriptData += scriptDataTable(lock).sizeOfExcludingThis(mallocSizeOf);
         for (ScriptDataTable::Range r = scriptDataTable(lock).all(); !r.empty(); r.popFront())
             rtSizes->scriptData += mallocSizeOf(r.front());
     }
 
-    if (jitRuntime_) {
+    if (jitRuntime_)
         jitRuntime_->execAlloc().addSizeOfCode(&rtSizes->code);
-        jitRuntime_->backedgeExecAlloc().addSizeOfCode(&rtSizes->code);
-    }
 
     rtSizes->wasmRuntime += wasmInstances.lock()->sizeOfExcludingThis(mallocSizeOf);
 }
 
 static bool
 InvokeInterruptCallback(JSContext* cx)
 {
     MOZ_ASSERT(cx->requestDepth >= 1);
@@ -501,24 +499,22 @@ void
 JSContext::requestInterrupt(InterruptMode mode)
 {
     interrupt_ = true;
     jitStackLimit = UINTPTR_MAX;
 
     if (mode == JSContext::RequestInterruptUrgent) {
         // If this interrupt is urgent (slow script dialog for instance), take
         // additional steps to interrupt corner cases where the above fields are
-        // not regularly polled. Wake ilooping Ion code, irregexp JIT code and
-        // Atomics.wait()
+        // not regularly polled. Wake Atomics.wait() and irregexp JIT code.
         interruptRegExpJit_ = true;
         FutexThread::lock();
         if (fx.isWaiting())
             fx.wake(FutexThread::WakeForJSInterrupt);
         fx.unlock();
-        jit::InterruptRunningCode(this);
         wasm::InterruptRunningCode(this);
     }
 }
 
 bool
 JSContext::handleInterrupt()
 {
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
--- a/layout/reftests/w3c-css/submitted/ib-split/table-pseudo-in-part3-1-ref.html
+++ b/layout/reftests/w3c-css/submitted/ib-split/table-pseudo-in-part3-1-ref.html
@@ -3,9 +3,9 @@
   <head>
   </head>
   <body>
     <div>
       aaa
       <div>bbb</div>
     </div>
   </body>
-</html> 
+</html>
--- a/netwerk/base/nsNetUtil.cpp
+++ b/netwerk/base/nsNetUtil.cpp
@@ -2106,16 +2106,67 @@ NS_HasBeenCrossOrigin(nsIChannel* aChann
   if (aboutBlankInherits && NS_IsAboutBlank(uri)) {
     return false;
   }
 
   return NS_FAILED(loadingPrincipal->CheckMayLoad(uri, aReport, dataInherits));
 }
 
 bool
+NS_IsSafeTopLevelNav(nsIChannel* aChannel)
+{
+  if (!aChannel) {
+    return false;
+  }
+  nsCOMPtr<nsILoadInfo> loadInfo = aChannel->GetLoadInfo();
+  if (!loadInfo) {
+    return false;
+  }
+  if (loadInfo->GetExternalContentPolicyType() != nsIContentPolicy::TYPE_DOCUMENT) {
+    return false;
+  }
+  RefPtr<HttpBaseChannel> baseChan = do_QueryObject(aChannel);
+  if (!baseChan) {
+    return false;
+  }
+  nsHttpRequestHead *requestHead = baseChan->GetRequestHead();
+  if (!requestHead) {
+    return false;
+  }
+  return requestHead->IsSafeMethod();
+}
+
+bool NS_IsTopLevelForeign(nsIChannel* aChannel)
+{
+  if (!aChannel) {
+    return false;
+  }
+  nsCOMPtr<nsILoadInfo> loadInfo = aChannel->GetLoadInfo();
+  if (!loadInfo) {
+    return false;
+  }
+  if (loadInfo->GetExternalContentPolicyType() != nsIContentPolicy::TYPE_DOCUMENT) {
+    return false;
+  }
+
+  nsCOMPtr<mozIThirdPartyUtil> thirdPartyUtil =
+    do_GetService(THIRDPARTYUTIL_CONTRACTID);
+  if (!thirdPartyUtil) {
+    return false;
+  }
+
+  nsCOMPtr<nsIURI> uri;
+  loadInfo->TriggeringPrincipal()->GetURI(getter_AddRefs(uri));
+
+  bool isForeign = false;
+  thirdPartyUtil->IsThirdPartyChannel(aChannel, uri, &isForeign);
+  return isForeign;
+}
+
+bool
 NS_ShouldCheckAppCache(nsIPrincipal *aPrincipal)
 {
     uint32_t privateBrowsingId = 0;
     nsresult rv = aPrincipal->GetPrivateBrowsingId(&privateBrowsingId);
     if (NS_SUCCEEDED(rv) && (privateBrowsingId > 0)) {
         return false;
     }
 
--- a/netwerk/base/nsNetUtil.h
+++ b/netwerk/base/nsNetUtil.h
@@ -674,16 +674,26 @@ bool NS_GetOriginAttributes(nsIChannel *
                             mozilla::OriginAttributes &aAttributes);
 
 /**
  * Returns true if the channel has visited any cross-origin URLs on any
  * URLs that it was redirected through.
  */
 bool NS_HasBeenCrossOrigin(nsIChannel* aChannel, bool aReport = false);
 
+/**
+ * Returns true if the channel is a top-level document navigation issued with
+ * a safe HTTP request method.
+ */
+bool NS_IsSafeTopLevelNav(nsIChannel* aChannel);
+
+/**
+ * Returns true if the channel is a top-level document navigation that is
+ * third-party (foreign) with respect to its triggering principal.
+ */
+bool NS_IsTopLevelForeign(nsIChannel* aChannel);
+
 // Constants duplicated from nsIScriptSecurityManager so we avoid having necko
 // know about script security manager.
 #define NECKO_NO_APP_ID 0
 #define NECKO_UNKNOWN_APP_ID UINT32_MAX
 
 // Unique first-party domain for separating the safebrowsing cookie.
 // Note if this value is changed, code in test_cookiejars_safebrowsing.js and
 // nsUrlClassifierHashCompleter.js should also be changed.
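
The two helpers declared above feed the SameSite cookie checks added later in
this changeset. As a rough, self-contained sketch of what "safe" means here,
assuming the RFC 7231 (section 4.2.1) notion of a safe request method rather
than the exact set accepted by nsHttpRequestHead::IsSafeMethod():

#include <string>

// Illustrative only: RFC 7231 lists GET, HEAD, OPTIONS and TRACE as safe
// methods. The real check in NS_IsSafeTopLevelNav() is delegated to
// nsHttpRequestHead::IsSafeMethod() on the channel's request head.
static bool IsSafeHttpMethod(const std::string& aMethod)
{
    return aMethod == "GET" || aMethod == "HEAD" ||
           aMethod == "OPTIONS" || aMethod == "TRACE";
}
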
--- a/netwerk/cookie/CookieServiceChild.cpp
+++ b/netwerk/cookie/CookieServiceChild.cpp
@@ -11,16 +11,17 @@
 #include "mozilla/dom/ContentChild.h"
 #include "mozilla/ipc/URIUtils.h"
 #include "mozilla/net/NeckoChild.h"
 #include "mozilla/SystemGroup.h"
 #include "nsCookie.h"
 #include "nsCookieService.h"
 #include "nsContentUtils.h"
 #include "nsNetCID.h"
+#include "nsNetUtil.h"
 #include "nsIChannel.h"
 #include "nsICookiePermission.h"
 #include "nsIEffectiveTLDService.h"
 #include "nsIURI.h"
 #include "nsIPrefService.h"
 #include "nsIPrefBranch.h"
 #include "nsServiceManagerUtils.h"
 
@@ -126,17 +127,20 @@ CookieServiceChild::TrackCookieLoad(nsIC
   }
   nsCOMPtr<nsILoadInfo> loadInfo = aChannel->GetLoadInfo();
   mozilla::OriginAttributes attrs;
   if (loadInfo) {
     attrs = loadInfo->GetOriginAttributes();
   }
   URIParams uriParams;
   SerializeURI(uri, uriParams);
-  SendPrepareCookieList(uriParams, isForeign, attrs);
+  bool isSafeTopLevelNav = NS_IsSafeTopLevelNav(aChannel);
+  bool isTopLevelForeign = NS_IsTopLevelForeign(aChannel);
+  SendPrepareCookieList(uriParams, isForeign, isSafeTopLevelNav,
+                        isTopLevelForeign, attrs);
 }
 
 mozilla::ipc::IPCResult
 CookieServiceChild::RecvRemoveAll(){
   mCookiesMap.Clear();
   return IPC_OK();
 }
 
@@ -251,16 +255,18 @@ CookieServiceChild::PrefChanged(nsIPrefB
     mThirdPartyUtil = do_GetService(THIRDPARTYUTIL_CONTRACTID);
     NS_ASSERTION(mThirdPartyUtil, "require ThirdPartyUtil service");
   }
 }
 
 void
 CookieServiceChild::GetCookieStringFromCookieHashTable(nsIURI                 *aHostURI,
                                                        bool                   aIsForeign,
+                                                       bool                   aIsSafeTopLevelNav,
+                                                       bool                   aIsTopLevelForeign,
                                                        const OriginAttributes &aOriginAttrs,
                                                        nsCString              &aCookieString)
 {
   nsCOMPtr<nsIEffectiveTLDService> TLDService =
     do_GetService(NS_EFFECTIVETLDSERVICE_CONTRACTID);
   NS_ASSERTION(TLDService, "Can't get TLDService");
   bool requireHostMatch;
   nsAutoCString baseDomain;
@@ -301,16 +307,31 @@ CookieServiceChild::GetCookieStringFromC
     // check the host, since the base domain lookup is conservative.
     if (!nsCookieService::DomainMatches(cookie, hostFromURI))
       continue;
 
     // if the cookie is secure and the host scheme isn't, we can't send it
     if (cookie->IsSecure() && !isSecure)
       continue;
 
+    int32_t sameSiteAttr = 0;
+    cookie->GetSameSite(&sameSiteAttr);
+    if (aIsForeign || aIsTopLevelForeign) {
+      // if it's a cross-origin request and the cookie is same-site strict,
+      // don't send it
+      if (sameSiteAttr == nsICookie2::SAMESITE_STRICT) {
+        continue;
+      }
+      // if it's a cross-origin request and the cookie is same-site lax, but
+      // this is not a safe top-level navigation, don't send it
+      if (sameSiteAttr == nsICookie2::SAMESITE_LAX && !aIsSafeTopLevelNav) {
+        continue;
+      }
+    }
+
     // if the nsIURI path doesn't match the cookie path, don't send it back
     if (!nsCookieService::PathMatches(cookie, pathFromURI))
       continue;
 
     // check if the cookie has expired
     if (cookie->Expiry() <= currentTime) {
       continue;
     }
@@ -328,23 +349,25 @@ CookieServiceChild::GetCookieStringFromC
       }
     }
   }
 }
 
 void
 CookieServiceChild::GetCookieStringSyncIPC(nsIURI                 *aHostURI,
                                            bool                   aIsForeign,
+                                           bool                   aIsSafeTopLevelNav,
+                                           bool                   aIsTopLevelForeign,
                                            const OriginAttributes &aAttrs,
                                            nsAutoCString          &aCookieString)
 {
   URIParams uriParams;
   SerializeURI(aHostURI, uriParams);
 
-  SendGetCookieString(uriParams, aIsForeign, aAttrs, &aCookieString);
+  SendGetCookieString(uriParams, aIsForeign, aIsSafeTopLevelNav, aIsTopLevelForeign, aAttrs, &aCookieString);
 }
 
 uint32_t
 CookieServiceChild::CountCookiesFromHashTable(const nsCString &aBaseDomain,
                                               const OriginAttributes &aOriginAttrs)
 {
   CookiesList *cookiesList = nullptr;
 
@@ -407,16 +430,17 @@ CookieServiceChild::RecordDocumentCookie
   for (uint32_t i = 0; i < cookiesList->Length(); i++) {
     nsCookie *cookie = cookiesList->ElementAt(i);
     if (cookie->Name().Equals(aCookie->Name()) &&
         cookie->Host().Equals(aCookie->Host()) &&
         cookie->Path().Equals(aCookie->Path())) {
       if (cookie->Value().Equals(aCookie->Value()) &&
           cookie->Expiry() == aCookie->Expiry() &&
           cookie->IsSecure() == aCookie->IsSecure() &&
+          cookie->SameSite() == aCookie->SameSite() &&
           cookie->IsSession() == aCookie->IsSession() &&
           cookie->IsHttpOnly() == aCookie->IsHttpOnly()) {
         cookie->SetLastAccessed(aCookie->LastAccessed());
         return;
       }
       cookiesList->RemoveElementAt(i);
       break;
     }
@@ -456,24 +480,30 @@ CookieServiceChild::GetCookieStringInter
       attrs = loadInfo->GetOriginAttributes();
     }
   }
 
   // Asynchronously call the parent.
   bool isForeign = true;
   if (RequireThirdPartyCheck())
     mThirdPartyUtil->IsThirdPartyChannel(aChannel, aHostURI, &isForeign);
+
+  bool isSafeTopLevelNav = NS_IsSafeTopLevelNav(aChannel);
+  bool isTopLevelForeign = NS_IsTopLevelForeign(aChannel);
+
   nsAutoCString result;
   if (!mIPCSync) {
-    GetCookieStringFromCookieHashTable(aHostURI, !!isForeign, attrs, result);
+    GetCookieStringFromCookieHashTable(aHostURI, !!isForeign, isSafeTopLevelNav,
+                                       isTopLevelForeign, attrs, result);
   } else {
     if (!mIPCOpen) {
       return NS_ERROR_NOT_AVAILABLE;
     }
-    GetCookieStringSyncIPC(aHostURI, !!isForeign, attrs, result);
+    GetCookieStringSyncIPC(aHostURI, !!isForeign, isSafeTopLevelNav,
+                           isTopLevelForeign, attrs, result);
   }
 
   if (!result.IsEmpty())
     *aCookieString = ToNewCString(result);
 
   return NS_OK;
 }
 
--- a/netwerk/cookie/CookieServiceChild.h
+++ b/netwerk/cookie/CookieServiceChild.h
@@ -56,22 +56,26 @@ protected:
                      nsCString &aOriginatingCharset);
 
   nsresult GetCookieStringInternal(nsIURI *aHostURI,
                                    nsIChannel *aChannel,
                                    char **aCookieString);
 
   void GetCookieStringFromCookieHashTable(nsIURI *aHostURI,
                                           bool aIsForeign,
+                                          bool aIsSafeTopLevelNav,
+                                          bool aIsTopLevelForeign,
                                           const OriginAttributes &aAttrs,
                                           nsCString &aCookieString);
 
   void
   GetCookieStringSyncIPC(nsIURI                 *aHostURI,
                          bool                    aIsForeign,
+                         bool                    aIsSafeTopLevelNav,
+                         bool                    aIsTopLevelForeign,
                          const OriginAttributes &aAttrs,
                          nsAutoCString          &aCookieString);
 
   nsresult SetCookieStringInternal(nsIURI *aHostURI,
                                    nsIChannel *aChannel,
                                    const char *aCookieString,
                                    const char *aServerTime,
                                    bool aFromHttp);
--- a/netwerk/cookie/CookieServiceParent.cpp
+++ b/netwerk/cookie/CookieServiceParent.cpp
@@ -150,25 +150,27 @@ CookieServiceParent::TrackCookieLoad(nsI
   nsCOMPtr<nsIURI> uri;
   aChannel->GetURI(getter_AddRefs(uri));
 
   nsCOMPtr<nsILoadInfo> loadInfo = aChannel->GetLoadInfo();
   mozilla::OriginAttributes attrs;
   if (loadInfo) {
     attrs = loadInfo->GetOriginAttributes();
   }
+  bool isSafeTopLevelNav = NS_IsSafeTopLevelNav(aChannel);
+  bool isTopLevelForeign = NS_IsTopLevelForeign(aChannel);
 
   // Send matching cookies to Child.
   nsCOMPtr<mozIThirdPartyUtil> thirdPartyUtil;
   thirdPartyUtil = do_GetService(THIRDPARTYUTIL_CONTRACTID);
   bool isForeign = true;
   thirdPartyUtil->IsThirdPartyChannel(aChannel, uri, &isForeign);
   nsTArray<nsCookie*> foundCookieList;
-  mCookieService->GetCookiesForURI(uri, isForeign, false,
-                                   attrs, foundCookieList);
+  mCookieService->GetCookiesForURI(uri, isForeign, isSafeTopLevelNav, isTopLevelForeign,
+                                   false, attrs, foundCookieList);
   nsTArray<CookieStruct> matchingCookiesList;
   SerialializeCookieList(foundCookieList, matchingCookiesList, uri);
   Unused << SendTrackCookiesLoad(matchingCookiesList, attrs);
 }
 
 void
 CookieServiceParent::SerialializeCookieList(const nsTArray<nsCookie*> &aFoundCookieList,
                                             nsTArray<CookieStruct>    &aCookiesList,
@@ -188,53 +190,57 @@ CookieServiceParent::SerialializeCookieL
     cookieStruct->isSecure() = cookie->IsSecure();
     cookieStruct->sameSite() = cookie->SameSite();
   }
 }
 
 mozilla::ipc::IPCResult
 CookieServiceParent::RecvPrepareCookieList(const URIParams        &aHost,
                                            const bool             &aIsForeign,
+                                           const bool             &aIsSafeTopLevelNav,
+                                           const bool             &aIsTopLevelForeign,
                                            const OriginAttributes &aAttrs)
 {
   nsCOMPtr<nsIURI> hostURI = DeserializeURI(aHost);
 
   // Send matching cookies to Child.
   nsTArray<nsCookie*> foundCookieList;
-  mCookieService->GetCookiesForURI(hostURI, aIsForeign, false,
-                                   aAttrs, foundCookieList);
+  mCookieService->GetCookiesForURI(hostURI, aIsForeign, aIsSafeTopLevelNav, aIsTopLevelForeign,
+                                   false, aAttrs, foundCookieList);
   nsTArray<CookieStruct> matchingCookiesList;
   SerialializeCookieList(foundCookieList, matchingCookiesList, hostURI);
   Unused << SendTrackCookiesLoad(matchingCookiesList, aAttrs);
   return IPC_OK();
 }
 
 void
 CookieServiceParent::ActorDestroy(ActorDestroyReason aWhy)
 {
   // Nothing needed here. Called right before destructor since this is a
   // non-refcounted class.
 }
 
 mozilla::ipc::IPCResult
 CookieServiceParent::RecvGetCookieString(const URIParams& aHost,
                                          const bool& aIsForeign,
+                                         const bool& aIsSafeTopLevelNav,
+                                         const bool& aIsTopLevelForeign,
                                          const OriginAttributes& aAttrs,
                                          nsCString* aResult)
 {
   if (!mCookieService)
     return IPC_OK();
 
   // Deserialize URI. Having a host URI is mandatory and should always be
   // provided by the child; thus we consider failure fatal.
   nsCOMPtr<nsIURI> hostURI = DeserializeURI(aHost);
   if (!hostURI)
     return IPC_FAIL_NO_REASON(this);
-
-  mCookieService->GetCookieStringInternal(hostURI, aIsForeign, false, aAttrs, *aResult);
+  mCookieService->GetCookieStringInternal(hostURI, aIsForeign, aIsSafeTopLevelNav, aIsTopLevelForeign,
+                                          false, aAttrs, *aResult);
   return IPC_OK();
 }
 
 mozilla::ipc::IPCResult
 CookieServiceParent::RecvSetCookieString(const URIParams& aHost,
                                          const bool& aIsForeign,
                                          const nsCString& aCookieString,
                                          const nsCString& aServerTime,
--- a/netwerk/cookie/CookieServiceParent.h
+++ b/netwerk/cookie/CookieServiceParent.h
@@ -32,28 +32,32 @@ public:
 
   void AddCookie(nsICookie *aCookie);
 
 protected:
   virtual void ActorDestroy(ActorDestroyReason aWhy) override;
 
   virtual mozilla::ipc::IPCResult RecvGetCookieString(const URIParams& aHost,
                                                       const bool& aIsForeign,
+                                                      const bool& aIsSafeTopLevelNav,
+                                                      const bool& aIsTopLevelForeign,
                                                       const OriginAttributes& aAttrs,
                                                       nsCString* aResult) override;
 
   virtual mozilla::ipc::IPCResult RecvSetCookieString(const URIParams& aHost,
                                                       const bool& aIsForeign,
                                                       const nsCString& aCookieString,
                                                       const nsCString& aServerTime,
                                                       const OriginAttributes& aAttrs,
                                                       const bool& aFromHttp) override;
   virtual
   mozilla::ipc::IPCResult RecvPrepareCookieList(const URIParams &aHost,
                                                 const bool &aIsForeign,
+                                                const bool &aIsSafeTopLevelNav,
+                                                const bool &aIsTopLevelForeign,
                                                 const OriginAttributes &aAttrs) override;
 
   void
   SerialializeCookieList(const nsTArray<nsCookie*> &aFoundCookieList,
                          nsTArray<CookieStruct> &aCookiesList,
                          nsIURI *aHostURI);
 
   RefPtr<nsCookieService> mCookieService;
--- a/netwerk/cookie/PCookieService.ipdl
+++ b/netwerk/cookie/PCookieService.ipdl
@@ -57,16 +57,18 @@ parent:
    * @see nsICookieService.getCookieString
    * @see nsICookieService.getCookieStringFromHttp
    * @see mozIThirdPartyUtil.isThirdPartyChannel
    *
    * @return the resulting cookie string.
    */
   nested(inside_cpow) sync GetCookieString(URIParams host,
                                            bool isForeign,
+                                           bool isSafeTopLevelNav,
+                                           bool isTopLevelForeign,
                                            OriginAttributes attrs)
        returns (nsCString result);
 
   /*
    * Set a cookie string.
    *
    * @param host
    *        Same as the 'aURI' argument to nsICookieService.setCookieString.
@@ -98,16 +100,18 @@ parent:
                                             bool isForeign,
                                             nsCString cookieString,
                                             nsCString serverTime,
                                             OriginAttributes attrs,
                                             bool aFromHttp);
 
   async PrepareCookieList(URIParams host,
                           bool isForeign,
+                          bool isSafeTopLevelNav,
+                          bool isTopLevelForeign,
                           OriginAttributes attrs);
 
   async __delete__();
 
 child:
   async TrackCookiesLoad(CookieStruct[] cookiesList,
                          OriginAttributes attrs);
 
--- a/netwerk/cookie/nsCookieService.cpp
+++ b/netwerk/cookie/nsCookieService.cpp
@@ -2035,18 +2035,21 @@ nsCookieService::GetCookieStringCommon(n
   mThirdPartyUtil->IsThirdPartyChannel(aChannel, aHostURI, &isForeign);
 
   // Get originAttributes.
   OriginAttributes attrs;
   if (aChannel) {
     NS_GetOriginAttributes(aChannel, attrs);
   }
 
+  bool isSafeTopLevelNav = NS_IsSafeTopLevelNav(aChannel);
+  bool isTopLevelForeign = NS_IsTopLevelForeign(aChannel);
   nsAutoCString result;
-  GetCookieStringInternal(aHostURI, isForeign, aHttpBound, attrs, result);
+  GetCookieStringInternal(aHostURI, isForeign, isSafeTopLevelNav, isTopLevelForeign,
+                          aHttpBound, attrs, result);
   *aCookie = result.IsEmpty() ? nullptr : ToNewCString(result);
   return NS_OK;
 }
 
 NS_IMETHODIMP
 nsCookieService::SetCookieString(nsIURI     *aHostURI,
                                  nsIPrompt  *aPrompt,
                                  const char *aCookieHeader,
@@ -3103,16 +3106,18 @@ nsCookieService::PathMatches(nsCookie* a
   // either the paths match exactly, or the cookie path is a prefix of
   // the given path.
   return true;
 }
 
 void
 nsCookieService::GetCookiesForURI(nsIURI *aHostURI,
                                   bool aIsForeign,
+                                  bool aIsSafeTopLevelNav,
+                                  bool aIsTopLevelForeign,
                                   bool aHttpBound,
                                   const OriginAttributes& aOriginAttrs,
                                   nsTArray<nsCookie*>& aCookieList)
 {
   NS_ASSERTION(aHostURI, "null host!");
 
   if (!mDBState) {
     NS_WARNING("No DBState! Profile already closed?");
@@ -3192,16 +3197,31 @@ nsCookieService::GetCookiesForURI(nsIURI
     // check the host, since the base domain lookup is conservative.
     if (!DomainMatches(cookie, hostFromURI))
       continue;
 
     // if the cookie is secure and the host scheme isn't, we can't send it
     if (cookie->IsSecure() && !isSecure)
       continue;
 
+    int32_t sameSiteAttr = 0;
+    cookie->GetSameSite(&sameSiteAttr);
+    if (aIsForeign || aIsTopLevelForeign) {
+      // if it's a cross-origin request and the cookie is same-site strict,
+      // don't send it
+      if (sameSiteAttr == nsICookie2::SAMESITE_STRICT) {
+        continue;
+      }
+      // if it's a cross-origin request and the cookie is same-site lax, but
+      // this is not a safe top-level navigation, don't send it
+      if (sameSiteAttr == nsICookie2::SAMESITE_LAX && !aIsSafeTopLevelNav) {
+        continue;
+      }
+    }
+
     // if the cookie is httpOnly and it's not going directly to the HTTP
     // connection, don't send it
     if (cookie->IsHttpOnly() && !aHttpBound)
       continue;
 
     // if the nsIURI path doesn't match the cookie path, don't send it back
     if (!PathMatches(cookie, pathFromURI))
       continue;
@@ -3259,22 +3279,25 @@ nsCookieService::GetCookiesForURI(nsIURI
   // this is required per RFC2109.  if cookies match in length,
   // then sort by creation time (see bug 236772).
   aCookieList.Sort(CompareCookiesForSending());
 }
 
 void
 nsCookieService::GetCookieStringInternal(nsIURI *aHostURI,
                                          bool aIsForeign,
+                                         bool aIsSafeTopLevelNav,
+                                         bool aIsTopLevelForeign,
                                          bool aHttpBound,
                                          const OriginAttributes& aOriginAttrs,
                                          nsCString &aCookieString)
 {
   AutoTArray<nsCookie*, 8> foundCookieList;
-  GetCookiesForURI(aHostURI, aIsForeign, aHttpBound, aOriginAttrs, foundCookieList);
+  GetCookiesForURI(aHostURI, aIsForeign, aIsSafeTopLevelNav, aIsTopLevelForeign,
+                   aHttpBound, aOriginAttrs, foundCookieList);
 
   nsCookie* cookie;
   for (uint32_t i = 0; i < foundCookieList.Length(); ++i) {
     cookie = foundCookieList.ElementAt(i);
 
     // check if we have anything to write
     if (!cookie->Name().IsEmpty() || !cookie->Value().IsEmpty()) {
       // if we've already added a cookie to the return list, append a "; " so
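
For reference, the SameSite gating added above (both here and in the
child-side cookie hash table lookup) reduces to the following pure predicate.
This is a hedged restatement for illustration; the SameSite enum and the
function name are stand-ins, not Gecko API, and the real code compares against
the nsICookie2 SAMESITE_* constants:

enum class SameSite { None, Lax, Strict };  // stand-in for nsICookie2 constants

// Returns whether the SameSite rules alone allow sending the cookie; the other
// checks above (secure, httpOnly, path, expiry) still apply independently.
static bool SameSiteAllowsSend(SameSite aSameSite,
                               bool aIsForeign,
                               bool aIsTopLevelForeign,
                               bool aIsSafeTopLevelNav)
{
    if (aIsForeign || aIsTopLevelForeign) {
        if (aSameSite == SameSite::Strict)
            return false;  // strict cookies never go on cross-site requests
        if (aSameSite == SameSite::Lax && !aIsSafeTopLevelNav)
            return false;  // lax cookies only on safe top-level navigations
    }
    return true;
}
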
--- a/netwerk/cookie/nsCookieService.h
+++ b/netwerk/cookie/nsCookieService.h
@@ -263,17 +263,17 @@ class nsCookieService final : public nsI
   static nsCString GetPathFromURI(nsIURI *aHostURI);
   static nsresult GetBaseDomain(nsIEffectiveTLDService *aTLDService, nsIURI *aHostURI, nsCString &aBaseDomain, bool &aRequireHostMatch);
   static nsresult GetBaseDomainFromHost(nsIEffectiveTLDService *aTLDService, const nsACString &aHost, nsCString &aBaseDomain);
   static bool DomainMatches(nsCookie* aCookie, const nsACString& aHost);
   static bool PathMatches(nsCookie* aCookie, const nsACString& aPath);
   static bool CanSetCookie(nsIURI *aHostURI, const nsCookieKey& aKey, nsCookieAttributes &aCookieAttributes, bool aRequireHostMatch, CookieStatus aStatus, nsDependentCString &aCookieHeader, int64_t aServerTime, bool aFromHttp, nsIChannel* aChannel, bool aLeaveSercureAlone, bool &aSetCookie, mozIThirdPartyUtil* aThirdPartyUtil);
   static CookieStatus CheckPrefs(nsICookiePermission *aPermissionServices, uint8_t aCookieBehavior, bool aThirdPartySession, bool aThirdPartyNonsecureSession, nsIURI *aHostURI, bool aIsForeign, const char *aCookieHeader, const int aNumOfCookies, const OriginAttributes& aOriginAttrs);
   static int64_t ParseServerTime(const nsCString &aServerTime);
-  void GetCookiesForURI(nsIURI *aHostURI, bool aIsForeign, bool aHttpBound, const OriginAttributes& aOriginAttrs, nsTArray<nsCookie*>& aCookieList);
+  void GetCookiesForURI(nsIURI *aHostURI, bool aIsForeign, bool aIsSafeTopLevelNav, bool aIsTopLevelForeign, bool aHttpBound, const OriginAttributes& aOriginAttrs, nsTArray<nsCookie*>& aCookieList);
 
   protected:
     virtual ~nsCookieService();
 
     void                          PrefChanged(nsIPrefBranch *aPrefBranch);
     void                          InitDBStates();
     OpenDBResult                  TryInitDB(bool aDeleteExistingDB);
     void                          InitDBConn();
@@ -289,17 +289,17 @@ class nsCookieService final : public nsI
     void                          HandleDBClosed(DBState* aDBState);
     void                          HandleCorruptDB(DBState* aDBState);
     void                          RebuildCorruptDB(DBState* aDBState);
     OpenDBResult                  Read();
     mozilla::UniquePtr<ConstCookie> GetCookieFromRow(mozIStorageStatement *aRow, const OriginAttributes &aOriginAttributes);
     void                          EnsureReadComplete(bool aInitDBConn);
     nsresult                      NormalizeHost(nsCString &aHost);
     nsresult                      GetCookieStringCommon(nsIURI *aHostURI, nsIChannel *aChannel, bool aHttpBound, char** aCookie);
-    void                          GetCookieStringInternal(nsIURI *aHostURI, bool aIsForeign, bool aHttpBound, const OriginAttributes& aOriginAttrs, nsCString &aCookie);
+    void                          GetCookieStringInternal(nsIURI *aHostURI, bool aIsForeign, bool aIsSafeTopLevelNav, bool aIsTopLevelForeign, bool aHttpBound, const OriginAttributes& aOriginAttrs, nsCString &aCookie);
     nsresult                      SetCookieStringCommon(nsIURI *aHostURI, const char *aCookieHeader, const char *aServerTime, nsIChannel *aChannel, bool aFromHttp);
     void                          SetCookieStringInternal(nsIURI *aHostURI, bool aIsForeign, nsDependentCString &aCookieHeader, const nsCString &aServerTime, bool aFromHttp, bool aFromChild, const OriginAttributes &aOriginAttrs, nsIChannel* aChannel);
     bool                          SetCookieInternal(nsIURI *aHostURI, const nsCookieKey& aKey, bool aRequireHostMatch, CookieStatus aStatus, nsDependentCString &aCookieHeader, int64_t aServerTime, bool aFromHttp, bool aFromChild, nsIChannel* aChannel);
     void                          AddInternal(const nsCookieKey& aKey, nsCookie *aCookie, int64_t aCurrentTimeInUsec, nsIURI *aHostURI, const char *aCookieHeader, bool aFromHttp, bool aFromChild = false);
     void                          RemoveCookieFromList(const nsListIter &aIter, mozIStorageBindingParamsArray *aParamsArray = nullptr);
     void                          AddCookieToList(const nsCookieKey& aKey, nsCookie *aCookie, DBState *aDBState, mozIStorageBindingParamsArray *aParamsArray, bool aWriteToDB = true);
     void                          UpdateCookieInList(nsCookie *aCookie, int64_t aLastAccessed, mozIStorageBindingParamsArray *aParamsArray);
     static bool                   GetTokenValue(nsACString::const_char_iterator &aIter, nsACString::const_char_iterator &aEndIter, nsDependentCSubstring &aTokenString, nsDependentCSubstring &aTokenValue, bool &aEqualsFound);
--- a/netwerk/test/TestCookie.cpp
+++ b/netwerk/test/TestCookie.cpp
@@ -813,13 +813,28 @@ TEST(TestCookie,TestCookieMain)
         EXPECT_TRUE(sameSiteAttr == nsICookie2::SAMESITE_STRICT);
       } else if (name.EqualsLiteral("lax")) {
         EXPECT_TRUE(sameSiteAttr == nsICookie2::SAMESITE_LAX);
       }
     }
 
     EXPECT_TRUE(i == 6);
 
+    // *** SameSite attribute
+    // Clear the cookies
+    EXPECT_TRUE(NS_SUCCEEDED(cookieMgr->RemoveAll()));
+
+    // Please note that the aIsForeign flag is always set to true in this test
+    // setup, because no nsIChannel is passed to SetCookieString(). Therefore we
+    // can only verify that same-site cookies are not sent on cross-origin requests.
+    SetACookie(cookieService, "http://www.samesite.com", nullptr, "test=sameSiteStrictVal; samesite=strict", nullptr);
+    GetACookie(cookieService, "http://www.notsamesite.com", nullptr, cookie);
+    EXPECT_TRUE(CheckResult(cookie.get(), MUST_BE_NULL));
+
+    SetACookie(cookieService, "http://www.samesite.test", nullptr, "test=sameSiteLaxVal; samesite=lax", nullptr);
+    GetACookie(cookieService, "http://www.notsamesite.com", nullptr, cookie);
+    EXPECT_TRUE(CheckResult(cookie.get(), MUST_BE_NULL));
+
     // XXX the following are placeholders: add these tests please!
     // *** "noncompliant cookie" tests
     // *** IP address tests
     // *** speed tests
 }
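Because no nsIChannel is available in this harness, every lookup above is treated as
foreign, so the new behaviour can only be observed as withheld cookies. A hypothetical
companion check in the same style (reusing the SetACookie/GetACookie/CheckResult
helpers visible above; the MUST_EQUAL rule is assumed to be defined elsewhere in
TestCookie.cpp) would confirm that a cookie without a samesite attribute is untouched
by the new filter:

    // Hypothetical addition, not part of the patch: a cookie carrying no samesite
    // attribute is unaffected by the filtering, even though this harness marks
    // every request as foreign.
    SetACookie(cookieService, "http://www.plainsite.com", nullptr, "test=plainVal", nullptr);
    GetACookie(cookieService, "http://www.plainsite.com", nullptr, cookie);
    EXPECT_TRUE(CheckResult(cookie.get(), MUST_EQUAL, "test=plainVal"));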
--- a/testing/web-platform/meta/streams/readable-byte-streams/detached-buffers.sharedworker.html.ini
+++ b/testing/web-platform/meta/streams/readable-byte-streams/detached-buffers.sharedworker.html.ini
@@ -4,16 +4,17 @@
   expected:
     if debug and e10s and (os == "mac") and (version == "OS X 10.10.5") and (processor == "x86_64") and (bits == 64): CRASH
     if debug and e10s and (os == "win") and (version == "10.0.15063") and (processor == "x86_64") and (bits == 64): CRASH
     if not debug and e10s and (os == "win") and (version == "10.0.15063") and (processor == "x86_64") and (bits == 64): TIMEOUT
     if not debug and e10s and (os == "mac") and (version == "OS X 10.10.5") and (processor == "x86_64") and (bits == 64): TIMEOUT
     if debug and not e10s and (os == "win") and (version == "6.1.7601") and (processor == "x86") and (bits == 32): CRASH
     if not debug and e10s and (os == "win") and (version == "6.1.7601") and (processor == "x86") and (bits == 32): TIMEOUT
     if debug and e10s and (os == "win") and (version == "6.1.7601") and (processor == "x86") and (bits == 32): CRASH
+    if not debug and e10s and (os == "linux") and (version == "Ubuntu 16.04") and (processor == "x86") and (bits == 32): TIMEOUT
     if not debug and e10s and (os == "linux") and (version == "Ubuntu 16.04") and (processor == "x86_64") and (bits == 64): TIMEOUT
     if debug and e10s and (os == "linux") and (version == "Ubuntu 16.04") and (processor == "x86_64") and (bits == 64): CRASH
     if debug and e10s and (os == "linux") and (version == "Ubuntu 16.04") and (processor == "x86") and (bits == 32): CRASH
     if debug and not e10s and (os == "linux") and (version == "Ubuntu 16.04") and (processor == "x86") and (bits == 32): CRASH
   [ReadableStream with byte source: read()ing from a closed stream still transfers the buffer]
     expected: FAIL
 
   [ReadableStream with byte source: read()ing from a stream with queued chunks still transfers the buffer]