Merge inbound to mozilla-central. a=merge
author: Gurzau Raul <rgurzau@mozilla.com>
Tue, 24 Jul 2018 12:49:23 +0300
changeset 427950 1e5fa52a612e8985e12212d1950a732954e00e45
parent 427864 cfb544de8a72831a08e5f847ffae486d1f4d2a21 (current diff)
parent 427949 e3f3506f327c421dcffc00266382669bbe2d101a (diff)
child 427951 cb59a65fb720ee100bb03cc9f4b02e645755786b
child 427972 fa39cfe02f453936c5d1f34bc9bb3384584bd510
child 427986 f636b99a80913e4542b626a5bb0fe70c44cf5175
push id: 34320
push user: rgurzau@mozilla.com
push date: Tue, 24 Jul 2018 09:50:07 +0000
treeherder: mozilla-central@1e5fa52a612e
reviewers: merge
milestone: 63.0a1
first release with: nightly linux32, linux64, mac, win32, win64 (all 1e5fa52a612e / 63.0a1 / 20180724100052)
files:
dom/base/nsContentUtils.cpp
dom/base/nsDocument.cpp
dom/html/HTMLMediaElement.cpp
modules/libpref/init/all.js
netwerk/protocol/http/nsHttpChannel.cpp
third_party/rust/itoa-0.3.1/.cargo-checksum.json
third_party/rust/itoa-0.3.1/.travis.yml
third_party/rust/itoa-0.3.1/Cargo.toml
third_party/rust/itoa-0.3.1/LICENSE-APACHE
third_party/rust/itoa-0.3.1/LICENSE-MIT
third_party/rust/itoa-0.3.1/README.md
third_party/rust/itoa-0.3.1/benches/bench.rs
third_party/rust/itoa-0.3.1/performance.png
third_party/rust/itoa-0.3.1/src/lib.rs
third_party/rust/itoa-0.3.1/tests/test.rs
third_party/rust/parking_lot/src/raw_remutex.rs
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -136,16 +136,24 @@ name = "base64"
 version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "safemem 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
+name = "bench-collections-gtest"
+version = "0.1.0"
+dependencies = [
+ "fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "binary-space-partition"
 version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "bincode"
 version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -795,17 +803,17 @@ version = "0.0.1"
 dependencies = [
  "atomic_refcell 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "cssparser 0.24.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "cstr 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "malloc_size_of 0.0.1",
  "nsstring 0.1.0",
- "parking_lot 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "selectors 0.19.0",
  "servo_arc 0.1.1",
  "smallvec 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "style 0.0.1",
  "style_traits 0.0.1",
 ]
 
 [[package]]
@@ -815,16 +823,17 @@ dependencies = [
  "gkrust-shared 0.1.0",
  "stylo_tests 0.0.1",
 ]
 
 [[package]]
 name = "gkrust-gtest"
 version = "0.1.0"
 dependencies = [
+ "bench-collections-gtest 0.1.0",
  "gkrust-shared 0.1.0",
  "mp4parse-gtest 0.1.0",
  "nsstring-gtest 0.1.0",
  "xpcom-gtest 0.1.0",
 ]
 
 [[package]]
 name = "gkrust-shared"
@@ -951,21 +960,16 @@ name = "itertools"
 version = "0.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "either 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "itoa"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "itoa"
 version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "js"
 version = "0.1.4"
 dependencies = [
  "bindgen 0.37.4 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1106,16 +1110,25 @@ dependencies = [
 ]
 
 [[package]]
 name = "linked-hash-map"
 version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "lock_api"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "log"
 version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -1474,20 +1487,20 @@ name = "owning_ref"
 version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "parking_lot"
-version = "0.5.4"
+version = "0.6.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lock_api 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot_core 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "parking_lot_core"
 version = "0.2.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
@@ -1945,31 +1958,31 @@ dependencies = [
  "byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "cssparser 0.24.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "euclid 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "fallible 0.0.1",
  "fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "hashglobe 0.1.0",
  "itertools 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itoa 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "malloc_size_of 0.0.1",
  "malloc_size_of_derive 0.0.1",
  "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "new-ordered-float 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "new_debug_unreachable 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "nsstring 0.1.0",
  "num-integer 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
  "num-traits 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)",
  "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "parking_lot 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rayon 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "selectors 0.19.0",
  "servo_arc 0.1.1",
  "smallbitvec 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "smallvec 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "style_derive 0.0.1",
@@ -2611,32 +2624,32 @@ dependencies = [
 "checksum httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "af2f2dd97457e8fb1ae7c5a420db346af389926e36f43768b96f101546b04a07"
 "checksum humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0484fda3e7007f2a4a0d9c3a703ca38c71c54c55602ce4660c419fd32e188c9e"
 "checksum hyper 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)" = "368cb56b2740ebf4230520e2b90ebb0461e69034d85d1945febd9b3971426db2"
 "checksum ident_case 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3c9826188e666f2ed92071d2dadef6edc430b11b158b5b2b3f4babbcc891eaaa"
 "checksum idna 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "014b298351066f1512874135335d62a789ffe78a9974f94b43ed5621951eaf7d"
 "checksum iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "29d062ee61fccdf25be172e70f34c9f6efc597e1fb8f6526e8437b2046ab26be"
 "checksum itertools 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d3f2be4da1690a039e9ae5fd575f706a63ad5a2120f161b1d653c9da3930dd21"
 "checksum itertools 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "b07332223953b5051bceb67e8c4700aa65291535568e1f12408c43c4a42c0394"
-"checksum itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "eb2f404fbc66fd9aac13e998248505e7ecb2ad8e44ab6388684c5fb11c6c251c"
 "checksum itoa 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c069bbec61e1ca5a596166e55dfe4773ff745c3d16b700013bcaff9a6df2c682"
 "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
 "checksum khronos_api 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "037ab472c33f67b5fbd3e9163a2645319e5356fcd355efa6d4eb7fff4bbcb554"
 "checksum lalrpop 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88035943c3cfbb897a499a556212b2b053574f32b4238b71b61625bc470f80aa"
 "checksum lalrpop-intern 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cc4fd87be4a815fd373e02773983940f0d75fb26fde8c098e9e45f7af03154c0"
 "checksum lalrpop-snap 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5f244285324e4e33d486910b66fd3b7cb37e2072c5bf63319f506fe99ed72650"
 "checksum lalrpop-util 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)" = "de408fd50dea8ad7a77107144983a25c7fdabf5f8faf707a6e020d68874ed06c"
 "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a"
 "checksum lazy_static 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e6412c5e2ad9584b0b8e979393122026cdd6d2a80b933f890dcd694ddbe73739"
 "checksum lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce12306c4739d86ee97c23139f3a34ddf0387bbf181bc7929d287025a8c3ef6b"
 "checksum libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)" = "f54263ad99207254cf58b5f701ecb432c717445ea2ee8af387334bdd1a03fdff"
 "checksum libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3ad660d7cb8c5822cd83d10897b0f1f1526792737a179e73896152f85b88c2"
 "checksum libudev 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ea626d3bdf40a1c5aee3bcd4f40826970cae8d80a8fec934c82a63840094dcfe"
 "checksum libz-sys 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)" = "3fdd64ef8ee652185674455c1d450b83cbc8ad895625d543b5324d923f82e4d8"
 "checksum linked-hash-map 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "70fb39025bc7cdd76305867c4eccf2f2dcf6e9a57f5b21a93e1c2d86cd03ec9e"
+"checksum lock_api 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "949826a5ccf18c1b3a7c3d57692778d21768b79e46eb9dd07bfc4c2160036c54"
 "checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b"
 "checksum log 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6fddaa003a65722a7fb9e26b0ce95921fe4ba590542ced664d8ce2fa26f9f3ac"
 "checksum matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "100aabe6b8ff4e4a7e32c1c13523379802df0772b82466207ac25b013f193376"
 "checksum memchr 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "148fab2e51b4f1cfc66da2a7c32981d1d3c083a803978268bb11fe4b86925e7a"
 "checksum memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "796fba70e76612589ed2ce7f45282f5af869e0fdd7cc6199fa1aa1f1d591ba9d"
 "checksum memmap 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46f3c7359028b31999287dae4e5047ddfe90a23b7dca2282ce759b491080c99b"
 "checksum memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0f9dc261e2b62d7a622bf416ea3c5245cdd5d9a7fcc428c0d06804dfce1775b3"
 "checksum mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0"
@@ -2657,17 +2670,17 @@ dependencies = [
 "checksum num 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "a311b77ebdc5dd4cf6449d81e4135d9f0e3b153839ac90e648a8ef538f923525"
 "checksum num-integer 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "d1452e8b06e448a07f0e6ebb0bb1d92b8890eea63288c0b627331d53514d0fba"
 "checksum num-iter 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)" = "7485fcc84f85b4ecd0ea527b14189281cf27d60e583ae65ebc9c088b13dffe01"
 "checksum num-traits 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)" = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31"
 "checksum num-traits 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e7de20f146db9d920c45ee8ed8f71681fd9ade71909b48c3acbd766aa504cf10"
 "checksum num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "514f0d73e64be53ff320680ca671b64fe3fb91da01e1ae2ddc99eb51d453b20d"
 "checksum ordermap 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a86ed3f5f244b372d6b1a00b72ef7f8876d0bc6a78a4c9985c53614041512063"
 "checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37"
-"checksum parking_lot 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "9fd9d732f2de194336fb02fe11f9eed13d9e76f13f4315b4d88a14ca411750cd"
+"checksum parking_lot 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "69376b761943787ebd5cc85a5bc95958651a22609c5c1c2b65de21786baec72b"
 "checksum parking_lot_core 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "6c677d78851950b3aec390e681a411f78cc250cba277d4f578758a377f727970"
 "checksum peeking_take_while 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099"
 "checksum percent-encoding 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de154f638187706bde41d9b4738748933d64e6b37bdbffc0b47a97d16a6ae356"
 "checksum petgraph 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "7a7e5234c228fbfa874c86a77f685886127f82e0aef602ad1d48333fcac6ad61"
 "checksum phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "cb325642290f28ee14d8c6201159949a872f220c62af6e110a56ea914fbe42fc"
 "checksum phf_codegen 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "d62594c0bb54c464f633175d502038177e90309daf2e0158be42ed5f023ce88f"
 "checksum phf_generator 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "6b07ffcc532ccc85e3afc45865469bf5d9e4ef5bfcf9622e3cfe80c2d275ec03"
 "checksum phf_shared 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "07e24b0ca9643bdecd0632f2b3da6b1b89bbb0030e0b992afc1113b23a7bc2f2"
--- a/dom/base/TabGroup.cpp
+++ b/dom/base/TabGroup.cpp
@@ -115,16 +115,21 @@ TabGroup::GetFromWindow(mozIDOMWindowPro
   return nullptr;
 }
 
 /* static */ TabGroup*
 TabGroup::GetFromActor(TabChild* aTabChild)
 {
   MOZ_RELEASE_ASSERT(NS_IsMainThread());
 
+  // Middleman processes do not assign event targets to their tab children.
+  if (recordreplay::IsMiddleman()) {
+    return GetChromeTabGroup();
+  }
+
   nsCOMPtr<nsIEventTarget> target = aTabChild->Manager()->GetEventTargetFor(aTabChild);
   if (!target) {
     return nullptr;
   }
 
   // We have an event target. We assume the IPC code created it via
   // TabGroup::CreateEventTarget.
   RefPtr<SchedulerGroup> group =
--- a/dom/base/nsCCUncollectableMarker.cpp
+++ b/dom/base/nsCCUncollectableMarker.cpp
@@ -466,16 +466,22 @@ mozilla::dom::TraceBlackJS(JSTracer* aTr
   }
 #endif
 
   if (!nsCCUncollectableMarker::sGeneration) {
     return;
   }
 
   if (ProcessGlobal::WasCreated() && nsFrameMessageManager::GetChildProcessManager()) {
+    // ProcessGlobal::Get() can perform recorded events such as lock accesses,
+    // which we're not supposed to do here since tracing occurs
+    // non-deterministically when recording/replaying. Sidestep this by not
+    // recording these events.
+    recordreplay::AutoPassThroughThreadEvents pt;
+
     ProcessGlobal* pg = ProcessGlobal::Get();
     if (pg) {
       mozilla::TraceScriptHolder(ToSupports(pg), aTrc);
     }
   }
 
   // Mark globals of active windows black.
   nsGlobalWindowOuter::OuterWindowByIdTable* windowsById =
--- a/dom/base/nsContentUtils.cpp
+++ b/dom/base/nsContentUtils.cpp
@@ -6945,18 +6945,17 @@ nsContentUtils::FindInternalContentViewe
   nsCOMPtr<nsICategoryManager> catMan(do_GetService(NS_CATEGORYMANAGER_CONTRACTID));
   if (!catMan)
     return nullptr;
 
   nsCOMPtr<nsIDocumentLoaderFactory> docFactory;
 
   nsCString contractID;
   nsresult rv = catMan->GetCategoryEntry("Gecko-Content-Viewers",
-                                         PromiseFlatCString(aType).get(),
-                                         getter_Copies(contractID));
+                                         aType, contractID);
   if (NS_SUCCEEDED(rv)) {
     docFactory = do_GetService(contractID.get());
     if (docFactory && aLoaderType) {
       if (contractID.EqualsLiteral(CONTENT_DLF_CONTRACTID))
         *aLoaderType = TYPE_CONTENT;
       else if (contractID.EqualsLiteral(PLUGIN_DLF_CONTRACTID))
         *aLoaderType = TYPE_PLUGIN;
       else
--- a/dom/base/nsDocument.cpp
+++ b/dom/base/nsDocument.cpp
@@ -1084,18 +1084,18 @@ nsExternalResourceMap::PendingLoad::Setu
     new LoadgroupCallbacks(callbacks);
   newLoadGroup->SetNotificationCallbacks(newCallbacks);
 
   // This is some serious hackery cribbed from docshell
   nsCOMPtr<nsICategoryManager> catMan =
     do_GetService(NS_CATEGORYMANAGER_CONTRACTID);
   NS_ENSURE_TRUE(catMan, NS_ERROR_NOT_AVAILABLE);
   nsCString contractId;
-  nsresult rv = catMan->GetCategoryEntry("Gecko-Content-Viewers", type.get(),
-                                         getter_Copies(contractId));
+  nsresult rv = catMan->GetCategoryEntry("Gecko-Content-Viewers", type,
+                                         contractId);
   NS_ENSURE_SUCCESS(rv, rv);
   nsCOMPtr<nsIDocumentLoaderFactory> docLoaderFactory =
     do_GetService(contractId.get());
   NS_ENSURE_TRUE(docLoaderFactory, NS_ERROR_NOT_AVAILABLE);
 
   nsCOMPtr<nsIContentViewer> viewer;
   nsCOMPtr<nsIStreamListener> listener;
   rv = docLoaderFactory->CreateInstance("external-resource", chan, newLoadGroup,
--- a/dom/base/nsFrameMessageManager.cpp
+++ b/dom/base/nsFrameMessageManager.cpp
@@ -658,29 +658,53 @@ public:
       }
     }
   }
 
   bool mWasHandlingMessage;
   RefPtr<nsFrameMessageManager> mMM;
 };
 
+// When recording or replaying, return whether a message should be received in
+// the middleman process instead of the recording/replaying process.
+static bool
+DirectMessageToMiddleman(const nsAString& aMessage)
+{
+  // Middleman processes run developer tools server code and need to receive
+  // debugger related messages. The session store flush message needs to be
+  // received in order to cleanly shutdown the process.
+  return StringBeginsWith(aMessage, NS_LITERAL_STRING("debug:"))
+      || aMessage.EqualsLiteral("SessionStore:flush");
+}
 
 void
 nsFrameMessageManager::ReceiveMessage(nsISupports* aTarget,
                                       nsFrameLoader* aTargetFrameLoader,
                                       bool aTargetClosed,
                                       const nsAString& aMessage,
                                       bool aIsSync,
                                       StructuredCloneData* aCloneData,
                                       mozilla::jsipc::CpowHolder* aCpows,
                                       nsIPrincipal* aPrincipal,
                                       nsTArray<StructuredCloneData>* aRetVal,
                                       ErrorResult& aError)
 {
+  // If we are recording or replaying, we will end up here in both the
+  // middleman process and the recording/replaying process. Ignore the message
+  // in one of the processes, so that it is only received in one place.
+  if (recordreplay::IsRecordingOrReplaying()) {
+    if (DirectMessageToMiddleman(aMessage)) {
+      return;
+    }
+  } else if (recordreplay::IsMiddleman()) {
+    if (!DirectMessageToMiddleman(aMessage)) {
+      return;
+    }
+  }
+
   MOZ_ASSERT(aTarget);
 
   nsAutoTObserverArray<nsMessageListenerInfo, 1>* listeners =
     mListeners.Get(aMessage);
   if (listeners) {
 
     MMListenerRemover lr(this);
 
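Taken together, the checks added at the top of ReceiveMessage implement a single routing rule. A minimal sketch of the equivalent predicate (ShouldReceiveHere is a hypothetical name, not part of the patch):

static bool
ShouldReceiveHere(const nsAString& aMessage)
{
  if (recordreplay::IsRecordingOrReplaying()) {
    // Recording/replaying children skip middleman-directed messages.
    return !DirectMessageToMiddleman(aMessage);
  }
  if (recordreplay::IsMiddleman()) {
    // Middleman processes handle only middleman-directed messages.
    return DirectMessageToMiddleman(aMessage);
  }
  // Normal processes receive every message, as before.
  return true;
}

Under this rule a "debug:" message is handled only by the middleman's devtools server, and every other frame message only by the recording/replaying child, so each message is received in exactly one process.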
--- a/dom/base/nsJSEnvironment.cpp
+++ b/dom/base/nsJSEnvironment.cpp
@@ -281,16 +281,19 @@ FindExceptionStackForConsoleReport(nsPID
   }
 }
 
 } /* namespace xpc */
 
 static PRTime
 GetCollectionTimeDelta()
 {
+  if (recordreplay::IsRecordingOrReplaying()) {
+    return 0;
+  }
   PRTime now = PR_Now();
   if (sFirstCollectionTime) {
     return now - sFirstCollectionTime;
   }
   sFirstCollectionTime = now;
   return 0;
 }
 
@@ -620,17 +623,17 @@ nsJSContext::~nsJSContext()
   mGlobalObjectRef = nullptr;
 
   Destroy();
 }
 
 void
 nsJSContext::Destroy()
 {
-  if (mGCOnDestruction) {
+  if (mGCOnDestruction && !recordreplay::IsRecordingOrReplaying()) {
     PokeGC(JS::gcreason::NSJSCONTEXT_DESTROY, mWindowProxy);
   }
 
   DropJSObjects(this);
 }
 
 // QueryInterface implementation for nsJSContext
 NS_IMPL_CYCLE_COLLECTION_CLASS(nsJSContext)
@@ -1618,32 +1621,39 @@ ICCRunnerFired(TimeStamp aDeadline)
       return false;
     }
   }
 
   nsJSContext::RunCycleCollectorSlice(aDeadline);
   return true;
 }
 
+// Whether to skip the generation of timers for future GC/CC activity.
+static bool
+SkipCollectionTimers()
+{
+  return sShuttingDown || recordreplay::IsRecordingOrReplaying();
+}
+
 //static
 void
 nsJSContext::BeginCycleCollectionCallback()
 {
   MOZ_ASSERT(NS_IsMainThread());
 
   gCCStats.mBeginTime = gCCStats.mBeginSliceTime.IsNull() ? TimeStamp::Now() : gCCStats.mBeginSliceTime;
   gCCStats.mSuspected = nsCycleCollector_suspectedCount();
 
   KillCCRunner();
 
   gCCStats.RunForgetSkippable();
 
   MOZ_ASSERT(!sICCRunner, "Tried to create a new ICC timer when one already existed.");
 
-  if (sShuttingDown) {
+  if (SkipCollectionTimers()) {
     return;
   }
 
   // Create an ICC timer even if ICC is globally disabled, because we could be manually triggering
   // an incremental collection, and we want to be sure to finish it.
   sICCRunner = IdleTaskRunner::Create(ICCRunnerFired,
                                       "BeginCycleCollectionCallback::ICCRunnerFired",
                                       kICCIntersliceDelay,
@@ -1865,17 +1875,17 @@ InterSliceGCRunnerFired(TimeStamp aDeadl
 }
 
 // static
 void
 GCTimerFired(nsITimer *aTimer, void *aClosure)
 {
   nsJSContext::KillGCTimer();
   nsJSContext::KillInterSliceGCRunner();
-  if (sShuttingDown) {
+  if (SkipCollectionTimers()) {
     return;
   }
 
   // Now start the actual GC after initial timer has fired.
   sInterSliceGCRunner = IdleTaskRunner::Create([aClosure](TimeStamp aDeadline) {
     return InterSliceGCRunnerFired(aDeadline, aClosure);
   }, "GCTimerFired::InterSliceGCRunnerFired",
      NS_INTERSLICE_GC_DELAY,
@@ -2168,33 +2178,33 @@ nsJSContext::PokeGC(JS::gcreason::Reason
 
   first = false;
 }
 
 // static
 void
 nsJSContext::PokeShrinkingGC()
 {
-  if (sShrinkingGCTimer || sShuttingDown) {
+  if (sShrinkingGCTimer || SkipCollectionTimers()) {
     return;
   }
 
   NS_NewTimerWithFuncCallback(&sShrinkingGCTimer,
                               ShrinkingGCTimerFired, nullptr,
                               StaticPrefs::javascript_options_compact_on_user_inactive_delay(),
                               nsITimer::TYPE_ONE_SHOT_LOW_PRIORITY,
                               "ShrinkingGCTimerFired",
                               SystemGroup::EventTargetFor(TaskCategory::GarbageCollection));
 }
 
 // static
 void
 nsJSContext::MaybePokeCC()
 {
-  if (sCCRunner || sICCRunner || sShuttingDown || !sHasRunGC) {
+  if (sCCRunner || sICCRunner || !sHasRunGC || SkipCollectionTimers()) {
     return;
   }
 
   uint32_t sinceLastCCEnd = TimeUntilNow(sLastCCEndTime);
   if (sinceLastCCEnd && sinceLastCCEnd < NS_CC_DELAY) {
     return;
   }
 
@@ -2355,30 +2365,31 @@ DOMGCSliceCallback(JSContext* aCx, JS::G
       sCCollectedZonesWaitingForGC = 0;
       sLikelyShortLivingObjectsNeedingGC = 0;
       sCleanupsSinceLastGC = 0;
       sNeedsFullCC = true;
       sHasRunGC = true;
       nsJSContext::MaybePokeCC();
 
       if (aDesc.isZone_) {
-        if (!sFullGCTimer && !sShuttingDown) {
+        if (!sFullGCTimer && !SkipCollectionTimers()) {
           NS_NewTimerWithFuncCallback(&sFullGCTimer,
                                       FullGCTimerFired,
                                       nullptr,
                                       NS_FULL_GC_DELAY,
                                       nsITimer::TYPE_ONE_SHOT_LOW_PRIORITY,
                                       "FullGCTimerFired",
                                       SystemGroup::EventTargetFor(TaskCategory::GarbageCollection));
         }
       } else {
         nsJSContext::KillFullGCTimer();
       }
 
-      if (ShouldTriggerCC(nsCycleCollector_suspectedCount())) {
+      if (!recordreplay::IsRecordingOrReplaying() &&
+          ShouldTriggerCC(nsCycleCollector_suspectedCount())) {
         nsCycleCollector_dispatchDeferredDeletion();
       }
 
       if (!aDesc.isZone_) {
         sNeedsFullGC = false;
       }
 
       break;
@@ -2388,29 +2399,30 @@ DOMGCSliceCallback(JSContext* aCx, JS::G
       break;
 
     case JS::GC_SLICE_END:
       sGCUnnotifiedTotalTime +=
         aDesc.lastSliceEnd(aCx) - aDesc.lastSliceStart(aCx);
 
       // Schedule another GC slice if the GC has more work to do.
       nsJSContext::KillInterSliceGCRunner();
-      if (!sShuttingDown && !aDesc.isComplete_) {
+      if (!SkipCollectionTimers() && !aDesc.isComplete_) {
         sInterSliceGCRunner =
           IdleTaskRunner::Create([](TimeStamp aDeadline) {
             return InterSliceGCRunnerFired(aDeadline, nullptr);
           }, "DOMGCSliceCallback::InterSliceGCRunnerFired",
              NS_INTERSLICE_GC_DELAY,
              sActiveIntersliceGCBudget,
              false,
              []{ return sShuttingDown; },
              TaskCategory::GarbageCollection);
       }
 
-      if (ShouldTriggerCC(nsCycleCollector_suspectedCount())) {
+      if (!recordreplay::IsRecordingOrReplaying() &&
+          ShouldTriggerCC(nsCycleCollector_suspectedCount())) {
         nsCycleCollector_dispatchDeferredDeletion();
       }
 
       if (StaticPrefs::javascript_options_mem_log()) {
         nsString gcstats;
         gcstats.Adopt(aDesc.formatSliceMessage(aCx));
         nsAutoString prefix;
         nsTextFormatter::ssprintf(prefix, u"[%s-%i] ",
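Every sShuttingDown check replaced in this file follows the same guard pattern. A sketch of the shape shared by the timer-creation sites (MaybeStartTimer is a hypothetical stand-in for GCTimerFired, PokeShrinkingGC, MaybePokeCC, and the slice callbacks above):

static void
MaybeStartTimer()
{
  // GC/CC timers fire at nondeterministic times, so while recording or
  // replaying (or during shutdown) no collection timers are created at
  // all; collections then happen only at points that replay consistently.
  if (SkipCollectionTimers()) {
    return;
  }
  // ... NS_NewTimerWithFuncCallback(...) or IdleTaskRunner::Create(...) ...
}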
--- a/dom/base/nsJSUtils.cpp
+++ b/dom/base/nsJSUtils.cpp
@@ -406,16 +406,31 @@ nsJSUtils::ExecutionContext::DecodeBinAS
   }
 
   return NS_OK;
 #else
   return NS_ERROR_NOT_IMPLEMENTED;
 #endif
 }
 
+static bool
+IsPromiseValue(JSContext* aCx, JS::Handle<JS::Value> aValue)
+{
+  if (!aValue.isObject()) {
+    return false;
+  }
+
+  JS::Rooted<JSObject*> obj(aCx, js::CheckedUnwrap(&aValue.toObject()));
+  if (!obj) {
+    return false;
+  }
+
+  return JS::IsPromiseObject(obj);
+}
+
 nsresult
 nsJSUtils::ExecutionContext::ExtractReturnValue(JS::MutableHandle<JS::Value> aRetValue)
 {
   MOZ_ASSERT(aRetValue.isUndefined());
   if (mSkip) {
     // Repeat earlier result, as NS_SUCCESS_DOM_SCRIPT_EVALUATION_THREW are not
     // failures cases.
 #ifdef DEBUG
@@ -423,16 +438,25 @@ nsJSUtils::ExecutionContext::ExtractRetu
 #endif
     return mRv;
   }
 
   MOZ_ASSERT(mWantsReturnValue);
 #ifdef DEBUG
   mWantsReturnValue = false;
 #endif
+  if (mCoerceToString && IsPromiseValue(mCx, mRetValue)) {
+    // We're a javascript: url and we should treat Promise return values as
+    // undefined.
+    //
+    // Once bug 1477821 is fixed this code might be able to go away, or will
+    // become enshrined in the spec, depending.
+    mRetValue.setUndefined();
+  }
+
   if (mCoerceToString && !mRetValue.isUndefined()) {
     JSString* str = JS::ToString(mCx, mRetValue);
     if (!str) {
       // ToString can be a function call, so an exception can be raised while
       // executing the function.
       mSkip = true;
       return EvaluationExceptionToNSResult(mCx);
     }
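The practical effect of the IsPromiseValue check is easiest to see with a javascript: URL, the case where mCoerceToString is set. An illustration, not taken from the patch:

// javascript:Promise.resolve(42)
// Previously the returned Promise was coerced with JS::ToString, yielding
// "[object Promise]" and replacing the document with that text. With this
// change the Promise return value is treated as undefined, so the
// navigation produces no result and the document is left alone (pending
// bug 1477821).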
--- a/dom/base/nsWrapperCache.cpp
+++ b/dom/base/nsWrapperCache.cpp
@@ -38,16 +38,20 @@ void
 nsWrapperCache::SetWrapperJSObject(JSObject* aWrapper)
 {
   mWrapper = aWrapper;
   UnsetWrapperFlags(kWrapperFlagsMask);
 
   if (aWrapper && !JS::ObjectIsTenured(aWrapper)) {
     CycleCollectedJSRuntime::Get()->NurseryWrapperAdded(this);
   }
+
+  if (mozilla::recordreplay::IsReplaying()) {
+    mozilla::recordreplay::SetWeakPointerJSRoot(this, aWrapper);
+  }
 }
 
 void
 nsWrapperCache::ReleaseWrapper(void* aScriptObjectHolder)
 {
   if (PreservingWrapper()) {
     SetPreservingWrapper(false);
     cyclecollector::DropJSObjectsImpl(aScriptObjectHolder);
@@ -109,16 +113,22 @@ DebugWrapperTraceCallback(JS::GCCellPtr 
     callback->NoteJSChild(aPtr);
   }
 }
 
 void
 nsWrapperCache::CheckCCWrapperTraversal(void* aScriptObjectHolder,
                                         nsScriptObjectTracer* aTracer)
 {
+  // Skip checking if we are recording or replaying, as calling
+  // GetWrapperPreserveColor() can cause the cache's wrapper to be cleared.
+  if (recordreplay::IsRecordingOrReplaying()) {
+    return;
+  }
+
   JSObject* wrapper = GetWrapperPreserveColor();
   if (!wrapper) {
     return;
   }
 
   DebugWrapperTraversalCallback callback(wrapper);
 
   // The CC traversal machinery cannot trigger GC; however, the analysis cannot
--- a/dom/base/nsWrapperCache.h
+++ b/dom/base/nsWrapperCache.h
@@ -74,16 +74,20 @@ static_assert(sizeof(void*) == 4, "Only 
  * necessary. Instead a class hook (objectMovedOp) is provided that is called
  * when an object is moved and is responsible for ensuring pointers are
  * updated. It does this by calling UpdateWrapper() on the wrapper
  * cache. SetWrapper() asserts that the hook is implemented for any wrapper set.
  *
  * A number of the methods are implemented in nsWrapperCacheInlines.h because we
  * have to include some JS headers that don't play nicely with the rest of the
  * codebase. Include nsWrapperCacheInlines.h if you need to call those methods.
+ *
+ * When recording or replaying an execution, wrapper caches are instrumented so
+ * that they behave consistently even if the GC executes at different points
+ * and collects different objects.
  */
 
 class nsWrapperCache
 {
 public:
   NS_DECLARE_STATIC_IID_ACCESSOR(NS_WRAPPERCACHE_IID)
 
   nsWrapperCache()
@@ -91,16 +95,20 @@ public:
     , mFlags(0)
 #ifdef BOOL_FLAGS_ON_WRAPPER_CACHE
     , mBoolFlags(0)
 #endif
   {
   }
   ~nsWrapperCache()
   {
+    // Clear any JS root associated with this cache while replaying.
+    if (mozilla::recordreplay::IsReplaying()) {
+      mozilla::recordreplay::SetWeakPointerJSRoot(this, nullptr);
+    }
     MOZ_ASSERT(!PreservingWrapper(),
                "Destroying cache with a preserved wrapper!");
   }
 
   /**
    * Get the cached wrapper.
    *
    * This getter clears the gray bit before handing out the JSObject which means
@@ -128,16 +136,33 @@ public:
    *
    * This should only be called if you really need to see the raw contents of
    * this cache, for example as part of finalization. Don't store the result
    * anywhere or pass it into JSAPI functions that may cause the value to
    * escape.
    */
   JSObject* GetWrapperMaybeDead() const
   {
+    // Keep track of accesses on the cache when recording or replaying an
+    // execution. Accesses during a GC (when thread events are disallowed)
+    // fetch the underlying object without making sure the returned value
+    // is consistent between recording and replay.
+    if (mozilla::recordreplay::IsRecordingOrReplaying() &&
+        !mozilla::recordreplay::AreThreadEventsDisallowed() &&
+        !mozilla::recordreplay::HasDivergedFromRecording()) {
+      bool success = mozilla::recordreplay::RecordReplayValue(!!mWrapper);
+      if (mozilla::recordreplay::IsReplaying()) {
+        if (success) {
+          MOZ_RELEASE_ASSERT(mWrapper);
+        } else {
+          const_cast<nsWrapperCache*>(this)->ClearWrapper();
+        }
+      }
+    }
+
     return mWrapper;
   }
 
 #ifdef DEBUG
 private:
   static bool HasJSObjectMovedOp(JSObject* aWrapper);
 
 public:
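GetWrapperMaybeDead above leans on recordreplay::RecordReplayValue. Its contract, as assumed here from the usage in this patch: while recording it stores the passed value in the recording and returns it unchanged; while replaying it ignores its argument and returns the recorded value. A minimal sketch of how that pins down a GC-dependent observation:

// Recording: saves whether a wrapper existed and returns that fact.
// Replaying: returns what the recording saw, even if this run's GC kept
// the wrapper alive, so both executions take the same branch.
bool hadWrapper = mozilla::recordreplay::RecordReplayValue(!!mWrapper);
if (!hadWrapper) {
  // Replay clears the wrapper to match the recording (see above).
}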
--- a/dom/base/nsWrapperCacheInlines.h
+++ b/dom/base/nsWrapperCacheInlines.h
@@ -8,17 +8,17 @@
 #define nsWrapperCacheInline_h___
 
 #include "nsWrapperCache.h"
 #include "js/TracingAPI.h"
 
 inline JSObject*
 nsWrapperCache::GetWrapperPreserveColor() const
 {
-  JSObject* obj = mWrapper;
+  JSObject* obj = GetWrapperMaybeDead();
   if (obj && js::gc::EdgeNeedsSweepUnbarriered(&obj)) {
     // The object has been found to be dead and is in the process of being
     // finalized, so don't let the caller see it. As an optimisation, remove it
     // from the cache so we don't have to do this check in future.
     const_cast<nsWrapperCache*>(this)->ClearWrapper();
     return nullptr;
   }
   MOZ_ASSERT(obj == mWrapper);
--- a/dom/bindings/BindingUtils.h
+++ b/dom/bindings/BindingUtils.h
@@ -2732,16 +2732,22 @@ ToSupportsIsOnPrimaryInheritanceChain(T*
 // This function supplies a default value and is overloaded for specific native
 // object types.
 inline size_t
 BindingJSObjectMallocBytes(void *aNativePtr)
 {
   return 0;
 }
 
+// Register a thing which DeferredFinalize might be called on during GC
+// finalization. See DeferredFinalize.h
+template<class T>
+static void
+RecordReplayRegisterDeferredFinalize(T* aObject);
+
 // The BindingJSObjectCreator class is supposed to be used by a caller that
 // wants to create and initialise a binding JSObject. After initialisation has
 // been successfully completed it should call ForgetObject().
 // The BindingJSObjectCreator object will root the JSObject until ForgetObject()
 // is called on it. If the native object for the binding is refcounted it will
 // also hold a strong reference to it, that reference is transferred to the
 // JSObject (which holds the native in a slot) when ForgetObject() is called. If
 // the BindingJSObjectCreator object is destroyed and ForgetObject() was never
@@ -2773,16 +2779,17 @@ public:
     js::ProxyOptions options;
     options.setClass(aClass);
     aReflector.set(js::NewProxyObject(aCx, aHandler, aExpandoValue, aProto,
                                       options));
     if (aReflector) {
       js::SetProxyReservedSlot(aReflector, DOM_OBJECT_SLOT, JS::PrivateValue(aNative));
       mNative = aNative;
       mReflector = aReflector;
+      RecordReplayRegisterDeferredFinalize<T>(aNative);
     }
 
     if (size_t mallocBytes = BindingJSObjectMallocBytes(aNative)) {
       JS_updateMallocCounter(aCx, mallocBytes);
     }
   }
 
   void
@@ -2790,16 +2797,17 @@ public:
                JS::Handle<JSObject*> aProto,
                T* aNative, JS::MutableHandle<JSObject*> aReflector)
   {
     aReflector.set(JS_NewObjectWithGivenProto(aCx, aClass, aProto));
     if (aReflector) {
       js::SetReservedSlot(aReflector, DOM_OBJECT_SLOT, JS::PrivateValue(aNative));
       mNative = aNative;
       mReflector = aReflector;
+      RecordReplayRegisterDeferredFinalize<T>(aNative);
     }
 
     if (size_t mallocBytes = BindingJSObjectMallocBytes(aNative)) {
       JS_updateMallocCounter(aCx, mallocBytes);
     }
   }
 
   void
@@ -2903,35 +2911,57 @@ struct DeferredFinalizer
 {
   static void
   AddForDeferredFinalization(T* aObject)
   {
     typedef DeferredFinalizerImpl<T> Impl;
     DeferredFinalize(Impl::AppendDeferredFinalizePointer,
                      Impl::DeferredFinalize, aObject);
   }
+
+  static void
+  RecordReplayRegisterDeferredFinalize(T* aObject)
+  {
+    typedef DeferredFinalizerImpl<T> Impl;
+    RecordReplayRegisterDeferredFinalizeThing(Impl::AppendDeferredFinalizePointer,
+                                              Impl::DeferredFinalize,
+                                              aObject);
+  }
 };
 
 template<class T>
 struct DeferredFinalizer<T, true>
 {
   static void
   AddForDeferredFinalization(T* aObject)
   {
     DeferredFinalize(reinterpret_cast<nsISupports*>(aObject));
   }
+
+  static void
+  RecordReplayRegisterDeferredFinalize(T* aObject)
+  {
+    RecordReplayRegisterDeferredFinalizeThing(nullptr, nullptr, aObject);
+  }
 };
 
 template<class T>
 static void
 AddForDeferredFinalization(T* aObject)
 {
   DeferredFinalizer<T>::AddForDeferredFinalization(aObject);
 }
 
+template<class T>
+static void
+RecordReplayRegisterDeferredFinalize(T* aObject)
+{
+  DeferredFinalizer<T>::RecordReplayRegisterDeferredFinalize(aObject);
+}
+
 // This returns T's CC participant if it participates in CC or null if it
 // doesn't. This also returns null for classes that don't inherit from
 // nsISupports (QI should be used to get the participant for those).
 template<class T, bool isISupports=IsBaseOf<nsISupports, T>::value>
 class GetCCParticipant
 {
   // Helper for GetCCParticipant for classes that participate in CC.
   template<class U>
@@ -3059,16 +3089,17 @@ CreateGlobal(JSContext* aCx, T* aNative,
 
   JSAutoRealm ar(aCx, aGlobal);
 
   {
     js::SetReservedSlot(aGlobal, DOM_OBJECT_SLOT, JS::PrivateValue(aNative));
     NS_ADDREF(aNative);
 
     aCache->SetWrapper(aGlobal);
+    RecordReplayRegisterDeferredFinalize<T>(aNative);
 
     dom::AllocateProtoAndIfaceCache(aGlobal,
                                     CreateGlobalOptions<T>::ProtoAndIfaceCacheKind);
 
     if (!CreateGlobalOptions<T>::PostCreateGlobal(aCx, aGlobal)) {
       return false;
     }
   }
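As with AddForDeferredFinalization, the DeferredFinalizer specialization that handles RecordReplayRegisterDeferredFinalize is selected by the isISupports template parameter, which the existing DeferredFinalizer declaration (not shown in this diff) defaults from IsBaseOf<nsISupports, T>. A short sketch of the two registration paths, restating only what the hunks above show:

// T does not derive from nsISupports: register with the same callbacks
// that deferred finalization itself uses.
//   RecordReplayRegisterDeferredFinalizeThing(Impl::AppendDeferredFinalizePointer,
//                                             Impl::DeferredFinalize, aObject);
//
// T derives from nsISupports: register the bare pointer, null callbacks.
//   RecordReplayRegisterDeferredFinalizeThing(nullptr, nullptr, aObject);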
--- a/dom/bindings/parser/WebIDL.py
+++ b/dom/bindings/parser/WebIDL.py
@@ -2833,17 +2833,22 @@ class IDLWrapperType(IDLType):
             while iface:
                 if any(m.isMethod() and m.isToJSON() for m in iface.members):
                     return True
                 iface = iface.parent
             return False
         elif self.isEnum():
             return True
         elif self.isDictionary():
-            return all(m.type.isJSONType() for m in self.inner.members)
+            dictionary = self.inner
+            while dictionary:
+                if not all(m.type.isJSONType() for m in dictionary.members):
+                    return False
+                dictionary = dictionary.parent
+            return True
         else:
             raise WebIDLError("IDLWrapperType wraps type %s that we don't know if "
                               "is serializable" % type(self.inner), [self.location])
 
     def resolveType(self, parentScope):
         assert isinstance(parentScope, IDLScope)
         self.inner.resolve(parentScope)
 
--- a/dom/html/HTMLMediaElement.cpp
+++ b/dom/html/HTMLMediaElement.cpp
@@ -509,16 +509,23 @@ HTMLMediaElement::MediaLoadListener::OnS
 {
   nsContentUtils::UnregisterShutdownObserver(this);
 
   if (!mElement) {
     // We've been notified by the shutdown observer, and are shutting down.
     return NS_BINDING_ABORTED;
   }
 
+  // Media element playback is not currently supported when recording or
+  // replaying. See bug 1304146.
+  if (recordreplay::IsRecordingOrReplaying()) {
+    mElement->ReportLoadError("Media elements not available when recording", nullptr, 0);
+    return NS_ERROR_NOT_AVAILABLE;
+  }
+
   // The element is only needed until we've had a chance to call
   // InitializeDecoderForChannel. So make sure mElement is cleared here.
   RefPtr<HTMLMediaElement> element;
   element.swap(mElement);
 
   AbstractThread::AutoEnter context(element->AbstractMainThread());
 
   if (mLoadID != element->GetCurrentLoadID()) {
--- a/dom/ipc/ContentChild.cpp
+++ b/dom/ipc/ContentChild.cpp
@@ -589,33 +589,28 @@ NS_INTERFACE_MAP_BEGIN(ContentChild)
   NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, nsIContentChild)
 NS_INTERFACE_MAP_END
 
 
 mozilla::ipc::IPCResult
 ContentChild::RecvSetXPCOMProcessAttributes(const XPCOMInitData& aXPCOMInit,
                                             const StructuredCloneData& aInitialData,
                                             nsTArray<LookAndFeelInt>&& aLookAndFeelIntCache,
-                                            nsTArray<SystemFontListEntry>&& aFontList,
-                                            const FileDescriptor& aSharedDataMapFile,
-                                            const uint32_t& aSharedDataMapSize)
+                                            nsTArray<SystemFontListEntry>&& aFontList)
 {
   if (!sShutdownCanary) {
     return IPC_OK();
   }
 
   mLookAndFeelCache = std::move(aLookAndFeelIntCache);
   mFontList = std::move(aFontList);
   gfx::gfxVars::SetValuesForInitialize(aXPCOMInit.gfxNonDefaultVarUpdates());
   InitXPCOM(aXPCOMInit, aInitialData);
   InitGraphicsDeviceData(aXPCOMInit.contentDeviceData());
 
-  mSharedData = new SharedMap(ProcessGlobal::Get(), aSharedDataMapFile,
-                              aSharedDataMapSize);
-
   return IPC_OK();
 }
 
 bool
 ContentChild::Init(MessageLoop* aIOLoop,
                    base::ProcessId aParentPid,
                    const char* aParentBuildID,
                    IPC::Channel* aChannel,
@@ -1247,18 +1242,22 @@ ContentChild::InitXPCOM(const XPCOMInitD
   RecvSetOffline(aXPCOMInit.isOffline());
   RecvSetConnectivity(aXPCOMInit.isConnected());
   LocaleService::GetInstance()->AssignAppLocales(aXPCOMInit.appLocales());
   LocaleService::GetInstance()->AssignRequestedLocales(aXPCOMInit.requestedLocales());
 
   RecvSetCaptivePortalState(aXPCOMInit.captivePortalState());
   RecvBidiKeyboardNotify(aXPCOMInit.isLangRTL(), aXPCOMInit.haveBidiKeyboards());
 
-  // Create the CPOW manager as soon as possible.
-  SendPJavaScriptConstructor();
+  // Create the CPOW manager as soon as possible. Middleman processes don't use
+  // CPOWs, because their recording child will also have a CPOW manager that
+  // communicates with the UI process.
+  if (!recordreplay::IsMiddleman()) {
+    SendPJavaScriptConstructor();
+  }
 
   if (aXPCOMInit.domainPolicy().active()) {
     nsIScriptSecurityManager* ssm = nsContentUtils::GetSecurityManager();
     MOZ_ASSERT(ssm);
     ssm->ActivateDomainPolicyInternal(getter_AddRefs(mPolicy));
     if (!mPolicy) {
       MOZ_CRASH("Failed to activate domain policy.");
     }
@@ -1800,17 +1799,21 @@ ContentChild::RecvBidiKeyboardNotify(con
 
 static StaticRefPtr<CancelableRunnable> gFirstIdleTask;
 
 static void
 FirstIdle(void)
 {
   MOZ_ASSERT(gFirstIdleTask);
   gFirstIdleTask = nullptr;
-  ContentChild::GetSingleton()->SendFirstIdle();
+
+  // When recording or replaying, the middleman process will send this message instead.
+  if (!recordreplay::IsRecordingOrReplaying()) {
+    ContentChild::GetSingleton()->SendFirstIdle();
+  }
 }
 
 mozilla::jsipc::PJavaScriptChild *
 ContentChild::AllocPJavaScriptChild()
 {
   MOZ_ASSERT(ManagedPJavaScriptChild().IsEmpty());
 
   return nsIContentChild::AllocPJavaScriptChild();
@@ -2047,16 +2050,19 @@ ContentChild::DeallocPTestShellChild(PTe
 }
 
 jsipc::CPOWManager*
 ContentChild::GetCPOWManager()
 {
   if (PJavaScriptChild* c = LoneManagedOrNullAsserts(ManagedPJavaScriptChild())) {
     return CPOWManagerFor(c);
   }
+  if (recordreplay::IsMiddleman()) {
+    return nullptr;
+  }
   return CPOWManagerFor(SendPJavaScriptConstructor());
 }
 
 mozilla::ipc::IPCResult
 ContentChild::RecvPTestShellConstructor(PTestShellChild* actor)
 {
   return IPC_OK();
 }
@@ -2575,25 +2581,28 @@ ContentChild::RecvRegisterStringBundles(
 }
 
 mozilla::ipc::IPCResult
 ContentChild::RecvUpdateSharedData(const FileDescriptor& aMapFile,
                                    const uint32_t& aMapSize,
                                    nsTArray<IPCBlob>&& aBlobs,
                                    nsTArray<nsCString>&& aChangedKeys)
 {
+  nsTArray<RefPtr<BlobImpl>> blobImpls(aBlobs.Length());
+  for (auto& ipcBlob : aBlobs) {
+    blobImpls.AppendElement(IPCBlobUtils::Deserialize(ipcBlob));
+  }
+
   if (mSharedData) {
-    nsTArray<RefPtr<BlobImpl>> blobImpls(aBlobs.Length());
-    for (auto& ipcBlob : aBlobs) {
-      blobImpls.AppendElement(IPCBlobUtils::Deserialize(ipcBlob));
-    }
-
     mSharedData->Update(aMapFile, aMapSize,
                         std::move(blobImpls),
                         std::move(aChangedKeys));
+  } else {
+    mSharedData = new SharedMap(ProcessGlobal::Get(), aMapFile,
+                                aMapSize, std::move(blobImpls));
   }
 
   return IPC_OK();
 }
 
 mozilla::ipc::IPCResult
 ContentChild::RecvGeolocationUpdate(nsIDOMGeoPosition* aPosition)
 {
--- a/dom/ipc/ContentChild.h
+++ b/dom/ipc/ContentChild.h
@@ -621,19 +621,17 @@ public:
           const bool& anonymize,
           const bool& minimizeMemoryUsage,
           const MaybeFileDesc& DMDFile) override;
 
   virtual mozilla::ipc::IPCResult
   RecvSetXPCOMProcessAttributes(const XPCOMInitData& aXPCOMInit,
                                 const StructuredCloneData& aInitialData,
                                 nsTArray<LookAndFeelInt>&& aLookAndFeelIntCache,
-                                nsTArray<SystemFontListEntry>&& aFontList,
-                                const FileDescriptor& aSharedDataMapFile,
-                                const uint32_t& aSharedDataMapSize) override;
+                                nsTArray<SystemFontListEntry>&& aFontList) override;
 
   virtual mozilla::ipc::IPCResult
   RecvProvideAnonymousTemporaryFile(const uint64_t& aID, const FileDescOrError& aFD) override;
 
   mozilla::ipc::IPCResult
   RecvSetPermissionsWithKey(const nsCString& aPermissionKey,
                             nsTArray<IPC::Permission>&& aPerms) override;
 
--- a/dom/ipc/ContentParent.cpp
+++ b/dom/ipc/ContentParent.cpp
@@ -2475,22 +2475,22 @@ ContentParent::InitInternal(ProcessPrior
 
   // Send the dynamic scalar definitions to the new process.
   TelemetryIPC::GetDynamicScalarDefinitions(xpcomInit.dynamicScalarDefs());
 
   // Must send screen info before send initialData
   ScreenManager& screenManager = ScreenManager::GetSingleton();
   screenManager.CopyScreensToRemote(this);
 
+  Unused << SendSetXPCOMProcessAttributes(xpcomInit, initialData, lnfCache,
+                                          fontList);
+
   ipc::WritableSharedMap* sharedData = nsFrameMessageManager::sParentProcessManager->SharedData();
   sharedData->Flush();
-
-  Unused << SendSetXPCOMProcessAttributes(xpcomInit, initialData, lnfCache,
-                                          fontList, sharedData->CloneMapFile(),
-                                          sharedData->MapSize());
+  sharedData->SendTo(this);
 
   nsCOMPtr<nsIChromeRegistry> registrySvc = nsChromeRegistry::GetService();
   nsChromeRegistryChrome* chromeRegistry =
     static_cast<nsChromeRegistryChrome*>(registrySvc.get());
   chromeRegistry->SendRegisteredChrome(this);
 
   nsCOMPtr<nsIStringBundleService> stringBundleService =
     services::GetStringBundleService();
--- a/dom/ipc/PContent.ipdl
+++ b/dom/ipc/PContent.ipdl
@@ -512,19 +512,17 @@ child:
      * Send BlobURLRegistrationData to child process.
      */
     async InitBlobURLs(BlobURLRegistrationData[] registrations);
 
     async SetXPCOMProcessAttributes(XPCOMInitData xpcomInit,
                                     StructuredCloneData initialData,
                                     LookAndFeelInt[] lookAndFeelIntCache,
                                     /* used on MacOSX and Linux only: */
-                                    SystemFontListEntry[] systemFontList,
-                                    FileDescriptor sharedDataMapFile,
-                                    uint32_t sharedDataMapSize);
+                                    SystemFontListEntry[] systemFontList);
 
     // Notify child that last-pb-context-exited notification was observed
     async LastPrivateDocShellDestroyed();
 
     async NotifyProcessPriorityChanged(ProcessPriority priority);
     async MinimizeMemoryUsage();
 
     /**
--- a/dom/ipc/ProcessHangMonitor.cpp
+++ b/dom/ipc/ProcessHangMonitor.cpp
@@ -345,17 +345,19 @@ HangMonitorChild::InterruptCallback()
     paintWhileInterruptingJS = mPaintWhileInterruptingJS;
     paintWhileInterruptingJSForce = mPaintWhileInterruptingJSForce;
     paintWhileInterruptingJSTab = mPaintWhileInterruptingJSTab;
     paintWhileInterruptingJSEpoch = mPaintWhileInterruptingJSEpoch;
 
     mPaintWhileInterruptingJS = false;
   }
 
-  if (paintWhileInterruptingJS) {
+  // Don't paint from the interrupt callback when recording or replaying, as
+  // the interrupt callback is triggered non-deterministically.
+  if (paintWhileInterruptingJS && !recordreplay::IsRecordingOrReplaying()) {
     RefPtr<TabChild> tabChild = TabChild::FindTabChild(paintWhileInterruptingJSTab);
     if (tabChild) {
       js::AutoAssertNoContentJS nojs(mContext);
       tabChild->PaintWhileInterruptingJS(paintWhileInterruptingJSEpoch,
                                          paintWhileInterruptingJSForce);
     }
   }
 }
--- a/dom/ipc/SharedMap.cpp
+++ b/dom/ipc/SharedMap.cpp
@@ -38,18 +38,19 @@ AlignTo(size_t* aOffset, size_t aAlign)
 }
 
 
 SharedMap::SharedMap()
   : DOMEventTargetHelper()
 {}
 
 SharedMap::SharedMap(nsIGlobalObject* aGlobal, const FileDescriptor& aMapFile,
-                     size_t aMapSize)
+                     size_t aMapSize, nsTArray<RefPtr<BlobImpl>>&& aBlobs)
   : DOMEventTargetHelper(aGlobal)
+  , mBlobImpls(std::move(aBlobs))
 {
   mMapFile.reset(new FileDescriptor(aMapFile));
   mMapSize = aMapSize;
 }
 
 
 bool
 SharedMap::Has(const nsACString& aName)
@@ -102,17 +103,17 @@ SharedMap::Entry::Read(JSContext* aCx,
   }
   if (mBlobCount) {
     holder.BlobImpls().AppendElements(Blobs());
   }
   holder.Read(aCx, aRetVal, aRv);
 }
 
 FileDescriptor
-SharedMap::CloneMapFile()
+SharedMap::CloneMapFile() const
 {
   if (mMap.initialized()) {
     return mMap.cloneHandle();
   }
   return *mMapFile;
 }
 
 void
@@ -278,18 +279,19 @@ WritableSharedMap::WritableSharedMap()
   Unused << Serialize();
   MOZ_RELEASE_ASSERT(mMap.initialized());
 }
 
 SharedMap*
 WritableSharedMap::GetReadOnly()
 {
   if (!mReadOnly) {
+    nsTArray<RefPtr<BlobImpl>> blobs(mBlobImpls);
     mReadOnly = new SharedMap(ProcessGlobal::Get(), CloneMapFile(),
-                              MapSize());
+                              MapSize(), std::move(blobs));
   }
   return mReadOnly;
 }
 
 Result<Ok, nsresult>
 WritableSharedMap::Serialize()
 {
   // Serializes a new snapshot of the map, initializes a new read-only shared
@@ -344,24 +346,25 @@ WritableSharedMap::Serialize()
   // We need to build the new array of blobs before we overwrite the existing
   // one, since previously-serialized entries will store their blob references
   // as indexes into our blobs array.
   nsTArray<RefPtr<BlobImpl>> blobImpls(blobCount);
 
   for (auto& entry : IterHash(mEntries)) {
     AlignTo(&offset, kStructuredCloneAlign);
 
-    entry->ExtractData(&ptr[offset], offset, blobImpls.Length());
+    size_t blobOffset = blobImpls.Length();
+    if (entry->BlobCount()) {
+      blobImpls.AppendElements(entry->Blobs());
+    }
+
+    entry->ExtractData(&ptr[offset], offset, blobOffset);
     entry->Code(header);
 
     offset += entry->Size();
-
-    if (entry->BlobCount()) {
-      mBlobImpls.AppendElements(entry->Blobs());
-    }
   }
 
   mBlobImpls = std::move(blobImpls);
 
   // FIXME: We should create a separate OutputBuffer class which can encode to
   // a static memory region rather than dynamically allocating and then
   // copying.
   MOZ_ASSERT(header.cursor() == headerSize);
@@ -370,41 +373,47 @@ WritableSharedMap::Serialize()
   // We've already updated offsets at this point. We need this to succeed.
   mMap.reset();
   MOZ_RELEASE_ASSERT(mem.Finalize(mMap).isOk());
 
   return Ok();
 }
 
 void
+WritableSharedMap::SendTo(ContentParent* aParent) const
+{
+    nsTArray<IPCBlob> blobs(mBlobImpls.Length());
+
+    for (auto& blobImpl : mBlobImpls) {
+      nsresult rv = IPCBlobUtils::Serialize(blobImpl, aParent,
+                                            *blobs.AppendElement());
+      if (NS_WARN_IF(NS_FAILED(rv))) {
+        continue;
+      }
+    }
+
+    Unused << aParent->SendUpdateSharedData(CloneMapFile(), mMap.size(),
+                                            blobs, mChangedKeys);
+}
+
+void
 WritableSharedMap::BroadcastChanges()
 {
   if (mChangedKeys.IsEmpty()) {
     return;
   }
 
   if (!Serialize().isOk()) {
     return;
   }
 
   nsTArray<ContentParent*> parents;
   ContentParent::GetAll(parents);
   for (auto& parent : parents) {
-    nsTArray<IPCBlob> blobs(mBlobImpls.Length());
-
-    for (auto& blobImpl : mBlobImpls) {
-      nsresult rv = IPCBlobUtils::Serialize(blobImpl, parent,
-                                            *blobs.AppendElement());
-      if (NS_WARN_IF(NS_FAILED(rv))) {
-        continue;
-      }
-    }
-
-    Unused << parent->SendUpdateSharedData(CloneMapFile(), mMap.size(),
-                                           blobs, mChangedKeys);
+    SendTo(parent);
   }
 
   if (mReadOnly) {
     nsTArray<RefPtr<BlobImpl>> blobImpls(mBlobImpls);
     mReadOnly->Update(CloneMapFile(), mMap.size(),
                       std::move(blobImpls),
                       std::move(mChangedKeys));
   }
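Hoisting the per-parent serialization loop into SendTo also changes process startup: ContentParent::InitInternal (above) now calls Flush() followed by SendTo(this), so a new child receives its initial map through the same UpdateSharedData message as later updates, instead of through SetXPCOMProcessAttributes. The resulting flow, sketched using only calls from this patch:

// Parent, when a content process launches:
sharedData->Flush();        // serialize any queued changes
sharedData->SendTo(parent); // first UpdateSharedData message for this child

// Child, in ContentChild::RecvUpdateSharedData: the first message
// constructs mSharedData from the received handle and blobs; subsequent
// messages call mSharedData->Update().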
--- a/dom/ipc/SharedMap.h
+++ b/dom/ipc/SharedMap.h
@@ -17,16 +17,19 @@
 #include "mozilla/Variant.h"
 #include "nsClassHashtable.h"
 #include "nsTArray.h"
 
 class nsIGlobalObject;
 
 namespace mozilla {
 namespace dom {
+
+class ContentParent;
+
 namespace ipc {
 
 /**
  * Together, the SharedMap and WritableSharedMap classes allow sharing a
  * dynamically-updated, shared-memory key-value store across processes.
  *
  * The maps may only ever be updated in the parent process, via
  * WritableSharedMap instances. When that map changes, its entire contents are
@@ -53,17 +56,18 @@ namespace ipc {
 class SharedMap : public DOMEventTargetHelper
 {
   using FileDescriptor = mozilla::ipc::FileDescriptor;
 
 public:
 
   SharedMap();
 
-  SharedMap(nsIGlobalObject* aGlobal, const FileDescriptor&, size_t);
+  SharedMap(nsIGlobalObject* aGlobal, const FileDescriptor&, size_t,
+            nsTArray<RefPtr<BlobImpl>>&& aBlobs);
 
   // Returns true if the map contains the given (UTF-8) key.
   bool Has(const nsACString& name);
 
   // If the map contains the given (UTF-8) key, decodes and returns a new copy
   // of its value. Otherwise returns null.
   void Get(JSContext* cx, const nsACString& name, JS::MutableHandleValue aRetVal,
            ErrorResult& aRv);
@@ -100,17 +104,17 @@ public:
                        JS::MutableHandle<JS::Value> aResult) const;
 
 
   /**
    * Returns a copy of the read-only file descriptor which backs the shared
    * memory region for this map. The file descriptor may be passed between
    * processes, and used to update corresponding instances in child processes.
    */
-  FileDescriptor CloneMapFile();
+  FileDescriptor CloneMapFile() const;
 
   /**
    * Returns the size of the memory mapped region that backs this map. Must be
    * passed to the SharedMap() constructor or Update() method along with the
    * descriptor returned by CloneMapFile() in order to initialize or update a
    * child SharedMap.
    */
   size_t MapSize() const { return mMap.size(); }
@@ -341,16 +345,20 @@ public:
   }
 
 
   // Flushes any queued changes to a new snapshot, and broadcasts it to all
   // child SharedMap instances.
   void Flush();
 
 
+  // Sends the current set of shared map data to the given content process.
+  void SendTo(ContentParent* aContentParent) const;
+
+
   /**
    * Returns the read-only SharedMap instance corresponding to this
    * WritableSharedMap for use in the parent process.
    */
   SharedMap* GetReadOnly();
 
 
   JSObject* WrapObject(JSContext* aCx, JS::HandleObject aGivenProto) override;
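
For reference, the child-facing read API declared above is used roughly as follows from C++. A hedged sketch only, assuming a map instance and JS context are already in hand:

    // Sketch: reading a value out of a SharedMap.
    void
    ReadExample(SharedMap* aMap, JSContext* aCx, ErrorResult& aRv)
    {
      if (aMap->Has(NS_LITERAL_CSTRING("example-key"))) {
        JS::Rooted<JS::Value> value(aCx);
        // Get() decodes a fresh structured-clone copy of the stored value.
        aMap->Get(aCx, NS_LITERAL_CSTRING("example-key"), &value, aRv);
      }
    }
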
--- a/dom/ipc/StructuredCloneData.cpp
+++ b/dom/ipc/StructuredCloneData.cpp
@@ -53,16 +53,17 @@ StructuredCloneData::StructuredCloneData
 {}
 
 StructuredCloneData::~StructuredCloneData()
 {}
 
 StructuredCloneData&
 StructuredCloneData::operator=(StructuredCloneData&& aOther)
 {
+  mBlobImplArray = std::move(aOther.mBlobImplArray);
   mExternalData = std::move(aOther.mExternalData);
   mSharedData = std::move(aOther.mSharedData);
   mIPCStreams = std::move(aOther.mIPCStreams);
   mInitialized = aOther.mInitialized;
 
   return *this;
 }
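
The one-line StructuredCloneData fix above deserves a gloss: with a hand-written move-assignment operator, any member left out of the operator is silently left behind on move. A reduced illustration of the bug class, with hypothetical names:

    // Forgetting a member in a hand-written move assignment silently drops
    // its contents; before the fix, mBlobImplArray was in that position.
    struct CloneDataSketch
    {
      nsTArray<RefPtr<BlobImpl>> mBlobImplArray;
      bool mInitialized = false;

      CloneDataSketch& operator=(CloneDataSketch&& aOther)
      {
        mBlobImplArray = std::move(aOther.mBlobImplArray);  // the added line
        mInitialized = aOther.mInitialized;
        return *this;
      }
    };
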
 
--- a/dom/ipc/TabChild.cpp
+++ b/dom/ipc/TabChild.cpp
@@ -3043,18 +3043,21 @@ TabChild::DoSendBlockingMessage(JSContex
                                 nsTArray<StructuredCloneData>* aRetVal,
                                 bool aIsSync)
 {
   ClonedMessageData data;
   if (!BuildClonedMessageDataForChild(Manager(), aData, data)) {
     return false;
   }
   InfallibleTArray<CpowEntry> cpows;
-  if (aCpows && !Manager()->GetCPOWManager()->Wrap(aCx, aCpows, &cpows)) {
-    return false;
+  if (aCpows) {
+    jsipc::CPOWManager* mgr = Manager()->GetCPOWManager();
+    if (!mgr || !mgr->Wrap(aCx, aCpows, &cpows)) {
+      return false;
+    }
   }
   if (aIsSync) {
     return SendSyncMessage(PromiseFlatString(aMessage), data, cpows,
                            Principal(aPrincipal), aRetVal);
   }
 
   return SendRpcMessage(PromiseFlatString(aMessage), data, cpows,
                         Principal(aPrincipal), aRetVal);
@@ -3067,18 +3070,21 @@ TabChild::DoSendAsyncMessage(JSContext* 
                              JS::Handle<JSObject *> aCpows,
                              nsIPrincipal* aPrincipal)
 {
   ClonedMessageData data;
   if (!BuildClonedMessageDataForChild(Manager(), aData, data)) {
     return NS_ERROR_DOM_DATA_CLONE_ERR;
   }
   InfallibleTArray<CpowEntry> cpows;
-  if (aCpows && !Manager()->GetCPOWManager()->Wrap(aCx, aCpows, &cpows)) {
-    return NS_ERROR_UNEXPECTED;
+  if (aCpows) {
+    jsipc::CPOWManager* mgr = Manager()->GetCPOWManager();
+    if (!mgr || !mgr->Wrap(aCx, aCpows, &cpows)) {
+      return NS_ERROR_UNEXPECTED;
+    }
   }
   if (!SendAsyncMessage(PromiseFlatString(aMessage), cpows,
                         Principal(aPrincipal), data)) {
     return NS_ERROR_UNEXPECTED;
   }
   return NS_OK;
 }
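
Both TabChild call sites now check GetCPOWManager() for null instead of dereferencing it unconditionally. The same guard written as a helper, purely for illustration (the patch inlines the check at each call site):

    // Illustrative helper equivalent to the inlined null checks above.
    static bool
    WrapCpows(JSContext* aCx, JS::Handle<JSObject*> aCpows,
              jsipc::CPOWManager* aMgr, InfallibleTArray<CpowEntry>& aOut)
    {
      // The manager may legitimately be null, so check before Wrap().
      return aMgr && aMgr->Wrap(aCx, aCpows, &aOut);
    }
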
 
--- a/dom/ipc/tests/test_sharedMap.js
+++ b/dom/ipc/tests/test_sharedMap.js
@@ -1,20 +1,36 @@
 "use strict";
 
 ChromeUtils.import("resource://gre/modules/AppConstants.jsm");
 ChromeUtils.import("resource://gre/modules/Services.jsm");
+ChromeUtils.import("resource://gre/modules/ExtensionUtils.jsm");
 ChromeUtils.import("resource://testing-common/ExtensionXPCShellUtils.jsm");
 
+const PROCESS_COUNT_PREF = "dom.ipc.processCount";
+
 const remote = AppConstants.platform !== "android";
 
 ExtensionTestUtils.init(this);
 
 let contentPage;
 
+Cu.importGlobalProperties(["Blob", "FileReader"]);
+
+async function readBlob(key, sharedData = Services.cpmm.sharedData) {
+  let reader = new FileReader();
+  reader.readAsText(sharedData.get(key));
+  await ExtensionUtils.promiseEvent(reader, "loadend");
+  return reader.result;
+}
+
+function getKey(key, sharedData = Services.cpmm.sharedData) {
+  return sharedData.get(key);
+}
+
 function getContents(sharedMap = Services.cpmm.sharedData) {
   return {
     keys: Array.from(sharedMap.keys()),
     values: Array.from(sharedMap.values()),
     entries: Array.from(sharedMap.entries()),
     getValues: Array.from(sharedMap.keys(),
                           key => sharedMap.get(key)),
   };
@@ -55,19 +71,33 @@ async function checkContentMaps(expected
 
   if (!parentOnly) {
     info("Checking out-of-process content map");
     let contents = await contentPage.spawn(undefined, getContents);
     checkMap(contents, expected);
   }
 }
 
+async function loadContentPage() {
+  let page = await ExtensionTestUtils.loadContentPage("about:blank", {remote});
+  registerCleanupFunction(() => page.close());
+
+  page.addFrameScriptHelper(`
+    ChromeUtils.import("resource://gre/modules/ExtensionUtils.jsm");
+    Cu.importGlobalProperties(["FileReader"]);
+  `);
+  return page;
+}
+
 add_task(async function setup() {
-  contentPage = await ExtensionTestUtils.loadContentPage("about:blank", {remote});
-  registerCleanupFunction(() => contentPage.close());
+  // Start with one content process so that we can increase the number
+  // later and test the behavior of a fresh content process.
+  Services.prefs.setIntPref(PROCESS_COUNT_PREF, 1);
+
+  contentPage = await loadContentPage();
 });
 
 add_task(async function test_sharedMap() {
   let {sharedData} = Services.ppmm;
 
   info("Check that parent and child maps are both initially empty");
 
   checkParentMap([]);
@@ -155,8 +185,66 @@ add_task(async function test_sharedMap()
 
   info("Wait for an idle timeout. Check that changes are now visible in all children");
 
   await new Promise(resolve => ChromeUtils.idleDispatch(resolve));
 
   checkParentMap(expected);
   await checkContentMaps(expected);
 });
+
+add_task(async function test_blobs() {
+  let {sharedData} = Services.ppmm;
+
+  let text = [
+    "The quick brown fox jumps over the lazy dog",
+    "Lorem ipsum dolor sit amet, consectetur adipiscing elit",
+    "sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.",
+  ];
+  let blobs = text.map(str => new Blob([str]));
+
+  let data = {foo: {bar: "baz"}};
+
+  sharedData.set("blob0", blobs[0]);
+  sharedData.set("blob1", blobs[1]);
+  sharedData.set("data", data);
+
+  equal(await readBlob("blob0", sharedData), text[0], "Expected text for blob0 in parent ppmm");
+
+  sharedData.flush();
+
+  equal(await readBlob("blob0", sharedData), text[0], "Expected text for blob0 in parent ppmm");
+  equal(await readBlob("blob1", sharedData), text[1], "Expected text for blob1 in parent ppmm");
+
+  equal(await readBlob("blob0"), text[0], "Expected text for blob0 in parent cpmm");
+  equal(await readBlob("blob1"), text[1], "Expected text for blob1 in parent cpmm");
+
+  equal(await contentPage.spawn("blob0", readBlob), text[0], "Expected text for blob0 in child 1 cpmm");
+  equal(await contentPage.spawn("blob1", readBlob), text[1], "Expected text for blob1 in child 1 cpmm");
+
+  // Start a second child process
+  Services.prefs.setIntPref(PROCESS_COUNT_PREF, 2);
+
+  let page2 = await loadContentPage();
+
+  equal(await page2.spawn("blob0", readBlob), text[0], "Expected text for blob0 in child 2 cpmm");
+  equal(await page2.spawn("blob1", readBlob), text[1], "Expected text for blob1 in child 2 cpmm");
+
+  sharedData.set("blob0", blobs[2]);
+
+  equal(await readBlob("blob0", sharedData), text[2], "Expected text for blob0 in parent ppmm");
+
+  sharedData.flush();
+
+  equal(await readBlob("blob0", sharedData), text[2], "Expected text for blob0 in parent ppmm");
+  equal(await readBlob("blob1", sharedData), text[1], "Expected text for blob1 in parent ppmm");
+
+  equal(await readBlob("blob0"), text[2], "Expected text for blob0 in parent cpmm");
+  equal(await readBlob("blob1"), text[1], "Expected text for blob1 in parent cpmm");
+
+  equal(await contentPage.spawn("blob0", readBlob), text[2], "Expected text for blob0 in child 1 cpmm");
+  equal(await contentPage.spawn("blob1", readBlob), text[1], "Expected text for blob1 in child 1 cpmm");
+
+  equal(await page2.spawn("blob0", readBlob), text[2], "Expected text for blob0 in child 2 cpmm");
+  equal(await page2.spawn("blob1", readBlob), text[1], "Expected text for blob1 in child 2 cpmm");
+
+  deepEqual(await page2.spawn("data", getKey), data, "Expected data for data key in child 2 cpmm");
+});
--- a/dom/jsurl/test/mochitest.ini
+++ b/dom/jsurl/test/mochitest.ini
@@ -8,8 +8,10 @@ support-files =
   load-stopping-1d.html
   pass.html
 
 [test_bug351633-1.html]
 [test_bug351633-2.html]
 [test_bug351633-3.html]
 [test_bug351633-4.html]
 [test_bug384981.html]
+[test_bug1382035-1.html]
+[test_bug1382035-2.html]
new file mode 100644
--- /dev/null
+++ b/dom/jsurl/test/test_bug1382035-1.html
@@ -0,0 +1,33 @@
+<!DOCTYPE HTML>
+<html>
+<!--
+https://bugzilla.mozilla.org/show_bug.cgi?id=1382035
+-->
+<head>
+  <meta charset="utf-8">
+  <title>Test for Bug 1382035</title>
+  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
+  <script type="application/javascript">
+
+  /** Test for Bug 1382035 **/
+  addLoadEvent(function() {
+    is(frames[0].document.documentElement.textContent, "",
+       "Should not navigate to a stringified Promise");
+    SimpleTest.finish();
+  });
+
+  SimpleTest.waitForExplicitFinish();
+  </script>
+</head>
+<body>
+<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1382035">Mozilla Bug 1382035</a>
+<p id="display"></p>
+<div id="content" style="display: none">
+  <iframe src="javascript: Promise.resolve()">
+  </iframe>
+</div>
+<pre id="test">
+</pre>
+</body>
+</html>
new file mode 100644
--- /dev/null
+++ b/dom/jsurl/test/test_bug1382035-2.html
@@ -0,0 +1,38 @@
+<!DOCTYPE HTML>
+<html>
+<!--
+https://bugzilla.mozilla.org/show_bug.cgi?id=1382035
+-->
+<head>
+  <meta charset="utf-8">
+  <title>Test for Bug 1382035</title>
+  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
+  <script type="application/javascript">
+
+  /** Test for Bug 1382035 **/
+  addLoadEvent(function() {
+    is(frames[0].document.documentElement.textContent, "test",
+       "Should not navigate to a stringified Promise");
+    SimpleTest.finish();
+  });
+  SimpleTest.waitForExplicitFinish();
+  </script>
+</head>
+<body>
+<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1382035">Mozilla Bug 1382035</a>
+<p id="display"></p>
+<div id="content" style="display: none">
+  <iframe>
+  </iframe>
+  <script>
+    var blob = new Blob(["test"], { type: "text/html" });
+    var url = URL.createObjectURL(blob);
+    frames[0].location.href = url;
+    frames[0].location.href = "javascript: Promise.resolve()";
+  </script>
+</div>
+<pre id="test">
+</pre>
+</body>
+</html>
--- a/dom/media/CubebUtils.cpp
+++ b/dom/media/CubebUtils.cpp
@@ -555,17 +555,17 @@ void InitLibrary()
     cubeb_set_log_callback(CUBEB_LOG_NORMAL, CubebLogCallback);
   }
 
 #ifndef MOZ_WIDGET_ANDROID
   AbstractThread::MainThread()->Dispatch(
     NS_NewRunnableFunction("CubebUtils::InitLibrary", &InitBrandName));
 #endif
 #ifdef MOZ_CUBEB_REMOTING
-  if (sCubebSandbox && XRE_IsContentProcess()) {
+  if (sCubebSandbox && XRE_IsContentProcess() && !recordreplay::IsMiddleman()) {
     InitAudioIPCConnection();
   }
 #endif
 }
 
 void ShutdownLibrary()
 {
   Preferences::UnregisterCallbacks(PrefChanged, gInitCallbackPrefs);
--- a/dom/media/webaudio/AudioContext.cpp
+++ b/dom/media/webaudio/AudioContext.cpp
@@ -215,16 +215,22 @@ AudioContext::WrapObject(JSContext* aCx,
   }
 }
 
 /* static */ already_AddRefed<AudioContext>
 AudioContext::Constructor(const GlobalObject& aGlobal,
                           const AudioContextOptions& aOptions,
                           ErrorResult& aRv)
 {
+  // Audio playback is not yet supported when recording or replaying. See bug 1304147.
+  if (recordreplay::IsRecordingOrReplaying()) {
+    aRv.Throw(NS_ERROR_NOT_AVAILABLE);
+    return nullptr;
+  }
+
   nsCOMPtr<nsPIDOMWindowInner> window = do_QueryInterface(aGlobal.GetAsSupports());
   if (!window) {
     aRv.Throw(NS_ERROR_FAILURE);
     return nullptr;
   }
 
   float sampleRate = MediaStreamGraph::REQUEST_DEFAULT_SAMPLE_RATE;
   if (Preferences::GetBool("media.webaudio.audiocontextoptions-samplerate.enabled")) {
@@ -266,16 +272,22 @@ AudioContext::Constructor(const GlobalOb
 
 /* static */ already_AddRefed<AudioContext>
 AudioContext::Constructor(const GlobalObject& aGlobal,
                           uint32_t aNumberOfChannels,
                           uint32_t aLength,
                           float aSampleRate,
                           ErrorResult& aRv)
 {
+  // Audio playback is not yet supported when recording or replaying. See bug 1304147.
+  if (recordreplay::IsRecordingOrReplaying()) {
+    aRv.Throw(NS_ERROR_NOT_AVAILABLE);
+    return nullptr;
+  }
+
   nsCOMPtr<nsPIDOMWindowInner> window = do_QueryInterface(aGlobal.GetAsSupports());
   if (!window) {
     aRv.Throw(NS_ERROR_FAILURE);
     return nullptr;
   }
 
   if (aNumberOfChannels == 0 ||
       aNumberOfChannels > WebAudioUtils::MaxChannelCount ||
--- a/dom/plugins/base/nsPluginHost.cpp
+++ b/dom/plugins/base/nsPluginHost.cpp
@@ -2766,45 +2766,42 @@ nsPluginHost::RegisterWithCategoryManage
               aMimeType.get(), aType == ePluginUnregister ? "yes" : "no"));
 
   nsCOMPtr<nsICategoryManager> catMan =
     do_GetService(NS_CATEGORYMANAGER_CONTRACTID);
   if (!catMan) {
     return;
   }
 
-  const char *contractId =
-    "@mozilla.org/content/plugin/document-loader-factory;1";
+  NS_NAMED_LITERAL_CSTRING(contractId,
+                           "@mozilla.org/content/plugin/document-loader-factory;1");
 
   if (aType == ePluginRegister) {
     catMan->AddCategoryEntry("Gecko-Content-Viewers",
-                             aMimeType.get(),
+                             aMimeType,
                              contractId,
                              false, /* persist: broken by bug 193031 */
-                             mOverrideInternalTypes,
-                             nullptr);
+                             mOverrideInternalTypes);
   } else {
     if (aType == ePluginMaybeUnregister) {
       // Bail out if this type is still used by an enabled plugin
       if (HavePluginForType(aMimeType)) {
         return;
       }
     } else {
       MOZ_ASSERT(aType == ePluginUnregister, "Unknown nsRegisterType");
     }
 
     // Only delete the entry if a plugin registered for it
     nsCString value;
     nsresult rv = catMan->GetCategoryEntry("Gecko-Content-Viewers",
-                                           aMimeType.get(),
-                                           getter_Copies(value));
-    if (NS_SUCCEEDED(rv) && strcmp(value.get(), contractId) == 0) {
+                                           aMimeType, value);
+    if (NS_SUCCEEDED(rv) && value == contractId) {
       catMan->DeleteCategoryEntry("Gecko-Content-Viewers",
-                                  aMimeType.get(),
-                                  true);
+                                  aMimeType, true);
     }
   }
 }
 
 nsresult
 nsPluginHost::WritePluginInfo()
 {
   MOZ_ASSERT(XRE_IsParentProcess());
--- a/dom/push/PushNotifier.cpp
+++ b/dom/push/PushNotifier.cpp
@@ -315,19 +315,17 @@ PushDispatcher::DoNotifyObservers(nsISup
   if (!obsService) {
     return NS_ERROR_FAILURE;
   }
   // If there's a service for this push category, make sure it is alive.
   nsCOMPtr<nsICategoryManager> catMan =
     do_GetService(NS_CATEGORYMANAGER_CONTRACTID);
   if (catMan) {
     nsCString contractId;
-    nsresult rv = catMan->GetCategoryEntry("push",
-                                           mScope.BeginReading(),
-                                           getter_Copies(contractId));
+    nsresult rv = catMan->GetCategoryEntry("push", mScope, contractId);
     if (NS_SUCCEEDED(rv)) {
       // Ensure the service is created - we don't need to do anything with
       // it though - we assume the service constructor attaches a listener.
       nsCOMPtr<nsISupports> service = do_GetService(contractId.get());
     }
   }
   return obsService->NotifyObservers(aSubject, aTopic,
                                      NS_ConvertUTF8toUTF16(mScope).get());
--- a/dom/workers/RuntimeService.cpp
+++ b/dom/workers/RuntimeService.cpp
@@ -571,16 +571,23 @@ LoadJSGCMemoryOptions(const char* aPrefN
 }
 
 bool
 InterruptCallback(JSContext* aCx)
 {
   WorkerPrivate* worker = GetWorkerPrivateFromContext(aCx);
   MOZ_ASSERT(worker);
 
+  // As with the main thread, the interrupt callback is triggered
+  // non-deterministically when recording/replaying, so return early to avoid
+  // performing any recorded events.
+  if (recordreplay::IsRecordingOrReplaying()) {
+    return true;
+  }
+
   // Now is a good time to turn on profiling if it's pending.
   PROFILER_JS_INTERRUPT_CALLBACK();
 
   return worker->InterruptCallback(aCx);
 }
 
 class LogViolationDetailsRunnable final : public WorkerMainThreadRunnable
 {
@@ -960,30 +967,39 @@ public:
       MOZ_ASSERT(defaultLocale,
                  "failure of a WorkerPrivate to have a default locale should "
                  "have made the worker fail to spawn");
 
       if (!JS_SetDefaultLocale(Runtime(), defaultLocale.get())) {
         NS_WARNING("failed to set workerCx's default locale");
       }
     }
+
+    // Cycle collections must occur at consistent points when recording/replaying.
+    if (recordreplay::IsRecordingOrReplaying()) {
+      recordreplay::RegisterTrigger(this, [=]() { nsCycleCollector_collect(nullptr); });
+    }
   }
 
   void Shutdown(JSContext* cx) override
   {
     // The CC is shut down, and the superclass destructor will GC, so make sure
     // we don't try to CC again.
     mWorkerPrivate = nullptr;
 
     CycleCollectedJSRuntime::Shutdown(cx);
   }
 
   ~WorkerJSRuntime()
   {
     MOZ_COUNT_DTOR_INHERITED(WorkerJSRuntime, CycleCollectedJSRuntime);
+
+    if (recordreplay::IsRecordingOrReplaying()) {
+      recordreplay::UnregisterTrigger(this);
+    }
   }
 
   virtual void
   PrepareForForgetSkippable() override
   {
   }
 
   virtual void
@@ -1010,17 +1026,21 @@ public:
     if (!mWorkerPrivate) {
       // We're shutting down, no need to do anything.
       return;
     }
 
     mWorkerPrivate->AssertIsOnWorkerThread();
 
     if (aStatus == JSGC_END) {
-      nsCycleCollector_collect(nullptr);
+      if (recordreplay::IsRecordingOrReplaying()) {
+        recordreplay::ActivateTrigger(this);
+      } else {
+        nsCycleCollector_collect(nullptr);
+      }
     }
   }
 
 private:
   WorkerPrivate* mWorkerPrivate;
 };
 
 } // anonymous namespace
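
The RegisterTrigger/ActivateTrigger/UnregisterTrigger calls threaded through WorkerJSRuntime above follow the general record/replay trigger pattern: work with non-deterministic timing (here, cycle collection after a GC) is registered once and later requested, letting the replay infrastructure run it at the same point in the event stream as the recording did. A condensed sketch; DoWork() stands in for the deferred work, and the IsRecordingOrReplaying() guards from the real code are omitted for brevity:

    class TriggerUser
    {
    public:
      TriggerUser()
      {
        // Register once, keyed on |this|.
        mozilla::recordreplay::RegisterTrigger(this, [=]() { DoWork(); });
      }

      ~TriggerUser()
      {
        mozilla::recordreplay::UnregisterTrigger(this);
      }

      void OnNonDeterministicEvent()
      {
        // Request, rather than directly run, the registered work.
        mozilla::recordreplay::ActivateTrigger(this);
      }

    private:
      void DoWork() {}
    };
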
--- a/dom/xbl/nsXBLBinding.cpp
+++ b/dom/xbl/nsXBLBinding.cpp
@@ -1023,16 +1023,17 @@ nsXBLBinding::DoInitJSClass(JSContext *c
     // we can guarantee that in XBLFinalize this will be non-null.
     // Note that we can't just store aProtoBinding in the private and
     // addref/release the nsXBLDocumentInfo through it, because cycle
     // collection doesn't seem to work right if the private is not an
     // nsISupports.
     nsXBLDocumentInfo* docInfo = aProtoBinding->XBLDocumentInfo();
     ::JS_SetPrivate(proto, docInfo);
     NS_ADDREF(docInfo);
+    RecordReplayRegisterDeferredFinalize(docInfo);
     JS_SetReservedSlot(proto, 0, JS::PrivateValue(aProtoBinding));
 
     // Next, enter the realm of the property holder, wrap the proto, and
     // stick it on.
     JSAutoRealm ar3(cx, holder);
     if (!JS_WrapObject(cx, &proto) ||
         !JS_DefineUCProperty(cx, holder, aClassName.get(), -1, proto,
                              JSPROP_READONLY | JSPROP_PERMANENT))
--- a/extensions/spellcheck/locales/en-US/hunspell/en-US.dic
+++ b/extensions/spellcheck/locales/en-US/hunspell/en-US.dic
@@ -1,9 +1,9 @@
-52817
+52849
 0/nm
 0th/pt
 1/n1
 1st/p
 1th/tc
 2/nm
 2nd/p
 2th/tc
@@ -2431,16 +2431,17 @@ Charlemagne/M
 Charlene/M
 Charles/M
 Charleston/MS
 Charley/M
 Charlie/M
 Charlot/M
 Charlotta/M
 Charlotte/M
+Charlottesville/M
 Charlottetown/M
 Charlton
 Charmaine/M
 Charmian/M
 Charmin/M
 Charolais/M
 Charon/M
 Chartism/M
@@ -4551,16 +4552,17 @@ Fuzhou/M
 Fuzzbuster/M
 G/MNRB
 GA
 GAO
 GATT/M
 GB/M
 GCC/M
 GDP/M
+GDPR
 GE/M
 GED
 GHQ/M
 GHz/M
 GI
 GIF
 GIGO
 GM/M
@@ -15830,16 +15832,17 @@ background/MRZS
 backgrounder/M
 backhand/MDRSZG
 backhanded/Y
 backhander/M
 backhoe/MS
 backing/M
 backlash/MS
 backless
+backlit
 backlog/MS
 backlogged
 backlogging
 backpack/ZGMDRS
 backpacker/M
 backpacking/M
 backpedal/SDG
 backrest/SM
@@ -16085,16 +16088,17 @@ barbel/SM
 barbell/MS
 barber/GMD
 barberry/SM
 barbershop/MS
 barbie/S
 barbiturate/SM
 barbwire/M
 barcarole/SM
+barcode/SDG
 bard/SM
 bardic
 bare/DRSPYG
 bareback/D
 barefaced/Y
 barefoot/D
 barehanded
 bareheaded
@@ -16708,16 +16712,17 @@ bigheartedness/M
 bighorn/SM
 bight/MS
 bigmouth/M
 bigmouths
 bigness/M
 bigot/MDS
 bigotry/SM
 bigwig/MS
+bijection/S
 bijou/M
 bijoux
 bike/DRSMZG
 biker/M
 bikini/MS
 bilabial/MS
 bilateral/Y
 bilberry/S
@@ -20255,16 +20260,17 @@ committeemen
 committeewoman/M
 committeewomen
 committer/S
 committing/A
 commode's
 commode/EIS
 commodification
 commodious/Y
+commoditization
 commodity/SM
 commodore/SM
 common's
 common/UPRYT
 commonality/S
 commonalty/M
 commoner/MS
 commonness/UM
@@ -23723,17 +23729,19 @@ downtrend/MS
 downtrodden
 downturn/MS
 downward/S
 downwind
 downy/RT
 dowry/SM
 dowse/DRSZG
 dowser/M
+dox/SDG
 doxology/SM
+doxx/SDG
 doyen/SM
 doyenne/MS
 doz/XGDNS
 doze/M
 dozen/MH
 dozily
 dozy/RTP
 dpi
@@ -24511,17 +24519,17 @@ elocutionary
 elocutionist/SM
 elodea/SM
 elongate/DSGNX
 elongation/M
 elope/DSGL
 elopement/MS
 eloquence/M
 eloquent/Y
-else
+else/M
 elsewhere
 elucidate/DSGNX
 elucidation/M
 elude/DSG
 elusive/YP
 elusiveness/M
 elver/SM
 elves
@@ -27866,16 +27874,18 @@ gentry/SM
 genuflect/DGS
 genuflection/MS
 genuine/PY
 genuineness/M
 genus/M
 geocache/DSG
 geocentric
 geocentrically
+geocentricism
+geocentrism
 geochemistry/M
 geode/SM
 geodesic/SM
 geodesy/M
 geodetic
 geoengineering
 geog
 geographer/SM
@@ -29061,18 +29071,21 @@ handsaw/SM
 handset/SM
 handshake/JMGS
 handsome/PYTR
 handsomeness/M
 handspring/MS
 handstand/SM
 handwork/M
 handwoven
+handwrite/GS
 handwriting/M
 handwritten
+handwritten
+handwrote
 handy/UTR
 handyman/M
 handymen
 hang/MDRJZGS
 hangar/MS
 hangdog
 hanger/M
 hanging/M
@@ -29448,16 +29461,19 @@ heir/MS
 heiress/MS
 heirloom/SM
 heist/SMDG
 held
 helical
 helices
 helicopter/SGMD
 heliocentric
+heliocentrically
+heliocentricism
+heliocentrism
 heliotrope/SM
 helipad/S
 heliport/MS
 helium/M
 helix/M
 hell/M
 hellbent
 hellcat/MS
@@ -31168,16 +31184,17 @@ infestation/MS
 infidel/MS
 infidelity/S
 infiltrator/SM
 infinite/MV
 infinitesimal/SMY
 infinitival
 infinitive/MS
 infinitude/M
+infinitum
 infinity/SM
 infirm
 infirmary/SM
 infirmity/SM
 infix
 inflame/DSG
 inflammable
 inflammation/SM
@@ -34727,17 +34744,17 @@ meow/MDSG
 mercantile
 mercantilism/M
 mercenary/SM
 mercer/MS
 mercerize/GDS
 merchandise/MZGDRS
 merchandiser/M
 merchandising/M
-merchant/MBS
+merchant/MBSG
 merchantman/M
 merchantmen
 merciful/UY
 merciless/PY
 mercilessness/M
 mercurial/Y
 mercuric
 mercury/M
@@ -39405,16 +39422,17 @@ phosphorescent/Y
 phosphoric
 phosphorous
 phosphorus/M
 phosphorylate/DSGN
 photo/SGMD
 photocell/MS
 photocopier/M
 photocopy/DRSMZG
+photodetector/S
 photoelectric
 photoelectrically
 photoengrave/DRSJZG
 photoengraver/M
 photoengraving/M
 photofinishing/M
 photogenic
 photogenically
@@ -39424,16 +39442,18 @@ photographic
 photographically
 photographs/A
 photography/M
 photojournalism/M
 photojournalist/SM
 photometer/MS
 photon/MS
 photosensitive
+photosensor/S
+photosensory
 photostat/SM
 photostatic
 photostatted
 photostatting
 photosynthesis/M
 photosynthesize/GDS
 photosynthetic
 phototropic
@@ -40424,16 +40444,17 @@ prate/MZGDRS
 prater/M
 pratfall/SM
 prattle/DRSMZG
 prattler/M
 prawn/MDSG
 pray/ZGDRS
 prayer/M
 prayerful/Y
+pre-fill/SDG
 preach/DRSZGL
 preacher/M
 preachment/M
 preachy/RT
 preadolescence/SM
 preadolescent
 preamble/MGDS
 prearrange/LGDS
@@ -40532,16 +40553,17 @@ prefecture/MS
 prefer/SBL
 preferably
 preference/MS
 preferential/Y
 preferment/M
 preferred
 preferring
 prefigure/GDS
+prefill/SDG
 prefix/MDSG
 preform/GSD
 prefrontal
 pregame/SM
 pregnancy/SM
 pregnant
 preheat/GSD
 prehensile
@@ -40559,16 +40581,17 @@ prejudiced/U
 prejudicial
 prekindergarten/SM
 prelacy/M
 prelate/SM
 prelim/SM
 preliminarily
 preliminary/SM
 preliterate
+preload/SDG
 prelude/MS
 premarital
 premature/Y
 premed/SM
 premedical
 premeditate/DSGN
 premeditated/U
 premeditation/M
@@ -40596,16 +40619,17 @@ prepacked
 prepaid
 preparation/SM
 preparatory
 prepare/ZGDRS
 prepared/UP
 preparedness/UM
 prepay/GSL
 prepayment/MS
+prepend/SDG
 preponderance/SM
 preponderant/Y
 preponderate/GDS
 preposition/SM
 prepositional/Y
 prepossess/GDS
 prepossessing/U
 prepossession/SM
@@ -42360,16 +42384,17 @@ reject/GSMD
 rejection/SM
 rejoice/JGDS
 rejoicing/M
 rejoinder/SM
 rejuvenate/DSGN
 rejuvenation/M
 rel
 relate/DRSBXZGNV
+relatedly
 relatedness/M
 relater/M
 relation/M
 relational
 relationship/MS
 relative/MYS
 relativism/M
 relativist/S
@@ -43906,21 +43931,23 @@ scoopful/MS
 scoot/DRSZG
 scooter/M
 scope/MGDS
 scorbutic
 scorch/MDRSZG
 scorcher/M
 score/MZGDRS
 scoreboard/SM
+scorebook/MS
 scorecard/MS
 scorekeeper/MS
 scoreless
 scoreline/S
 scorer/M
+scoresheet/MS
 scorn/MDRSZG
 scorner/M
 scornful/Y
 scorpion/MS
 scot-free
 scotch/MDSG
 scotchs
 scoundrel/MS
@@ -47681,16 +47708,17 @@ surface/AGDS
 surfboard/MDSG
 surfeit/MDSG
 surfer/M
 surfing/M
 surge/DSMG
 surgeon/MS
 surgery/SM
 surgical/Y
+surjection/S
 surliness/M
 surly/PTR
 surmise/MGDS
 surmount/DGSB
 surmountable/I
 surname/MS
 surpass/GDS
 surpassed/U
@@ -49187,16 +49215,17 @@ tonnage/SM
 tonne/SM
 tonsil/MS
 tonsillectomy/SM
 tonsillitis/M
 tonsorial
 tonsure/DSMG
 tony/RT
 too
+toodles
 took/A
 tool's
 tool/ADGS
 toolbar/SM
 toolbox/MS
 toolkit
 toolmaker/MS
 toot/MDRZGS
@@ -50545,16 +50574,18 @@ unperceptive
 unpersuasive
 unpick/GDS
 unpin/S
 unpleasing
 unpolitical
 unpopular
 unpractical
 unprecedented/Y
+unpressured
+unpressurized
 unprofessional/Y
 unpromising
 unpropitious
 unquestioning/Y
 unquiet/TR
 unread/B
 unready
 unreal
@@ -50563,16 +50594,17 @@ unregenerate
 unrelated
 unrelenting/Y
 unrelieved/Y
 unremarkable
 unremitting/Y
 unrepentant
 unreported
 unrepresentative
+unrequest/D
 unrest/M
 unrevealing
 unripe/TR
 unroll/GDS
 unromantic
 unruliness/M
 unruly/RTP
 unsafe/YTR
--- a/gfx/layers/wr/WebRenderCommandBuilder.cpp
+++ b/gfx/layers/wr/WebRenderCommandBuilder.cpp
@@ -912,16 +912,19 @@ IsItemProbablyActive(nsDisplayItem* aIte
     return transformItem->MayBeAnimated(aDisplayListBuilder) || !is2D || HasActiveChildren(*transformItem->GetChildren(), aDisplayListBuilder);
   } else if (aItem->GetType() == DisplayItemType::TYPE_OPACITY) {
     nsDisplayOpacity* opacityItem = static_cast<nsDisplayOpacity*>(aItem);
     bool active = opacityItem->NeedsActiveLayer(aDisplayListBuilder, opacityItem->Frame());
     GP("active: %d\n", active);
     return active || HasActiveChildren(*opacityItem->GetChildren(), aDisplayListBuilder);
   }
   // TODO: handle other items?
+  if (aItem->GetChildren()) {
+    return HasActiveChildren(*aItem->GetChildren(), aDisplayListBuilder);
+  }
   return false;
 }
 
 // If we have an item we need to make sure it matches the current group
 // otherwise it means the item switched groups and we need to invalidate
 // it and recreate the data.
 static BlobItemData*
 GetBlobItemDataForGroup(nsDisplayItem* aItem, DIGroup* aGroup)
--- a/gfx/thebes/gfxPlatform.cpp
+++ b/gfx/thebes/gfxPlatform.cpp
@@ -2494,25 +2494,29 @@ gfxPlatform::InitCompositorAccelerationP
     }
   }
 
   // This has specific meaning elsewhere, so we always record it.
   if (gfxPrefs::LayersAccelerationForceEnabledDoNotUseDirectly()) {
     feature.UserForceEnable("Force-enabled by pref");
   }
 
-  // Safe and headless modes override everything.
+  // Safe, headless, and record/replay modes override everything.
   if (InSafeMode()) {
     feature.ForceDisable(FeatureStatus::Blocked, "Acceleration blocked by safe-mode",
                          NS_LITERAL_CSTRING("FEATURE_FAILURE_COMP_SAFEMODE"));
   }
   if (IsHeadless()) {
     feature.ForceDisable(FeatureStatus::Blocked, "Acceleration blocked by headless mode",
                          NS_LITERAL_CSTRING("FEATURE_FAILURE_COMP_HEADLESSMODE"));
   }
+  if (recordreplay::IsRecordingOrReplaying()) {
+    feature.ForceDisable(FeatureStatus::Blocked, "Acceleration blocked by recording/replaying",
+                         NS_LITERAL_CSTRING("FEATURE_FAILURE_COMP_RECORDREPLAY"));
+  }
 }
 
 /*static*/ bool
 gfxPlatform::WebRenderPrefEnabled()
 {
   return gfxPrefs::WebRenderAll() || gfxPrefs::WebRenderEnabledDoNotUseDirectly();
 }
 
--- a/gfx/thebes/gfxSVGGlyphs.cpp
+++ b/gfx/thebes/gfxSVGGlyphs.cpp
@@ -132,17 +132,17 @@ gfxSVGGlyphs::FindOrCreateGlyphsDocument
     return result;
 }
 
 nsresult
 gfxSVGGlyphsDocument::SetupPresentation()
 {
     nsCOMPtr<nsICategoryManager> catMan = do_GetService(NS_CATEGORYMANAGER_CONTRACTID);
     nsCString contractId;
-    nsresult rv = catMan->GetCategoryEntry("Gecko-Content-Viewers", "image/svg+xml", getter_Copies(contractId));
+    nsresult rv = catMan->GetCategoryEntry("Gecko-Content-Viewers", "image/svg+xml", contractId);
     NS_ENSURE_SUCCESS(rv, rv);
 
     nsCOMPtr<nsIDocumentLoaderFactory> docLoaderFactory =
       do_GetService(contractId.get());
     NS_ASSERTION(docLoaderFactory, "Couldn't get DocumentLoaderFactory");
 
     nsCOMPtr<nsIContentViewer> viewer;
     rv = docLoaderFactory->CreateInstanceForDocument(nullptr, mDocument, nullptr, getter_AddRefs(viewer));
--- a/gfx/webrender/src/debug_render.rs
+++ b/gfx/webrender/src/debug_render.rs
@@ -1,15 +1,15 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 use api::{ColorU, DeviceIntRect, DeviceUintSize, ImageFormat, TextureTarget};
 use debug_font_data;
-use device::{Device, Program, Texture, TextureSlot, VertexDescriptor, VAO};
+use device::{Device, Program, Texture, TextureSlot, VertexDescriptor, ShaderError, VAO};
 use device::{TextureFilter, VertexAttribute, VertexAttributeKind, VertexUsageHint};
 use euclid::{Point2D, Rect, Size2D, Transform3D};
 use internal_types::{ORTHO_FAR_PLANE, ORTHO_NEAR_PLANE};
 use std::f32;
 
 #[derive(Debug, Copy, Clone)]
 enum DebugSampler {
     Font,
@@ -99,52 +99,50 @@ pub struct DebugRenderer {
     tri_indices: Vec<u32>,
     tri_vao: VAO,
     line_vertices: Vec<DebugColorVertex>,
     line_vao: VAO,
     color_program: Program,
 }
 
 impl DebugRenderer {
-    pub fn new(device: &mut Device) -> Self {
-        let font_program = device.create_program("debug_font", "", &DESC_FONT).unwrap();
+    pub fn new(device: &mut Device) -> Result<Self, ShaderError> {
+        let font_program = device.create_program("debug_font", "", &DESC_FONT)?;
         device.bind_shader_samplers(&font_program, &[("sColor0", DebugSampler::Font)]);
 
-        let color_program = device
-            .create_program("debug_color", "", &DESC_COLOR)
-            .unwrap();
+        let color_program = device.create_program("debug_color", "", &DESC_COLOR)?;
 
         let font_vao = device.create_vao(&DESC_FONT);
         let line_vao = device.create_vao(&DESC_COLOR);
         let tri_vao = device.create_vao(&DESC_COLOR);
 
         let mut font_texture = device.create_texture(TextureTarget::Array, ImageFormat::R8);
         device.init_texture(
             &mut font_texture,
             debug_font_data::BMP_WIDTH,
             debug_font_data::BMP_HEIGHT,
             TextureFilter::Linear,
             None,
             1,
             Some(&debug_font_data::FONT_BITMAP),
         );
 
-        DebugRenderer {
+        Ok(DebugRenderer {
             font_vertices: Vec::new(),
             font_indices: Vec::new(),
             line_vertices: Vec::new(),
             tri_vao,
             tri_vertices: Vec::new(),
             tri_indices: Vec::new(),
             font_program,
             color_program,
             font_vao,
             line_vao,
             font_texture,
-        }
+        })
     }
 
     pub fn deinit(self, device: &mut Device) {
         device.delete_texture(self.font_texture);
         device.delete_program(self.font_program);
         device.delete_program(self.color_program);
         device.delete_vao(self.tri_vao);
         device.delete_vao(self.line_vao);
--- a/gfx/webrender/src/renderer.rs
+++ b/gfx/webrender/src/renderer.rs
@@ -1301,28 +1301,43 @@ struct TargetSelector {
     size: DeviceUintSize,
     num_layers: usize,
     format: ImageFormat,
 }
 
 #[cfg(feature = "debug_renderer")]
 struct LazyInitializedDebugRenderer {
     debug_renderer: Option<DebugRenderer>,
+    failed: bool,
 }
 
 #[cfg(feature = "debug_renderer")]
 impl LazyInitializedDebugRenderer {
     pub fn new() -> Self {
         Self {
             debug_renderer: None,
+            failed: false,
         }
     }
 
-    pub fn get_mut<'a>(&'a mut self, device: &mut Device) -> &'a mut DebugRenderer {
-        self.debug_renderer.get_or_insert_with(|| DebugRenderer::new(device))
+    pub fn get_mut<'a>(&'a mut self, device: &mut Device) -> Option<&'a mut DebugRenderer> {
+        if self.failed {
+            return None;
+        }
+        if self.debug_renderer.is_none() {
+            match DebugRenderer::new(device) {
+                Ok(renderer) => { self.debug_renderer = Some(renderer); }
+                Err(_) => {
+                    // The shader compilation code already logs errors.
+                    self.failed = true;
+                }
+            }
+        }
+
+        self.debug_renderer.as_mut()
     }
 
     pub fn deinit(self, device: &mut Device) {
         if let Some(debug_renderer) = self.debug_renderer {
             debug_renderer.deinit(device);
         }
     }
 }
@@ -2379,63 +2394,70 @@ impl Renderer {
             self.cpu_profiles.push_back(cpu_profile);
         }
 
         #[cfg(feature = "debug_renderer")]
         {
             if self.debug_flags.contains(DebugFlags::PROFILER_DBG) {
                 if let Some(framebuffer_size) = framebuffer_size {
                     //TODO: take device/pixel ratio into equation?
-                    let screen_fraction = 1.0 / framebuffer_size.to_f32().area();
-                    self.profiler.draw_profile(
-                        &frame_profiles,
-                        &self.backend_profile_counters,
-                        &self.profile_counters,
-                        &mut profile_timers,
-                        &profile_samplers,
-                        screen_fraction,
-                        self.debug.get_mut(&mut self.device),
-                        self.debug_flags.contains(DebugFlags::COMPACT_PROFILER),
-                    );
+                    if let Some(debug_renderer) = self.debug.get_mut(&mut self.device) {
+                        let screen_fraction = 1.0 / framebuffer_size.to_f32().area();
+                        self.profiler.draw_profile(
+                            &frame_profiles,
+                            &self.backend_profile_counters,
+                            &self.profile_counters,
+                            &mut profile_timers,
+                            &profile_samplers,
+                            screen_fraction,
+                            debug_renderer,
+                            self.debug_flags.contains(DebugFlags::COMPACT_PROFILER),
+                        );
+                    }
                 }
             }
 
             if self.debug_flags.contains(DebugFlags::NEW_FRAME_INDICATOR) {
-                self.new_frame_indicator.changed();
-                self.new_frame_indicator.draw(
-                    0.0, 0.0,
-                    ColorU::new(0, 110, 220, 255),
-                    self.debug.get_mut(&mut self.device)
-                );
+                if let Some(debug_renderer) = self.debug.get_mut(&mut self.device) {
+                    self.new_frame_indicator.changed();
+                    self.new_frame_indicator.draw(
+                        0.0, 0.0,
+                        ColorU::new(0, 110, 220, 255),
+                        debug_renderer,
+                    );
+                }
             }
 
             if self.debug_flags.contains(DebugFlags::NEW_SCENE_INDICATOR) {
-                self.new_scene_indicator.draw(
-                    160.0, 0.0,
-                    ColorU::new(220, 30, 10, 255),
-                    self.debug.get_mut(&mut self.device)
-                );
+                if let Some(debug_renderer) = self.debug.get_mut(&mut self.device) {
+                    self.new_scene_indicator.draw(
+                        160.0, 0.0,
+                        ColorU::new(220, 30, 10, 255),
+                        debug_renderer,
+                    );
+                }
             }
         }
 
         if self.debug_flags.contains(DebugFlags::ECHO_DRIVER_MESSAGES) {
             self.device.echo_driver_messages();
         }
 
         self.backend_profile_counters.reset();
         self.profile_counters.reset();
         self.profile_counters.frame_counter.inc();
 
         profile_timers.cpu_time.profile(|| {
             let _gm = self.gpu_profile.start_marker("end frame");
             self.gpu_profile.end_frame();
             #[cfg(feature = "debug_renderer")]
             {
-                self.debug.get_mut(&mut self.device)
-                          .render(&mut self.device, framebuffer_size);
+                if let Some(debug_renderer) = self.debug.get_mut(&mut self.device) {
+                    debug_renderer.render(&mut self.device, framebuffer_size);
+                }
             }
             self.device.end_frame();
         });
         if framebuffer_size.is_some() {
             self.last_time = current_time;
         }
 
         if self.renderer_errors.is_empty() {
@@ -3750,17 +3772,17 @@ impl Renderer {
             } else {
                 true
             });
 
         frame.has_been_rendered = true;
     }
 
     #[cfg(feature = "debug_renderer")]
-    pub fn debug_renderer<'b>(&'b mut self) -> &'b mut DebugRenderer {
+    pub fn debug_renderer<'b>(&'b mut self) -> Option<&'b mut DebugRenderer> {
         self.debug.get_mut(&mut self.device)
     }
 
     pub fn get_debug_flags(&self) -> DebugFlags {
         self.debug_flags
     }
 
     pub fn set_debug_flags(&mut self, flags: DebugFlags) {
@@ -3888,17 +3910,20 @@ impl Renderer {
     }
 
     #[cfg(feature = "debug_renderer")]
     fn draw_epoch_debug(&mut self) {
         if !self.debug_flags.contains(DebugFlags::EPOCHS) {
             return;
         }
 
-        let debug_renderer = self.debug.get_mut(&mut self.device);
+        let debug_renderer = match self.debug.get_mut(&mut self.device) {
+            Some(renderer) => renderer,
+            None => return,
+        };
 
         let dy = debug_renderer.line_height();
         let x0: f32 = 30.0;
         let y0: f32 = 30.0;
         let mut y = y0;
         let mut text_width = 0.0;
         for (pipeline, epoch) in  &self.pipeline_info.epochs {
             y += dy;
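
LazyInitializedDebugRenderer above now memoizes failure as well as success: a shader that fails to compile is attempted once, after which get_mut() cheaply returns None and every caller degrades gracefully instead of retrying per frame. The same shape rendered in C++ for comparison; illustrative only, the actual code is the Rust above:

    // Lazy init that remembers failure, so a known-bad init is not retried.
    template <typename T>
    class LazyFallible
    {
      mozilla::Maybe<T> mValue;
      bool mFailed = false;

    public:
      // aFactory fills mValue and returns false on failure.
      template <typename Factory>
      T* GetOrInit(Factory&& aFactory)
      {
        if (mFailed) {
          return nullptr;
        }
        if (mValue.isNothing() && !aFactory(mValue)) {
          mFailed = true;
          return nullptr;
        }
        return mValue.ptr();
      }
    };
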
--- a/gfx/webrender/src/resource_cache.rs
+++ b/gfx/webrender/src/resource_cache.rs
@@ -870,19 +870,25 @@ impl ResourceCache {
                     .or_insert(CachedImageInfo {
                         texture_cache_handle: TextureCacheHandle::new(),
                         dirty_rect: Some(template.descriptor.full_rect()),
                     })
             },
             ImageResult::Err(_) => panic!("Errors should already have been handled"),
         };
 
-        self.texture_cache.request(&entry.texture_cache_handle, gpu_cache);
+        let needs_upload = self.texture_cache.request(&entry.texture_cache_handle, gpu_cache);
 
-        self.pending_image_requests.insert(request);
+        if !needs_upload && entry.dirty_rect.is_none() {
+            return
+        }
+
+        if !self.pending_image_requests.insert(request) {
+            return
+        }
 
         if template.data.is_blob() {
             let request: BlobImageRequest = request.into();
             let missing = match self.rasterized_blob_images.get(&request.key) {
                 Some(img) => !img.data.contains_key(&request.tile),
                 None => true,
             };
 
--- a/gfx/webrender_bindings/revision.txt
+++ b/gfx/webrender_bindings/revision.txt
@@ -1,1 +1,1 @@
-c2c5aaebdd6df22ce13941c1a4b16ef47eaa9f7b
+e850fbd2e0e60a8de76c2d2464f0fa27316d5949
--- a/gfx/wrench/src/wrench.rs
+++ b/gfx/wrench/src/wrench.rs
@@ -556,17 +556,17 @@ impl Wrench {
             "Q - Toggle GPU queries for time and samples",
             "M - Trigger memory pressure event",
             "T - Save CPU profile to a file",
             "C - Save a capture to captures/wrench/",
             "X - Do a hit test at the current cursor position",
         ];
 
         let color_and_offset = [(*BLACK_COLOR, 2.0), (*WHITE_COLOR, 0.0)];
-        let dr = self.renderer.debug_renderer();
+        let dr = self.renderer.debug_renderer().unwrap();
 
         for ref co in &color_and_offset {
             let x = self.device_pixel_ratio * (15.0 + co.1);
             let mut y = self.device_pixel_ratio * (15.0 + co.1 + dr.line_height());
             for ref line in &help_lines {
                 dr.add_text(x, y, line, co.0.into());
                 y += self.device_pixel_ratio * dr.line_height();
             }
--- a/hal/sandbox/SandboxHal.cpp
+++ b/hal/sandbox/SandboxHal.cpp
@@ -129,17 +129,20 @@ LockScreenOrientation(const dom::ScreenO
   bool allowed;
   Hal()->SendLockScreenOrientation(aOrientation, &allowed);
   return allowed;
 }
 
 void
 UnlockScreenOrientation()
 {
-  Hal()->SendUnlockScreenOrientation();
+  // Don't send this message from both the middleman and recording processes.
+  if (!recordreplay::IsMiddleman()) {
+    Hal()->SendUnlockScreenOrientation();
+  }
 }
 
 void
 EnableSensorNotifications(SensorType aSensor) {
   Hal()->SendEnableSensorNotifications(aSensor);
 }
 
 void
--- a/image/SVGDocumentWrapper.cpp
+++ b/image/SVGDocumentWrapper.cpp
@@ -327,17 +327,17 @@ SVGDocumentWrapper::SetupViewer(nsIReque
   NS_ENSURE_TRUE(newLoadGroup, NS_ERROR_OUT_OF_MEMORY);
   newLoadGroup->SetLoadGroup(loadGroup);
 
   nsCOMPtr<nsICategoryManager> catMan =
     do_GetService(NS_CATEGORYMANAGER_CONTRACTID);
   NS_ENSURE_TRUE(catMan, NS_ERROR_NOT_AVAILABLE);
   nsCString contractId;
   nsresult rv = catMan->GetCategoryEntry("Gecko-Content-Viewers", IMAGE_SVG_XML,
-                                         getter_Copies(contractId));
+                                         contractId);
   NS_ENSURE_SUCCESS(rv, rv);
   nsCOMPtr<nsIDocumentLoaderFactory> docLoaderFactory =
     do_GetService(contractId.get());
   NS_ENSURE_TRUE(docLoaderFactory, NS_ERROR_NOT_AVAILABLE);
 
   nsCOMPtr<nsIContentViewer> viewer;
   nsCOMPtr<nsIStreamListener> listener;
   rv = docLoaderFactory->CreateInstance("external-resource", chan,
--- a/intl/strres/nsStringBundle.cpp
+++ b/intl/strres/nsStringBundle.cpp
@@ -115,25 +115,25 @@ namespace {
  */
 class StringBundleProxy : public nsIStringBundle
 {
   NS_DECL_THREADSAFE_ISUPPORTS
 
   NS_DECLARE_STATIC_IID_ACCESSOR(STRINGBUNDLEPROXY_IID)
 
   explicit StringBundleProxy(already_AddRefed<nsIStringBundle> aTarget)
-    : mReentrantMonitor("StringBundleProxy::mReentrantMonitor")
+    : mMutex("StringBundleProxy::mMutex")
     , mTarget(aTarget)
   {}
 
   NS_FORWARD_NSISTRINGBUNDLE(Target()->);
 
   void Retarget(nsIStringBundle* aTarget)
   {
-    ReentrantMonitorAutoEnter automon(mReentrantMonitor);
+    MutexAutoLock lock(mMutex);
     mTarget = aTarget;
   }
 
   size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const override
   {
     return aMallocSizeOf(this);
   }
 
@@ -141,25 +141,25 @@ class StringBundleProxy : public nsIStri
   {
     return mRefCnt == 1 ? SizeOfIncludingThis(aMallocSizeOf) : 0;
   }
 
 protected:
   virtual ~StringBundleProxy() = default;
 
 private:
-  ReentrantMonitor mReentrantMonitor;
+  Mutex mMutex;
   nsCOMPtr<nsIStringBundle> mTarget;
 
   // Atomically reads mTarget and returns a strong reference to it. This
   // allows for safe multi-threaded use when the proxy may be retargeted by
   // the main thread during access.
   nsCOMPtr<nsIStringBundle> Target()
   {
-    ReentrantMonitorAutoEnter automon(mReentrantMonitor);
+    MutexAutoLock lock(mMutex);
     return mTarget;
   }
 };
 
 NS_DEFINE_STATIC_IID_ACCESSOR(StringBundleProxy, STRINGBUNDLEPROXY_IID)
 
 NS_IMPL_ISUPPORTS(StringBundleProxy, nsIStringBundle, StringBundleProxy)
 
@@ -299,17 +299,17 @@ MakeBundleRefPtr(Args... args)
 
 NS_IMPL_ISUPPORTS(nsStringBundleBase, nsIStringBundle, nsIMemoryReporter)
 
 NS_IMPL_ISUPPORTS_INHERITED0(nsStringBundle, nsStringBundleBase)
 NS_IMPL_ISUPPORTS_INHERITED(SharedStringBundle, nsStringBundleBase, SharedStringBundle)
 
 nsStringBundleBase::nsStringBundleBase(const char* aURLSpec) :
   mPropertiesURL(aURLSpec),
-  mReentrantMonitor("nsStringBundle.mReentrantMonitor"),
+  mMutex("nsStringBundle.mMutex"),
   mAttemptedLoad(false),
   mLoaded(false)
 {
 }
 
 nsStringBundleBase::~nsStringBundleBase()
 {
   UnregisterWeakMemoryReporter(this);
@@ -590,17 +590,17 @@ nsStringBundleBase::GetStringFromAUTF8Na
   return GetStringFromName(PromiseFlatCString(aName).get(), aResult);
 }
 
 NS_IMETHODIMP
 nsStringBundleBase::GetStringFromName(const char* aName, nsAString& aResult)
 {
   NS_ENSURE_ARG_POINTER(aName);
 
-  ReentrantMonitorAutoEnter automon(mReentrantMonitor);
+  MutexAutoLock lock(mMutex);
 
   return GetStringImpl(nsDependentCString(aName), aResult);
 }
 
 nsresult
 nsStringBundle::GetStringImpl(const nsACString& aName, nsAString& aResult)
 {
   MOZ_TRY(LoadProperties());
--- a/intl/strres/nsStringBundle.h
+++ b/intl/strres/nsStringBundle.h
@@ -1,17 +1,17 @@
 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef nsStringBundle_h__
 #define nsStringBundle_h__
 
-#include "mozilla/ReentrantMonitor.h"
+#include "mozilla/Mutex.h"
 #include "nsIStringBundle.h"
 #include "nsIMemoryReporter.h"
 #include "nsCOMPtr.h"
 #include "nsString.h"
 #include "nsCOMArray.h"
 
 class nsIPersistentProperties;
 
@@ -50,20 +50,20 @@ protected:
     virtual ~nsStringBundleBase();
 
     virtual nsresult GetStringImpl(const nsACString& aName, nsAString& aResult) = 0;
 
     virtual nsresult GetSimpleEnumerationImpl(nsISimpleEnumerator** elements) = 0;
 
     void RegisterMemoryReporter();
 
-    nsCString              mPropertiesURL;
-    mozilla::ReentrantMonitor    mReentrantMonitor;
-    bool                         mAttemptedLoad;
-    bool                         mLoaded;
+    nsCString mPropertiesURL;
+    mozilla::Mutex mMutex;
+    bool mAttemptedLoad;
+    bool mLoaded;
 
     size_t SizeOfIncludingThisIfUnshared(mozilla::MallocSizeOf aMallocSizeOf) const override;
 
 public:
     static nsresult FormatString(const char16_t *formatStr,
                                  const char16_t **aParams, uint32_t aLength,
                                  nsAString& aResult);
 };
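
The ReentrantMonitor to Mutex swap above is safe because each critical section is a short member read or write that never calls back into the bundle while holding the lock, so reentrancy cannot occur; the plain Mutex is cheaper and will assert on recursive acquisition rather than silently allow it. Sketch of the invariant, assuming usage keeps this shape:

    // The locking shape that makes a non-reentrant Mutex sufficient: the
    // lock spans only a member access, never an outcall.
    nsCOMPtr<nsIStringBundle>
    StringBundleProxy::Target()
    {
      MutexAutoLock lock(mMutex);  // short critical section, no outcalls
      return mTarget;              // strong ref taken under the lock
    }
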
--- a/ipc/glue/CrashReporterClient.h
+++ b/ipc/glue/CrashReporterClient.h
@@ -27,16 +27,21 @@ public:
   //
   //   async SetCrashReporterClient(Shmem shmem);
   //
   // The parent-side receive function of this message should save the shmem
   // somewhere, and when the top-level actor's ActorDestroy runs (or when the
   // crash reporter needs metadata), the shmem should be parsed.
   template <typename T>
   static bool InitSingleton(T* aToplevelProtocol) {
+    // The crash reporter is not enabled in recording/replaying processes.
+    if (recordreplay::IsRecordingOrReplaying()) {
+      return true;
+    }
+
     Shmem shmem;
     if (!AllocShmem(aToplevelProtocol, &shmem)) {
       return false;
     }
 
     InitSingletonWithShmem(shmem);
     Unused << aToplevelProtocol->SendInitCrashReporter(
       shmem,
--- a/js/src/ds/MemoryProtectionExceptionHandler.cpp
+++ b/js/src/ds/MemoryProtectionExceptionHandler.cpp
@@ -28,17 +28,19 @@
 
 #include "threading/LockGuard.h"
 #include "threading/Thread.h"
 #include "vm/MutexIDs.h"
 #include "vm/Runtime.h"
 
 namespace js {
 
-static mozilla::Atomic<bool> sProtectedRegionsInit(false);
+// Memory protection occurs at non-deterministic points when recording/replaying.
+static mozilla::Atomic<bool, mozilla::SequentiallyConsistent,
+                       mozilla::recordreplay::Behavior::DontPreserve> sProtectedRegionsInit(false);
 
 /*
  * A class to store the addresses of the regions recognized as protected
  * by this exception handler. We use a splay tree to store these addresses.
  */
 class ProtectedRegionTree
 {
     struct Region
--- a/js/src/frontend/BytecodeCompiler.cpp
+++ b/js/src/frontend/BytecodeCompiler.cpp
@@ -206,17 +206,21 @@ BytecodeCompiler::createScriptSource(con
 
 bool
 BytecodeCompiler::canLazilyParse()
 {
     return options.canLazilyParse &&
            !cx->realm()->behaviors().disableLazyParsing() &&
            !cx->realm()->behaviors().discardSource() &&
            !options.sourceIsLazy &&
-           !cx->lcovEnabled();
+           !cx->lcovEnabled() &&
+           // Disabled during record/replay. The replay debugger requires
+           // scripts to be constructed in a consistent order, which might not
+           // happen with lazy parsing.
+           !mozilla::recordreplay::IsRecordingOrReplaying();
 }
 
 bool
 BytecodeCompiler::createParser(ParseGoal goal)
 {
     usedNames.emplace(cx);
     if (!usedNames->init())
         return false;
@@ -790,17 +794,17 @@ frontend::CompileLazyFunction(JSContext*
            .setNoScriptRval(false)
            .setSelfHostingMode(false);
 
     // Update statistics to find out if we are delazifying just after having
     // lazified. Note that we are interested in the delta between end of
     // syntax parsing and start of full parsing, so we do this now rather than
     // after parsing below.
     if (!lazy->scriptSource()->parseEnded().IsNull()) {
-        const mozilla::TimeDuration delta = mozilla::TimeStamp::Now() -
+        const mozilla::TimeDuration delta = ReallyNow() -
             lazy->scriptSource()->parseEnded();
 
         // Differentiate between web-facing and privileged code, to aid
         // with optimization. Due to the number of calls to this function,
         // we use `cx->runningWithTrustedPrincipals`, which is fast but
         // will classify addons alongside with web-facing code.
         const int HISTOGRAM = cx->runningWithTrustedPrincipals()
             ? JS_TELEMETRY_PRIVILEGED_PARSER_COMPILE_LAZY_AFTER_MS
--- a/js/src/frontend/NameCollections.h
+++ b/js/src/frontend/NameCollections.h
@@ -137,21 +137,33 @@ struct RecyclableAtomMapValueWrapper
         return &wrapped;
     }
 
     const Wrapped* operator->() const {
         return &wrapped;
     }
 };
 
+struct NameMapHasher : public DefaultHasher<JSAtom*>
+{
+    static inline HashNumber hash(const Lookup& l) {
+        // Name maps use the atom's precomputed hash code, which is based on
+        // the atom's contents rather than its pointer value. This is necessary
+        // to preserve iteration order while recording/replaying: iteration can
+        // affect generated script bytecode and the order in which e.g. lookup
+        // property hooks are performed on the associated global.
+        return l->hash();
+    }
+};
+
 template <typename MapValue>
 using RecyclableNameMap = InlineMap<JSAtom*,
                                     RecyclableAtomMapValueWrapper<MapValue>,
                                     24,
-                                    DefaultHasher<JSAtom*>,
+                                    NameMapHasher,
                                     SystemAllocPolicy>;
 
 using DeclaredNameMap = RecyclableNameMap<DeclaredNameInfo>;
 using CheckTDZMap = RecyclableNameMap<MaybeCheckTDZ>;
 using NameLocationMap = RecyclableNameMap<NameLocation>;
 using AtomIndexMap = RecyclableNameMap<uint32_t>;
 
 template <typename RepresentativeTable>
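
The hasher swap above matters because DefaultHasher<JSAtom*> hashes the pointer value, which differs from run to run, so hash-table iteration order would diverge between recording and replay. Hashing the atom's contents is stable across runs, and the whole change reduces to:

    // Content-derived hashing: identical atom contents yield identical hash
    // codes in every run, unlike pointer-derived hashing.
    static HashNumber
    StableAtomHash(JSAtom* aAtom)
    {
      return aAtom->hash();  // precomputed from the atom's characters
    }
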
--- a/js/src/gc/GC.cpp
+++ b/js/src/gc/GC.cpp
@@ -949,17 +949,17 @@ GCRuntime::GCRuntime(JSRuntime* rt) :
     atomsZone(nullptr),
     stats_(rt),
     marker(rt),
     usage(nullptr),
     nextCellUniqueId_(LargestTaggedNullCellPointer + 1), // Ensure disjoint from null tagged pointers.
     numArenasFreeCommitted(0),
     verifyPreData(nullptr),
     chunkAllocationSinceLastGC(false),
-    lastGCTime(mozilla::TimeStamp::Now()),
+    lastGCTime(ReallyNow()),
     mode(TuningDefaults::Mode),
     numActiveZoneIters(0),
     cleanUpEverything(false),
     grayBufferState(GCRuntime::GrayBufferState::Unused),
     grayBitsValid(false),
     majorGCTriggerReason(JS::gcreason::NO_REASON),
     fullGCForAtomsRequested_(false),
     minorGCNumber(0),
@@ -2139,17 +2139,19 @@ GCRuntime::shouldCompact()
     return !isIncremental
         || lastAnimationTime.IsNull()
         || lastAnimationTime + oneSecond < mozilla::TimeStamp::Now();
 }
 
 bool
 GCRuntime::isCompactingGCEnabled() const
 {
-    return compactingEnabled && rt->mainContextFromOwnThread()->compactingDisabledCount == 0;
+    return compactingEnabled
+        && rt->mainContextFromOwnThread()->compactingDisabledCount == 0
+        && !mozilla::recordreplay::IsRecordingOrReplaying();
 }
 
 AutoDisableCompactingGC::AutoDisableCompactingGC(JSContext* cx)
   : cx(cx)
 {
     ++cx->compactingDisabledCount;
     if (cx->runtime()->gc.isIncrementalGCInProgress() && cx->runtime()->gc.isCompactingGc())
         FinishGC(cx);
@@ -3240,17 +3242,17 @@ SliceBudget::SliceBudget()
 
 SliceBudget::SliceBudget(TimeBudget time)
   : timeBudget(time), workBudget(UnlimitedWorkBudget)
 {
     if (time.budget < 0) {
         makeUnlimited();
     } else {
         // Note: TimeBudget(0) is equivalent to WorkBudget(CounterReset).
-        deadline = mozilla::TimeStamp::Now() + mozilla::TimeDuration::FromMilliseconds(time.budget);
+        deadline = ReallyNow() + mozilla::TimeDuration::FromMilliseconds(time.budget);
         counter = CounterReset;
     }
 }
 
 SliceBudget::SliceBudget(WorkBudget work)
   : timeBudget(UnlimitedTimeBudget), workBudget(work)
 {
     if (work.budget < 0) {
@@ -3273,17 +3275,17 @@ SliceBudget::describe(char* buffer, size
 }
 
 bool
 SliceBudget::checkOverBudget()
 {
     if (deadline.IsNull())
         return true;
 
-    bool over = mozilla::TimeStamp::Now() >= deadline;
+    bool over = ReallyNow() >= deadline;
     if (!over)
         counter = CounterReset;
     return over;
 }
 
 void
 GCRuntime::requestMajorGC(JS::gcreason::Reason reason)
 {
@@ -4231,17 +4233,17 @@ GCRuntime::prepareZonesForCollection(JS:
         for (auto i : AllAllocKinds())
             MOZ_ASSERT(!zone->arenas.arenaListsToSweep(i));
     }
 #endif
 
     *isFullOut = true;
     bool any = false;
 
-    auto currentTime = mozilla::TimeStamp::Now();
+    auto currentTime = ReallyNow();
 
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         /* Set up which zones will be collected. */
         if (ShouldCollectZone(zone, reason)) {
             MOZ_ASSERT(zone->canCollect());
             any = true;
             zone->changeGCState(Zone::NoGC, Zone::Mark);
         } else {
@@ -6761,17 +6763,17 @@ GCRuntime::endCompactPhase()
 void
 GCRuntime::finishCollection()
 {
     assertBackgroundSweepingFinished();
     MOZ_ASSERT(marker.isDrained());
     marker.stop();
     clearBufferedGrayRoots();
 
-    auto currentTime = mozilla::TimeStamp::Now();
+    auto currentTime = ReallyNow();
     schedulingState.updateHighFrequencyMode(lastGCTime, currentTime, tunables);
 
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         if (zone->isCollecting()) {
             zone->changeGCState(Zone::Finished, Zone::NoGC);
             zone->notifyObservingDebuggers();
         }
 
@@ -7746,16 +7748,20 @@ GCRuntime::defaultBudget(JS::gcreason::R
     }
 
     return SliceBudget(TimeBudget(millis));
 }
 
 void
 GCRuntime::gc(JSGCInvocationKind gckind, JS::gcreason::Reason reason)
 {
+    // Garbage collection can occur at different points between recording and
+    // replay, so disallow recorded events from occurring during the GC.
+    mozilla::recordreplay::AutoDisallowThreadEvents d;
+
     invocationKind = gckind;
     collect(true, SliceBudget::unlimited(), reason);
 }
 
 void
 GCRuntime::startGC(JSGCInvocationKind gckind, JS::gcreason::Reason reason, int64_t millis)
 {
     MOZ_ASSERT(!isIncrementalGCInProgress());
@@ -8749,17 +8755,18 @@ JS_PUBLIC_API(void)
 JS::DisableIncrementalGC(JSContext* cx)
 {
     cx->runtime()->gc.disallowIncrementalGC();
 }
 
 JS_PUBLIC_API(bool)
 JS::IsIncrementalGCEnabled(JSContext* cx)
 {
-    return cx->runtime()->gc.isIncrementalGCEnabled();
+    return cx->runtime()->gc.isIncrementalGCEnabled()
+        && !mozilla::recordreplay::IsRecordingOrReplaying();
 }
 
 JS_PUBLIC_API(bool)
 JS::IsIncrementalGCInProgress(JSContext* cx)
 {
     return cx->runtime()->gc.isIncrementalGCInProgress() && !cx->runtime()->gc.isVerifyPreBarriersEnabled();
 }
 
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -3494,16 +3494,20 @@ js::IsUnmarkGrayTracer(JSTracer* trc)
 }
 #endif
 
 static bool
 UnmarkGrayGCThing(JSRuntime* rt, JS::GCCellPtr thing)
 {
     MOZ_ASSERT(thing);
 
+    // Gray cell unmarking can occur at different points between recording and
+    // replay, so disallow recorded events from occurring in the tracer.
+    mozilla::recordreplay::AutoDisallowThreadEvents d;
+
     UnmarkGrayTracer unmarker(rt);
     gcstats::AutoPhase innerPhase(rt->gc.stats(), gcstats::PhaseKind::UNMARK_GRAY);
     unmarker.unmark(thing);
     return unmarker.unmarkedAny;
 }
 
 JS_FRIEND_API(bool)
 JS::UnmarkGrayGCThingRecursively(JS::GCCellPtr thing)
--- a/js/src/gc/Memory.cpp
+++ b/js/src/gc/Memory.cpp
@@ -281,16 +281,18 @@ MarkPagesInUse(void* p, size_t size)
         return;
 
     MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
 }
 
 size_t
 GetPageFaultCount()
 {
+    if (mozilla::recordreplay::IsRecordingOrReplaying())
+        return 0;
     PROCESS_MEMORY_COUNTERS pmc;
     if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)))
         return 0;
     return pmc.PageFaultCount;
 }
 
 void*
 AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
@@ -808,16 +810,18 @@ MarkPagesInUse(void* p, size_t size)
         return;
 
     MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
 }
 
 size_t
 GetPageFaultCount()
 {
+    if (mozilla::recordreplay::IsRecordingOrReplaying())
+        return 0;
     struct rusage usage;
     int err = getrusage(RUSAGE_SELF, &usage);
     if (err)
         return 0;
     return usage.ru_majflt;
 }
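
Both the Windows and POSIX implementations of GetPageFaultCount() now short-circuit the same way: OS fault counters are inherently nondeterministic and would diverge between recording and replay. Condensed (QueryPlatformFaultCount() is a hypothetical stand-in for the two platform queries):

    size_t GetPageFaultCount()
    {
        // Report a stable value under record/replay; otherwise ask the OS
        // (GetProcessMemoryInfo on Windows, getrusage elsewhere).
        if (mozilla::recordreplay::IsRecordingOrReplaying())
            return 0;
        return QueryPlatformFaultCount();
    }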
 
 void*
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -370,23 +370,23 @@ js::Nursery::allocate(size_t size)
     if (currentEnd() < position() + size) {
         unsigned chunkno = currentChunk_ + 1;
         MOZ_ASSERT(chunkno <= chunkCountLimit());
         MOZ_ASSERT(chunkno <= maxChunkCount());
         MOZ_ASSERT(chunkno <= allocatedChunkCount());
         if (chunkno == maxChunkCount())
             return nullptr;
         if (MOZ_UNLIKELY(chunkno == allocatedChunkCount())) {
-            mozilla::TimeStamp start = TimeStamp::Now();
+            mozilla::TimeStamp start = ReallyNow();
             {
                 AutoLockGCBgAlloc lock(runtime());
                 if (!allocateNextChunk(chunkno, lock))
                     return nullptr;
             }
-            timeInChunkAlloc_ += TimeStamp::Now() - start;
+            timeInChunkAlloc_ += ReallyNow() - start;
             MOZ_ASSERT(chunkno < allocatedChunkCount());
         }
         setCurrentChunk(chunkno);
     }
 
     void* thing = (void*)position();
     position_ = position() + size;
 
@@ -664,23 +664,23 @@ js::Nursery::maybeClearProfileDurations(
 {
     for (auto& duration : profileDurations_)
         duration = mozilla::TimeDuration();
 }
 
 inline void
 js::Nursery::startProfile(ProfileKey key)
 {
-    startTimes_[key] = TimeStamp::Now();
+    startTimes_[key] = ReallyNow();
 }
 
 inline void
 js::Nursery::endProfile(ProfileKey key)
 {
-    profileDurations_[key] = TimeStamp::Now() - startTimes_[key];
+    profileDurations_[key] = ReallyNow() - startTimes_[key];
     totalDurations_[key] += profileDurations_[key];
 }
 
 bool
 js::Nursery::needIdleTimeCollection() const {
     uint32_t threshold =
         runtime()->gc.tunables.nurseryFreeThresholdForIdleCollection();
     return minorGCRequested() || freeSpace() < threshold;
@@ -698,16 +698,18 @@ IsFullStoreBufferReason(JS::gcreason::Re
 }
 
 void
 js::Nursery::collect(JS::gcreason::Reason reason)
 {
     JSRuntime* rt = runtime();
     MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);
 
+    mozilla::recordreplay::AutoDisallowThreadEvents disallow;
+
     if (!isEnabled() || isEmpty()) {
         // Our barriers are not always exact, and there may be entries in the
         // storebuffer even when the nursery is disabled or empty. It's not safe
         // to keep these entries as they may refer to tenured cells which may be
         // freed after this point.
         rt->gc.storeBuffer().clear();
     }
 
--- a/js/src/gc/Statistics.cpp
+++ b/js/src/gc/Statistics.cpp
@@ -1046,17 +1046,17 @@ Statistics::beginSlice(const ZoneGCStats
     this->zoneStats = zoneStats;
 
     bool first = !runtime->gc.isIncrementalGCInProgress();
     if (first)
         beginGC(gckind);
 
     if (!slices_.emplaceBack(budget,
                              reason,
-                             TimeStamp::Now(),
+                             ReallyNow(),
                              GetPageFaultCount(),
                              runtime->gc.state()))
     {
         // If we are OOM, set a flag to indicate we have missing slice data.
         aborted = true;
         return;
     }
 
@@ -1078,17 +1078,17 @@ Statistics::beginSlice(const ZoneGCStats
 void
 Statistics::endSlice()
 {
     MOZ_ASSERT(phaseStack.empty() ||
                (phaseStack.length() == 1 && phaseStack[0] == Phase::MUTATOR));
 
     if (!aborted) {
         auto& slice = slices_.back();
-        slice.end = TimeStamp::Now();
+        slice.end = ReallyNow();
         slice.endFaults = GetPageFaultCount();
         slice.finalState = runtime->gc.state();
 
         writeLogMessage("end slice");
         TimeDuration sliceTime = slice.end - slice.start;
         runtime->addTelemetry(JS_TELEMETRY_GC_SLICE_MS, t(sliceTime));
         runtime->addTelemetry(JS_TELEMETRY_GC_RESET, slice.wasReset());
         if (slice.wasReset())
@@ -1241,17 +1241,17 @@ Statistics::resumePhases()
     suspendedPhases.popBack();
 
     while (!suspendedPhases.empty() &&
            suspendedPhases.back() != Phase::EXPLICIT_SUSPENSION &&
            suspendedPhases.back() != Phase::IMPLICIT_SUSPENSION)
     {
         Phase resumePhase = suspendedPhases.popCopy();
         if (resumePhase == Phase::MUTATOR)
-            timedGCTime += TimeStamp::Now() - timedGCStart;
+            timedGCTime += ReallyNow() - timedGCStart;
         recordPhaseBegin(resumePhase);
     }
 }
 
 void
 Statistics::beginPhase(PhaseKind phaseKind)
 {
     // No longer timing these phases. We should never see these.
@@ -1272,17 +1272,17 @@ Statistics::recordPhaseBegin(Phase phase
     // Guard against any other re-entry.
     MOZ_ASSERT(!phaseStartTimes[phase]);
 
     MOZ_ASSERT(phaseStack.length() < MAX_PHASE_NESTING);
 
     Phase current = currentPhase();
     MOZ_ASSERT(phases[phase].parent == current);
 
-    TimeStamp now = TimeStamp::Now();
+    TimeStamp now = ReallyNow();
 
     if (current != Phase::NONE) {
         MOZ_ASSERT(now >= phaseStartTimes[currentPhase()], "Inconsistent time data; see bug 1400153");
         if (now < phaseStartTimes[currentPhase()]) {
             now = phaseStartTimes[currentPhase()];
             aborted = true;
         }
     }
@@ -1294,17 +1294,17 @@ Statistics::recordPhaseBegin(Phase phase
 
 void
 Statistics::recordPhaseEnd(Phase phase)
 {
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime));
 
     MOZ_ASSERT(phaseStartTimes[phase]);
 
-    TimeStamp now = TimeStamp::Now();
+    TimeStamp now = ReallyNow();
 
     // Make sure this phase ends after it starts.
     MOZ_ASSERT(now >= phaseStartTimes[phase], "Inconsistent time data; see bug 1400153");
 
 #ifdef DEBUG
     // Make sure this phase ends after all of its children. Note that some
     // children might not have run in this instance, in which case they will
     // have run in a previous instance of this parent or not at all.
@@ -1378,26 +1378,26 @@ Statistics::recordParallelPhase(PhaseKin
         parallelTimes[phase] += duration;
         phase = phases[phase].parent;
     }
 }
 
 TimeStamp
 Statistics::beginSCC()
 {
-    return TimeStamp::Now();
+    return ReallyNow();
 }
 
 void
 Statistics::endSCC(unsigned scc, TimeStamp start)
 {
     if (scc >= sccTimes.length() && !sccTimes.resize(scc + 1))
         return;
 
-    sccTimes[scc] += TimeStamp::Now() - start;
+    sccTimes[scc] += ReallyNow() - start;
 }
 
 /*
  * MMU (minimum mutator utilization) is a measure of how much garbage collection
  * is affecting the responsiveness of the system. MMU measurements are given
  * with respect to a certain window size. If we report MMU(50ms) = 80%, then
  * that means that, for any 50ms window of time, at least 80% of the window is
  * devoted to the mutator. In other words, the GC is running for at most 20% of
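
Restated as a formula, for a window of length w:

    // MMU(w) = min over all windows [t, t + w) of
    //          (w - GC time inside [t, t + w)) / w
    //
    // So MMU(50ms) = 80% means no 50ms window contains more than 10ms of GC.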
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -5193,36 +5193,28 @@ CodeGenerator::generateArgumentsChecks(b
     Label miss;
     for (uint32_t i = info.startArgSlot(); i < info.endArgSlot(); i++) {
         // All initial parameters are guaranteed to be MParameters.
         MParameter* param = rp->getOperand(i)->toParameter();
         const TypeSet* types = param->resultTypeSet();
         if (!types || types->unknown())
             continue;
 
-#ifndef JS_CODEGEN_ARM64
         // Calculate the offset on the stack of the argument.
         // (i - info.startArgSlot())    - Compute index of arg within arg vector.
         // ... * sizeof(Value)          - Scale by value size.
         // ArgToStackOffset(...)        - Compute displacement within arg vector.
         int32_t offset = ArgToStackOffset((i - info.startArgSlot()) * sizeof(Value));
         Address argAddr(masm.getStackPointer(), offset);
 
         // guardObjectType will zero the stack pointer register on speculative
         // paths.
-        Register spectreRegToZero = masm.getStackPointer();
+        Register spectreRegToZero = AsRegister(masm.getStackPointer());
         masm.guardTypeSet(argAddr, types, BarrierKind::TypeSet, temp1, temp2,
                           spectreRegToZero, &miss);
-#else
-        // On ARM64, the stack pointer situation is more complicated. When we
-        // enable Ion, we should figure out how to mitigate Spectre there.
-        mozilla::Unused << temp1;
-        mozilla::Unused << temp2;
-        MOZ_CRASH("NYI");
-#endif
     }
 
     if (miss.used()) {
         if (assert) {
 #ifdef DEBUG
             Label success;
             masm.jump(&success);
             masm.bind(&miss);
--- a/js/src/jit/Registers.h
+++ b/js/src/jit/Registers.h
@@ -136,16 +136,22 @@ IsHiddenSP(RegisterOrSP r)
 
 static inline Register
 AsRegister(RegisterOrSP r)
 {
     MOZ_ASSERT(!IsHiddenSP(r));
     return Register::FromCode(r.code);
 }
 
+static inline Register
+AsRegister(Register r)
+{
+    return r;
+}
+
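
This new identity overload lets generic call sites compile regardless of what getStackPointer() returns: Register on most platforms, RegisterOrSP on ARM64. The CodeGenerator.cpp hunk above uses it exactly this way:

    // From generateArgumentsChecks(): compiles on every platform now that
    // AsRegister() accepts both Register and RegisterOrSP.
    Register spectreRegToZero = AsRegister(masm.getStackPointer());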
 inline bool
 operator == (Register r, RegisterOrSP e) {
     return r.code() == e.code;
 }
 
 inline bool
 operator != (Register r, RegisterOrSP e) {
     return !(r == e);
--- a/js/src/jit/SharedIC.cpp
+++ b/js/src/jit/SharedIC.cpp
@@ -1308,17 +1308,17 @@ DoCompareFallback(JSContext* cx, void* p
 
     // Check to see if a new stub should be generated.
     if (stub->numOptimizedStubs() >= ICCompare_Fallback::MAX_OPTIMIZED_STUBS) {
         // TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
         // But for now we just bail.
         return true;
     }
 
-    if (engine ==  ICStubEngine::Baseline) {
+    if (engine == ICStubEngine::Baseline && !JitOptions.disableCacheIR) {
         RootedScript script(cx, info.outerScript(cx));
         CompareIRGenerator gen(cx, script, pc, stub->state().mode(), op, lhs, rhs);
         bool attached = false;
         if (gen.tryAttachStub()) {
             ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
                                                         BaselineCacheIRStubKind::Regular,
                                                         engine, script, stub, &attached);
             if (newStub)
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -89,18 +89,18 @@ static constexpr Register IntArgReg7 { R
 static constexpr Register HeapReg { Registers::x21 };
 
 // Define unsized Registers.
 #define DEFINE_UNSIZED_REGISTERS(N)  \
 static constexpr Register r##N { Registers::x##N };
 REGISTER_CODE_LIST(DEFINE_UNSIZED_REGISTERS)
 #undef DEFINE_UNSIZED_REGISTERS
 static constexpr Register ip0 { Registers::x16 };
-static constexpr Register ip1 { Registers::x16 };
-static constexpr Register fp  { Registers::x30 };
+static constexpr Register ip1 { Registers::x17 };
+static constexpr Register fp  { Registers::x29 };
 static constexpr Register lr  { Registers::x30 };
 static constexpr Register rzr { Registers::xzr };
 
 // Import VIXL registers into the js::jit namespace.
 #define IMPORT_VIXL_REGISTERS(N)  \
 static constexpr ARMRegister w##N = vixl::w##N;  \
 static constexpr ARMRegister x##N = vixl::x##N;
 REGISTER_CODE_LIST(IMPORT_VIXL_REGISTERS)
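
The corrected constants match the AArch64 ABI, where x16/x17 are the intra-procedure-call scratch registers IP0/IP1, x29 is the frame pointer, and x30 is the link register; previously ip1 aliased ip0 (x16) and fp aliased lr (x30). In brief:

    // AArch64 register roles assumed by the corrected definitions:
    //   x16 -> ip0, x17 -> ip1   (intra-procedure-call scratch)
    //   x29 -> fp                (frame pointer)
    //   x30 -> lr                (link register)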
--- a/js/src/jit/arm64/CodeGenerator-arm64.cpp
+++ b/js/src/jit/arm64/CodeGenerator-arm64.cpp
@@ -34,29 +34,48 @@ using JS::GenericNaN;
 CodeGeneratorARM64::CodeGeneratorARM64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
   : CodeGeneratorShared(gen, graph, masm)
 {
 }
 
 bool
 CodeGeneratorARM64::generateOutOfLineCode()
 {
-    MOZ_CRASH("generateOutOfLineCode");
+    if (!CodeGeneratorShared::generateOutOfLineCode())
+        return false;
+
+    if (deoptLabel_.used()) {
+        // All non-table-based bailouts will go here.
+        masm.bind(&deoptLabel_);
+
+        // Store the frame size, so the handler can recover the IonScript.
+        masm.Mov(x30, frameSize());
+
+        TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
+        masm.jump(handler);
+    }
+
+    return !masm.oom();
 }
 
 void
 CodeGeneratorARM64::emitBranch(Assembler::Condition cond, MBasicBlock* mirTrue, MBasicBlock* mirFalse)
 {
-    MOZ_CRASH("emitBranch");
+    if (isNextBlock(mirFalse->lir())) {
+        jumpToBlock(mirTrue, cond);
+    } else {
+        jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
+        jumpToBlock(mirTrue);
+    }
 }
 
 void
 OutOfLineBailout::accept(CodeGeneratorARM64* codegen)
 {
-    MOZ_CRASH("accept");
+    codegen->visitOutOfLineBailout(this);
 }
 
 void
 CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test)
 {
     MOZ_CRASH("visitTestIAndBranch");
 }
 
@@ -64,41 +83,83 @@ void
 CodeGenerator::visitCompare(LCompare* comp)
 {
     MOZ_CRASH("visitCompare");
 }
 
 void
 CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp)
 {
-    MOZ_CRASH("visitCompareAndBranch");
+    const MCompare* mir = comp->cmpMir();
+    const MCompare::CompareType type = mir->compareType();
+    const LAllocation* left = comp->left();
+    const LAllocation* right = comp->right();
+
+    if (type == MCompare::Compare_Object || type == MCompare::Compare_Symbol) {
+        masm.cmpPtr(ToRegister(left), ToRegister(right));
+    } else if (right->isConstant()) {
+        masm.cmp32(ToRegister(left), Imm32(ToInt32(right)));
+    } else {
+        masm.cmp32(ToRegister(left), ToRegister(right));
+    }
+
+    Assembler::Condition cond = JSOpToCondition(type, comp->jsop());
+    emitBranch(cond, comp->ifTrue(), comp->ifFalse());
 }
 
 void
 CodeGeneratorARM64::bailoutIf(Assembler::Condition condition, LSnapshot* snapshot)
 {
-    MOZ_CRASH("bailoutIf");
+    encode(snapshot);
+
+    // Though the assembler doesn't track all frame pushes, at least make sure
+    // the known value makes sense.
+    MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None() && deoptTable_,
+                  frameClass_.frameSize() == masm.framePushed());
+
+    // ARM64 doesn't use a bailout table.
+    InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
+    OutOfLineBailout* ool = new(alloc()) OutOfLineBailout(snapshot);
+    addOutOfLineCode(ool, new(alloc()) BytecodeSite(tree, tree->script()->code()));
+
+    masm.B(ool->entry(), condition);
 }
 
 void
 CodeGeneratorARM64::bailoutFrom(Label* label, LSnapshot* snapshot)
 {
-    MOZ_CRASH("bailoutFrom");
+    MOZ_ASSERT(label->used());
+    MOZ_ASSERT(!label->bound());
+
+    encode(snapshot);
+
+    // Though the assembler doesn't track all frame pushes, at least make sure
+    // the known value makes sense.
+    MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None() && deoptTable_,
+                  frameClass_.frameSize() == masm.framePushed());
+
+    // ARM64 doesn't use a bailout table.
+    InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
+    OutOfLineBailout* ool = new(alloc()) OutOfLineBailout(snapshot);
+    addOutOfLineCode(ool, new(alloc()) BytecodeSite(tree, tree->script()->code()));
+
+    masm.retarget(label, ool->entry());
 }
 
 void
 CodeGeneratorARM64::bailout(LSnapshot* snapshot)
 {
     MOZ_CRASH("bailout");
 }
 
 void
 CodeGeneratorARM64::visitOutOfLineBailout(OutOfLineBailout* ool)
 {
-    MOZ_CRASH("visitOutOfLineBailout");
+    masm.push(Imm32(ool->snapshot()->snapshotOffset()));
+    masm.B(&deoptLabel_);
 }
 
 void
 CodeGenerator::visitMinMaxD(LMinMaxD* ins)
 {
     MOZ_CRASH("visitMinMaxD");
 }
 
@@ -146,35 +207,55 @@ ARMRegister
 toXRegister(const T* a)
 {
     return ARMRegister(ToRegister(a), 64);
 }
 
 js::jit::Operand
 toWOperand(const LAllocation* a)
 {
-    MOZ_CRASH("toWOperand");
+    if (a->isConstant())
+        return js::jit::Operand(ToInt32(a));
+    return js::jit::Operand(toWRegister(a));
 }
 
 vixl::CPURegister
 ToCPURegister(const LAllocation* a, Scalar::Type type)
 {
-    MOZ_CRASH("ToCPURegister");
+    if (a->isFloatReg() && type == Scalar::Float64)
+        return ARMFPRegister(ToFloatRegister(a), 64);
+    if (a->isFloatReg() && type == Scalar::Float32)
+        return ARMFPRegister(ToFloatRegister(a), 32);
+    if (a->isGeneralReg())
+        return ARMRegister(ToRegister(a), 32);
+    MOZ_CRASH("Unknown LAllocation");
 }
 
 vixl::CPURegister
 ToCPURegister(const LDefinition* d, Scalar::Type type)
 {
     return ToCPURegister(d->output(), type);
 }
 
 void
 CodeGenerator::visitAddI(LAddI* ins)
 {
-    MOZ_CRASH("visitAddI");
+    const LAllocation* lhs = ins->getOperand(0);
+    const LAllocation* rhs = ins->getOperand(1);
+    const LDefinition* dest = ins->getDef(0);
+
+    // Platforms with three-operand arithmetic ops don't need recovery.
+    MOZ_ASSERT(!ins->recoversInput());
+
+    if (ins->snapshot()) {
+        masm.Adds(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
+        bailoutIf(Assembler::Overflow, ins->snapshot());
+    } else {
+        masm.Add(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
+    }
 }
 
 void
 CodeGenerator::visitSubI(LSubI* ins)
 {
     MOZ_CRASH("visitSubI");
 }
 
@@ -423,23 +504,85 @@ void
 CodeGenerator::visitValue(LValue* value)
 {
     MOZ_CRASH("visitValue");
 }
 
 void
 CodeGenerator::visitBox(LBox* box)
 {
-    MOZ_CRASH("visitBox");
+    const LAllocation* in = box->getOperand(0);
+    ValueOperand result = ToOutValue(box);
+
+    masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
 }
 
 void
 CodeGenerator::visitUnbox(LUnbox* unbox)
 {
-    MOZ_CRASH("visitUnbox");
+    MUnbox* mir = unbox->mir();
+
+    if (mir->fallible()) {
+        const ValueOperand value = ToValue(unbox, LUnbox::Input);
+        Assembler::Condition cond;
+        switch (mir->type()) {
+          case MIRType::Int32:
+            cond = masm.testInt32(Assembler::NotEqual, value);
+            break;
+          case MIRType::Boolean:
+            cond = masm.testBoolean(Assembler::NotEqual, value);
+            break;
+          case MIRType::Object:
+            cond = masm.testObject(Assembler::NotEqual, value);
+            break;
+          case MIRType::String:
+            cond = masm.testString(Assembler::NotEqual, value);
+            break;
+          case MIRType::Symbol:
+            cond = masm.testSymbol(Assembler::NotEqual, value);
+            break;
+          default:
+            MOZ_CRASH("Given MIRType cannot be unboxed.");
+        }
+        bailoutIf(cond, unbox->snapshot());
+    } else {
+#ifdef DEBUG
+        JSValueTag tag = MIRTypeToTag(mir->type());
+        Label ok;
+
+        ValueOperand input = ToValue(unbox, LUnbox::Input);
+        ScratchTagScope scratch(masm, input);
+        masm.splitTagForTest(input, scratch);
+        masm.branchTest32(Assembler::Condition::Equal, scratch, Imm32(tag), &ok);
+        masm.assumeUnreachable("Infallible unbox type mismatch");
+        masm.bind(&ok);
+#endif
+    }
+
+    ValueOperand input = ToValue(unbox, LUnbox::Input);
+    Register result = ToRegister(unbox->output());
+    switch (mir->type()) {
+      case MIRType::Int32:
+        masm.unboxInt32(input, result);
+        break;
+      case MIRType::Boolean:
+        masm.unboxBoolean(input, result);
+        break;
+      case MIRType::Object:
+        masm.unboxObject(input, result);
+        break;
+      case MIRType::String:
+        masm.unboxString(input, result);
+        break;
+      case MIRType::Symbol:
+        masm.unboxSymbol(input, result);
+        break;
+      default:
+        MOZ_CRASH("Given MIRType cannot be unboxed.");
+    }
 }
 
 void
 CodeGenerator::visitDouble(LDouble* ins)
 {
     MOZ_CRASH("visitDouble");
 }
 
@@ -562,17 +705,32 @@ CodeGeneratorARM64::storeElementTyped(co
                                       const LAllocation* index)
 {
     MOZ_CRASH("CodeGeneratorARM64::storeElementTyped");
 }
 
 void
 CodeGeneratorARM64::generateInvalidateEpilogue()
 {
-    MOZ_CRASH("generateInvalidateEpilogue");
+    // Ensure that there is enough space in the buffer for the OsiPoint patching
+    // to occur. Otherwise, we could overwrite the invalidation epilogue.
+    for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize())
+        masm.nop();
+
+    masm.bind(&invalidate_);
+
+    // Push the Ion script onto the stack (when we determine what that pointer is).
+    invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
+
+    TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
+    masm.call(thunk);
+
+    // We should never reach this point in JIT code -- the invalidation thunk
+    // should pop the invalidated JS frame and return directly to its caller.
+    masm.assumeUnreachable("Should have returned directly to its caller instead of here.");
 }
 
 template <class U>
 Register
 getBase(U* mir)
 {
     switch (mir->base()) {
       case U::Heap: return HeapReg;
--- a/js/src/jit/arm64/Lowering-arm64.cpp
+++ b/js/src/jit/arm64/Lowering-arm64.cpp
@@ -38,44 +38,92 @@ LAllocation
 LIRGeneratorARM64::useByteOpRegisterOrNonDoubleConstant(MDefinition* mir)
 {
     MOZ_CRASH("useByteOpRegisterOrNonDoubleConstant");
 }
 
 void
 LIRGenerator::visitBox(MBox* box)
 {
-    MOZ_CRASH("visitBox");
+    MDefinition* opd = box->getOperand(0);
+
+    // If the operand is a constant, emit near its uses.
+    if (opd->isConstant() && box->canEmitAtUses()) {
+        emitAtUses(box);
+        return;
+    }
+
+    if (opd->isConstant()) {
+        define(new(alloc()) LValue(opd->toConstant()->toJSValue()), box, LDefinition(LDefinition::BOX));
+    } else {
+        LBox* ins = new(alloc()) LBox(useRegister(opd), opd->type());
+        define(ins, box, LDefinition(LDefinition::BOX));
+    }
 }
 
 void
 LIRGenerator::visitUnbox(MUnbox* unbox)
 {
-    MOZ_CRASH("visitUnbox");
+    MDefinition* box = unbox->getOperand(0);
+
+    if (box->type() == MIRType::ObjectOrNull) {
+        LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(box));
+        if (unbox->fallible())
+            assignSnapshot(lir, unbox->bailoutKind());
+        defineReuseInput(lir, unbox, 0);
+        return;
+    }
+
+    MOZ_ASSERT(box->type() == MIRType::Value);
+
+    LUnboxBase* lir;
+    if (IsFloatingPointType(unbox->type())) {
+        lir = new(alloc()) LUnboxFloatingPoint(useRegisterAtStart(box), unbox->type());
+    } else if (unbox->fallible()) {
+        // If the unbox is fallible, load the Value in a register first to
+        // avoid multiple loads.
+        lir = new(alloc()) LUnbox(useRegisterAtStart(box));
+    } else {
+        lir = new(alloc()) LUnbox(useAtStart(box));
+    }
+
+    if (unbox->fallible())
+        assignSnapshot(lir, unbox->bailoutKind());
+
+    define(lir, unbox);
 }
 
 void
 LIRGenerator::visitReturn(MReturn* ret)
 {
-    MOZ_CRASH("visitReturn");
+    MDefinition* opd = ret->getOperand(0);
+    MOZ_ASSERT(opd->type() == MIRType::Value);
+
+    LReturn* ins = new(alloc()) LReturn;
+    ins->setOperand(0, useFixed(opd, JSReturnReg));
+    add(ins);
 }
 
 // x = !y
 void
 LIRGeneratorARM64::lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir, MDefinition* input)
 {
-    MOZ_CRASH("lowerForALU");
+    ins->setOperand(0, ins->snapshot() ? useRegister(input) : useRegisterAtStart(input));
+    define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
 }
 
 // z = x+y
 void
 LIRGeneratorARM64::lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
                                MDefinition* lhs, MDefinition* rhs)
 {
-    MOZ_CRASH("lowerForALU");
+    ins->setOperand(0, ins->snapshot() ? useRegister(lhs) : useRegisterAtStart(lhs));
+    ins->setOperand(1, ins->snapshot() ? useRegisterOrConstant(rhs) :
+                                         useRegisterOrConstantAtStart(rhs));
+    define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
 }
 
 void
 LIRGeneratorARM64::lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir, MDefinition* input)
 {
     MOZ_CRASH("lowerForFPU");
 }
 
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -4195,17 +4195,19 @@ CanDoOffThread(JSContext* cx, const Read
                 return false;
             if (what == OffThread::Decode && length < HUGE_BC_LENGTH)
                 return false;
             if (what == OffThread::DecodeBinAST && length < HUGE_BINAST_LENGTH)
                 return false;
         }
     }
 
-    return cx->runtime()->canUseParallelParsing() && CanUseExtraThreads();
+    return cx->runtime()->canUseParallelParsing() &&
+           CanUseExtraThreads() &&
+           !mozilla::recordreplay::IsRecordingOrReplaying();
 }
 
 JS_PUBLIC_API(bool)
 JS::CanCompileOffThread(JSContext* cx, const ReadOnlyCompileOptions& options, size_t length)
 {
     return CanDoOffThread(cx, options, length, OffThread::Compile);
 }
 
--- a/js/src/jsutil.h
+++ b/js/src/jsutil.h
@@ -340,17 +340,18 @@ AlwaysPoison(void* ptr, uint8_t value, s
 #endif // !DEBUG
 
     SetMemCheckKind(ptr, num, kind);
 }
 
 static inline void
 Poison(void* ptr, uint8_t value, size_t num, MemCheckKind kind)
 {
-    static bool disablePoison = bool(getenv("JSGC_DISABLE_POISONING"));
+    static bool disablePoison = !mozilla::recordreplay::IsRecordingOrReplaying()
+                             && bool(getenv("JSGC_DISABLE_POISONING"));
     if (!disablePoison)
         AlwaysPoison(ptr, value, num, kind);
 }
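
The same guard appears on the INFERFLAGS and TERM checks in TypeInference.cpp below: environment-driven toggles are forced to their defaults under record/replay so the recording and replaying processes poison (and spew) identically. The shared pattern:

    // Only consult the environment when not recording or replaying.
    static bool disablePoison = !mozilla::recordreplay::IsRecordingOrReplaying()
                             && bool(getenv("JSGC_DISABLE_POISONING"));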
 
 } // namespace js
 
 /* Crash diagnostics by default in debug and on nightly channel. */
 #if defined(DEBUG) || defined(NIGHTLY_BUILD)
--- a/js/src/threading/posix/Thread.cpp
+++ b/js/src/threading/posix/Thread.cpp
@@ -192,17 +192,17 @@ js::ThisThread::SetName(const char* name
 #elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
   pthread_set_name_np(pthread_self(), name);
   rv = 0;
 #elif defined(__NetBSD__)
   rv = pthread_setname_np(pthread_self(), "%s", (void*)name);
 #else
   rv = pthread_setname_np(pthread_self(), name);
 #endif
-  MOZ_RELEASE_ASSERT(!rv);
+  MOZ_RELEASE_ASSERT(!rv || mozilla::recordreplay::IsRecordingOrReplaying());
 }
 
 void
 js::ThisThread::GetName(char* nameBuffer, size_t len)
 {
   MOZ_RELEASE_ASSERT(len >= 16);
 
   int rv = -1;
--- a/js/src/vm/HelperThreads.cpp
+++ b/js/src/vm/HelperThreads.cpp
@@ -1542,45 +1542,45 @@ js::GCParallelTask::join()
     AutoLockHelperThreadState helperLock;
     joinWithLockHeld(helperLock);
 }
 
 static inline
 TimeDuration
 TimeSince(TimeStamp prev)
 {
-    TimeStamp now = TimeStamp::Now();
+    TimeStamp now = ReallyNow();
     // Sadly this happens sometimes.
     MOZ_ASSERT(now >= prev);
     if (now < prev)
         now = prev;
     return now - prev;
 }
 
 void
 js::GCParallelTask::runFromMainThread(JSRuntime* rt)
 {
     assertNotStarted();
     MOZ_ASSERT(js::CurrentThreadCanAccessRuntime(rt));
-    TimeStamp timeStart = TimeStamp::Now();
+    TimeStamp timeStart = ReallyNow();
     runTask();
     duration_ = TimeSince(timeStart);
 }
 
 void
 js::GCParallelTask::runFromHelperThread(AutoLockHelperThreadState& lock)
 {
     MOZ_ASSERT(isDispatched(lock));
 
     AutoSetContextRuntime ascr(runtime());
     gc::AutoSetThreadIsPerformingGC performingGC;
 
     {
         AutoUnlockHelperThreadState parallelSection(lock);
-        TimeStamp timeStart = TimeStamp::Now();
+        TimeStamp timeStart = ReallyNow();
         runTask();
         duration_ = TimeSince(timeStart);
     }
 
     setFinished(lock);
     HelperThreadState().notifyAll(GlobalHelperThreadState::CONSUMER, lock);
 }
 
@@ -1854,17 +1854,17 @@ HelperThread::destroy()
         thread->join();
         thread.reset();
     }
 }
 
 void
 HelperThread::ensureRegisteredWithProfiler()
 {
-    if (registered)
+    if (registered || mozilla::recordreplay::IsRecordingOrReplaying())
         return;
 
     JS::RegisterThreadCallback callback = HelperThreadState().registerThread;
     if (callback) {
         callback("JS Helper", reinterpret_cast<void*>(GetNativeStackBase()));
         registered = true;
     }
 }
@@ -1883,16 +1883,21 @@ HelperThread::unregisterWithProfilerIfNe
 }
 
 /* static */
 void
 HelperThread::ThreadMain(void* arg)
 {
     ThisThread::SetName("JS Helper");
 
+    // Helper threads are allowed to run differently during recording and
+    // replay, as compiled scripts and GCs are allowed to vary. Because of
+    // this, no recorded events at all should occur while on helper threads.
+    mozilla::recordreplay::AutoDisallowThreadEvents d;
+
     static_cast<HelperThread*>(arg)->threadLoop();
     Mutex::ShutDown();
 }
 
 void
 HelperThread::handleWasmTier1Workload(AutoLockHelperThreadState& locked)
 {
     handleWasmWorkload(locked, wasm::CompileMode::Tier1);
@@ -2292,16 +2297,23 @@ GlobalHelperThreadState::trace(JSTracer*
     for (auto parseTask : parseWorklist_)
         parseTask->trace(trc);
     for (auto parseTask : parseFinishedList_)
         parseTask->trace(trc);
     for (auto parseTask : parseWaitingOnGC_)
         parseTask->trace(trc);
 }
 
+/* static */ void
+HelperThread::WakeupAll()
+{
+    AutoLockHelperThreadState lock;
+    HelperThreadState().notifyAll(GlobalHelperThreadState::PRODUCER, lock);
+}
+
 void
 JSContext::setHelperThread(HelperThread* thread)
 {
     if (helperThread_)
         nurserySuppressions_--;
 
     helperThread_ = thread;
 
@@ -2385,16 +2397,29 @@ HelperThread::threadLoop()
         // The selectors may depend on the HelperThreadState not changing
         // between task selection and task execution, in particular, on new
         // tasks not being added (because of the lifo structure of the work
         // lists). Unlocking the HelperThreadState between task selection and
         // execution is not well-defined.
 
         const TaskSpec* task = findHighestPriorityTask(lock);
         if (!task) {
+            if (mozilla::recordreplay::IsRecordingOrReplaying()) {
+                // Unlock the helper thread state lock before potentially
+                // blocking while the main thread waits for all threads to
+                // become idle. Otherwise we would have to check whether we
+                // need to block at every point where a helper thread
+                // acquires the helper thread state lock.
+                {
+                    AutoUnlockHelperThreadState unlock(lock);
+                    mozilla::recordreplay::MaybeWaitForCheckpointSave();
+                }
+                mozilla::recordreplay::NotifyUnrecordedWait(WakeupAll);
+            }
+
             HelperThreadState().wait(lock, GlobalHelperThreadState::PRODUCER);
             continue;
         }
 
         js::oom::SetThreadType(task->type);
         (this->*(task->handleWorkload))(lock);
         js::oom::SetThreadType(js::THREAD_TYPE_NONE);
     }
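
A condensed view of the idle-wait protocol the threadLoop() hunk adds, using the WakeupAll() helper defined earlier in this file's diff (illustrative ordering only):

    // 1. With the helper thread state lock dropped, let the record/replay
    //    system park this thread while the main thread saves a checkpoint.
    mozilla::recordreplay::MaybeWaitForCheckpointSave();
    // 2. Tell the record/replay system how to wake us from the unrecorded
    //    wait below: WakeupAll() notifies the PRODUCER condition variable
    //    that threadLoop() blocks on.
    mozilla::recordreplay::NotifyUnrecordedWait(HelperThread::WakeupAll);
    // 3. Block until new work arrives.
    HelperThreadState().wait(lock, GlobalHelperThreadState::PRODUCER);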
--- a/js/src/vm/HelperThreads.h
+++ b/js/src/vm/HelperThreads.h
@@ -409,16 +409,18 @@ struct HelperThread
         return maybeCurrentTaskAs<GCParallelTask*>();
     }
 
     void destroy();
 
     static void ThreadMain(void* arg);
     void threadLoop();
 
+    static void WakeupAll();
+
     void ensureRegisteredWithProfiler();
     void unregisterWithProfilerIfNeeded();
 
   private:
     struct TaskSpec
     {
         using Selector = bool(GlobalHelperThreadState::*)(const AutoLockHelperThreadState&);
         using Handler = void(HelperThread::*)(AutoLockHelperThreadState&);
--- a/js/src/vm/JSContext.cpp
+++ b/js/src/vm/JSContext.cpp
@@ -295,16 +295,17 @@ js::ReportOutOfMemory(JSContext* cx)
 #ifdef JS_MORE_DETERMINISTIC
     /*
      * OOMs are non-deterministic, especially across different execution modes
      * (e.g. interpreter vs JIT). In more-deterministic builds, print to stderr
      * so that the fuzzers can detect this.
      */
     fprintf(stderr, "ReportOutOfMemory called\n");
 #endif
+    mozilla::recordreplay::InvalidateRecording("OutOfMemory exception thrown");
 
     if (cx->helperThread())
         return cx->addPendingOutOfMemory();
 
     cx->runtime()->hadOutOfMemory = true;
     AutoSuppressGC suppressGC(cx);
 
     /* Report the oom. */
@@ -331,16 +332,17 @@ js::ReportOverRecursed(JSContext* maybec
      * implementations (e.g. JIT vs. interpreter will differ in
      * their maximum stack depth).
      * However, we can detect externally when we hit the maximum
      * stack depth which is useful for external testing programs
      * like fuzzers.
      */
     fprintf(stderr, "ReportOverRecursed called\n");
 #endif
+    mozilla::recordreplay::InvalidateRecording("OverRecursed exception thrown");
     if (maybecx) {
         if (!maybecx->helperThread()) {
             JS_ReportErrorNumberASCII(maybecx, GetErrorMessage, nullptr, errorNumber);
             maybecx->overRecursed_ = true;
         } else {
             maybecx->addPendingOverRecursed();
         }
     }
--- a/js/src/vm/JSScript.h
+++ b/js/src/vm/JSScript.h
@@ -24,16 +24,17 @@
 #include "js/UbiNode.h"
 #include "js/UniquePtr.h"
 #include "vm/BytecodeUtil.h"
 #include "vm/JSAtom.h"
 #include "vm/NativeObject.h"
 #include "vm/Scope.h"
 #include "vm/Shape.h"
 #include "vm/SharedImmutableStringsCache.h"
+#include "vm/Time.h"
 
 namespace JS {
 struct ScriptSourceInfo;
 } // namespace JS
 
 namespace js {
 
 namespace jit {
@@ -677,17 +678,17 @@ class ScriptSource
     bool xdrFinalizeEncoder(JS::TranscodeBuffer& buffer);
 
     const mozilla::TimeStamp parseEnded() const {
         return parseEnded_;
     }
     // Inform `this` source that it has been fully parsed.
     void recordParseEnded() {
         MOZ_ASSERT(parseEnded_.IsNull());
-        parseEnded_ = mozilla::TimeStamp::Now();
+        parseEnded_ = ReallyNow();
     }
 };
 
 class ScriptSourceHolder
 {
     ScriptSource* ss;
   public:
     ScriptSourceHolder()
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -408,16 +408,23 @@ JSRuntime::addSizeOfIncludingThis(mozill
 }
 
 static bool
 HandleInterrupt(JSContext* cx, bool invokeCallback)
 {
     MOZ_ASSERT(cx->requestDepth >= 1);
     MOZ_ASSERT(!cx->zone()->isAtomsZone());
 
+    // Interrupts can occur at different points between recording and replay,
+    // so no recorded behaviors should occur while handling an interrupt.
+    // Additionally, returning false here will change subsequent behavior, so
+    // such an event cannot occur during recording or replay without
+    // invalidating the recording.
+    mozilla::recordreplay::AutoDisallowThreadEvents d;
+
     cx->runtime()->gc.gcIfRequested();
 
     // A worker thread may have requested an interrupt after finishing an Ion
     // compilation.
     jit::AttachFinishedCompilations(cx);
 
     // Don't call the interrupt callback if we only interrupted for GC or Ion.
     if (!invokeCallback)
@@ -442,25 +449,28 @@ HandleInterrupt(JSContext* cx, bool invo
             ScriptFrameIter iter(cx);
             if (!iter.done() &&
                 cx->compartment() == iter.compartment() &&
                 iter.script()->stepModeEnabled())
             {
                 RootedValue rval(cx);
                 switch (Debugger::onSingleStep(cx, &rval)) {
                   case ResumeMode::Terminate:
+                    mozilla::recordreplay::InvalidateRecording("Debugger single-step produced an error");
                     return false;
                   case ResumeMode::Continue:
                     return true;
                   case ResumeMode::Return:
                     // See note in Debugger::propagateForcedReturn.
                     Debugger::propagateForcedReturn(cx, iter.abstractFramePtr(), rval);
+                    mozilla::recordreplay::InvalidateRecording("Debugger single-step forced return");
                     return false;
                   case ResumeMode::Throw:
                     cx->setPendingException(rval);
+                    mozilla::recordreplay::InvalidateRecording("Debugger single-step threw an exception");
                     return false;
                   default:;
                 }
             }
         }
 
         return true;
     }
@@ -474,16 +484,17 @@ HandleInterrupt(JSContext* cx, bool invo
     AutoStableStringChars stableChars(cx);
     if (flat && stableChars.initTwoByte(cx, flat))
         chars = stableChars.twoByteRange().begin().get();
     else
         chars = u"(stack not available)";
     JS_ReportErrorFlagsAndNumberUC(cx, JSREPORT_WARNING, GetErrorMessage, nullptr,
                                    JSMSG_TERMINATED, chars);
 
+    mozilla::recordreplay::InvalidateRecording("Interrupt callback forced return");
     return false;
 }
 
 void
 JSContext::requestInterrupt(InterruptReason reason)
 {
     interruptBits_ |= uint32_t(reason);
     jitStackLimit = UINTPTR_MAX;
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -472,17 +472,18 @@ struct JSRuntime : public js::MallocProv
     bool activeThreadHasScriptDataAccess;
 #endif
 
     // Number of zones which may be operated on by helper threads.
     mozilla::Atomic<size_t, mozilla::SequentiallyConsistent,
                     mozilla::recordreplay::Behavior::DontPreserve> numActiveHelperThreadZones;
 
     // Any activity affecting the heap.
-    mozilla::Atomic<JS::HeapState> heapState_;
+    mozilla::Atomic<JS::HeapState, mozilla::SequentiallyConsistent,
+                    mozilla::recordreplay::Behavior::DontPreserve> heapState_;
 
     friend class js::AutoLockScriptData;
 
   public:
     void setUsedByHelperThread(JS::Zone* zone);
     void clearUsedByHelperThread(JS::Zone* zone);
 
     bool hasHelperThreadZones() const {
--- a/js/src/vm/StructuredClone.cpp
+++ b/js/src/vm/StructuredClone.cpp
@@ -477,17 +477,17 @@ struct JSStructuredCloneWriter {
                                      JS::CloneDataPolicy cloneDataPolicy,
                                      const JSStructuredCloneCallbacks* cb,
                                      void* cbClosure,
                                      const Value& tVal)
         : out(cx, scope), objs(out.context()),
           counts(out.context()), entries(out.context()),
           memory(out.context()),
           transferable(out.context(), tVal),
-          transferableObjects(out.context(), GCHashSet<JSObject*>(cx)),
+          transferableObjects(out.context(), TransferableObjectsSet(cx)),
           cloneDataPolicy(cloneDataPolicy)
     {
         out.setCallbacks(cb, cbClosure, OwnTransferablePolicy::NoTransferables);
     }
 
     ~JSStructuredCloneWriter();
 
     bool init() {
@@ -561,19 +561,33 @@ struct JSStructuredCloneWriter {
     // algorithm.  memory is a superset of objs; items are never removed from
     // Memory until a serialization operation is finished
     using CloneMemory = GCHashMap<JSObject*,
                                   uint32_t,
                                   MovableCellHasher<JSObject*>,
                                   SystemAllocPolicy>;
     Rooted<CloneMemory> memory;
 
+    struct TransferableObjectsHasher : public DefaultHasher<JSObject*>
+    {
+        static inline HashNumber hash(const Lookup& l) {
+            // Iteration order of the transferable objects table must be
+            // preserved during recording/replaying, as the callbacks used
+            // during transfer may interact with the recording. Just use the
+            // same hash number for all elements to ensure this.
+            if (mozilla::recordreplay::IsRecordingOrReplaying())
+                return 0;
+            return DefaultHasher<JSObject*>::hash(l);
+        }
+    };
+
     // Set of transferable objects
     RootedValue transferable;
-    Rooted<GCHashSet<JSObject*>> transferableObjects;
+    typedef GCHashSet<JSObject*, TransferableObjectsHasher> TransferableObjectsSet;
+    Rooted<TransferableObjectsSet> transferableObjects;
 
     const JS::CloneDataPolicy cloneDataPolicy;
 
     friend bool JS_WriteString(JSStructuredCloneWriter* w, HandleString str);
     friend bool JS_WriteTypedArray(JSStructuredCloneWriter* w, HandleValue v);
     friend bool JS_ObjectNotWritten(JSStructuredCloneWriter* w, HandleObject obj);
 };
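
Returning a constant hash collapses the table into a single collision chain, so lookups become O(n); the implicit assumption is that transferable sets are small enough for deterministic iteration order to be worth that cost. The trade-off in one expression:

    // Insertion order fully determines iteration order under record/replay,
    // at the price of linear-time hash lookups.
    return mozilla::recordreplay::IsRecordingOrReplaying()
           ? 0
           : DefaultHasher<JSObject*>::hash(l);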
 
--- a/js/src/vm/Time.h
+++ b/js/src/vm/Time.h
@@ -2,16 +2,19 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef vm_Time_h
 #define vm_Time_h
 
+#include "mozilla/RecordReplay.h"
+#include "mozilla/TimeStamp.h"
+
 #include <stddef.h>
 #include <stdint.h>
 
 /*
  * Broken down form of 64 bit time value.
  */
 struct PRMJTime {
     int32_t tm_usec;            /* microseconds of second (0-999999) */
@@ -127,38 +130,56 @@ PRMJ_FormatTime(char* buf, int buflen, c
 #define MOZ_HAVE_RDTSC 1
 
 #if defined(_WIN32)
 
 #include <intrin.h>
 static __inline uint64_t
 ReadTimestampCounter(void)
 {
+    if (mozilla::recordreplay::IsRecordingOrReplaying())
+        return 0;
     return __rdtsc();
 }
 
 #elif defined(__i386__)
 
 static __inline__ uint64_t
 ReadTimestampCounter(void)
 {
+    if (mozilla::recordreplay::IsRecordingOrReplaying())
+        return 0;
     uint64_t x;
     __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x));
     return x;
 }
 
 #elif defined(__x86_64__)
 
 static __inline__ uint64_t
 ReadTimestampCounter(void)
 {
+    if (mozilla::recordreplay::IsRecordingOrReplaying())
+        return 0;
     unsigned hi, lo;
     __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
     return ( (uint64_t)lo)|( ((uint64_t)hi)<<32 );
 }
 
 #else
 
 #undef MOZ_HAVE_RDTSC
 
 #endif
 
+namespace js {
+
+// Get the current time, bypassing any record/replay instrumentation.
+static inline mozilla::TimeStamp
+ReallyNow()
+{
+    mozilla::recordreplay::AutoPassThroughThreadEvents pt;
+    return mozilla::TimeStamp::Now();
+}
+
+} // namespace js
+
 #endif /* vm_Time_h */
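
Every timing call site touched by this patch (SliceBudget, Nursery, Statistics, HelperThreads, ScriptSource) now funnels through this helper. A minimal usage sketch (doSomeGCWork() is hypothetical):

    // Time an operation without feeding the clock reads into the recording.
    mozilla::TimeStamp start = js::ReallyNow();
    doSomeGCWork();
    mozilla::TimeDuration elapsed = js::ReallyNow() - start;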
--- a/js/src/vm/TypeInference.cpp
+++ b/js/src/vm/TypeInference.cpp
@@ -167,16 +167,18 @@ TypeSet::ObjectGroupString(ObjectGroup* 
 bool
 js::InferSpewActive(SpewChannel channel)
 {
     static bool active[SPEW_COUNT];
     static bool checked = false;
     if (!checked) {
         checked = true;
         PodArrayZero(active);
+        if (mozilla::recordreplay::IsRecordingOrReplaying())
+            return false;
         const char* env = getenv("INFERFLAGS");
         if (!env)
             return false;
         if (strstr(env, "ops"))
             active[ISpewOps] = true;
         if (strstr(env, "result"))
             active[ISpewResult] = true;
         if (strstr(env, "full")) {
@@ -189,16 +191,18 @@ js::InferSpewActive(SpewChannel channel)
 
 static bool InferSpewColorable()
 {
     /* Only spew colors on xterm-color to not screw up emacs. */
     static bool colorable = false;
     static bool checked = false;
     if (!checked) {
         checked = true;
+        if (mozilla::recordreplay::IsRecordingOrReplaying())
+            return false;
         const char* env = getenv("TERM");
         if (!env)
             return false;
         if (strcmp(env, "xterm-color") == 0 || strcmp(env, "xterm-256color") == 0)
             colorable = true;
     }
     return colorable;
 }
--- a/js/src/wasm/WasmSignalHandlers.cpp
+++ b/js/src/wasm/WasmSignalHandlers.cpp
@@ -1393,16 +1393,20 @@ ProcessHasSignalHandlers()
         return sHaveSignalHandlers;
     sTriedInstallSignalHandlers = true;
 
 #if defined (JS_CODEGEN_NONE)
     // If there is no JIT, then there should be no Wasm signal handlers.
     return false;
 #endif
 
+    // Signal handlers are currently disabled when recording or replaying.
+    if (mozilla::recordreplay::IsRecordingOrReplaying())
+        return false;
+
 #if defined(ANDROID) && defined(MOZ_LINKER)
     // Signal handling is broken on some android systems.
     if (IsSignalHandlingBroken())
         return false;
 #endif
 
    // Initialize ThreadLocal flag used by WasmFaultHandler
     sAlreadyInSignalHandler.infallibleInit();
--- a/js/xpconnect/loader/AutoMemMap.h
+++ b/js/xpconnect/loader/AutoMemMap.h
@@ -46,17 +46,17 @@ class AutoMemMap
         // Windows, the FileDescriptor must be a handle for a file mapping,
         // rather than a file descriptor.
         Result<Ok, nsresult>
         initWithHandle(const ipc::FileDescriptor& file, size_t size,
                        PRFileMapProtect prot = PR_PROT_READONLY);
 
         void reset();
 
-        bool initialized() { return addr; }
+        bool initialized() const { return addr; }
 
         uint32_t size() const { return size_; }
 
         template<typename T = void>
         RangedPtr<T> get()
         {
             MOZ_ASSERT(addr);
             return { static_cast<T*>(addr), size_ };
--- a/js/xpconnect/src/SandboxPrivate.h
+++ b/js/xpconnect/src/SandboxPrivate.h
@@ -29,16 +29,17 @@ public:
     static void Create(nsIPrincipal* principal, JS::Handle<JSObject*> global)
     {
         RefPtr<SandboxPrivate> sbp = new SandboxPrivate(principal);
         sbp->SetWrapper(global);
         sbp->PreserveWrapper(ToSupports(sbp.get()));
 
         // Pass on ownership of sbp to |global|.
         // The type used to cast to void needs to match the one in GetPrivate.
+        mozilla::RecordReplayRegisterDeferredFinalizeThing(nullptr, nullptr, sbp);
         JS_SetPrivate(global, static_cast<nsIScriptObjectPrincipal*>(sbp.forget().take()));
     }
 
     static SandboxPrivate* GetPrivate(JSObject* obj)
     {
         // The type used to cast to void needs to match the one in Create.
         return static_cast<SandboxPrivate*>(
             static_cast<nsIScriptObjectPrincipal*>(JS_GetPrivate(obj)));
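
RecordReplayRegisterDeferredFinalizeThing() is paired with a later DeferredFinalize() of the same pointer, so finalization happens at matching points in the recording and the replay; the XPCWrappedNative and XPCWrappedNativeProto hunks below add both halves of the pair. The assumed protocol, for some refcounted |thing|:

    // Registration at creation time...
    RecordReplayRegisterDeferredFinalizeThing(nullptr, nullptr, thing);
    // ...must be matched by a deferred (never immediate) release later.
    DeferredFinalize(thing.forget().take());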
--- a/js/xpconnect/src/XPCJSContext.cpp
+++ b/js/xpconnect/src/XPCJSContext.cpp
@@ -550,28 +550,40 @@ xpc::SimulateActivityCallback(bool aActi
 {
     XPCJSContext::ActivityCallback(XPCJSContext::Get(), aActive);
 }
 
 // static
 void
 XPCJSContext::ActivityCallback(void* arg, bool active)
 {
+    // Since the slow script dialog never activates if we are recording or
+    // replaying, don't record/replay JS activity notifications.
+    if (recordreplay::IsRecordingOrReplaying()) {
+        return;
+    }
+
     if (!active) {
         ProcessHangMonitor::ClearHang();
     }
 
     XPCJSContext* self = static_cast<XPCJSContext*>(arg);
     self->mWatchdogManager->RecordContextActivity(self, active);
 }
 
 // static
 bool
 XPCJSContext::InterruptCallback(JSContext* cx)
 {
+    // The slow script dialog never activates if we are recording or replaying,
+    // since the precise timing of the dialog cannot be replayed.
+    if (recordreplay::IsRecordingOrReplaying()) {
+        return true;
+    }
+
     XPCJSContext* self = XPCJSContext::Get();
 
     // Now is a good time to turn on profiling if it's pending.
     PROFILER_JS_INTERRUPT_CALLBACK();
 
     // Normally we record mSlowScriptCheckpoint when we start to process an
     // event. However, we can run JS outside of event handlers. This code takes
     // care of that case.
--- a/js/xpconnect/src/XPCJSRuntime.cpp
+++ b/js/xpconnect/src/XPCJSRuntime.cpp
@@ -135,38 +135,73 @@ public:
               mActive = false;
           }
       } else {
           mActive = false;
       }
       return NS_OK;
   }
 
+  // Dispatch() can be called during a GC, and GCs occur at non-deterministic
+  // points when recording or replaying. This callback is used with the
+  // record/replay trigger mechanism to make sure the snow white freer
+  // executes at a consistent point.
+  void RecordReplayRun()
+  {
+      // Make sure state in the freer is consistent with the recording.
+      mActive = recordreplay::RecordReplayValue(mActive);
+      mPurge = recordreplay::RecordReplayValue(mPurge);
+      mContinuation = recordreplay::RecordReplayValue(mContinuation);
+
+      Run();
+  }
+
   nsresult Dispatch()
   {
+      if (recordreplay::IsRecordingOrReplaying()) {
+          recordreplay::ActivateTrigger(this);
+          return NS_OK;
+      }
       nsCOMPtr<nsIRunnable> self(this);
       return NS_IdleDispatchToCurrentThread(self.forget(), 2500);
   }
 
   void Start(bool aContinuation = false, bool aPurge = false)
   {
       if (mContinuation) {
           mContinuation = aContinuation;
       }
       mPurge = aPurge;
       if (!mActive && NS_SUCCEEDED(Dispatch())) {
           mActive = true;
       }
   }
 
+  // Wrapper to work around static analysis restrictions on capturing |this|.
+  struct RawSelfPtr { AsyncFreeSnowWhite* mPtr; };
+
   AsyncFreeSnowWhite()
     : Runnable("AsyncFreeSnowWhite")
     , mContinuation(false)
     , mActive(false)
-    , mPurge(false) {}
+    , mPurge(false)
+  {
+      if (recordreplay::IsRecordingOrReplaying()) {
+          RawSelfPtr ptr;
+          ptr.mPtr = this;
+          recordreplay::RegisterTrigger(this, [=]() { ptr.mPtr->RecordReplayRun(); });
+      }
+  }
+
+  ~AsyncFreeSnowWhite()
+  {
+      if (recordreplay::IsRecordingOrReplaying()) {
+          recordreplay::UnregisterTrigger(this);
+      }
+  }
 
 public:
   bool mContinuation;
   bool mActive;
   bool mPurge;
 };
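
The trigger mechanism replaces the idle dispatch with a register/activate/unregister lifecycle, so the snow white freer runs at a point the recording controls. Schematically:

    // ctor:       recordreplay::RegisterTrigger(this, [=] { RecordReplayRun(); });
    // Dispatch(): recordreplay::ActivateTrigger(this);  // not idle dispatch
    // replay:     RecordReplayRun() re-reads mActive/mPurge/mContinuation
    //             from the recording, then calls Run().
    // dtor:       recordreplay::UnregisterTrigger(this);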
 
 namespace xpc {
--- a/js/xpconnect/src/XPCWrappedNative.cpp
+++ b/js/xpconnect/src/XPCWrappedNative.cpp
@@ -481,16 +481,18 @@ FinishCreate(XPCWrappedNativeScope* Scop
 XPCWrappedNative::XPCWrappedNative(already_AddRefed<nsISupports>&& aIdentity,
                                    XPCWrappedNativeProto* aProto)
     : mMaybeProto(aProto),
       mSet(aProto->GetSet())
 {
     MOZ_ASSERT(NS_IsMainThread());
 
     mIdentity = aIdentity;
+    RecordReplayRegisterDeferredFinalizeThing(nullptr, nullptr, mIdentity);
+
     mFlatJSObject.setFlags(FLAT_JS_OBJECT_VALID);
 
     MOZ_ASSERT(mMaybeProto, "bad ctor param");
     MOZ_ASSERT(mSet, "bad ctor param");
 }
 
 // This ctor is used if this object will NOT have a proto.
 XPCWrappedNative::XPCWrappedNative(already_AddRefed<nsISupports>&& aIdentity,
@@ -498,16 +500,18 @@ XPCWrappedNative::XPCWrappedNative(alrea
                                    already_AddRefed<XPCNativeSet>&& aSet)
 
     : mMaybeScope(TagScope(aScope)),
       mSet(aSet)
 {
     MOZ_ASSERT(NS_IsMainThread());
 
     mIdentity = aIdentity;
+    RecordReplayRegisterDeferredFinalizeThing(nullptr, nullptr, mIdentity);
+
     mFlatJSObject.setFlags(FLAT_JS_OBJECT_VALID);
 
     MOZ_ASSERT(aScope, "bad ctor param");
     MOZ_ASSERT(mSet, "bad ctor param");
 }
 
 XPCWrappedNative::~XPCWrappedNative()
 {
@@ -524,18 +528,22 @@ XPCWrappedNative::Destroy()
     XPCWrappedNativeScope* scope = GetScope();
     if (scope) {
         Native2WrappedNativeMap* map = scope->GetWrappedNativeMap();
         MOZ_ASSERT(map->Find(GetIdentityObject()) != this);
     }
 #endif
 
     if (mIdentity) {
+        // Either release mIdentity immediately or defer the release. When
+        // recording or replaying the release must always be deferred, so that
+        // DeferredFinalize matches the earlier call to
+        // RecordReplayRegisterDeferredFinalizeThing.
         XPCJSRuntime* rt = GetRuntime();
-        if (rt && rt->GetDoingFinalization()) {
+        if ((rt && rt->GetDoingFinalization()) || recordreplay::IsRecordingOrReplaying()) {
             DeferredFinalize(mIdentity.forget().take());
         } else {
             mIdentity = nullptr;
         }
     }
 
     mMaybeScope = nullptr;
 }
@@ -748,18 +756,20 @@ XPCWrappedNative::FlatJSObjectFinalized(
 #ifdef DEBUG
             JS_UpdateWeakPointerAfterGCUnbarriered(&jso);
             MOZ_ASSERT(!jso);
 #endif
             to->JSObjectFinalized();
         }
 
         // We also need to release any native pointers held...
+        // As in XPCWrappedNative::Destroy, when recording or replaying the
+        // release must always be deferred.
         RefPtr<nsISupports> native = to->TakeNative();
-        if (native && GetRuntime()) {
+        if (native && (GetRuntime() || recordreplay::IsRecordingOrReplaying())) {
             DeferredFinalize(native.forget().take());
         }
 
         to->SetInterface(nullptr);
     }
 
     nsWrapperCache* cache = nullptr;
     CallQueryInterface(mIdentity, &cache);
@@ -1049,16 +1059,18 @@ XPCWrappedNative::InitTearOff(XPCWrapped
 
     if (!mSet->HasInterface(aInterface) && !ExtendSet(aInterface)) {
         aTearOff->SetInterface(nullptr);
         return NS_ERROR_NO_INTERFACE;
     }
 
     aTearOff->SetInterface(aInterface);
     aTearOff->SetNative(qiResult);
+    RecordReplayRegisterDeferredFinalizeThing(nullptr, nullptr, qiResult);
+
     if (needJSObject && !InitTearOffJSObject(aTearOff))
         return NS_ERROR_OUT_OF_MEMORY;
 
     return NS_OK;
 }
 
 bool
 XPCWrappedNative::InitTearOffJSObject(XPCWrappedNativeTearOff* to)
--- a/js/xpconnect/src/XPCWrappedNativeProto.cpp
+++ b/js/xpconnect/src/XPCWrappedNativeProto.cpp
@@ -27,31 +27,35 @@ XPCWrappedNativeProto::XPCWrappedNativeP
     // by finalization of the JSObject (or explicitly if Init fails).
 
     MOZ_COUNT_CTOR(XPCWrappedNativeProto);
     MOZ_ASSERT(mScope);
 
 #ifdef DEBUG
     gDEBUG_LiveProtoCount++;
 #endif
+
+    RecordReplayRegisterDeferredFinalizeThing(nullptr, nullptr, mClassInfo);
 }
 
 XPCWrappedNativeProto::~XPCWrappedNativeProto()
 {
     MOZ_ASSERT(!mJSProtoObject, "JSProtoObject still alive");
 
     MOZ_COUNT_DTOR(XPCWrappedNativeProto);
 
 #ifdef DEBUG
     gDEBUG_LiveProtoCount--;
 #endif
 
     // Note that our weak ref to mScope is not to be trusted at this point.
 
     XPCNativeSet::ClearCacheEntryForClassInfo(mClassInfo);
+
+    DeferredFinalize(mClassInfo.forget().take());
 }
 
 bool
 XPCWrappedNativeProto::Init(nsIXPCScriptable* scriptable)
 {
     AutoJSContext cx;
     mScriptable = scriptable;
 
--- a/layout/base/nsStyleSheetService.cpp
+++ b/layout/base/nsStyleSheetService.cpp
@@ -69,17 +69,17 @@ nsStyleSheetService::RegisterFromEnumera
     nsCOMPtr<nsISupportsCString> icStr = do_QueryInterface(element);
     NS_ASSERTION(icStr,
                  "category manager entries must be nsISupportsCStrings");
 
     nsAutoCString name;
     icStr->GetData(name);
 
     nsCString spec;
-    aManager->GetCategoryEntry(aCategory, name.get(), getter_Copies(spec));
+    aManager->GetCategoryEntry(nsDependentCString(aCategory), name, spec);
 
     nsCOMPtr<nsIURI> uri;
     NS_NewURI(getter_AddRefs(uri), spec);
     if (uri)
       LoadAndRegisterSheetInternal(uri, aSheetType);
   }
 }
 
--- a/layout/style/ImageLoader.cpp
+++ b/layout/style/ImageLoader.cpp
@@ -41,16 +41,35 @@ ImageLoader::DropDocumentReference()
     }
     image->mRequests.Remove(mDocument);
   }
   mImages.Clear();
 
   mDocument = nullptr;
 }
 
+// Normally, arrays of requests and frames are sorted by their pointer address,
+// for faster lookup. When recording or replaying, we don't do this, so that
+// the arrays retain their insertion order and are consistent between recording
+// and replaying.
+template <typename Elem, typename Item, typename Comparator = nsDefaultComparator<Elem, Item>>
+static size_t
+GetMaybeSortedIndex(const nsTArray<Elem>& aArray, const Item& aItem, bool* aFound,
+                    Comparator aComparator = Comparator())
+{
+  if (recordreplay::IsRecordingOrReplaying()) {
+    size_t index = aArray.IndexOf(aItem, 0, aComparator);
+    *aFound = index != nsTArray<Elem>::NoIndex;
+    return *aFound ? index + 1 : aArray.Length();
+  }
+  size_t index = aArray.IndexOfFirstElementGt(aItem, aComparator);
+  *aFound = index > 0 && aComparator.Equals(aItem, aArray.ElementAt(index - 1));
+  return index;
+}
+
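+// Illustrative use of GetMaybeSortedIndex (this mirrors the call sites
+// below; `frames` and `item` are hypothetical):
+//
+//   bool found;
+//   size_t i = GetMaybeSortedIndex(frames, item, &found, FrameOnlyComparator());
+//   if (found) {
+//     FrameWithFlags& fwf = frames.ElementAt(i - 1);  // present in both modes
+//   } else {
+//     frames.InsertElementAt(i, item);  // i is a valid insertion index
+//   }
+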
 void
 ImageLoader::AssociateRequestToFrame(imgIRequest* aRequest,
                                      nsIFrame* aFrame,
                                      FrameFlags aFlags)
 {
   nsCOMPtr<imgINotificationObserver> observer;
   aRequest->GetNotificationObserver(getter_AddRefs(observer));
   if (!observer) {
@@ -79,18 +98,19 @@ ImageLoader::AssociateRequestToFrame(img
     });
 
   // Add frame to the frameSet, and handle any special processing the
   // frame might require.
   FrameWithFlags fwf(aFrame);
   FrameWithFlags* fwfToModify(&fwf);
 
   // See if the frameSet already has this frame.
-  uint32_t i = frameSet->IndexOfFirstElementGt(fwf, FrameOnlyComparator());
-  if (i > 0 && aFrame == frameSet->ElementAt(i-1).mFrame) {
+  bool found;
+  uint32_t i = GetMaybeSortedIndex(*frameSet, fwf, &found, FrameOnlyComparator());
+  if (found) {
     // We're already tracking this frame, so prepare to modify the
     // existing FrameWithFlags object.
     fwfToModify = &frameSet->ElementAt(i-1);
   }
 
   // Check if the frame requires special processing.
   if (aFlags & REQUEST_REQUIRES_REFLOW) {
     fwfToModify->mFlags |= REQUEST_REQUIRES_REFLOW;
@@ -144,24 +164,24 @@ ImageLoader::AssociateRequestToFrame(img
   }
 
   // Do some sanity checking to ensure that we only add to one mapping
   // iff we also add to the other mapping.
   DebugOnly<bool> didAddToFrameSet(false);
   DebugOnly<bool> didAddToRequestSet(false);
 
   // If we weren't already tracking this frame, add it to the frameSet.
-  if (i == 0 || aFrame != frameSet->ElementAt(i-1).mFrame) {
+  if (!found) {
     frameSet->InsertElementAt(i, fwf);
     didAddToFrameSet = true;
   }
 
   // Add request to the request set if it wasn't already there.
-  i = requestSet->IndexOfFirstElementGt(aRequest);
-  if (i == 0 || aRequest != requestSet->ElementAt(i-1)) {
+  i = GetMaybeSortedIndex(*requestSet, aRequest, &found);
+  if (!found) {
     requestSet->InsertElementAt(i, aRequest);
     didAddToRequestSet = true;
   }
 
   MOZ_ASSERT(didAddToFrameSet == didAddToRequestSet,
              "We should only add to one map iff we also add to the other map.");
 }
 
@@ -214,20 +234,20 @@ ImageLoader::RemoveRequestToFrameMapping
   }
 #endif
 
   if (auto entry = mRequestToFrameMap.Lookup(aRequest)) {
     FrameSet* frameSet = entry.Data();
     MOZ_ASSERT(frameSet, "This should never be null");
 
     // Before we remove aFrame from the frameSet, unblock onload if needed.
-    uint32_t i = frameSet->IndexOfFirstElementGt(FrameWithFlags(aFrame),
-                                                 FrameOnlyComparator());
-
-    if (i > 0 && aFrame == frameSet->ElementAt(i-1).mFrame) {
+    bool found;
+    uint32_t i = GetMaybeSortedIndex(*frameSet, FrameWithFlags(aFrame), &found,
+                                     FrameOnlyComparator());
+    if (found) {
       FrameWithFlags& fwf = frameSet->ElementAt(i-1);
       if (fwf.mFlags & REQUEST_HAS_BLOCKED_ONLOAD) {
         mDocument->UnblockOnload(false);
         // We're about to remove fwf from the frameSet, so we don't bother
         // updating the flag.
       }
       frameSet->RemoveElementAt(i-1);
     }
@@ -244,17 +264,21 @@ ImageLoader::RemoveRequestToFrameMapping
 
 void
 ImageLoader::RemoveFrameToRequestMapping(imgIRequest* aRequest,
                                          nsIFrame*    aFrame)
 {
   if (auto entry = mFrameToRequestMap.Lookup(aFrame)) {
     RequestSet* requestSet = entry.Data();
     MOZ_ASSERT(requestSet, "This should never be null");
-    requestSet->RemoveElementSorted(aRequest);
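+    // When recording or replaying, the request set is kept in insertion
+    // order (see GetMaybeSortedIndex above), so the sorted (binary-search)
+    // removal path cannot be used.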
+    if (recordreplay::IsRecordingOrReplaying()) {
+      requestSet->RemoveElement(aRequest);
+    } else {
+      requestSet->RemoveElementSorted(aRequest);
+    }
     if (requestSet->IsEmpty()) {
       aFrame->SetHasImageRequest(false);
       entry.Remove();
     }
   }
 }
 
 void
--- a/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp
@@ -1335,16 +1335,21 @@ PeerConnectionImpl::CreateDataChannel(co
                                       uint16_t aMaxNum,
                                       bool aExternalNegotiated,
                                       uint16_t aStream,
                                       nsDOMDataChannel** aRetval)
 {
   PC_AUTO_ENTER_API_CALL(false);
   MOZ_ASSERT(aRetval);
 
+  // WebRTC is not enabled when recording/replaying. See bug 1304149.
+  if (recordreplay::IsRecordingOrReplaying()) {
+    return NS_ERROR_NOT_AVAILABLE;
+  }
+
   RefPtr<DataChannel> dataChannel;
   DataChannelConnection::Type theType =
     static_cast<DataChannelConnection::Type>(aType);
 
   nsresult rv = EnsureDataConnection(WEBRTC_DATACHANNEL_PORT_DEFAULT,
                                      WEBRTC_DATACHANNEL_STREAMS_DEFAULT,
                                      WEBRTC_DATACHANNEL_MAX_MESSAGE_SIZE_REMOTE_DEFAULT,
                                      false);
--- a/modules/libpref/Preferences.cpp
+++ b/modules/libpref/Preferences.cpp
@@ -1688,17 +1688,18 @@ pref_Lookup(const char* aPrefName, bool 
 
   return result;
 }
 
 static Result<Pref*, nsresult>
 pref_LookupForModify(const char* aPrefName,
                      const std::function<bool(const PrefWrapper&)>& aCheckFn)
 {
-  Maybe<PrefWrapper> wrapper = pref_Lookup(aPrefName, /* includeTypeNone */ true);
+  Maybe<PrefWrapper> wrapper =
+    pref_Lookup(aPrefName, /* includeTypeNone */ true);
   if (wrapper.isNothing()) {
     return Err(NS_ERROR_INVALID_ARG);
   }
   if (!aCheckFn(*wrapper)) {
     return nullptr;
   }
   if (wrapper->is<Pref*>()) {
     return wrapper->as<Pref*>();
@@ -1751,18 +1752,18 @@ pref_SetPref(const char* aPrefName,
       // New entry. Set the type.
       pref->SetType(aType);
     }
   }
 
   bool valueChanged = false;
   nsresult rv;
   if (aKind == PrefValueKind::Default) {
-    rv = pref->SetDefaultValue(
-      aType, aValue, aIsSticky, aIsLocked, &valueChanged);
+    rv =
+      pref->SetDefaultValue(aType, aValue, aIsSticky, aIsLocked, &valueChanged);
   } else {
     MOZ_ASSERT(!aIsLocked); // `locked` is disallowed in user pref files
     rv = pref->SetUserValue(aType, aValue, aFromInit, &valueChanged);
   }
   if (NS_FAILED(rv)) {
     NS_WARNING(
       nsPrintfCString(
         "Rejected attempt to change type of pref %s's %s value from %s to %s",
@@ -2890,16 +2891,18 @@ nsPrefBranch::DeleteBranch(const char* a
 
     // The first disjunct matches branches: e.g. a branch name "foo.bar."
     // matches a name "foo.bar.baz" (but it won't match "foo.barrel.baz").
     // The second disjunct matches leaf nodes: e.g. a branch name "foo.bar."
     // matches a name "foo.bar" (by ignoring the trailing '.').
     nsDependentCString name(pref->Name());
     if (StringBeginsWith(name, branchName) || name.Equals(branchNameNoDot)) {
       iter.Remove();
+      // The saved callback pref may be invalid now.
+      gCallbackPref = nullptr;
     }
   }
 
   Preferences::HandleDirty();
   return NS_OK;
 }
 
 NS_IMETHODIMP
@@ -4771,16 +4774,51 @@ pref_ReadPrefFromJar(nsZipArchive* aJarR
                     startTime,
                     manifest)) {
     return NS_ERROR_FILE_CORRUPTED;
   }
 
   return NS_OK;
 }
 
+// These preference getter wrappers allow us to look up the value for static
+// preferences based on their native types, rather than manually mapping them to
+// the appropriate Preferences::Get* functions.
+template<typename T>
+static T
+GetPref(const char* aName, T aDefaultValue);
+
+template<>
+bool MOZ_MAYBE_UNUSED
+GetPref<bool>(const char* aName, bool aDefaultValue)
+{
+  return Preferences::GetBool(aName, aDefaultValue);
+}
+
+template<>
+int32_t MOZ_MAYBE_UNUSED
+GetPref<int32_t>(const char* aName, int32_t aDefaultValue)
+{
+  return Preferences::GetInt(aName, aDefaultValue);
+}
+
+template<>
+uint32_t MOZ_MAYBE_UNUSED
+GetPref<uint32_t>(const char* aName, uint32_t aDefaultValue)
+{
+  return Preferences::GetInt(aName, aDefaultValue);
+}
+
+template<>
+float MOZ_MAYBE_UNUSED
+GetPref<float>(const char* aName, float aDefaultValue)
+{
+  return Preferences::GetFloat(aName, aDefaultValue);
+}
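+
+// For example (illustrative only; the pref names here are made up), the
+// template selects the matching Preferences getter from the native type:
+//
+//   bool fooEnabled = GetPref<bool>("layout.foo.enabled", false);
+//   uint32_t fooLimit = GetPref<uint32_t>("layout.foo.limit", 64);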
+
 // Initialize default preference JavaScript buffers from appropriate TEXT
 // resources.
 /* static */ Result<Ok, const char*>
 Preferences::InitInitialObjects(bool aIsStartup)
 {
   // Initialize static prefs before prefs from data files so that the latter
   // will override the former.
   StaticPrefs::InitAll(aIsStartup);
@@ -4799,16 +4837,31 @@ Preferences::InitInitialObjects(bool aIs
     // support that for static preferences in this configuration, and therefore
     // ignore the possibility.
     for (auto& pref : gSharedMap->Iter()) {
       if (pref.HasUserValue() || pref.IsLocked()) {
         NotifyCallbacks(pref.Name(), PrefWrapper(pref));
       }
     }
 
+#ifdef DEBUG
+    // Check that all varcache preferences match their current values. This can
+    // currently fail if the default value of a static varcache preference is
+    // changed in a preference file or at runtime, rather than in
+    // StaticPrefList.h.
+
+#define PREF(name, cpp_type, value)
+#define VARCACHE_PREF(name, id, cpp_type, value)                               \
+  MOZ_ASSERT(GetPref<StripAtomic<cpp_type>>(name, value) == StaticPrefs::id(), \
+             "Incorrect cached value for " name);
+#include "mozilla/StaticPrefList.h"
+#undef PREF
+#undef VARCACHE_PREF
+#endif
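+
+    // For a hypothetical entry
+    //   VARCACHE_PREF("layout.foo.enabled", layout_foo_enabled, bool, true)
+    // the include above expands (after string concatenation) to:
+    //   MOZ_ASSERT(GetPref<StripAtomic<bool>>("layout.foo.enabled", true) ==
+    //                StaticPrefs::layout_foo_enabled(),
+    //              "Incorrect cached value for layout.foo.enabled");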
+
     return Ok();
   }
 
   // In the omni.jar case, we load the following prefs:
   // - jar:$gre/omni.jar!/greprefs.js
   // - jar:$gre/omni.jar!/defaults/pref/*.js
   //
   // In the non-omni.jar case, we load:
--- a/modules/libpref/init/all.js
+++ b/modules/libpref/init/all.js
@@ -5835,8 +5835,14 @@ pref("layers.omtp.dump-capture", false);
 // Limits the depth of recursive conversion of data when opening
 // a content to view.  This is mostly intended to prevent infinite
 // loops with faulty converters involved.
 pref("general.document_open_conversion_depth_limit", 20);
 
 // If true, touchstart and touchmove listeners on window, document,
 // documentElement and document.body are passive by default.
 pref("dom.event.default_to_passive_touch_listeners", true);
+
+// Enable FastBlock?
+pref("browser.fastblock.enabled", false);
+// The timeout (ms) since navigation start; tracker connections made after
+// this timeout will be canceled.
+pref("browser.fastblock.timeout", 5000);
--- a/netwerk/ipc/NeckoChannelParams.ipdlh
+++ b/netwerk/ipc/NeckoChannelParams.ipdlh
@@ -237,16 +237,17 @@ struct HttpChannelOpenArgs
   uint64_t                    topLevelOuterContentWindowId;
   TimeStamp                   launchServiceWorkerStart;
   TimeStamp                   launchServiceWorkerEnd;
   TimeStamp                   dispatchFetchEventStart;
   TimeStamp                   dispatchFetchEventEnd;
   TimeStamp                   handleFetchEventStart;
   TimeStamp                   handleFetchEventEnd;
   bool                        forceMainDocumentChannel;
+  TimeStamp                   navigationStartTimeStamp;
 };
 
 struct HttpChannelConnectArgs
 {
   uint32_t registrarId;
   bool shouldIntercept;
 };
 
--- a/netwerk/ipc/NeckoCommon.h
+++ b/netwerk/ipc/NeckoCommon.h
@@ -86,17 +86,17 @@ namespace net {
 inline bool
 IsNeckoChild()
 {
   static bool didCheck = false;
   static bool amChild = false;
 
   if (!didCheck) {
     didCheck = true;
-    amChild = (XRE_GetProcessType() == GeckoProcessType_Content);
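+    // A record/replay middleman process is not a necko child, even though
+    // it runs with the content process type.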
+    amChild = (XRE_GetProcessType() == GeckoProcessType_Content) && !recordreplay::IsMiddleman();
   }
   return amChild;
 }
 
 namespace NeckoCommonInternal {
   extern bool gSecurityDisabled;
   extern bool gRegisteredBool;
 } // namespace NeckoCommonInternal
--- a/netwerk/protocol/http/HttpBaseChannel.cpp
+++ b/netwerk/protocol/http/HttpBaseChannel.cpp
@@ -4588,16 +4588,28 @@ HttpBaseChannel::GetLastRedirectFlags(ui
 
 NS_IMETHODIMP
 HttpBaseChannel::SetLastRedirectFlags(uint32_t aValue)
 {
   mLastRedirectFlags = aValue;
   return NS_OK;
 }
 
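+// Navigation start is tracked only by nsHttpChannel (see its overrides
+// below); the base channel keeps these as unimplemented stubs.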
+NS_IMETHODIMP
+HttpBaseChannel::GetNavigationStartTimeStamp(TimeStamp* aTimeStamp)
+{
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+HttpBaseChannel::SetNavigationStartTimeStamp(TimeStamp aTimeStamp)
+{
+  return NS_ERROR_NOT_IMPLEMENTED;
+}
+
 nsresult
 HttpBaseChannel::CheckRedirectLimit(uint32_t aRedirectFlags) const
 {
   if (aRedirectFlags & nsIChannelEventSink::REDIRECT_INTERNAL) {
     // Some platform features, like Service Workers, depend on internal
     // redirects.  We should allow some number of internal redirects above
     // and beyond the normal redirect limit so these features continue
     // to work.
--- a/netwerk/protocol/http/HttpBaseChannel.h
+++ b/netwerk/protocol/http/HttpBaseChannel.h
@@ -263,16 +263,18 @@ public:
   NS_IMETHOD GetProxyURI(nsIURI **proxyURI) override;
   virtual void SetCorsPreflightParameters(const nsTArray<nsCString>& unsafeHeaders) override;
   virtual void SetAltDataForChild(bool aIsForChild) override;
   NS_IMETHOD GetConnectionInfoHashKey(nsACString& aConnectionInfoHashKey) override;
   NS_IMETHOD GetIntegrityMetadata(nsAString& aIntegrityMetadata) override;
   NS_IMETHOD SetIntegrityMetadata(const nsAString& aIntegrityMetadata) override;
   NS_IMETHOD GetLastRedirectFlags(uint32_t *aValue) override;
   NS_IMETHOD SetLastRedirectFlags(uint32_t aValue) override;
+  NS_IMETHOD GetNavigationStartTimeStamp(TimeStamp* aTimeStamp) override;
+  NS_IMETHOD SetNavigationStartTimeStamp(TimeStamp aTimeStamp) override;
 
   inline void CleanRedirectCacheChainIfNecessary()
   {
       mRedirectedCachekeys = nullptr;
   }
   NS_IMETHOD HTTPUpgrade(const nsACString & aProtocolName,
                          nsIHttpUpgradeListener *aListener) override;
 
--- a/netwerk/protocol/http/HttpChannelChild.cpp
+++ b/netwerk/protocol/http/HttpChannelChild.cpp
@@ -2685,21 +2685,24 @@ HttpChannelChild::ContinueAsyncOpen()
   }
   if (MissingRequiredTabChild(tabChild, "http")) {
     return NS_ERROR_ILLEGAL_VALUE;
   }
 
   // This id identifies the top-level document's inner window, which
   // changes on every new load or navigation.
   uint64_t contentWindowId = 0;
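+  // Also grab the navigation start time from the document; the parent uses
+  // it to enforce the FastBlock timeout for this load.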
+  TimeStamp navigationStartTimeStamp;
   if (tabChild) {
     MOZ_ASSERT(tabChild->WebNavigation());
     nsCOMPtr<nsIDocument> document = tabChild->GetDocument();
     if (document) {
       contentWindowId = document->InnerWindowID();
+      navigationStartTimeStamp =
+        document->GetNavigationTiming()->GetNavigationStartTimeStamp();
       mTopLevelOuterContentWindowId = document->OuterWindowID();
     }
   }
   SetTopLevelContentWindowId(contentWindowId);
 
   HttpChannelOpenArgs openArgs;
   // No access to HttpChannelOpenArgs members, but they each have a
   // function with the struct name that returns a ref.
@@ -2802,16 +2805,18 @@ HttpChannelChild::ContinueAsyncOpen()
   openArgs.launchServiceWorkerEnd()   = mLaunchServiceWorkerEnd;
   openArgs.dispatchFetchEventStart()  = mDispatchFetchEventStart;
   openArgs.dispatchFetchEventEnd()    = mDispatchFetchEventEnd;
   openArgs.handleFetchEventStart()    = mHandleFetchEventStart;
   openArgs.handleFetchEventEnd()      = mHandleFetchEventEnd;
 
   openArgs.forceMainDocumentChannel() = mForceMainDocumentChannel;
 
+  openArgs.navigationStartTimeStamp() = navigationStartTimeStamp;
+
   // This must happen before the constructor message is sent. Otherwise messages
   // from the parent could arrive quickly and be delivered to the wrong event
   // target.
   SetEventTarget();
 
   // The socket transport in the chrome process now holds a logical ref to us
   // until OnStopRequest, or we do a redirect, or we hit an IPDL error.
   AddIPDLReference();
--- a/netwerk/protocol/http/HttpChannelParent.cpp
+++ b/netwerk/protocol/http/HttpChannelParent.cpp
@@ -149,17 +149,18 @@ HttpChannelParent::Init(const HttpChanne
                        a.contentWindowId(), a.preferredAlternativeType(),
                        a.topLevelOuterContentWindowId(),
                        a.launchServiceWorkerStart(),
                        a.launchServiceWorkerEnd(),
                        a.dispatchFetchEventStart(),
                        a.dispatchFetchEventEnd(),
                        a.handleFetchEventStart(),
                        a.handleFetchEventEnd(),
-                       a.forceMainDocumentChannel());
+                       a.forceMainDocumentChannel(),
+                       a.navigationStartTimeStamp());
   }
   case HttpChannelCreationArgs::THttpChannelConnectArgs:
   {
     const HttpChannelConnectArgs& cArgs = aArgs.get_HttpChannelConnectArgs();
     return ConnectChannel(cArgs.registrarId(), cArgs.shouldIntercept());
   }
   default:
     MOZ_ASSERT_UNREACHABLE("unknown open type");
@@ -449,17 +450,18 @@ HttpChannelParent::DoAsyncOpen(  const U
                                  const nsCString&           aPreferredAlternativeType,
                                  const uint64_t&            aTopLevelOuterContentWindowId,
                                  const TimeStamp&           aLaunchServiceWorkerStart,
                                  const TimeStamp&           aLaunchServiceWorkerEnd,
                                  const TimeStamp&           aDispatchFetchEventStart,
                                  const TimeStamp&           aDispatchFetchEventEnd,
                                  const TimeStamp&           aHandleFetchEventStart,
                                  const TimeStamp&           aHandleFetchEventEnd,
-                                 const bool&                aForceMainDocumentChannel)
+                                 const bool&                aForceMainDocumentChannel,
+                                 const TimeStamp&           aNavigationStartTimeStamp)
 {
   nsCOMPtr<nsIURI> uri = DeserializeURI(aURI);
   if (!uri) {
     // URIParams does MOZ_ASSERT if null, but we need to protect opt builds from
     // null deref here.
     return false;
   }
   nsCOMPtr<nsIURI> originalUri = DeserializeURI(aOriginalURI);
@@ -635,16 +637,18 @@ HttpChannelParent::DoAsyncOpen(  const U
 
   httpChannel->SetLaunchServiceWorkerStart(aLaunchServiceWorkerStart);
   httpChannel->SetLaunchServiceWorkerEnd(aLaunchServiceWorkerEnd);
   httpChannel->SetDispatchFetchEventStart(aDispatchFetchEventStart);
   httpChannel->SetDispatchFetchEventEnd(aDispatchFetchEventEnd);
   httpChannel->SetHandleFetchEventStart(aHandleFetchEventStart);
   httpChannel->SetHandleFetchEventEnd(aHandleFetchEventEnd);
 
+  httpChannel->SetNavigationStartTimeStamp(aNavigationStartTimeStamp);
+
   nsCOMPtr<nsIApplicationCacheChannel> appCacheChan =
     do_QueryObject(httpChannel);
   nsCOMPtr<nsIApplicationCacheService> appCacheService =
     do_GetService(NS_APPLICATIONCACHESERVICE_CONTRACTID);
 
   bool setChooseApplicationCache = chooseApplicationCache;
   if (appCacheChan && appCacheService) {
     // We might potentially want to drop this flag (that is TRUE by default)
--- a/netwerk/protocol/http/HttpChannelParent.h
+++ b/netwerk/protocol/http/HttpChannelParent.h
@@ -175,17 +175,18 @@ protected:
               const nsCString&           aPreferredAlternativeType,
               const uint64_t&            aTopLevelOuterContentWindowId,
               const TimeStamp&           aLaunchServiceWorkerStart,
               const TimeStamp&           aLaunchServiceWorkerEnd,
               const TimeStamp&           aDispatchFetchEventStart,
               const TimeStamp&           aDispatchFetchEventEnd,
               const TimeStamp&           aHandleFetchEventStart,
               const TimeStamp&           aHandleFetchEventEnd,
-              const bool&                aForceMainDocumentChannel);
+              const bool&                aForceMainDocumentChannel,
+              const TimeStamp&           aNavigationStartTimeStamp);
 
   virtual mozilla::ipc::IPCResult RecvSetPriority(const int16_t& priority) override;
   virtual mozilla::ipc::IPCResult RecvSetClassOfService(const uint32_t& cos) override;
   virtual mozilla::ipc::IPCResult RecvSetCacheTokenCachedCharset(const nsCString& charset) override;
   virtual mozilla::ipc::IPCResult RecvSuspend() override;
   virtual mozilla::ipc::IPCResult RecvResume() override;
   virtual mozilla::ipc::IPCResult RecvCancel(const nsresult& status) override;
   virtual mozilla::ipc::IPCResult RecvRedirect2Verify(const nsresult& result,
--- a/netwerk/protocol/http/nsHttpChannel.cpp
+++ b/netwerk/protocol/http/nsHttpChannel.cpp
@@ -566,28 +566,67 @@ nsHttpChannel::Connect()
         return RedirectToInterceptedChannel();
     }
 
     bool isTrackingResource = mIsTrackingResource; // is atomic
     LOG(("nsHttpChannel %p tracking resource=%d, cos=%u",
           this, isTrackingResource, mClassOfService));
 
     if (isTrackingResource) {
+        if (CheckFastBlocked()) {
+            Unused << AsyncAbort(NS_ERROR_ABORT);
+            CloseCacheEntry(false);
+            return NS_OK;
+        }
+
         AddClassFlags(nsIClassOfService::Tail);
     }
 
     if (WaitingForTailUnblock()) {
         MOZ_DIAGNOSTIC_ASSERT(!mOnTailUnblock);
         mOnTailUnblock = &nsHttpChannel::ConnectOnTailUnblock;
         return NS_OK;
     }
 
     return ConnectOnTailUnblock();
 }
 
+bool
+nsHttpChannel::CheckFastBlocked()
+{
+    LOG(("nsHttpChannel::CheckFastBlocked [this=%p]\n", this));
+
+    static bool sFastBlockInited = false;
+    static bool sIsFastBlockEnabled = false;
+    static uint32_t sFastBlockTimeout = 0;
+
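+    // Lazily wire the statics up to the prefs; the var caches keep them in
+    // sync with later pref changes.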
+    if (!sFastBlockInited) {
+        sFastBlockInited = true;
+        Preferences::AddBoolVarCache(&sIsFastBlockEnabled, "browser.fastblock.enabled");
+        Preferences::AddUintVarCache(&sFastBlockTimeout, "browser.fastblock.timeout");
+    }
+
+    TimeStamp timestamp;
+    if (NS_FAILED(GetNavigationStartTimeStamp(&timestamp))) {
+        return false;
+    }
+
+    if (!sIsFastBlockEnabled || !timestamp) {
+        return false;
+    }
+
+    TimeDuration duration = TimeStamp::NowLoRes() - timestamp;
+    if (duration.ToMilliseconds() < sFastBlockTimeout) {
+        return false;
+    }
+
+    LOG(("FastBlock timeout (%lf) [this=%p]\n", duration.ToMilliseconds(), this));
+    return true;
+}
+
 nsresult
 nsHttpChannel::ConnectOnTailUnblock()
 {
     nsresult rv;
 
     LOG(("nsHttpChannel::ConnectOnTailUnblock [this=%p]\n", this));
 
     // Consider opening a TCP connection right away.
@@ -5509,16 +5548,28 @@ nsHttpChannel::SetupReplacementChannel(n
         nsCOMPtr<nsIResumableChannel> resumableChannel(do_QueryInterface(newChannel));
         if (!resumableChannel) {
             NS_WARNING("Got asked to resume, but redirected to non-resumable channel!");
             return NS_ERROR_NOT_RESUMABLE;
         }
         resumableChannel->ResumeAt(mStartPos, mEntityID);
     }
 
+    nsCOMPtr<nsIHttpChannelInternal> internalChannel = do_QueryInterface(newChannel, &rv);
+    if (NS_SUCCEEDED(rv)) {
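+        // Carry the navigation start time over to the replacement channel
+        // so the FastBlock timeout is still measured from the original
+        // navigation.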
+        TimeStamp timestamp;
+        rv = GetNavigationStartTimeStamp(&timestamp);
+        if (NS_WARN_IF(NS_FAILED(rv))) {
+            return rv;
+        }
+        if (timestamp) {
+            Unused << internalChannel->SetNavigationStartTimeStamp(timestamp);
+        }
+    }
+
     return NS_OK;
 }
 
 nsresult
 nsHttpChannel::AsyncProcessRedirection(uint32_t redirectType)
 {
     LOG(("nsHttpChannel::AsyncProcessRedirection [this=%p type=%u]\n",
         this, redirectType));
@@ -6489,16 +6540,33 @@ nsHttpChannel::AttachStreamFilter(ipc::E
   if (httpParent) {
     return httpParent->SendAttachStreamFilter(std::move(aEndpoint));
   }
 
   extensions::StreamFilterParent::Attach(this, std::move(aEndpoint));
   return true;
 }
 
+NS_IMETHODIMP
+nsHttpChannel::GetNavigationStartTimeStamp(TimeStamp* aTimeStamp)
+{
+  LOG(("nsHttpChannel::GetNavigationStartTimeStamp %p", this));
+  MOZ_ASSERT(aTimeStamp);
+  *aTimeStamp = mNavigationStartTimeStamp;
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+nsHttpChannel::SetNavigationStartTimeStamp(TimeStamp aTimeStamp)
+{
+  LOG(("nsHttpChannel::SetNavigationStartTimeStamp %p", this));
+  mNavigationStartTimeStamp = aTimeStamp;
+  return NS_OK;
+}
+
 //-----------------------------------------------------------------------------
 // nsHttpChannel::nsISupportsPriority
 //-----------------------------------------------------------------------------
 
 NS_IMETHODIMP
 nsHttpChannel::SetPriority(int32_t value)
 {
     int16_t newValue = clamped<int32_t>(value, INT16_MIN, INT16_MAX);
--- a/netwerk/protocol/http/nsHttpChannel.h
+++ b/netwerk/protocol/http/nsHttpChannel.h
@@ -156,16 +156,18 @@ public:
     NS_IMETHOD GetSecurityInfo(nsISupports **aSecurityInfo) override;
     NS_IMETHOD AsyncOpen(nsIStreamListener *listener, nsISupports *aContext) override;
     NS_IMETHOD AsyncOpen2(nsIStreamListener *aListener) override;
     // nsIHttpChannel
     NS_IMETHOD GetEncodedBodySize(uint64_t *aEncodedBodySize) override;
     // nsIHttpChannelInternal
     NS_IMETHOD SetupFallbackChannel(const char *aFallbackKey) override;
     NS_IMETHOD SetChannelIsForDownload(bool aChannelIsForDownload) override;
+    NS_IMETHOD GetNavigationStartTimeStamp(TimeStamp* aTimeStamp) override;
+    NS_IMETHOD SetNavigationStartTimeStamp(TimeStamp aTimeStamp) override;
     // nsISupportsPriority
     NS_IMETHOD SetPriority(int32_t value) override;
     // nsIClassOfService
     NS_IMETHOD SetClassFlags(uint32_t inFlags) override;
     NS_IMETHOD AddClassFlags(uint32_t inFlags) override;
     NS_IMETHOD ClearClassFlags(uint32_t inFlags) override;
 
     // nsIResumableChannel
@@ -650,16 +652,19 @@ private:
     // A function we trigger when untail callback is triggered by our request
     // context in case this channel was tail-blocked.
     nsresult (nsHttpChannel::*mOnTailUnblock)();
     // Called on untail when tailed during AsyncOpen execution.
     nsresult AsyncOpenOnTailUnblock();
     // Called on untail when tailed because of being a tracking resource.
     nsresult ConnectOnTailUnblock();
 
+    // Check whether the current channel should be canceled by FastBlock rules.
+    bool CheckFastBlocked();
+
     nsCString mUsername;
 
     // If non-null, warnings should be reported to this object.
     RefPtr<HttpChannelSecurityWarningReporter> mWarningReporter;
 
     RefPtr<ADivertableParentChannel> mParentChannel;
 
     // True if the channel is reading from cache.
@@ -699,16 +704,18 @@ private:
     // If true then OnCacheEntryAvailable should ignore the entry, because
     // SetupTransaction removed conditional headers and decisions made in
     // OnCacheEntryCheck are no longer valid.
     bool mIgnoreCacheEntry;
     // Lock preventing OnCacheEntryCheck and SetupTransaction being called at
     // the same time.
     mozilla::Mutex mRCWNLock;
 
+    TimeStamp mNavigationStartTimeStamp;
+
 protected:
     virtual void DoNotifyListenerCleanup() override;
 
     // Override ReleaseListeners() because mChannelClassifier only exists
     // in nsHttpChannel and it will be released in ReleaseListeners().
     virtual void ReleaseListeners() override;
 
     virtual void DoAsyncAbort(nsresult aStatus) override;
--- a/netwerk/protocol/http/nsIHttpChannelInternal.idl
+++ b/netwerk/protocol/http/nsIHttpChannelInternal.idl
@@ -4,21 +4,26 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "nsISupports.idl"
 
 %{C++
 #include "nsStringFwd.h"
 #include "nsTArrayForwardDeclare.h"
 template<class T> class nsCOMArray;
+namespace mozilla {
+class TimeStamp;
+}
 %}
 [ptr] native StringArray(nsTArray<nsCString>);
 [ref] native StringArrayRef(const nsTArray<nsCString>);
 [ref] native securityMessagesArray(nsCOMArray<nsISecurityConsoleMessage>);
 
+native TimeStamp(mozilla::TimeStamp);
+
 interface nsIAsyncInputStream;
 interface nsIAsyncOutputStream;
 interface nsIPrincipal;
 interface nsIProxyInfo;
 interface nsISecurityConsoleMessage;
 interface nsISocketTransport;
 interface nsIURI;
 
@@ -319,9 +324,13 @@ interface nsIHttpChannelInternal : nsISu
 
     /**
      * If this channel was created as the result of a redirect, then this
      * value will reflect the redirect flags passed to the
      * SetupReplacementChannel() method.
      */
     [noscript, infallible]
     attribute unsigned long lastRedirectFlags;
+
+    // This is used to determine the duration since navigation started.
+    [noscript] attribute TimeStamp navigationStartTimeStamp;
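+    // (As a [noscript] attribute with a native type, this surfaces in C++
+    // as the Get/SetNavigationStartTimeStamp pair declared above in
+    // HttpBaseChannel.h.)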
+
 };
--- a/servo/components/style/Cargo.toml
+++ b/servo/components/style/Cargo.toml
@@ -35,29 +35,29 @@ cssparser = "0.24.0"
 new_debug_unreachable = "1.0"
 encoding_rs = {version = "0.7", optional = true}
 euclid = "0.18"
 fallible = { path = "../fallible" }
 fnv = "1.0"
 hashglobe = { path = "../hashglobe" }
 html5ever = {version = "0.22", optional = true}
 itertools = "0.7.6"
-itoa = "0.3"
+itoa = "0.4"
 lazy_static = "1"
 log = "0.4"
 malloc_size_of = { path = "../malloc_size_of" }
 malloc_size_of_derive = { path = "../malloc_size_of_derive" }
 matches = "0.1"
 nsstring = {path = "../../support/gecko/nsstring", optional = true}
 num_cpus = {version = "1.1.0", optional = true}
 num-integer = "0.1.32"
 num-traits = "0.1.32"
 new-ordered-float = "1.0"
 owning_ref = "0.3.3"
-parking_lot = "0.5"
+parking_lot = "0.6"
 precomputed-hash = "0.1.1"
 rayon = "1"
 selectors = { path = "../selectors" }
 serde = {version = "1.0", optional = true, features = ["derive"]}
 servo_arc = { path = "../servo_arc" }
 servo_atoms = {path = "../atoms", optional = true}
 servo_config = {path = "../config", optional = true}
 smallbitvec = "2.1.1"
--- a/servo/components/style/properties/data.py
+++ b/servo/components/style/properties/data.py
@@ -240,19 +240,18 @@ class Longhand(object):
         assert self.logical
         logical_side = None
         for s in LOGICAL_SIDES + LOGICAL_SIZES:
             if s in self.name:
                 assert not logical_side
                 logical_side = s
         assert logical_side
         physical = PHYSICAL_SIDES if logical_side in LOGICAL_SIDES else PHYSICAL_SIZES
-        return [self.name.replace(logical_side, physical_side).replace("inset-", "") \
-            for physical_side in physical]
-
+        return [self.name.replace(logical_side, physical_side).replace("inset-", "")
+                for physical_side in physical]
 
     def experimental(self, product):
         if product == "gecko":
             return bool(self.gecko_pref)
         return bool(self.servo_pref)
 
     # FIXME(emilio): Shorthand and Longhand should really share a base class.
     def explicitly_enabled_in_ua_sheets(self):
--- a/servo/components/style/properties/longhands/position.mako.rs
+++ b/servo/components/style/properties/longhands/position.mako.rs
@@ -252,42 +252,51 @@ macro_rules! impl_align_conversions {
             logical_group="max-size",
             allow_quirks=not logical,
             spec=spec % size,
             animation_value_type="MaxLength",
             servo_restyle_damage = "reflow"
         )}
     % else:
         // servo versions (no keyword support)
-        ${helpers.predefined_type(size,
-                                  "LengthOrPercentageOrAuto",
-                                  "computed::LengthOrPercentageOrAuto::Auto",
-                                  "parse_non_negative",
-                                  spec=spec % size,
-                                  allow_quirks=not logical,
-                                  animation_value_type="ComputedValue", logical = logical,
-                                  servo_restyle_damage = "reflow")}
-        ${helpers.predefined_type("min-%s" % size,
-                                  "LengthOrPercentage",
-                                  "computed::LengthOrPercentage::Length(computed::Length::new(0.))",
-                                  "parse_non_negative",
-                                  spec=spec % ("min-%s" % size),
-                                  animation_value_type="ComputedValue",
-                                  logical=logical,
-                                  allow_quirks=not logical,
-                                  servo_restyle_damage = "reflow")}
-        ${helpers.predefined_type("max-%s" % size,
-                                  "LengthOrPercentageOrNone",
-                                  "computed::LengthOrPercentageOrNone::None",
-                                  "parse_non_negative",
-                                  spec=spec % ("min-%s" % size),
-                                  animation_value_type="ComputedValue",
-                                  logical=logical,
-                                  allow_quirks=not logical,
-                                  servo_restyle_damage = "reflow")}
+        ${helpers.predefined_type(
+            size,
+            "LengthOrPercentageOrAuto",
+            "computed::LengthOrPercentageOrAuto::Auto",
+            "parse_non_negative",
+            spec=spec % size,
+            logical_group="size",
+            allow_quirks=not logical,
+            animation_value_type="ComputedValue", logical = logical,
+            servo_restyle_damage = "reflow",
+        )}
+        ${helpers.predefined_type(
+            "min-%s" % size,
+            "LengthOrPercentage",
+            "computed::LengthOrPercentage::Length(computed::Length::new(0.))",
+            "parse_non_negative",
+            spec=spec % ("min-%s" % size),
+            logical_group="min-size",
+            animation_value_type="ComputedValue",
+            logical=logical,
+            allow_quirks=not logical,
+            servo_restyle_damage = "reflow",
+        )}
+        ${helpers.predefined_type(
+            "max-%s" % size,
+            "LengthOrPercentageOrNone",
+            "computed::LengthOrPercentageOrNone::None",
+            "parse_non_negative",
+            spec=spec % ("max-%s" % size),
+            logical_group="max-size",
+            animation_value_type="ComputedValue",
+            logical=logical,
+            allow_quirks=not logical,
+            servo_restyle_damage = "reflow",
+        )}
     % endif
 % endfor
 
 ${helpers.single_keyword("box-sizing",
                          "content-box border-box",
                          extra_prefixes="moz:layout.css.prefixes.box-sizing webkit",
                          spec="https://drafts.csswg.org/css-ui/#propdef-box-sizing",
                          gecko_enum_prefix="StyleBoxSizing",
--- a/servo/components/style/properties/properties.mako.rs
+++ b/servo/components/style/properties/properties.mako.rs
@@ -1999,17 +1999,18 @@ impl PropertyDeclaration {
         }
 
         let mut ret = self.clone();
 
         % for prop in data.longhands:
         % if prop.logical:
         % for physical_property in prop.all_physical_mapped_properties():
         % if data.longhands_by_name[physical_property].specified_type() != prop.specified_type():
-            <% raise "Logical property %s should share specified value with physical property %s" % (prop.name, physical_property) %>
+            <% raise "Logical property %s should share specified value with physical property %s" % \
+                     (prop.name, physical_property) %>
         % endif
         % endfor
         % endif
         % endfor
 
         unsafe {
             let longhand_id = *(&mut ret as *mut _ as *mut LonghandId);
 
--- a/servo/components/style/servo/selector_parser.rs
+++ b/servo/components/style/servo/selector_parser.rs
@@ -9,17 +9,17 @@
 use {Atom, CaseSensitivityExt, LocalName, Namespace, Prefix};
 use attr::{AttrIdentifier, AttrValue};
 use cssparser::{serialize_identifier, CowRcStr, Parser as CssParser, SourceLocation, ToCss};
 use dom::{OpaqueNode, TElement, TNode};
 use element_state::{DocumentState, ElementState};
 use fnv::FnvHashMap;
 use invalidation::element::document_state::InvalidationMatchingData;
 use invalidation::element::element_wrapper::ElementSnapshot;
-use properties::{CascadeFlags, ComputedValues, PropertyFlags};
+use properties::{ComputedValues, PropertyFlags};
 use properties::longhands::display::computed_value::T as Display;
 use selector_parser::{AttrValue as SelectorAttrValue, PseudoElementCascadeType, SelectorParser};
 use selectors::attr::{AttrSelectorOperation, CaseSensitivity, NamespaceConstraint};
 use selectors::parser::{SelectorParseErrorKind, Visit};
 use selectors::visitor::SelectorVisitor;
 use std::fmt;
 use std::mem;
 use std::ops::{Deref, DerefMut};
--- a/servo/components/style/shared_lock.rs
+++ b/servo/components/style/shared_lock.rs
@@ -6,16 +6,18 @@
 
 #[cfg(feature = "gecko")]
 use atomic_refcell::{AtomicRef, AtomicRefCell, AtomicRefMut};
 #[cfg(feature = "servo")]
 use parking_lot::RwLock;
 use servo_arc::Arc;
 use std::cell::UnsafeCell;
 use std::fmt;
+#[cfg(feature = "servo")]
+use std::mem;
 #[cfg(feature = "gecko")]
 use std::ptr;
 use str::{CssString, CssStringWriter};
 use stylesheets::Origin;
 
 /// A shared read/write lock that can protect multiple objects.
 ///
 /// In Gecko builds, we don't need the blocking behavior, just the safety. As
@@ -69,30 +71,30 @@ impl SharedRwLock {
             shared_lock: self.clone(),
             data: UnsafeCell::new(data),
         }
     }
 
     /// Obtain the lock for reading (servo).
     #[cfg(feature = "servo")]
     pub fn read(&self) -> SharedRwLockReadGuard {
-        self.arc.raw_read();
+        mem::forget(self.arc.read());
         SharedRwLockReadGuard(self)
     }
 
     /// Obtain the lock for reading (gecko).
     #[cfg(feature = "gecko")]
     pub fn read(&self) -> SharedRwLockReadGuard {
         SharedRwLockReadGuard(self.cell.borrow())
     }
 
     /// Obtain the lock for writing (servo).
     #[cfg(feature = "servo")]
     pub fn write(&self) -> SharedRwLockWriteGuard {
-        self.arc.raw_write();
+        mem::forget(self.arc.write());
         SharedRwLockWriteGuard(self)
     }
 
     /// Obtain the lock for writing (gecko).
     #[cfg(feature = "gecko")]
     pub fn write(&self) -> SharedRwLockWriteGuard {
         SharedRwLockWriteGuard(self.cell.borrow_mut())
     }
@@ -102,34 +104,34 @@ impl SharedRwLock {
 #[cfg(feature = "servo")]
 pub struct SharedRwLockReadGuard<'a>(&'a SharedRwLock);
 /// Proof that a shared lock was obtained for reading (gecko).
 #[cfg(feature = "gecko")]
 pub struct SharedRwLockReadGuard<'a>(AtomicRef<'a, SomethingZeroSizedButTyped>);
 #[cfg(feature = "servo")]
 impl<'a> Drop for SharedRwLockReadGuard<'a> {
     fn drop(&mut self) {
-        // Unsafe: self.lock is private to this module, only ever set after `raw_read()`,
+        // Unsafe: self.lock is private to this module, only ever set after `read()`,
         // and never copied or cloned (see `compile_time_assert` below).
-        unsafe { self.0.arc.raw_unlock_read() }
+        unsafe { self.0.arc.force_unlock_read() }
     }
 }
 
 /// Proof that a shared lock was obtained for writing (servo).
 #[cfg(feature = "servo")]
 pub struct SharedRwLockWriteGuard<'a>(&'a SharedRwLock);
 /// Proof that a shared lock was obtained for writing (gecko).
 #[cfg(feature = "gecko")]
 pub struct SharedRwLockWriteGuard<'a>(AtomicRefMut<'a, SomethingZeroSizedButTyped>);
 #[cfg(feature = "servo")]
 impl<'a> Drop for SharedRwLockWriteGuard<'a> {
     fn drop(&mut self) {
-        // Unsafe: self.lock is private to this module, only ever set after `raw_write()`,
+        // Unsafe: self.lock is private to this module, only ever set after `write()`,
         // and never copied or cloned (see `compile_time_assert` below).
-        unsafe { self.0.arc.raw_unlock_write() }
+        unsafe { self.0.arc.force_unlock_write() }
     }
 }
 
 /// Data protected by a shared lock.
 pub struct Locked<T> {
     shared_lock: SharedRwLock,
     data: UnsafeCell<T>,
 }
--- a/servo/components/style/values/specified/source_size_list.rs
+++ b/servo/components/style/values/specified/source_size_list.rs
@@ -13,16 +13,17 @@ use parser::{Parse, ParserContext};
 use selectors::context::QuirksMode;
 use style_traits::ParseError;
 use values::computed::{self, ToComputedValue};
 use values::specified::{Length, NoCalcLength, ViewportPercentageLength};
 
 /// A value for a `<source-size>`:
 ///
 /// https://html.spec.whatwg.org/multipage/#source-size
+#[derive(Debug)]
 pub struct SourceSize {
     condition: MediaCondition,
     value: Length,
 }
 
 impl Parse for SourceSize {
     fn parse<'i, 't>(
         context: &ParserContext,
@@ -33,30 +34,36 @@ impl Parse for SourceSize {
 
         Ok(Self { condition, value })
     }
 }
 
 /// A value for a `<source-size-list>`:
 ///
 /// https://html.spec.whatwg.org/multipage/#source-size-list
+#[derive(Debug)]
 pub struct SourceSizeList {
     source_sizes: Vec<SourceSize>,
     value: Option<Length>,
 }
 
 impl SourceSizeList {
     /// Create an empty `SourceSizeList`, which can be used as a fall-back.
     pub fn empty() -> Self {
         Self {
             source_sizes: vec![],
             value: None,
         }
     }
 
+    /// Set the content of `value`, which is used as a fall-back during evaluation.
+    pub fn set_fallback_value(&mut self, width: Option<Length>) {
+        self.value = width;
+    }
+
     /// Evaluate this <source-size-list> to get the final viewport length.
     pub fn evaluate(&self, device: &Device, quirks_mode: QuirksMode) -> Au {
         let matching_source_size = self.source_sizes
             .iter()
             .find(|source_size| source_size.condition.matches(device, quirks_mode));
 
         computed::Context::for_media_query_evaluation(device, quirks_mode, |context| {
             match matching_source_size {
--- a/servo/ports/geckolib/Cargo.toml
+++ b/servo/ports/geckolib/Cargo.toml
@@ -16,14 +16,14 @@ gecko_debug = ["style/gecko_debug"]
 [dependencies]
 atomic_refcell = "0.1"
 cssparser = "0.24.0"
 cstr = "0.1.2"
 libc = "0.2"
 log = {version = "0.4", features = ["release_max_level_info"]}
 malloc_size_of = {path = "../../components/malloc_size_of"}
 nsstring = {path = "../../support/gecko/nsstring"}
-parking_lot = "0.5"
+parking_lot = "0.6"
 selectors = {path = "../../components/selectors"}
 servo_arc = {path = "../../components/servo_arc"}
 smallvec = "0.6"
 style = {path = "../../components/style", features = ["gecko"]}
 style_traits = {path = "../../components/style_traits"}
deleted file mode 100644
--- a/third_party/rust/itoa-0.3.1/.cargo-checksum.json
+++ /dev/null
@@ -1,1 +0,0 @@
-{"files":{".travis.yml":"a2b867b2e28af9bde20a669a6ff0f366ecc5150b89314cd7ec97ed95bb427547","Cargo.toml":"82b9e862ca8c12656987883e7339d992b770b2a8b23a9cd9ceb5ae0083252687","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"e18259ab3aa7f39a194795bdad8039b3c5fd544f6dd922526c9326c44842b76d","README.md":"f2b713cdc7ade373c4a733c09693cecd288201ec76bde725de65b4ff74530284","benches/bench.rs":"3e7075b70a899ab1e926403856afeb04b34a254b234bbca834f6136a703008a3","performance.png":"a6e70647a44084e65cedaaff3633b0624b37e0f0a84457362c1e078fb56c877d","src/lib.rs":"16169ef9fc6c6a6521daff8fefdfc1b54f4ce145763b9733308d6631dad4d14e","tests/test.rs":"9c7629f758e2833757c15617cd8c1ec2a2fb8437865d05b5d20abb07279d35ea"},"package":"eb2f404fbc66fd9aac13e998248505e7ecb2ad8e44ab6388684c5fb11c6c251c"}
\ No newline at end of file
deleted file mode 100644
--- a/third_party/rust/itoa-0.3.1/.travis.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-sudo: false
-
-language: rust
-
-rust:
-  - nightly
deleted file mode 100644
--- a/third_party/rust/itoa-0.3.1/Cargo.toml
+++ /dev/null
@@ -1,9 +0,0 @@
-[package]
-name = "itoa"
-version = "0.3.1"
-authors = ["David Tolnay <dtolnay@gmail.com>"]
-license = "MIT/Apache-2.0"
-description = "Fast functions for printing integer primitives to an io::Write"
-repository = "https://github.com/dtolnay/itoa"
-documentation = "https://github.com/dtolnay/itoa"
-categories = ["value-formatting"]
deleted file mode 100644
--- a/third_party/rust/itoa-0.3.1/LICENSE-APACHE
+++ /dev/null
@@ -1,201 +0,0 @@
-                              Apache License
-                        Version 2.0, January 2004
-                     http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-   "License" shall mean the terms and conditions for use, reproduction,
-   and distribution as defined by Sections 1 through 9 of this document.
-
-   "Licensor" shall mean the copyright owner or entity authorized by
-   the copyright owner that is granting the License.
-
-   "Legal Entity" shall mean the union of the acting entity and all
-   other entities that control, are controlled by, or are under common
-   control with that entity. For the purposes of this definition,
-   "control" means (i) the power, direct or indirect, to cause the
-   direction or management of such entity, whether by contract or
-   otherwise, or (ii) ownership of fifty percent (50%) or more of the
-   outstanding shares, or (iii) beneficial ownership of such entity.
-
-   "You" (or "Your") shall mean an individual or Legal Entity
-   exercising permissions granted by this License.
-
-   "Source" form shall mean the preferred form for making modifications,
-   including but not limited to software source code, documentation
-   source, and configuration files.
-
-   "Object" form shall mean any form resulting from mechanical
-   transformation or translation of a Source form, including but
-   not limited to compiled object code, generated documentation,
-   and conversions to other media types.
-
-   "Work" shall mean the work of authorship, whether in Source or
-   Object form, made available under the License, as indicated by a
-   copyright notice that is included in or attached to the work
-   (an example is provided in the Appendix below).
-
-   "Derivative Works" shall mean any work, whether in Source or Object
-   form, that is based on (or derived from) the Work and for which the
-   editorial revisions, annotations, elaborations, or other modifications
-   represent, as a whole, an original work of authorship. For the purposes
-   of this License, Derivative Works shall not include works that remain
-   separable from, or merely link (or bind by name) to the interfaces of,
-   the Work and Derivative Works thereof.
-
-   "Contribution" shall mean any work of authorship, including
-   the original version of the Work and any modifications or additions
-   to that Work or Derivative Works thereof, that is intentionally
-   submitted to Licensor for inclusion in the Work by the copyright owner
-   or by an individual or Legal Entity authorized to submit on behalf of
-   the copyright owner. For the purposes of this definition, "submitted"
-   means any form of electronic, verbal, or written communication sent
-   to the Licensor or its representatives, including but not limited to
-   communication on electronic mailing lists, source code control systems,
-   and issue tracking systems that are managed by, or on behalf of, the
-   Licensor for the purpose of discussing and improving the Work, but
-   excluding communication that is conspicuously marked or otherwise
-   designated in writing by the copyright owner as "Not a Contribution."
-
-   "Contributor" shall mean Licensor and any individual or Legal Entity
-   on behalf of whom a Contribution has been received by Licensor and
-   subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   copyright license to reproduce, prepare Derivative Works of,
-   publicly display, publicly perform, sublicense, and distribute the
-   Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   (except as stated in this section) patent license to make, have made,
-   use, offer to sell, sell, import, and otherwise transfer the Work,
-   where such license applies only to those patent claims licensable
-   by such Contributor that are necessarily infringed by their
-   Contribution(s) alone or by combination of their Contribution(s)
-   with the Work to which such Contribution(s) was submitted. If You
-   institute patent litigation against any entity (including a
-   cross-claim or counterclaim in a lawsuit) alleging that the Work
-   or a Contribution incorporated within the Work constitutes direct
-   or contributory patent infringement, then any patent licenses
-   granted to You under this License for that Work shall terminate
-   as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
-   Work or Derivative Works thereof in any medium, with or without
-   modifications, and in Source or Object form, provided that You
-   meet the following conditions:
-
-   (a) You must give any other recipients of the Work or
-       Derivative Works a copy of this License; and
-
-   (b) You must cause any modified files to carry prominent notices
-       stating that You changed the files; and
-
-   (c) You must retain, in the Source form of any Derivative Works
-       that You distribute, all copyright, patent, trademark, and
-       attribution notices from the Source form of the Work,
-       excluding those notices that do not pertain to any part of
-       the Derivative Works; and
-
-   (d) If the Work includes a "NOTICE" text file as part of its
-       distribution, then any Derivative Works that You distribute must
-       include a readable copy of the attribution notices contained
-       within such NOTICE file, excluding those notices that do not
-       pertain to any part of the Derivative Works, in at least one
-       of the following places: within a NOTICE text file distributed
-       as part of the Derivative Works; within the Source form or
-       documentation, if provided along with the Derivative Works; or,
-       within a display generated by the Derivative Works, if and
-       wherever such third-party notices normally appear. The contents
-       of the NOTICE file are for informational purposes only and
-       do not modify the License. You may add Your own attribution
-       notices within Derivative Works that You distribute, alongside
-       or as an addendum to the NOTICE text from the Work, provided
-       that such additional attribution notices cannot be construed
-       as modifying the License.
-
-   You may add Your own copyright statement to Your modifications and
-   may provide additional or different license terms and conditions
-   for use, reproduction, or distribution of Your modifications, or
-   for any such Derivative Works as a whole, provided Your use,
-   reproduction, and distribution of the Work otherwise complies with
-   the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
-   any Contribution intentionally submitted for inclusion in the Work
-   by You to the Licensor shall be under the terms and conditions of
-   this License, without any additional terms or conditions.
-   Notwithstanding the above, nothing herein shall supersede or modify
-   the terms of any separate license agreement you may have executed
-   with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
-   names, trademarks, service marks, or product names of the Licensor,
-   except as required for reasonable and customary use in describing the
-   origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
-   agreed to in writing, Licensor provides the Work (and each
-   Contributor provides its Contributions) on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-   implied, including, without limitation, any warranties or conditions
-   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-   PARTICULAR PURPOSE. You are solely responsible for determining the
-   appropriateness of using or redistributing the Work and assume any
-   risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
-   whether in tort (including negligence), contract, or otherwise,
-   unless required by applicable law (such as deliberate and grossly
-   negligent acts) or agreed to in writing, shall any Contributor be
-   liable to You for damages, including any direct, indirect, special,
-   incidental, or consequential damages of any character arising as a
-   result of this License or out of the use or inability to use the
-   Work (including but not limited to damages for loss of goodwill,
-   work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses), even if such Contributor
-   has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
-   the Work or Derivative Works thereof, You may choose to offer,
-   and charge a fee for, acceptance of support, warranty, indemnity,
-   or other liability obligations and/or rights consistent with this
-   License. However, in accepting such obligations, You may act only
-   on Your own behalf and on Your sole responsibility, not on behalf
-   of any other Contributor, and only if You agree to indemnify,
-   defend, and hold each Contributor harmless for any liability
-   incurred by, or claims asserted against, such Contributor by reason
-   of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work.
-
-   To apply the Apache License to your work, attach the following
-   boilerplate notice, with the fields enclosed by brackets "[]"
-   replaced with your own identifying information. (Don't include
-   the brackets!)  The text should be enclosed in the appropriate
-   comment syntax for the file format. We also recommend that a
-   file or class name and description of purpose be included on the
-   same "printed page" as the copyright notice for easier
-   identification within third-party archives.
-
-Copyright [yyyy] [name of copyright owner]
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-	http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
deleted file mode 100644
--- a/third_party/rust/itoa-0.3.1/LICENSE-MIT
+++ /dev/null
@@ -1,25 +0,0 @@
-Copyright (c) 2016 Itoa Developers
-
-Permission is hereby granted, free of charge, to any
-person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the
-Software without restriction, including without
-limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software
-is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
deleted file mode 100644
--- a/third_party/rust/itoa-0.3.1/README.md
+++ /dev/null
@@ -1,70 +0,0 @@
-itoa
-====
-
-[![Build Status](https://api.travis-ci.org/dtolnay/itoa.svg?branch=master)](https://travis-ci.org/dtolnay/itoa)
-[![Latest Version](https://img.shields.io/crates/v/itoa.svg)](https://crates.io/crates/itoa)
-
-This crate provides fast functions for printing integer primitives to an
-[`io::Write`](https://doc.rust-lang.org/std/io/trait.Write.html). The
-implementation comes straight from
-[libcore](https://github.com/rust-lang/rust/blob/b8214dc6c6fc20d0a660fb5700dca9ebf51ebe89/src/libcore/fmt/num.rs#L201-L254)
-but avoids the performance penalty of going through
-[`fmt::Formatter`](https://doc.rust-lang.org/std/fmt/struct.Formatter.html).
-
-See also [`dtoa`](https://github.com/dtolnay/dtoa) for printing floating point
-primitives.
-
-## Performance
-
-![performance](https://raw.githubusercontent.com/dtolnay/itoa/master/performance.png)
-
-## Functions
-
-```rust
-extern crate itoa;
-
-// write to a vector or other io::Write
-let mut buf = Vec::new();
-itoa::write(&mut buf, 128u64)?;
-println!("{:?}", buf);
-
-// write to a stack buffer
-let mut bytes = [b'\0'; 20];
-let n = itoa::write(&mut bytes[..], 128u64)?;
-println!("{:?}", &bytes[..n]);
-```
-
-The function signature is:
-
-```rust
-fn write<W: io::Write, V: itoa::Integer>(writer: W, value: V) -> io::Result<usize>
-```
-
-where `itoa::Integer` is implemented for `i8`, `u8`, `i16`, `u16`, `i32`, `u32`,
-`i64`, `u64`, `isize` and `usize`. The return value gives the number of bytes
-written.
-
-## Dependency
-
-Itoa is available on [crates.io](https://crates.io/crates/itoa). Use the
-following in `Cargo.toml`:
-
-```toml
-[dependencies]
-itoa = "0.3"
-```
-
-## License
-
-Licensed under either of
-
- * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
- * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
-
-at your option.
-
-### Contribution
-
-Unless you explicitly state otherwise, any contribution intentionally submitted
-for inclusion in itoa by you, as defined in the Apache-2.0 license, shall be
-dual licensed as above, without any additional terms or conditions.
deleted file mode 100644
--- a/third_party/rust/itoa-0.3.1/benches/bench.rs
+++ /dev/null
@@ -1,52 +0,0 @@
-#![feature(test)]
-#![allow(non_snake_case)]
-
-extern crate itoa;
-extern crate test;
-
-macro_rules! benches {
-    ($($name:ident($value:expr),)*) => {
-        mod bench_itoa {
-            use test::{Bencher, black_box};
-            $(
-                #[bench]
-                fn $name(b: &mut Bencher) {
-                    use itoa;
-
-                    let mut buf = Vec::with_capacity(20);
-
-                    b.iter(|| {
-                        buf.clear();
-                        itoa::write(&mut buf, black_box($value)).unwrap()
-                    });
-                }
-            )*
-        }
-
-        mod bench_fmt {
-            use test::{Bencher, black_box};
-            $(
-                #[bench]
-                fn $name(b: &mut Bencher) {
-                    use std::io::Write;
-
-                    let mut buf = Vec::with_capacity(20);
-
-                    b.iter(|| {
-                        buf.clear();
-                        write!(&mut buf, "{}", black_box($value)).unwrap()
-                    });
-                }
-            )*
-        }
-    }
-}
-
-benches!(
-    bench_0u64(0u64),
-    bench_HALFu64(<u32>::max_value() as u64),
-    bench_MAXu64(<u64>::max_value()),
-
-    bench_0i16(0i16),
-    bench_MINi16(<i16>::min_value()),
-);
deleted file mode 100644
index 1e23b7123d6aa8bf373789ae5340c167bfe278b0..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
GIT binary patch
literal 0
Hc$@<O00001
deleted file mode 100644
--- a/third_party/rust/itoa-0.3.1/src/lib.rs
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2016 Itoa Developers
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use std::{io, mem, ptr, slice};
-
-#[inline]
-pub fn write<W: io::Write, V: Integer>(wr: W, value: V) -> io::Result<usize> {
-    value.write(wr)
-}
-
-pub trait Integer {
-    fn write<W: io::Write>(self, W) -> io::Result<usize>;
-}
-
-const DEC_DIGITS_LUT: &'static[u8] =
-    b"0001020304050607080910111213141516171819\
-      2021222324252627282930313233343536373839\
-      4041424344454647484950515253545556575859\
-      6061626364656667686970717273747576777879\
-      8081828384858687888990919293949596979899";
-
-// Adaptation of the original implementation at
-// https://github.com/rust-lang/rust/blob/b8214dc6c6fc20d0a660fb5700dca9ebf51ebe89/src/libcore/fmt/num.rs#L188-L266
-macro_rules! impl_Integer {
-    ($($t:ident),* as $conv_fn:ident) => ($(
-    impl Integer for $t {
-        #[allow(unused_comparisons)]
-        fn write<W: io::Write>(self, mut wr: W) -> io::Result<usize> {
-            let is_nonnegative = self >= 0;
-            let mut n = if is_nonnegative {
-                self as $conv_fn
-            } else {
-                try!(wr.write_all(b"-"));
-                // convert the negative num to positive by summing 1 to it's 2 complement
-                (!(self as $conv_fn)).wrapping_add(1)
-            };
-            let mut buf: [u8; 20] = unsafe { mem::uninitialized() };
-            let mut curr = buf.len() as isize;
-            let buf_ptr = buf.as_mut_ptr();
-            let lut_ptr = DEC_DIGITS_LUT.as_ptr();
-
-            unsafe {
-                // eagerly decode 4 characters at a time
-                if <$t>::max_value() as u64 >= 10000 {
-                    while n >= 10000 {
-                        let rem = (n % 10000) as isize;
-                        n /= 10000;
-
-                        let d1 = (rem / 100) << 1;
-                        let d2 = (rem % 100) << 1;
-                        curr -= 4;
-                        ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
-                        ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2);
-                    }
-                }
-
-                // if we reach here numbers are <= 9999, so at most 4 chars long
-                let mut n = n as isize; // possibly reduce 64bit math
-
-                // decode 2 more chars, if > 2 chars
-                if n >= 100 {
-                    let d1 = (n % 100) << 1;
-                    n /= 100;
-                    curr -= 2;
-                    ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
-                }
-
-                // decode last 1 or 2 chars
-                if n < 10 {
-                    curr -= 1;
-                    *buf_ptr.offset(curr) = (n as u8) + 48;
-                } else {
-                    let d1 = n << 1;
-                    curr -= 2;
-                    ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
-                }
-            }
-
-            let mut len = buf.len() - curr as usize;
-            try!(wr.write_all(unsafe { slice::from_raw_parts(buf_ptr.offset(curr), len) }));
-            if !is_nonnegative {
-                len += 1;
-            }
-            Ok(len)
-        }
-    })*);
-}
-
-impl_Integer!(i8, u8, i16, u16, i32, u32 as u32);
-impl_Integer!(i64, u64 as u64);
-#[cfg(target_pointer_width = "16")]
-impl_Integer!(isize, usize as u16);
-#[cfg(target_pointer_width = "32")]
-impl_Integer!(isize, usize as u32);
-#[cfg(target_pointer_width = "64")]
-impl_Integer!(isize, usize as u64);
deleted file mode 100644
--- a/third_party/rust/itoa-0.3.1/tests/test.rs
+++ /dev/null
@@ -1,25 +0,0 @@
-#![allow(non_snake_case)]
-
-extern crate itoa;
-
-macro_rules! test {
-    ($($name:ident($value:expr, $expected:expr),)*) => {
-        $(
-            #[test]
-            fn $name() {
-                let mut buf = [b'\0'; 20];
-                let len = itoa::write(&mut buf[..], $value).unwrap();
-                assert_eq!(&buf[0..len], $expected.as_bytes());
-            }
-        )*
-    }
-}
-
-test!(
-    test_0u64(0u64, "0"),
-    test_HALFu64(<u32>::max_value() as u64, "4294967295"),
-    test_MAXu64(<u64>::max_value(), "18446744073709551615"),
-
-    test_0i16(0i16, "0"),
-    test_MINi16(<i16>::min_value(), "-32768"),
-);
new file mode 100644
--- /dev/null
+++ b/third_party/rust/lock_api/.cargo-checksum.json
@@ -0,0 +1,1 @@
+{"files":{"Cargo.toml":"9a369b3ff1ef121205879d10ba966bbccb81d6461843a75460f4cfcb82c3c849","src/lib.rs":"62142edfbe7e6cc04473aef25ab42553f02e4455ce567cb4303f0bbfee94d87d","src/mutex.rs":"d7ea4e06ae1a2d470e23bdfe73f52272f7c8b4231fe62fd9846e2a3773dcface","src/remutex.rs":"2fc11fec14af2886e507104e501caf6fddcf199ee7db38eabcd35ab9cf430d89","src/rwlock.rs":"45b940a9d046c7ee0c295237abac45f694c32a247724d93e3879a79f6e60b399"},"package":"949826a5ccf18c1b3a7c3d57692778d21768b79e46eb9dd07bfc4c2160036c54"}
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/third_party/rust/lock_api/Cargo.toml
@@ -0,0 +1,31 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "lock_api"
+version = "0.1.3"
+authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
+description = "Wrappers to create fully-featured Mutex and RwLock types. Compatible with no_std."
+keywords = ["mutex", "rwlock", "lock", "no_std"]
+categories = ["concurrency", "no-std"]
+license = "Apache-2.0/MIT"
+repository = "https://github.com/Amanieu/parking_lot"
+[dependencies.owning_ref]
+version = "0.3"
+optional = true
+
+[dependencies.scopeguard]
+version = "0.3"
+default-features = false
+
+[features]
+nightly = []
new file mode 100644
--- /dev/null
+++ b/third_party/rust/lock_api/src/lib.rs
@@ -0,0 +1,106 @@
+// Copyright 2018 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+//! This library provides type-safe and fully-featured `Mutex` and `RwLock`
+//! types which wrap a simple raw mutex or rwlock type. This has several
+//! benefits: not only does it eliminate a large portion of the work in
+//! implementing custom lock types, it also allows users to write code which is
+//! generic with regards to different lock implementations.
+//!
+//! Basic usage of this crate is very straightforward:
+//!
+//! 1. Create a raw lock type. This should only contain the lock state, not any
+//!    data protected by the lock.
+//! 2. Implement the `RawMutex` trait for your custom lock type.
+//! 3. Export your mutex as a type alias for `lock_api::Mutex`, and
+//!    your mutex guard as a type alias for `lock_api::MutexGuard`.
+//!    See the [example](#example) below for details.
+//!
+//! This process is similar for RwLocks, except that two guards need to be
+//! exported instead of one. (Or three guards if your type supports upgradable read
+//! locks; see [extension traits](#extension-traits) below for details.)
+//!
+//! # Example
+//!
+//! ```
+//! use lock_api::{RawMutex, Mutex, GuardSend};
+//! use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT};
+//!
+//! // 1. Define our raw lock type
+//! pub struct RawSpinlock(AtomicBool);
+//!
+//! // 2. Implement RawMutex for this type
+//! unsafe impl RawMutex for RawSpinlock {
+//!     const INIT: RawSpinlock = RawSpinlock(ATOMIC_BOOL_INIT);
+//!
+//!     // A spinlock guard can be sent to another thread and unlocked there.
+//!     type GuardMarker = GuardSend;
+//!
+//!     fn lock(&self) {
+//!         // Note: This isn't the best way of implementing a spinlock, but it
+//!         // suffices for the sake of this example.
+//!         while !self.try_lock() {}
+//!     }
+//!
+//!     fn try_lock(&self) -> bool {
+//!         // `swap` returns the previous value, so the lock was free (and is
+//!         // now ours) iff that value was `false`.
+//!         !self.0.swap(true, Ordering::Acquire)
+//!     }
+//!
+//!     fn unlock(&self) {
+//!         self.0.store(false, Ordering::Release);
+//!     }
+//! }
+//!
+//! // 3. Export the wrappers. These are the types that your users will actually use.
+//! pub type Spinlock<T> = lock_api::Mutex<RawSpinlock, T>;
+//! pub type SpinlockGuard<'a, T> = lock_api::MutexGuard<'a, RawSpinlock, T>;
+//! ```
+//!
+//! # Extension traits
+//!
+//! In addition to basic locking & unlocking functionality, you have the option
+//! of exposing additional functionality in your lock types by implementing
+//! additional traits for it. Examples of extension features include:
+//!
+//! - Fair unlocking (`RawMutexFair`, `RawRwLockFair`)
+//! - Lock timeouts (`RawMutexTimed`, `RawRwLockTimed`)
+//! - Downgradable write locks (`RawRwLockDowngradable`)
+//! - Recursive read locks (`RawRwLockRecursive`)
+//! - Upgradable read locks (`RawRwLockUpgrade`)
+//!
+//! The `Mutex` and `RwLock` wrappers will automatically expose this additional
+//! functionality if the raw lock type implements these extension traits.
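+//!
+//! For example, extending the `RawSpinlock` from the example above with fair
+//! unlocking takes a single additional trait impl (a sketch: a spinlock has no
+//! wait queue, so its fair unlock degenerates to a plain unlock):
+//!
+//! ```ignore
+//! use lock_api::RawMutexFair;
+//!
+//! unsafe impl RawMutexFair for RawSpinlock {
+//!     fn unlock_fair(&self) {
+//!         // There is no queue to hand the lock to, so this matches `unlock`.
+//!         self.0.store(false, Ordering::Release);
+//!     }
+//! }
+//! ```
+//!
+//! With that impl in place, `SpinlockGuard` automatically gains `unlock_fair`,
+//! `unlocked_fair` and `bump`.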
+//!
+//! # Cargo features
+//!
+//! This crate supports two cargo features:
+//!
+//! - `owning_ref`: Allows your lock types to be used with the `owning_ref` crate.
+//! - `nightly`: Enables nightly-only features. At the moment the only such
+//!   feature is `const fn` constructors for lock types.
+
+#![no_std]
+#![warn(missing_docs)]
+#![cfg_attr(feature = "nightly", feature(const_fn))]
+
+#[macro_use]
+extern crate scopeguard;
+
+#[cfg(feature = "owning_ref")]
+extern crate owning_ref;
+
+/// Marker type which indicates that the Guard type for a lock is `Send`.
+pub struct GuardSend(());
+
+/// Marker type which indicates that the Guard type for a lock is not `Send`.
+pub struct GuardNoSend(*mut ());
+
+mod mutex;
+pub use mutex::*;
+
+mod remutex;
+pub use remutex::*;
+
+mod rwlock;
+pub use rwlock::*;
new file mode 100644
--- /dev/null
+++ b/third_party/rust/lock_api/src/mutex.rs
@@ -0,0 +1,496 @@
+// Copyright 2018 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::cell::UnsafeCell;
+use core::fmt;
+use core::marker::PhantomData;
+use core::mem;
+use core::ops::{Deref, DerefMut};
+
+#[cfg(feature = "owning_ref")]
+use owning_ref::StableAddress;
+
+/// Basic operations for a mutex.
+///
+/// Types implementing this trait can be used by `Mutex` to form a safe and
+/// fully-functioning mutex type.
+///
+/// # Safety
+///
+/// Implementations of this trait must ensure that the mutex is actually
+/// exclusive: a lock can't be acquired while the mutex is already locked.
+pub unsafe trait RawMutex {
+    /// Initial value for an unlocked mutex.
+    const INIT: Self;
+
+    /// Marker type which determines whether a lock guard should be `Send`. Use
+    /// one of the `GuardSend` or `GuardNoSend` helper types here.
+    type GuardMarker;
+
+    /// Acquires this mutex, blocking the current thread until it is able to do so.
+    fn lock(&self);
+
+    /// Attempts to acquire this mutex without blocking.
+    fn try_lock(&self) -> bool;
+
+    /// Unlocks this mutex.
+    fn unlock(&self);
+}
+
+/// Additional methods for mutexes which support fair unlocking.
+///
+/// Fair unlocking means that a lock is handed directly over to the next waiting
+/// thread if there is one, without giving other threads the opportunity to
+/// "steal" the lock in the meantime. This is typically slower than unfair
+/// unlocking, but may be necessary in certain circumstances.
+pub unsafe trait RawMutexFair: RawMutex {
+    /// Unlocks this mutex using a fair unlock protocol.
+    fn unlock_fair(&self);
+
+    /// Temporarily yields the mutex to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `lock`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    fn bump(&self) {
+        self.unlock_fair();
+        self.lock();
+    }
+}
+
+/// Additional methods for mutexes which support locking with timeouts.
+///
+/// The `Duration` and `Instant` types are specified as associated types so that
+/// this trait is usable even in `no_std` environments.
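+///
+/// For example, an implementation layered on `std` would typically choose (a
+/// sketch; nothing in this trait mandates these concrete types):
+///
+/// ```ignore
+/// type Duration = std::time::Duration;
+/// type Instant = std::time::Instant;
+/// ```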
+pub unsafe trait RawMutexTimed: RawMutex {
+    /// Duration type used for `try_lock_for`.
+    type Duration;
+
+    /// Instant type used for `try_lock_until`.
+    type Instant;
+
+    /// Attempts to acquire this lock until a timeout is reached.
+    fn try_lock_for(&self, timeout: Self::Duration) -> bool;
+
+    /// Attempts to acquire this lock until a timeout is reached.
+    fn try_lock_until(&self, timeout: Self::Instant) -> bool;
+}
+
+/// A mutual exclusion primitive useful for protecting shared data
+///
+/// This mutex will block threads waiting for the lock to become available. The
+/// mutex can also be statically initialized or created via a `new`
+/// constructor. Each mutex has a type parameter which represents the data that
+/// it is protecting. The data can only be accessed through the RAII guards
+/// returned from `lock` and `try_lock`, which guarantees that the data is only
+/// ever accessed when the mutex is locked.
+pub struct Mutex<R: RawMutex, T: ?Sized> {
+    raw: R,
+    data: UnsafeCell<T>,
+}
+
+unsafe impl<R: RawMutex + Send, T: ?Sized + Send> Send for Mutex<R, T> {}
+unsafe impl<R: RawMutex + Sync, T: ?Sized + Send> Sync for Mutex<R, T> {}
+
+impl<R: RawMutex, T> Mutex<R, T> {
+    /// Creates a new mutex in an unlocked state ready for use.
+    #[cfg(feature = "nightly")]
+    #[inline]
+    pub const fn new(val: T) -> Mutex<R, T> {
+        Mutex {
+            data: UnsafeCell::new(val),
+            raw: R::INIT,
+        }
+    }
+
+    /// Creates a new mutex in an unlocked state ready for use.
+    #[cfg(not(feature = "nightly"))]
+    #[inline]
+    pub fn new(val: T) -> Mutex<R, T> {
+        Mutex {
+            data: UnsafeCell::new(val),
+            raw: R::INIT,
+        }
+    }
+
+    /// Consumes this mutex, returning the underlying data.
+    #[inline]
+    #[allow(unused_unsafe)]
+    pub fn into_inner(self) -> T {
+        unsafe { self.data.into_inner() }
+    }
+}
+
+impl<R: RawMutex, T: ?Sized> Mutex<R, T> {
+    #[inline]
+    fn guard(&self) -> MutexGuard<R, T> {
+        MutexGuard {
+            mutex: self,
+            marker: PhantomData,
+        }
+    }
+
+    /// Acquires a mutex, blocking the current thread until it is able to do so.
+    ///
+    /// This function will block the local thread until it is available to acquire
+    /// the mutex. Upon returning, the thread is the only thread with the mutex
+    /// held. An RAII guard is returned to allow scoped unlock of the lock. When
+    /// the guard goes out of scope, the mutex will be unlocked.
+    ///
+    /// Attempts to lock a mutex in the thread which already holds the lock will
+    /// result in a deadlock.
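+    ///
+    /// A usage sketch, assuming the `Spinlock` alias from the crate-level
+    /// documentation:
+    ///
+    /// ```ignore
+    /// let lock = Spinlock::new(5);
+    /// {
+    ///     let mut guard = lock.lock();
+    ///     *guard += 1;
+    /// } // guard dropped here, unlocking the mutex
+    /// assert_eq!(*lock.lock(), 6);
+    /// ```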
+    #[inline]
+    pub fn lock(&self) -> MutexGuard<R, T> {
+        self.raw.lock();
+        self.guard()
+    }
+
+    /// Attempts to acquire this lock.
+    ///
+    /// If the lock could not be acquired at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
+    /// guard is dropped.
+    ///
+    /// This function does not block.
+    #[inline]
+    pub fn try_lock(&self) -> Option<MutexGuard<R, T>> {
+        if self.raw.try_lock() {
+            Some(self.guard())
+        } else {
+            None
+        }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the `Mutex` mutably, no actual locking needs to
+    /// take place---the mutable borrow statically guarantees no locks exist.
+    #[inline]
+    pub fn get_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.data.get() }
+    }
+
+    /// Forcibly unlocks the mutex.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `MutexGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `MutexGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a mutex is unlocked when not locked.
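+    ///
+    /// A sketch of the `mem::forget` pattern described above:
+    ///
+    /// ```ignore
+    /// let guard = mutex.lock();
+    /// core::mem::forget(guard); // keep the mutex locked without a live guard
+    /// // ... hand a pointer to foreign code, which calls back later ...
+    /// unsafe { mutex.force_unlock() }; // logically drops the forgotten guard
+    /// ```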
+    #[inline]
+    pub unsafe fn force_unlock(&self) {
+        self.raw.unlock();
+    }
+
+    /// Returns the underlying raw mutex object.
+    ///
+    /// Note that you will most likely need to import the `RawMutex` trait from
+    /// `lock_api` to be able to call functions on the raw mutex.
+    ///
+    /// # Safety
+    ///
+    /// This method is unsafe because it allows unlocking a mutex while
+    /// still holding a reference to a `MutexGuard`.
+    #[inline]
+    pub unsafe fn raw(&self) -> &R {
+        &self.raw
+    }
+}
+
+impl<R: RawMutexFair, T: ?Sized> Mutex<R, T> {
+    /// Forcibly unlocks the mutex using a fair unlock protocol.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `MutexGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `MutexGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a mutex is unlocked when not locked.
+    #[inline]
+    pub unsafe fn force_unlock_fair(&self) {
+        self.raw.unlock_fair();
+    }
+}
+
+impl<R: RawMutexTimed, T: ?Sized> Mutex<R, T> {
+    /// Attempts to acquire this lock until a timeout is reached.
+    ///
+    /// If the lock could not be acquired before the timeout expired, then
+    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
+    /// be unlocked when the guard is dropped.
+    #[inline]
+    pub fn try_lock_for(&self, timeout: R::Duration) -> Option<MutexGuard<R, T>> {
+        if self.raw.try_lock_for(timeout) {
+            Some(self.guard())
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this lock until a timeout is reached.
+    ///
+    /// If the lock could not be acquired before the timeout expired, then
+    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
+    /// be unlocked when the guard is dropped.
+    #[inline]
+    pub fn try_lock_until(&self, timeout: R::Instant) -> Option<MutexGuard<R, T>> {
+        if self.raw.try_lock_until(timeout) {
+            Some(self.guard())
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawMutex, T: ?Sized + Default> Default for Mutex<R, T> {
+    #[inline]
+    fn default() -> Mutex<R, T> {
+        Mutex::new(Default::default())
+    }
+}
+
+impl<R: RawMutex, T> From<T> for Mutex<R, T> {
+    #[inline]
+    fn from(t: T) -> Mutex<R, T> {
+        Mutex::new(t)
+    }
+}
+
+impl<R: RawMutex, T: ?Sized + fmt::Debug> fmt::Debug for Mutex<R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.try_lock() {
+            Some(guard) => f.debug_struct("Mutex").field("data", &&*guard).finish(),
+            None => f.pad("Mutex { <locked> }"),
+        }
+    }
+}
+
+/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
+/// dropped (falls out of scope), the lock will be unlocked.
+///
+/// The data protected by the mutex can be accessed through this guard via its
+/// `Deref` and `DerefMut` implementations.
+#[must_use]
+pub struct MutexGuard<'a, R: RawMutex + 'a, T: ?Sized + 'a> {
+    mutex: &'a Mutex<R, T>,
+    marker: PhantomData<(&'a mut T, R::GuardMarker)>,
+}
+
+unsafe impl<'a, R: RawMutex + Sync + 'a, T: ?Sized + Sync + 'a> Sync for MutexGuard<'a, R, T> {}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
+    /// Returns a reference to the original `Mutex` object.
+    pub fn mutex(s: &Self) -> &'a Mutex<R, T> {
+        s.mutex
+    }
+
+    /// Makes a new `MappedMutexGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `MutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MutexGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
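+    ///
+    /// A usage sketch (the `Spinlock` alias is the one from the crate-level
+    /// documentation; the tuple data is illustrative):
+    ///
+    /// ```ignore
+    /// let lock = Spinlock::new((0u32, String::new()));
+    /// // Narrow the guard down to the second field of the tuple.
+    /// let mut s = MutexGuard::map(lock.lock(), |pair| &mut pair.1);
+    /// s.push_str("mapped");
+    /// ```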
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedMutexGuard<'a, R, U>
+    where
+        F: FnOnce(&mut T) -> &mut U,
+    {
+        let raw = &s.mutex.raw;
+        let data = f(unsafe { &mut *s.mutex.data.get() });
+        mem::forget(s);
+        MappedMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Temporarily unlocks the mutex to execute the given function.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the mutex.
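+    ///
+    /// A sketch of typical usage (`do_blocking_io` is illustrative):
+    ///
+    /// ```ignore
+    /// let mut guard = lock.lock();
+    /// MutexGuard::unlocked(&mut guard, || {
+    ///     // The mutex is released here and re-acquired when the closure returns.
+    ///     do_blocking_io();
+    /// });
+    /// ```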
+    #[inline]
+    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.mutex.raw.unlock();
+        defer!(s.mutex.raw.lock());
+        f()
+    }
+}
+
+impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
+    /// Unlocks the mutex using a fair unlock protocol.
+    ///
+    /// By default, mutexes are unfair and allow the current thread to re-lock
+    /// the mutex before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the mutex for a long time. This is the
+    /// default because it allows much higher throughput as it avoids forcing a
+    /// context switch on every mutex unlock. This can result in one thread
+    /// acquiring a mutex many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `MutexGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        s.mutex.raw.unlock_fair();
+        mem::forget(s);
+    }
+
+    /// Temporarily unlocks the mutex to execute the given function.
+    ///
+    /// The mutex is unlocked using a fair unlock protocol.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the mutex.
+    #[inline]
+    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.mutex.raw.unlock_fair();
+        defer!(s.mutex.raw.lock());
+        f()
+    }
+
+    /// Temporarily yields the mutex to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `lock`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    #[inline]
+    pub fn bump(s: &mut Self) {
+        s.mutex.raw.bump();
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Deref for MutexGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.mutex.data.get() }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> DerefMut for MutexGuard<'a, R, T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.mutex.data.get() }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MutexGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        self.mutex.raw.unlock();
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> StableAddress for MutexGuard<'a, R, T> {}
+
+/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a
+/// subfield of the protected data.
+///
+/// The main difference between `MappedMutexGuard` and `MutexGuard` is that the
+/// former doesn't support temporarily unlocking and re-locking, since that
+/// could introduce soundness issues if the locked object is modified by another
+/// thread.
+#[must_use]
+pub struct MappedMutexGuard<'a, R: RawMutex + 'a, T: ?Sized + 'a> {
+    raw: &'a R,
+    data: *mut T,
+    marker: PhantomData<&'a mut T>,
+}
+
+unsafe impl<'a, R: RawMutex + Sync + 'a, T: ?Sized + Sync + 'a> Sync
+    for MappedMutexGuard<'a, R, T>
+{}
+unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Send for MappedMutexGuard<'a, R, T> where
+    R::GuardMarker: Send
+{}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> {
+    /// Makes a new `MappedMutexGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `MappedMutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedMutexGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedMutexGuard<'a, R, U>
+    where
+        F: FnOnce(&mut T) -> &mut U,
+    {
+        let raw = s.raw;
+        let data = f(unsafe { &mut *s.data });
+        mem::forget(s);
+        MappedMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> {
+    /// Unlocks the mutex using a fair unlock protocol.
+    ///
+    /// By default, mutexes are unfair and allow the current thread to re-lock
+    /// the mutex before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the mutex for a long time. This is the
+    /// default because it allows much higher throughput as it avoids forcing a
+    /// context switch on every mutex unlock. This can result in one thread
+    /// acquiring a mutex many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `MutexGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        s.raw.unlock_fair();
+        mem::forget(s);
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Deref for MappedMutexGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.data }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> DerefMut for MappedMutexGuard<'a, R, T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.data }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MappedMutexGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        self.raw.unlock();
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> StableAddress for MappedMutexGuard<'a, R, T> {}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/lock_api/src/remutex.rs
@@ -0,0 +1,564 @@
+// Copyright 2018 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::cell::{Cell, UnsafeCell};
+use core::fmt;
+use core::marker::PhantomData;
+use core::mem;
+use core::ops::Deref;
+use core::sync::atomic::{AtomicUsize, Ordering};
+use mutex::{RawMutex, RawMutexFair, RawMutexTimed};
+use GuardNoSend;
+
+#[cfg(feature = "owning_ref")]
+use owning_ref::StableAddress;
+
+/// Helper trait which returns a non-zero thread ID.
+///
+/// The simplest way to implement this trait is to return the address of a
+/// thread-local variable.
+///
+/// # Safety
+///
+/// Implementations of this trait must ensure that no two active threads share
+/// the same thread ID. However the ID of a thread that has exited can be
+/// re-used since that thread is no longer active.
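+///
+/// A sketch of the thread-local approach (illustrative; in practice the address
+/// of a live thread-local is distinct per thread and non-zero):
+///
+/// ```ignore
+/// struct StdThreadId;
+///
+/// unsafe impl GetThreadId for StdThreadId {
+///     const INIT: Self = StdThreadId;
+///
+///     fn nonzero_thread_id(&self) -> usize {
+///         thread_local!(static ID: u8 = 0);
+///         ID.with(|id| id as *const u8 as usize)
+///     }
+/// }
+/// ```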
+pub unsafe trait GetThreadId {
+    /// Initial value.
+    const INIT: Self;
+
+    /// Returns a non-zero thread ID which identifies the current thread of
+    /// execution.
+    fn nonzero_thread_id(&self) -> usize;
+}
+
+struct RawReentrantMutex<R: RawMutex, G: GetThreadId> {
+    owner: AtomicUsize,
+    lock_count: Cell<usize>,
+    mutex: R,
+    get_thread_id: G,
+}
+
+impl<R: RawMutex, G: GetThreadId> RawReentrantMutex<R, G> {
+    #[inline]
+    fn lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool {
+        let id = self.get_thread_id.nonzero_thread_id();
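+        // Relaxed ordering is sufficient for this check: a thread always sees
+        // its own prior stores, and no other thread ever stores this thread's
+        // ID into `owner`, so a match can only mean we already hold the mutex.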
+        if self.owner.load(Ordering::Relaxed) == id {
+            self.lock_count.set(
+                self.lock_count
+                    .get()
+                    .checked_add(1)
+                    .expect("ReentrantMutex lock count overflow"),
+            );
+        } else {
+            if !try_lock() {
+                return false;
+            }
+            self.owner.store(id, Ordering::Relaxed);
+            self.lock_count.set(1);
+        }
+        true
+    }
+
+    #[inline]
+    fn lock(&self) {
+        self.lock_internal(|| {
+            self.mutex.lock();
+            true
+        });
+    }
+
+    #[inline]
+    fn try_lock(&self) -> bool {
+        self.lock_internal(|| self.mutex.try_lock())
+    }
+
+    #[inline]
+    fn unlock(&self) {
+        let lock_count = self.lock_count.get() - 1;
+        if lock_count == 0 {
+            self.owner.store(0, Ordering::Relaxed);
+            self.mutex.unlock();
+        } else {
+            self.lock_count.set(lock_count);
+        }
+    }
+}
+
+impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
+    #[inline]
+    fn unlock_fair(&self) {
+        let lock_count = self.lock_count.get() - 1;
+        if lock_count == 0 {
+            self.owner.store(0, Ordering::Relaxed);
+            self.mutex.unlock_fair();
+        } else {
+            self.lock_count.set(lock_count);
+        }
+    }
+
+    #[inline]
+    fn bump(&self) {
+        if self.lock_count.get() == 1 {
+            let id = self.owner.load(Ordering::Relaxed);
+            self.owner.store(0, Ordering::Relaxed);
+            self.mutex.bump();
+            self.owner.store(id, Ordering::Relaxed);
+        }
+    }
+}
+
+impl<R: RawMutexTimed, G: GetThreadId> RawReentrantMutex<R, G> {
+    #[inline]
+    fn try_lock_until(&self, timeout: R::Instant) -> bool {
+        self.lock_internal(|| self.mutex.try_lock_until(timeout))
+    }
+
+    #[inline]
+    fn try_lock_for(&self, timeout: R::Duration) -> bool {
+        self.lock_internal(|| self.mutex.try_lock_for(timeout))
+    }
+}
+
+/// A mutex which can be recursively locked by a single thread.
+///
+/// This type is identical to `Mutex` except for the following points:
+///
+/// - Locking multiple times from the same thread will work correctly instead of
+///   deadlocking.
+/// - `ReentrantMutexGuard` does not give mutable references to the locked data.
+///   Use a `RefCell` if you need this.
+///
+/// See [`Mutex`](struct.Mutex.html) for more details about the underlying mutex
+/// primitive.
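+///
+/// A usage sketch, assuming an illustrative alias such as
+/// `type ReentrantSpinlock<T> = ReentrantMutex<RawSpinlock, StdThreadId, T>;`:
+///
+/// ```ignore
+/// let lock = ReentrantSpinlock::new(0u32);
+/// let a = lock.lock();
+/// let b = lock.lock(); // same thread: bumps the lock count, no deadlock
+/// assert_eq!(*a, *b);
+/// ```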
+pub struct ReentrantMutex<R: RawMutex, G: GetThreadId, T: ?Sized> {
+    raw: RawReentrantMutex<R, G>,
+    data: UnsafeCell<T>,
+}
+
+unsafe impl<R: RawMutex + Send, G: GetThreadId + Send, T: ?Sized + Send> Send
+    for ReentrantMutex<R, G, T>
+{}
+unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync, T: ?Sized + Send> Sync
+    for ReentrantMutex<R, G, T>
+{}
+
+impl<R: RawMutex, G: GetThreadId, T> ReentrantMutex<R, G, T> {
+    /// Creates a new reentrant mutex in an unlocked state ready for use.
+    #[cfg(feature = "nightly")]
+    #[inline]
+    pub const fn new(val: T) -> ReentrantMutex<R, G, T> {
+        ReentrantMutex {
+            data: UnsafeCell::new(val),
+            raw: RawReentrantMutex {
+                owner: AtomicUsize::new(0),
+                lock_count: Cell::new(0),
+                mutex: R::INIT,
+                get_thread_id: G::INIT,
+            },
+        }
+    }
+
+    /// Creates a new reentrant mutex in an unlocked state ready for use.
+    #[cfg(not(feature = "nightly"))]
+    #[inline]
+    pub fn new(val: T) -> ReentrantMutex<R, G, T> {
+        ReentrantMutex {
+            data: UnsafeCell::new(val),
+            raw: RawReentrantMutex {
+                owner: AtomicUsize::new(0),
+                lock_count: Cell::new(0),
+                mutex: R::INIT,
+                get_thread_id: G::INIT,
+            },
+        }
+    }
+
+    /// Consumes this mutex, returning the underlying data.
+    #[inline]
+    #[allow(unused_unsafe)]
+    pub fn into_inner(self) -> T {
+        unsafe { self.data.into_inner() }
+    }
+}
+
+impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
+    #[inline]
+    fn guard(&self) -> ReentrantMutexGuard<R, G, T> {
+        ReentrantMutexGuard {
+            remutex: self,
+            marker: PhantomData,
+        }
+    }
+
+    /// Acquires a reentrant mutex, blocking the current thread until it is able
+    /// to do so.
+    ///
+    /// If the mutex is held by another thread then this function will block the
+    /// local thread until it is available to acquire the mutex. If the mutex is
+    /// already held by the current thread then this function will increment the
+    /// lock reference count and return immediately. Upon returning,
+    /// the thread is the only thread with the mutex held. An RAII guard is
+    /// returned to allow scoped unlock of the lock. When the guard goes out of
+    /// scope, the mutex will be unlocked.
+    #[inline]
+    pub fn lock(&self) -> ReentrantMutexGuard<R, G, T> {
+        self.raw.lock();
+        self.guard()
+    }
+
+    /// Attempts to acquire this lock.
+    ///
+    /// If the lock could not be acquired at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
+    /// guard is dropped.
+    ///
+    /// This function does not block.
+    #[inline]
+    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<R, G, T>> {
+        if self.raw.try_lock() {
+            Some(self.guard())
+        } else {
+            None
+        }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the `ReentrantMutex` mutably, no actual locking needs to
+    /// take place---the mutable borrow statically guarantees no locks exist.
+    #[inline]
+    pub fn get_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.data.get() }
+    }
+
+    /// Forcibly unlocks the mutex.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `ReentrantMutexGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a mutex is unlocked when not locked.
+    #[inline]
+    pub unsafe fn force_unlock(&self) {
+        self.raw.unlock();
+    }
+
+    /// Returns the underlying raw mutex object.
+    ///
+    /// Note that you will most likely need to import the `RawMutex` trait from
+    /// `lock_api` to be able to call functions on the raw mutex.
+    ///
+    /// # Safety
+    ///
+    /// This method is unsafe because it allows unlocking a mutex while
+    /// still holding a reference to a `ReentrantMutexGuard`.
+    #[inline]
+    pub unsafe fn raw(&self) -> &R {
+        &self.raw.mutex
+    }
+}
+
+impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
+    /// Forcibly unlocks the mutex using a fair unlock protocol.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `ReentrantMutexGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a mutex is unlocked when not locked.
+    #[inline]
+    pub unsafe fn force_unlock_fair(&self) {
+        self.raw.unlock_fair();
+    }
+}
+
+impl<R: RawMutexTimed, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
+    /// Attempts to acquire this lock until a timeout is reached.
+    ///
+    /// If the lock could not be acquired before the timeout expired, then
+    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
+    /// be unlocked when the guard is dropped.
+    #[inline]
+    pub fn try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<R, G, T>> {
+        if self.raw.try_lock_for(timeout) {
+            Some(self.guard())
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this lock until a timeout is reached.
+    ///
+    /// If the lock could not be acquired before the timeout expired, then
+    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
+    /// be unlocked when the guard is dropped.
+    #[inline]
+    pub fn try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<R, G, T>> {
+        if self.raw.try_lock_until(timeout) {
+            Some(self.guard())
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawMutex, G: GetThreadId, T: ?Sized + Default> Default for ReentrantMutex<R, G, T> {
+    #[inline]
+    fn default() -> ReentrantMutex<R, G, T> {
+        ReentrantMutex::new(Default::default())
+    }
+}
+
+impl<R: RawMutex, G: GetThreadId, T> From<T> for ReentrantMutex<R, G, T> {
+    #[inline]
+    fn from(t: T) -> ReentrantMutex<R, G, T> {
+        ReentrantMutex::new(t)
+    }
+}
+
+impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for ReentrantMutex<R, G, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.try_lock() {
+            Some(guard) => f
+                .debug_struct("ReentrantMutex")
+                .field("data", &&*guard)
+                .finish(),
+            None => f.pad("ReentrantMutex { <locked> }"),
+        }
+    }
+}
+
+/// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure
+/// is dropped (falls out of scope), the lock will be unlocked.
+///
+/// The data protected by the mutex can be accessed through this guard via its
+/// `Deref` implementation.
+#[must_use]
+pub struct ReentrantMutexGuard<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> {
+    remutex: &'a ReentrantMutex<R, G, T>,
+    marker: PhantomData<(&'a T, GuardNoSend)>,
+}
+
+unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
+    for ReentrantMutexGuard<'a, R, G, T>
+{}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> {
+    /// Returns a reference to the original `ReentrantMutex` object.
+    pub fn remutex(s: &Self) -> &'a ReentrantMutex<R, G, T> {
+        s.remutex
+    }
+
+    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `ReentrantMutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `ReentrantMutexGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
+    where
+        F: FnOnce(&T) -> &U,
+    {
+        let raw = &s.remutex.raw;
+        let data = f(unsafe { &*s.remutex.data.get() });
+        mem::forget(s);
+        MappedReentrantMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Temporarily unlocks the mutex to execute the given function.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the mutex.
+    #[inline]
+    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.remutex.raw.unlock();
+        defer!(s.remutex.raw.lock());
+        f()
+    }
+}
+
+impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
+    ReentrantMutexGuard<'a, R, G, T>
+{
+    /// Unlocks the mutex using a fair unlock protocol.
+    ///
+    /// By default, mutexes are unfair and allow the current thread to re-lock
+    /// the mutex before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the mutex for a long time. This is the
+    /// default because it allows much higher throughput as it avoids forcing a
+    /// context switch on every mutex unlock. This can result in one thread
+    /// acquiring a mutex many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        s.remutex.raw.unlock_fair();
+        mem::forget(s);
+    }
+
+    /// Temporarily unlocks the mutex to execute the given function.
+    ///
+    /// The mutex is unlocked using a fair unlock protocol.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the mutex.
+    #[inline]
+    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.remutex.raw.unlock_fair();
+        defer!(s.remutex.raw.lock());
+        f()
+    }
+
+    /// Temporarily yields the mutex to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `lock`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    #[inline]
+    pub fn bump(s: &mut Self) {
+        s.remutex.raw.bump();
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
+    for ReentrantMutexGuard<'a, R, G, T>
+{
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.remutex.data.get() }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
+    for ReentrantMutexGuard<'a, R, G, T>
+{
+    #[inline]
+    fn drop(&mut self) {
+        self.remutex.raw.unlock();
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
+    for ReentrantMutexGuard<'a, R, G, T>
+{}
+
+/// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a
+/// subfield of the protected data.
+///
+/// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the
+/// former doesn't support temporarily unlocking and re-locking, since that
+/// could introduce soundness issues if the locked object is modified by another
+/// thread.
+#[must_use]
+pub struct MappedReentrantMutexGuard<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> {
+    raw: &'a RawReentrantMutex<R, G>,
+    data: *const T,
+    marker: PhantomData<&'a T>,
+}
+
+unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
+    for MappedReentrantMutexGuard<'a, R, G, T>
+{}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
+    MappedReentrantMutexGuard<'a, R, G, T>
+{
+    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedReentrantMutexGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
+    where
+        F: FnOnce(&T) -> &U,
+    {
+        let raw = s.raw;
+        let data = f(unsafe { &*s.data });
+        mem::forget(s);
+        MappedReentrantMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
+    MappedReentrantMutexGuard<'a, R, G, T>
+{
+    /// Unlocks the mutex using a fair unlock protocol.
+    ///
+    /// By default, mutexes are unfair and allow the current thread to re-lock
+    /// the mutex before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the mutex for a long time. This is the
+    /// default because it allows much higher throughput as it avoids forcing a
+    /// context switch on every mutex unlock. This can result in one thread
+    /// acquiring a mutex many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        s.raw.unlock_fair();
+        mem::forget(s);
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
+    for MappedReentrantMutexGuard<'a, R, G, T>
+{
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.data }
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
+    for MappedReentrantMutexGuard<'a, R, G, T>
+{
+    #[inline]
+    fn drop(&mut self) {
+        self.raw.unlock();
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
+    for MappedReentrantMutexGuard<'a, R, G, T>
+{}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/lock_api/src/rwlock.rs
@@ -0,0 +1,1345 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::cell::UnsafeCell;
+use core::fmt;
+use core::marker::PhantomData;
+use core::mem;
+use core::ops::{Deref, DerefMut};
+
+#[cfg(feature = "owning_ref")]
+use owning_ref::StableAddress;
+
+/// Basic operations for a reader-writer lock.
+///
+/// Types implementing this trait can be used by `RwLock` to form a safe and
+/// fully-functioning `RwLock` type.
+///
+/// # Safety
+///
+/// Implementations of this trait must ensure that the `RwLock` is actually
+/// exclusive: an exclusive lock can't be acquired while an exclusive or shared
+/// lock exists, and a shared lock can't be acquired while an exclusive lock
+/// exists.
+pub unsafe trait RawRwLock {
+    /// Initial value for an unlocked `RwLock`.
+    const INIT: Self;
+
+    /// Marker type which determines whether a lock guard should be `Send`. Use
+    /// one of the `GuardSend` or `GuardNoSend` helper types here.
+    type GuardMarker;
+
+    /// Acquires a shared lock, blocking the current thread until it is able to do so.
+    fn lock_shared(&self);
+
+    /// Attempts to acquire a shared lock without blocking.
+    fn try_lock_shared(&self) -> bool;
+
+    /// Releases a shared lock.
+    fn unlock_shared(&self);
+
+    /// Acquires an exclusive lock, blocking the current thread until it is able to do so.
+    fn lock_exclusive(&self);
+
+    /// Attempts to acquire an exclusive lock without blocking.
+    fn try_lock_exclusive(&self) -> bool;
+
+    /// Releases an exclusive lock.
+    fn unlock_exclusive(&self);
+}
+
+/// Additional methods for RwLocks which support fair unlocking.
+///
+/// Fair unlocking means that a lock is handed directly over to the next waiting
+/// thread if there is one, without giving other threads the opportunity to
+/// "steal" the lock in the meantime. This is typically slower than unfair
+/// unlocking, but may be necessary in certain circumstances.
+pub unsafe trait RawRwLockFair: RawRwLock {
+    /// Releases a shared lock using a fair unlock protocol.
+    fn unlock_shared_fair(&self);
+
+    /// Releases an exclusive lock using a fair unlock protocol.
+    fn unlock_exclusive_fair(&self);
+
+    /// Temporarily yields a shared lock to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_shared_fair` followed
+    /// by `lock_shared`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    fn bump_shared(&self) {
+        self.unlock_shared_fair();
+        self.lock_shared();
+    }
+
+    /// Temporarily yields an exclusive lock to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_exclusive_fair` followed
+    /// by `lock_exclusive`, however it can be much more efficient in the case where there
+    /// are no waiting threads.
+    fn bump_exclusive(&self) {
+        self.unlock_exclusive_fair();
+        self.lock_exclusive();
+    }
+}
+
+/// Additional methods for RwLocks which support atomically downgrading an
+/// exclusive lock to a shared lock.
+pub unsafe trait RawRwLockDowngrade: RawRwLock {
+    /// Atomically downgrades an exclusive lock into a shared lock without
+    /// allowing any thread to take an exclusive lock in the meantime.
+    fn downgrade(&self);
+}
+
+/// Additional methods for RwLocks which support locking with timeouts.
+///
+/// The `Duration` and `Instant` types are specified as associated types so that
+/// this trait is usable even in `no_std` environments.
+pub unsafe trait RawRwLockTimed: RawRwLock {
+    /// Duration type used for `try_lock_for`.
+    type Duration;
+
+    /// Instant type used for `try_lock_until`.
+    type Instant;
+
+    /// Attempts to acquire a shared lock until a timeout is reached.
+    fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool;
+
+    /// Attempts to acquire a shared lock until a timeout is reached.
+    fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool;
+
+    /// Attempts to acquire an exclusive lock until a timeout is reached.
+    fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool;
+
+    /// Attempts to acquire an exclusive lock until a timeout is reached.
+    fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool;
+}
+
+/// Additional methods for RwLocks which support recursive read locks.
+///
+/// The methods in this trait are guaranteed to succeed without blocking if
+/// another read lock is held at the time of the call. This allows a thread
+/// to recursively lock a `RwLock`. However, using these methods can cause
+/// writers to starve since readers no longer block if a writer is waiting
+/// for the lock.
+pub unsafe trait RawRwLockRecursive: RawRwLock {
+    /// Acquires a shared lock without deadlocking in case of a recursive lock.
+    fn lock_shared_recursive(&self);
+
+    /// Attempts to acquire a shared lock without deadlocking in case of a recursive lock.
+    fn try_lock_shared_recursive(&self) -> bool;
+}
+
+/// Additional methods for RwLocks which support recursive read locks and timeouts.
+pub unsafe trait RawRwLockRecursiveTimed: RawRwLockRecursive + RawRwLockTimed {
+    /// Attempts to acquire a shared lock until a timeout is reached, without
+    /// deadlocking in case of a recursive lock.
+    fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool;
+
+    /// Attempts to acquire a shared lock until a timeout is reached, without
+    /// deadlocking in case of a recursive lock.
+    fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool;
+}
+
+/// Additional methods for RwLocks which support atomically upgrading a shared
+/// lock to an exclusive lock.
+///
+/// This requires acquiring a special "upgradable read lock" instead of a
+/// normal shared lock. There may only be one upgradable lock at any time,
+/// otherwise deadlocks could occur when upgrading.
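+///
+/// A sketch of the raw-level protocol (`raw` and `needs_write` are
+/// illustrative; the `RwLock` wrapper exposes the same steps via RAII guards):
+///
+/// ```ignore
+/// raw.lock_upgradable();       // shared access, plus the right to upgrade
+/// if needs_write {
+///     raw.upgrade();           // atomically become the exclusive holder
+///     // ... mutate the protected data ...
+///     raw.unlock_exclusive();
+/// } else {
+///     raw.unlock_upgradable();
+/// }
+/// ```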
+pub unsafe trait RawRwLockUpgrade: RawRwLock {
+    /// Acquires an upgradable lock, blocking the current thread until it is able to do so.
+    fn lock_upgradable(&self);
+
+    /// Attempts to acquire an upgradable lock without blocking.
+    fn try_lock_upgradable(&self) -> bool;
+
+    /// Releases an upgradable lock.
+    fn unlock_upgradable(&self);
+
+    /// Upgrades an upgradable lock to an exclusive lock.
+    fn upgrade(&self);
+
+    /// Attempts to upgrade an upgradable lock to an exclusive lock without
+    /// blocking.
+    fn try_upgrade(&self) -> bool;
+}
+
+/// Additional methods for RwLocks which support upgradable locks and fair
+/// unlocking.
+pub unsafe trait RawRwLockUpgradeFair: RawRwLockUpgrade + RawRwLockFair {
+    /// Releases an upgradable lock using a fair unlock protocol.
+    fn unlock_upgradable_fair(&self);
+
+    /// Temporarily yields an upgradable lock to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_upgradable_fair` followed
+    /// by `lock_upgradable`; however, it can be much more efficient in the case where there
+    /// are no waiting threads.
+    fn bump_upgradable(&self) {
+        self.unlock_upgradable_fair();
+        self.lock_upgradable();
+    }
+}
+
+/// Additional methods for RwLocks which support upgradable locks and lock
+/// downgrading.
+pub unsafe trait RawRwLockUpgradeDowngrade: RawRwLockUpgrade + RawRwLockDowngrade {
+    /// Downgrades an upgradable lock to a shared lock.
+    fn downgrade_upgradable(&self);
+
+    /// Downgrades an exclusive lock to an upgradable lock.
+    fn downgrade_to_upgradable(&self);
+}
+
+/// Additional methods for RwLocks which support upgradable locks and locking
+/// with timeouts.
+pub unsafe trait RawRwLockUpgradeTimed: RawRwLockUpgrade + RawRwLockTimed {
+    /// Attempts to acquire an upgradable lock until a timeout is reached.
+    fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool;
+
+    /// Attempts to acquire an upgradable lock until a timeout is reached.
+    fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool;
+
+    /// Attempts to upgrade an upgradable lock to an exclusive lock until a
+    /// timeout is reached.
+    fn try_upgrade_for(&self, timeout: Self::Duration) -> bool;
+
+    /// Attempts to upgrade an upgradable lock to an exclusive lock until a
+    /// timeout is reached.
+    fn try_upgrade_until(&self, timeout: Self::Instant) -> bool;
+}
+
+/// A reader-writer lock
+///
+/// This type of lock allows a number of readers or at most one writer at any
+/// point in time. The write portion of this lock typically allows modification
+/// of the underlying data (exclusive access) and the read portion of this lock
+/// typically allows for read-only access (shared access).
+///
+/// The type parameter `T` represents the data that this lock protects. It is
+/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
+/// allow concurrent access through readers. The RAII guards returned from the
+/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
+/// to allow access to the contents of the lock.
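+///
+/// # Example
+///
+/// A minimal usage sketch, assuming the `parking_lot::RwLock` alias built on
+/// this type (`parking_lot` is not a dependency of this crate, so the doc
+/// test is ignored):
+///
+/// ```ignore
+/// use parking_lot::RwLock;
+///
+/// let lock = RwLock::new(5);
+///
+/// // Any number of read guards may be alive at once.
+/// {
+///     let r1 = lock.read();
+///     let r2 = lock.read();
+///     assert_eq!(*r1 + *r2, 10);
+/// } // both read guards are dropped here
+///
+/// // Only one write guard may be alive at a time.
+/// {
+///     let mut w = lock.write();
+///     *w += 1;
+///     assert_eq!(*w, 6);
+/// }
+/// ```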
+pub struct RwLock<R: RawRwLock, T: ?Sized> {
+    raw: R,
+    data: UnsafeCell<T>,
+}
+
+unsafe impl<R: RawRwLock + Send, T: ?Sized + Send> Send for RwLock<R, T> {}
+unsafe impl<R: RawRwLock + Sync, T: ?Sized + Send + Sync> Sync for RwLock<R, T> {}
+
+impl<R: RawRwLock, T> RwLock<R, T> {
+    /// Creates a new instance of an `RwLock<T>` which is unlocked.
+    #[cfg(feature = "nightly")]
+    #[inline]
+    pub const fn new(val: T) -> RwLock<R, T> {
+        RwLock {
+            data: UnsafeCell::new(val),
+            raw: R::INIT,
+        }
+    }
+
+    /// Creates a new instance of an `RwLock<T>` which is unlocked.
+    #[cfg(not(feature = "nightly"))]
+    #[inline]
+    pub fn new(val: T) -> RwLock<R, T> {
+        RwLock {
+            data: UnsafeCell::new(val),
+            raw: R::INIT,
+        }
+    }
+
+    /// Consumes this `RwLock`, returning the underlying data.
+    #[inline]
+    #[allow(unused_unsafe)]
+    pub fn into_inner(self) -> T {
+        unsafe { self.data.into_inner() }
+    }
+}
+
+impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
+    #[inline]
+    fn read_guard(&self) -> RwLockReadGuard<R, T> {
+        RwLockReadGuard {
+            rwlock: self,
+            marker: PhantomData,
+        }
+    }
+
+    #[inline]
+    fn write_guard(&self) -> RwLockWriteGuard<R, T> {
+        RwLockWriteGuard {
+            rwlock: self,
+            marker: PhantomData,
+        }
+    }
+
+    /// Locks this `RwLock` with shared read access, blocking the current thread
+    /// until it can be acquired.
+    ///
+    /// The calling thread will be blocked until there are no more writers which
+    /// hold the lock. There may be other readers currently inside the lock when
+    /// this method returns.
+    ///
+    /// Note that attempts to recursively acquire a read lock on a `RwLock` when
+    /// the current thread already holds one may result in a deadlock.
+    ///
+    /// Returns an RAII guard which will release this thread's shared access
+    /// once it is dropped.
+    #[inline]
+    pub fn read(&self) -> RwLockReadGuard<R, T> {
+        self.raw.lock_shared();
+        self.read_guard()
+    }
+
+    /// Attempts to acquire this `RwLock` with shared read access.
+    ///
+    /// If the access could not be granted at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned which will release the shared access
+    /// when it is dropped.
+    ///
+    /// This function does not block.
+    #[inline]
+    pub fn try_read(&self) -> Option<RwLockReadGuard<R, T>> {
+        if self.raw.try_lock_shared() {
+            Some(self.read_guard())
+        } else {
+            None
+        }
+    }
+
+    /// Locks this `RwLock` with exclusive write access, blocking the current
+    /// thread until it can be acquired.
+    ///
+    /// This function will not return while other writers or other readers
+    /// currently have access to the lock.
+    ///
+    /// Returns an RAII guard which will drop the write access of this `RwLock`
+    /// when dropped.
+    #[inline]
+    pub fn write(&self) -> RwLockWriteGuard<R, T> {
+        self.raw.lock_exclusive();
+        self.write_guard()
+    }
+
+    /// Attempts to lock this `RwLock` with exclusive write access.
+    ///
+    /// If the lock could not be acquired at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned which will release the lock when
+    /// it is dropped.
+    ///
+    /// This function does not block.
+    #[inline]
+    pub fn try_write(&self) -> Option<RwLockWriteGuard<R, T>> {
+        if self.raw.try_lock_exclusive() {
+            Some(self.write_guard())
+        } else {
+            None
+        }
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
+    /// take place---the mutable borrow statically guarantees no locks exist.
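+    ///
+    /// # Example
+    ///
+    /// A short sketch, assuming the `parking_lot::RwLock` alias (ignored in
+    /// doc tests since `parking_lot` is not a dependency of this crate):
+    ///
+    /// ```ignore
+    /// let mut lock = parking_lot::RwLock::new(0);
+    /// // No guard is needed: the exclusive borrow proves no lock is held.
+    /// *lock.get_mut() = 10;
+    /// assert_eq!(*lock.read(), 10);
+    /// ```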
+    #[inline]
+    pub fn get_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.data.get() }
+    }
+
+    /// Forcibly unlocks a read lock.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `RwLockReadGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
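+    ///
+    /// # Example
+    ///
+    /// A short sketch of the pattern, assuming the `parking_lot::RwLock`
+    /// alias (ignored in doc tests):
+    ///
+    /// ```ignore
+    /// let lock = parking_lot::RwLock::new(0);
+    /// // Hold the read lock without keeping a guard object around.
+    /// std::mem::forget(lock.read());
+    /// // ... later, once the read lock is logically released:
+    /// unsafe { lock.force_unlock_read(); }
+    /// ```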
+    #[inline]
+    pub unsafe fn force_unlock_read(&self) {
+        self.raw.unlock_shared();
+    }
+
+    /// Forcibly unlocks a write lock.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
+    #[inline]
+    pub unsafe fn force_unlock_write(&self) {
+        self.raw.unlock_exclusive();
+    }
+
+    /// Returns the underlying raw reader-writer lock object.
+    ///
+    /// Note that you will most likely need to import the `RawRwLock` trait from
+    /// `lock_api` to be able to call functions on the raw
+    /// reader-writer lock.
+    ///
+    /// # Safety
+    ///
+    /// This method is unsafe because it allows unlocking the `RwLock` while
+    /// still holding a reference to a lock guard.
+    pub unsafe fn raw(&self) -> &R {
+        &self.raw
+    }
+}
+
+impl<R: RawRwLockFair, T: ?Sized> RwLock<R, T> {
+    /// Forcibly unlocks a read lock using a fair unlock protocol.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `RwLockReadGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
+    #[inline]
+    pub unsafe fn force_unlock_read_fair(&self) {
+        self.raw.unlock_shared_fair();
+    }
+
+    /// Forcibly unlocks a write lock using a fair unlock protocol.
+    ///
+    /// This is useful when combined with `mem::forget` to hold a lock without
+    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
+    /// dealing with FFI.
+    ///
+    /// # Safety
+    ///
+    /// This method must only be called if the current thread logically owns a
+    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
+    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
+    #[inline]
+    pub unsafe fn force_unlock_write_fair(&self) {
+        self.raw.unlock_exclusive_fair();
+    }
+}
+
+impl<R: RawRwLockTimed, T: ?Sized> RwLock<R, T> {
+    /// Attempts to acquire this `RwLock` with shared read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
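+    ///
+    /// # Example
+    ///
+    /// A short sketch, assuming the `parking_lot::RwLock` alias, whose
+    /// `Duration` type is `std::time::Duration` (ignored in doc tests):
+    ///
+    /// ```ignore
+    /// use std::time::Duration;
+    ///
+    /// let lock = parking_lot::RwLock::new(1);
+    /// // Give up if the shared lock cannot be acquired within 10ms.
+    /// match lock.try_read_for(Duration::from_millis(10)) {
+    ///     Some(guard) => assert_eq!(*guard, 1),
+    ///     None => println!("timed out waiting for a read lock"),
+    /// }
+    /// ```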
+    #[inline]
+    pub fn try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<R, T>> {
+        if self.raw.try_lock_shared_for(timeout) {
+            Some(self.read_guard())
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this `RwLock` with shared read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
+    #[inline]
+    pub fn try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<R, T>> {
+        if self.raw.try_lock_shared_until(timeout) {
+            Some(self.read_guard())
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this `RwLock` with exclusive write access until a
+    /// timeout is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the exclusive access when it is dropped.
+    #[inline]
+    pub fn try_write_for(&self, timeout: R::Duration) -> Option<RwLockWriteGuard<R, T>> {
+        if self.raw.try_lock_exclusive_for(timeout) {
+            Some(self.write_guard())
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this `RwLock` with exclusive write access until a
+    /// timeout is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the exclusive access when it is dropped.
+    #[inline]
+    pub fn try_write_until(&self, timeout: R::Instant) -> Option<RwLockWriteGuard<R, T>> {
+        if self.raw.try_lock_exclusive_until(timeout) {
+            Some(self.write_guard())
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawRwLockRecursive, T: ?Sized> RwLock<R, T> {
+    /// Locks this `RwLock` with shared read access, blocking the current thread
+    /// until it can be acquired.
+    ///
+    /// The calling thread will be blocked until there are no more writers which
+    /// hold the lock. There may be other readers currently inside the lock when
+    /// this method returns.
+    ///
+    /// Unlike `read`, this method is guaranteed to succeed without blocking if
+    /// another read lock is held at the time of the call. This allows a thread
+    /// to recursively lock a `RwLock`. However, using this method can cause
+    /// writers to starve, since readers no longer block if a writer is waiting
+    /// for the lock.
+    ///
+    /// Returns an RAII guard which will release this thread's shared access
+    /// once it is dropped.
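+    ///
+    /// # Example
+    ///
+    /// A short sketch, assuming the `parking_lot::RwLock` alias (ignored in
+    /// doc tests):
+    ///
+    /// ```ignore
+    /// let lock = parking_lot::RwLock::new(0);
+    /// let first = lock.read();
+    /// // A second plain `read` on this thread could deadlock if a writer is
+    /// // already queued, but `read_recursive` is guaranteed not to block.
+    /// let second = lock.read_recursive();
+    /// assert_eq!(*first, *second);
+    /// ```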
+    #[inline]
+    pub fn read_recursive(&self) -> RwLockReadGuard<R, T> {
+        self.raw.lock_shared_recursive();
+        self.read_guard()
+    }
+
+    /// Attempts to acquire this `RwLock` with shared read access.
+    ///
+    /// If the access could not be granted at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned which will release the shared access
+    /// when it is dropped.
+    ///
+    /// This method is guaranteed to succeed if another read lock is held at the
+    /// time of the call. See the documentation for `read_recursive` for details.
+    ///
+    /// This function does not block.
+    #[inline]
+    pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<R, T>> {
+        if self.raw.try_lock_shared_recursive() {
+            Some(self.read_guard())
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawRwLockRecursiveTimed, T: ?Sized> RwLock<R, T> {
+    /// Attempts to acquire this `RwLock` with shared read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
+    ///
+    /// This method is guaranteed to succeed without blocking if another read
+    /// lock is held at the time of the call. See the documentation for
+    /// `read_recursive` for details.
+    #[inline]
+    pub fn try_read_recursive_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<R, T>> {
+        if self.raw.try_lock_shared_recursive_for(timeout) {
+            Some(self.read_guard())
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this `RwLock` with shared read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
+    #[inline]
+    pub fn try_read_recursive_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<R, T>> {
+        if self.raw.try_lock_shared_recursive_until(timeout) {
+            Some(self.read_guard())
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawRwLockUpgrade, T: ?Sized> RwLock<R, T> {
+    #[inline]
+    fn upgradable_guard(&self) -> RwLockUpgradableReadGuard<R, T> {
+        RwLockUpgradableReadGuard {
+            rwlock: self,
+            marker: PhantomData,
+        }
+    }
+
+    /// Locks this `RwLock` with upgradable read access, blocking the current thread
+    /// until it can be acquired.
+    ///
+    /// The calling thread will be blocked until there are no more writers or other
+    /// upgradable reads which hold the lock. There may be other readers currently
+    /// inside the lock when this method returns.
+    ///
+    /// Returns an RAII guard which will release this thread's shared access
+    /// once it is dropped.
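+    ///
+    /// # Example
+    ///
+    /// A short sketch, assuming the `parking_lot` aliases (ignored in doc
+    /// tests):
+    ///
+    /// ```ignore
+    /// use parking_lot::{RwLock, RwLockUpgradableReadGuard};
+    ///
+    /// let lock = RwLock::new(0);
+    /// let upgradable = lock.upgradable_read();
+    /// if *upgradable == 0 {
+    ///     // Atomically trade upgradable read access for write access.
+    ///     let mut writer = RwLockUpgradableReadGuard::upgrade(upgradable);
+    ///     *writer = 1;
+    /// }
+    /// ```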
+    #[inline]
+    pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<R, T> {
+        self.raw.lock_upgradable();
+        self.upgradable_guard()
+    }
+
+    /// Attempts to acquire this `RwLock` with upgradable read access.
+    ///
+    /// If the access could not be granted at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned which will release the shared access
+    /// when it is dropped.
+    ///
+    /// This function does not block.
+    #[inline]
+    pub fn try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<R, T>> {
+        if self.raw.try_lock_upgradable() {
+            Some(self.upgradable_guard())
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawRwLockUpgradeTimed, T: ?Sized> RwLock<R, T> {
+    /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
+    #[inline]
+    pub fn try_upgradable_read_for(
+        &self,
+        timeout: R::Duration,
+    ) -> Option<RwLockUpgradableReadGuard<R, T>> {
+        if self.raw.try_lock_upgradable_for(timeout) {
+            Some(self.upgradable_guard())
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
+    #[inline]
+    pub fn try_upgradable_read_until(
+        &self,
+        timeout: R::Instant,
+    ) -> Option<RwLockUpgradableReadGuard<R, T>> {
+        if self.raw.try_lock_upgradable_until(timeout) {
+            Some(self.upgradable_guard())
+        } else {
+            None
+        }
+    }
+}
+
+impl<R: RawRwLock, T: ?Sized + Default> Default for RwLock<R, T> {
+    #[inline]
+    fn default() -> RwLock<R, T> {
+        RwLock::new(Default::default())
+    }
+}
+
+impl<R: RawRwLock, T> From<T> for RwLock<R, T> {
+    #[inline]
+    fn from(t: T) -> RwLock<R, T> {
+        RwLock::new(t)
+    }
+}
+
+impl<R: RawRwLock, T: ?Sized + fmt::Debug> fmt::Debug for RwLock<R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.try_read() {
+            Some(guard) => f.debug_struct("RwLock").field("data", &&*guard).finish(),
+            None => f.pad("RwLock { <locked> }"),
+        }
+    }
+}
+
+/// RAII structure used to release the shared read access of a lock when
+/// dropped.
+#[must_use]
+pub struct RwLockReadGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
+    rwlock: &'a RwLock<R, T>,
+    marker: PhantomData<(&'a T, R::GuardMarker)>,
+}
+
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for RwLockReadGuard<'a, R, T> {}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
+    /// Returns a reference to the original reader-writer lock object.
+    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
+        s.rwlock
+    }
+
+    /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `RwLockReadGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `RwLockReadGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
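+    ///
+    /// # Example
+    ///
+    /// A short sketch, assuming the `parking_lot` aliases and a hypothetical
+    /// `Point` type (ignored in doc tests):
+    ///
+    /// ```ignore
+    /// use parking_lot::{RwLock, RwLockReadGuard};
+    ///
+    /// struct Point { x: i32, y: i32 }
+    ///
+    /// let lock = RwLock::new(Point { x: 1, y: 2 });
+    /// // Narrow the guard so it only exposes the `x` field.
+    /// let x = RwLockReadGuard::map(lock.read(), |p| &p.x);
+    /// assert_eq!(*x, 1);
+    /// ```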
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U>
+    where
+        F: FnOnce(&T) -> &U,
+    {
+        let raw = &s.rwlock.raw;
+        let data = f(unsafe { &*s.rwlock.data.get() });
+        mem::forget(s);
+        MappedRwLockReadGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Temporarily unlocks the `RwLock` to execute the given function.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the `RwLock`.
+    #[inline]
+    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.rwlock.raw.unlock_shared();
+        defer!(s.rwlock.raw.lock_shared());
+        f()
+    }
+}
+
+impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
+    /// Unlocks the `RwLock` using a fair unlock protocol.
+    ///
+    /// By default, `RwLock` is unfair and allows the current thread to re-lock
+    /// the `RwLock` before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the `RwLock` for a long time. This is
+    /// the default because it allows much higher throughput as it avoids
+    /// forcing a context switch on every `RwLock` unlock. This can result in one
+    /// thread acquiring a `RwLock` many more times than other threads.
+    ///
+    /// However, in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `RwLockReadGuard` normally.
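+    ///
+    /// # Example
+    ///
+    /// A short sketch, assuming the `parking_lot` aliases (ignored in doc
+    /// tests):
+    ///
+    /// ```ignore
+    /// use parking_lot::{RwLock, RwLockReadGuard};
+    ///
+    /// let lock = RwLock::new(());
+    /// let guard = lock.read();
+    /// // Hand the lock to a waiting thread instead of racing to retake it.
+    /// RwLockReadGuard::unlock_fair(guard);
+    /// ```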
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        s.rwlock.raw.unlock_shared_fair();
+        mem::forget(s);
+    }
+
+    /// Temporarily unlocks the `RwLock` to execute the given function.
+    ///
+    /// The `RwLock` is unlocked using a fair unlock protocol.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the `RwLock`.
+    #[inline]
+    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.rwlock.raw.unlock_shared_fair();
+        defer!(s.rwlock.raw.lock_shared());
+        f()
+    }
+
+    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `read`; however, it can be much more efficient in the case where there
+    /// are no waiting threads.
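+    ///
+    /// # Example
+    ///
+    /// A short sketch, assuming the `parking_lot` aliases (ignored in doc
+    /// tests):
+    ///
+    /// ```ignore
+    /// use parking_lot::{RwLock, RwLockReadGuard};
+    ///
+    /// let lock = RwLock::new(0);
+    /// let mut guard = lock.read();
+    /// // Give any queued writer a chance to run, then keep reading.
+    /// RwLockReadGuard::bump(&mut guard);
+    /// ```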
+    #[inline]
+    pub fn bump(s: &mut Self) {
+        s.rwlock.raw.bump_shared();
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.rwlock.data.get() }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        self.rwlock.raw.unlock_shared();
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockReadGuard<'a, R, T> {}
+
+/// RAII structure used to release the exclusive write access of a lock when
+/// dropped.
+#[must_use]
+pub struct RwLockWriteGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
+    rwlock: &'a RwLock<R, T>,
+    marker: PhantomData<(&'a mut T, R::GuardMarker)>,
+}
+
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for RwLockWriteGuard<'a, R, T> {}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
+    /// Returns a reference to the original reader-writer lock object.
+    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
+        s.rwlock
+    }
+
+    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `RwLockWriteGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `RwLockWriteGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(orig: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
+    where
+        F: FnOnce(&mut T) -> &mut U,
+    {
+        let raw = &orig.rwlock.raw;
+        let data = f(unsafe { &mut *orig.rwlock.data.get() });
+        mem::forget(orig);
+        MappedRwLockWriteGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Temporarily unlocks the `RwLock` to execute the given function.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the `RwLock`.
+    #[inline]
+    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.rwlock.raw.unlock_exclusive();
+        defer!(s.rwlock.raw.lock_exclusive());
+        f()
+    }
+}
+
+impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
+    /// Atomically downgrades a write lock into a read lock without allowing any
+    /// writers to take exclusive access of the lock in the meantime.
+    ///
+    /// Note that if there are any writers currently waiting to take the lock
+    /// then other readers may not be able to acquire the lock even if it was
+    /// downgraded.
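+    ///
+    /// # Example
+    ///
+    /// A short sketch, assuming the `parking_lot` aliases (ignored in doc
+    /// tests):
+    ///
+    /// ```ignore
+    /// use parking_lot::{RwLock, RwLockWriteGuard};
+    ///
+    /// let lock = RwLock::new(0);
+    /// let mut writer = lock.write();
+    /// *writer += 1;
+    /// // Keep read access without ever fully releasing the lock.
+    /// let reader = RwLockWriteGuard::downgrade(writer);
+    /// assert_eq!(*reader, 1);
+    /// ```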
+    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
+        s.rwlock.raw.downgrade();
+        let rwlock = s.rwlock;
+        mem::forget(s);
+        RwLockReadGuard {
+            rwlock,
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
+    /// Atomically downgrades a write lock into an upgradable read lock without allowing any
+    /// writers to take exclusive access of the lock in the meantime.
+    ///
+    /// Note that if there are any writers currently waiting to take the lock
+    /// then other readers may not be able to acquire the lock even if it was
+    /// downgraded.
+    pub fn downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, R, T> {
+        s.rwlock.raw.downgrade_to_upgradable();
+        let rwlock = s.rwlock;
+        mem::forget(s);
+        RwLockUpgradableReadGuard {
+            rwlock,
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
+    /// Unlocks the `RwLock` using a fair unlock protocol.
+    ///
+    /// By default, `RwLock` is unfair and allows the current thread to re-lock
+    /// the `RwLock` before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the `RwLock` for a long time. This is
+    /// the default because it allows much higher throughput as it avoids
+    /// forcing a context switch on every `RwLock` unlock. This can result in one
+    /// thread acquiring a `RwLock` many more times than other threads.
+    ///
+    /// However, in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `RwLockWriteGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        s.rwlock.raw.unlock_exclusive_fair();
+        mem::forget(s);
+    }
+
+    /// Temporarily unlocks the `RwLock` to execute the given function.
+    ///
+    /// The `RwLock` is unlocked using a fair unlock protocol.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the `RwLock`.
+    #[inline]
+    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.rwlock.raw.unlock_exclusive_fair();
+        defer!(s.rwlock.raw.lock_exclusive());
+        f()
+    }
+
+    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `write`; however, it can be much more efficient in the case where there
+    /// are no waiting threads.
+    #[inline]
+    pub fn bump(s: &mut Self) {
+        s.rwlock.raw.bump_exclusive();
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockWriteGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.rwlock.data.get() }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, R, T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.rwlock.data.get() }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        self.rwlock.raw.unlock_exclusive();
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockWriteGuard<'a, R, T> {}
+
+/// RAII structure used to release the upgradable read access of a lock when
+/// dropped.
+#[must_use]
+pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> {
+    rwlock: &'a RwLock<R, T>,
+    marker: PhantomData<(&'a T, R::GuardMarker)>,
+}
+
+unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + Sync + 'a> Sync
+    for RwLockUpgradableReadGuard<'a, R, T>
+{}
+
+impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
+    /// Returns a reference to the original reader-writer lock object.
+    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
+        s.rwlock
+    }
+
+    /// Temporarily unlocks the `RwLock` to execute the given function.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the `RwLock`.
+    #[inline]
+    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.rwlock.raw.unlock_upgradable();
+        defer!(s.rwlock.raw.lock_upgradable());
+        f()
+    }
+
+    /// Atomically upgrades an upgradable read lock into an exclusive write lock,
+    /// blocking the current thread until it can be acquired.
+    pub fn upgrade(s: Self) -> RwLockWriteGuard<'a, R, T> {
+        s.rwlock.raw.upgrade();
+        let rwlock = s.rwlock;
+        mem::forget(s);
+        RwLockWriteGuard {
+            rwlock,
+            marker: PhantomData,
+        }
+    }
+
+    /// Tries to atomically upgrade an upgradable read lock into an exclusive write lock.
+    ///
+    /// If the access could not be granted at this time, then the current guard is returned.
+    pub fn try_upgrade(s: Self) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
+        if s.rwlock.raw.try_upgrade() {
+            let rwlock = s.rwlock;
+            mem::forget(s);
+            Ok(RwLockWriteGuard {
+                rwlock,
+                marker: PhantomData,
+            })
+        } else {
+            Err(s)
+        }
+    }
+}
+
+impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
+    /// Unlocks the `RwLock` using a fair unlock protocol.
+    ///
+    /// By default, `RwLock` is unfair and allows the current thread to re-lock
+    /// the `RwLock` before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the `RwLock` for a long time. This is
+    /// the default because it allows much higher throughput as it avoids
+    /// forcing a context switch on every `RwLock` unlock. This can result in one
+    /// thread acquiring a `RwLock` many more times than other threads.
+    ///
+    /// However, in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `RwLockUpgradableReadGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        s.rwlock.raw.unlock_upgradable_fair();
+        mem::forget(s);
+    }
+
+    /// Temporarily unlocks the `RwLock` to execute the given function.
+    ///
+    /// The `RwLock` is unlocked using a fair unlock protocol.
+    ///
+    /// This is safe because `&mut` guarantees that there exist no other
+    /// references to the data protected by the `RwLock`.
+    #[inline]
+    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
+    where
+        F: FnOnce() -> U,
+    {
+        s.rwlock.raw.unlock_upgradable_fair();
+        defer!(s.rwlock.raw.lock_upgradable());
+        f()
+    }
+
+    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
+    ///
+    /// This method is functionally equivalent to calling `unlock_fair` followed
+    /// by `upgradable_read`; however, it can be much more efficient in the case where there
+    /// are no waiting threads.
+    #[inline]
+    pub fn bump(s: &mut Self) {
+        s.rwlock.raw.bump_upgradable();
+    }
+}
+
+impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
+    /// Atomically downgrades an upgradable read lock into a shared read lock
+    /// without allowing any writers to take exclusive access of the lock in the
+    /// meantime.
+    ///
+    /// Note that if there are any writers currently waiting to take the lock
+    /// then other readers may not be able to acquire the lock even if it was
+    /// downgraded.
+    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
+        s.rwlock.raw.downgrade_upgradable();
+        let rwlock = s.rwlock;
+        mem::forget(s);
+        RwLockReadGuard {
+            rwlock,
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
+    /// Tries to atomically upgrade an upgradable read lock into an exclusive
+    /// write lock, until a timeout is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// the current guard is returned.
+    pub fn try_upgrade_for(
+        s: Self,
+        timeout: R::Duration,
+    ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
+        if s.rwlock.raw.try_upgrade_for(timeout) {
+            let rwlock = s.rwlock;
+            mem::forget(s);
+            Ok(RwLockWriteGuard {
+                rwlock,
+                marker: PhantomData,
+            })
+        } else {
+            Err(s)
+        }
+    }
+
+    /// Tries to atomically upgrade an upgradable read lock into an exclusive
+    /// write lock, until a timeout is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// the current guard is returned.
+    #[inline]
+    pub fn try_upgrade_until(
+        s: Self,
+        timeout: R::Instant,
+    ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
+        if s.rwlock.raw.try_upgrade_until(timeout) {
+            let rwlock = s.rwlock;
+            mem::forget(s);
+            Ok(RwLockWriteGuard {
+                rwlock,
+                marker: PhantomData,
+            })
+        } else {
+            Err(s)
+        }
+    }
+}
+
+impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Deref for RwLockUpgradableReadGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.rwlock.data.get() }
+    }
+}
+
+impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        self.rwlock.raw.unlock_upgradable();
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress
+    for RwLockUpgradableReadGuard<'a, R, T>
+{}
+
+/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
+/// subfield of the protected data.
+///
+/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the
+/// former doesn't support temporarily unlocking and re-locking, since that
+/// could introduce soundness issues if the locked object is modified by another
+/// thread.
+#[must_use]
+pub struct MappedRwLockReadGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
+    raw: &'a R,
+    data: *const T,
+    marker: PhantomData<&'a T>,
+}
+
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockReadGuard<'a, R, T> {}
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Send for MappedRwLockReadGuard<'a, R, T> where
+    R::GuardMarker: Send
+{}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
+    /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `MappedRwLockReadGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedRwLockReadGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U>
+    where
+        F: FnOnce(&T) -> &U,
+    {
+        let raw = s.raw;
+        let data = f(unsafe { &*s.data });
+        mem::forget(s);
+        MappedRwLockReadGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
+    /// Unlocks the `RwLock` using a fair unlock protocol.
+    ///
+    /// By default, `RwLock` is unfair and allows the current thread to re-lock
+    /// the `RwLock` before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the `RwLock` for a long time. This is
+    /// the default because it allows much higher throughput as it avoids
+    /// forcing a context switch on every `RwLock` unlock. This can result in one
+    /// thread acquiring a `RwLock` many more times than other threads.
+    ///
+    /// However, in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `MappedRwLockReadGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        s.raw.unlock_shared_fair();
+        mem::forget(s);
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockReadGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.data }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        self.raw.unlock_shared();
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
+    for MappedRwLockReadGuard<'a, R, T>
+{}
+
+/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a
+/// subfield of the protected data.
+///
+/// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the
+/// former doesn't support temporarily unlocking and re-locking, since that
+/// could introduce soundness issues if the locked object is modified by another
+/// thread.
+#[must_use]
+pub struct MappedRwLockWriteGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
+    raw: &'a R,
+    data: *mut T,
+    marker: PhantomData<&'a mut T>,
+}
+
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync
+    for MappedRwLockWriteGuard<'a, R, T>
+{}
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Send for MappedRwLockWriteGuard<'a, R, T> where
+    R::GuardMarker: Send
+{}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
+    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `MappedRwLockWriteGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MappedRwLockWriteGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(orig: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
+    where
+        F: FnOnce(&mut T) -> &mut U,
+    {
+        let raw = orig.raw;
+        let data = f(unsafe { &mut *orig.data });
+        mem::forget(orig);
+        MappedRwLockWriteGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
+    /// Atomically downgrades a write lock into a read lock without allowing any
+    /// writers to take exclusive access of the lock in the meantime.
+    ///
+    /// Note that if there are any writers currently waiting to take the lock
+    /// then other readers may not be able to acquire the lock even if it was
+    /// downgraded.
+    pub fn downgrade(s: Self) -> MappedRwLockReadGuard<'a, R, T> {
+        s.raw.downgrade();
+        let raw = s.raw;
+        let data = s.data;
+        mem::forget(s);
+        MappedRwLockReadGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
+    /// Unlocks the `RwLock` using a fair unlock protocol.
+    ///
+    /// By default, `RwLock` is unfair and allows the current thread to re-lock
+    /// the `RwLock` before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the `RwLock` for a long time. This is
+    /// the default because it allows much higher throughput as it avoids
+    /// forcing a context switch on every `RwLock` unlock. This can result in one
+    /// thread acquiring a `RwLock` many more times than other threads.
+    ///
+    /// However, in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `MappedRwLockWriteGuard` normally.
+    #[inline]
+    pub fn unlock_fair(s: Self) {
+        s.raw.unlock_exclusive_fair();
+        mem::forget(s);
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockWriteGuard<'a, R, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.data }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for MappedRwLockWriteGuard<'a, R, T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.data }
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, R, T> {
+    #[inline]
+    fn drop(&mut self) {
+        self.raw.unlock_exclusive();
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
+    for MappedRwLockWriteGuard<'a, R, T>
+{}
--- a/third_party/rust/parking_lot/.cargo-checksum.json
+++ b/third_party/rust/parking_lot/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{".travis.yml":"04d3d7425ce24e59d25df35da9c54f3ccd429c62ed8c9cf37b5ed2757afe96f1","Cargo.toml":"9e6a70c63617696e07a9130c27a80203180c1f240eb4ebdddde4429570da0c63","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"9d1e4237f1063e54aca1f65fc00a87ad53f75fcc73d054f8dd139f62f4a0b15e","appveyor.yml":"cfa9c3ae2476c879fe4240c306d45de6c2c04025212d8217fa76690888117594","src/condvar.rs":"1a3de60460e832d7ff76a82d5dac3f387fe2255e6a8ad4a686fe37f134c088c7","src/deadlock.rs":"82de990ef5966c852f8019b511e3c60471b562e56fd7ed0ca340399968b44a2d","src/elision.rs":"89072fe0aca87d53abc0f56490ae77bcf9d77e28e291bd13e861b1924bbb079f","src/lib.rs":"02d5716f4f43c2598afa57234e53d1a4c5db4f91ede937a226ee34eabbdc4da5","src/mutex.rs":"d8f557d40c3aab3e36f81961db9eb32831580a3a6a4b2a59674cafe6621e4772","src/once.rs":"1f408083854f918e896fdba8a9ecf25ae79ee06613d8daec75b800fb78dfd3a8","src/raw_mutex.rs":"f98ddd76e1491bc239b7c24e94f3f6a94ae0f5828873e78e1245ef19621a257b","src/raw_remutex.rs":"86e1e339567c12f91e3274ca3126c4af004fd30dff88a6cd261fc67680e33798","src/raw_rwlock.rs":"d3c71098df5e8b22cdfd7f8d7c3f287951d0bac1ac9ede83a94f809576ed9d41","src/remutex.rs":"d73f4a0f22f4a5e8c6126b784c03157f34456b0c1b90570b98db9f1c6b1f4046","src/rwlock.rs":"28e6c3a3d1aea9add4950fa5c67ba79f4aeb2e72830ff4d4a66adc2a9afa12dc","src/util.rs":"2d07c0c010a857790ae2ed6a1215eeed8af76859e076797ea1ba8dec82169e84"},"package":"9fd9d732f2de194336fb02fe11f9eed13d9e76f13f4315b4d88a14ca411750cd"}
\ No newline at end of file
+{"files":{".travis.yml":"452f57d826f68d05caabcfb7b663b810d5b2a8b2eec855d8b50bf0d14b8d2c86","CHANGELOG.md":"e254fac6600c725edb746f31f41b1b2ceeb9cfc85f4f9a3e6af874c70b020823","Cargo.toml":"215d5b3a2c18f556b5c66ac6d27eea71d7dd7e6b4857ecd6966c2e5cc03270ea","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"a52cf38f796e7f12215662e8a3a23aa9802c170a09ecba0e4be766c88f95a9c5","appveyor.yml":"cb1d02316926d88e174976bfc6781194569ca27f386c50e3091d8e52587d30a2","src/condvar.rs":"ce127f75bad5c175abb8147aac4b5be78aabdb599c5f8f3aad77f6bc3705274d","src/deadlock.rs":"8916c2e2820bfd3a55860ddb9f1b907888406b68cdae2b7a2093c825d28f3b99","src/elision.rs":"89072fe0aca87d53abc0f56490ae77bcf9d77e28e291bd13e861b1924bbb079f","src/lib.rs":"3e259bf3421f10c3e920daca511a4880b2620145a1fcb070a37548835c4f429a","src/mutex.rs":"0ac3e654e4aa2c3078a6aa22c83428d604e7f3f8ed4c261c40d030d232ca7b64","src/once.rs":"606e0e88d6c1ff82b69bda56e7409ec3a1aefa66b45b7fa42b88cba07ae70598","src/raw_mutex.rs":"881e75a843d76399d01c4ae0f09cd23b93b137b5035a47bd7886505132e58165","src/raw_rwlock.rs":"2e3c13e80cd06be53118ae2bcc7bdec708dda8c139c371ee12885f48903cf69c","src/remutex.rs":"bad8022610344086010b0661998a416db4b458c222e671b67df03fc4795c0298","src/rwlock.rs":"fc826cbcf2d7862ecb184b657a82bb8794a9e26ac329c8f87b589fa09f15d245","src/util.rs":"2d07c0c010a857790ae2ed6a1215eeed8af76859e076797ea1ba8dec82169e84"},"package":"69376b761943787ebd5cc85a5bc95958651a22609c5c1c2b65de21786baec72b"}
\ No newline at end of file
--- a/third_party/rust/parking_lot/.travis.yml
+++ b/third_party/rust/parking_lot/.travis.yml
@@ -1,40 +1,37 @@
 language: rust
 sudo: false
 
 rust:
-- 1.18.0
+- 1.26.2
 - stable
 - beta
 - nightly
 
 before_script:
 - |
   pip install 'travis-cargo<0.2' --user &&
   export PATH=$HOME/.local/bin:$PATH
 
 script:
 - cd core;
 - travis-cargo build;
+- cd ../lock_api;
+- travis-cargo build;
 - cd ..;
 - travis-cargo build
 - travis-cargo test
 - travis-cargo test -- --features=deadlock_detection
-- travis-cargo --only nightly doc -- --all-features --no-deps -p parking_lot -p parking_lot_core
-- if [ "$TRAVIS_RUST_VERSION" != "1.8.0" ]; then
-      cd benchmark;
-      travis-cargo build;
-      travis-cargo run -- --release --bin mutex 2 1 0 1;
-      travis-cargo run -- --release --bin rwlock 1 1 1 0 1;
-      cd ..;
-  fi
-
-after_success:
-- travis-cargo --only nightly doc-upload
+- travis-cargo --only nightly doc -- --all-features --no-deps -p parking_lot -p parking_lot_core -p lock_api
+- cd benchmark
+- travis-cargo build
+- cargo run -- --release --bin mutex 2 1 0 1 2
+- cargo run -- --release --bin rwlock 1 1 1 0 1 2
+- cd ..
 
 env:
   global:
   - TRAVIS_CARGO_NIGHTLY_FEATURE=nightly
   - RUST_TEST_THREADS=1
 
 notifications:
   email: false
new file mode 100644
--- /dev/null
+++ b/third_party/rust/parking_lot/CHANGELOG.md
@@ -0,0 +1,61 @@
+0.6.3 (2018-07-18)
+==================
+
+- Export `RawMutex`, `RawRwLock` and `RawThreadId`.
+
+0.6.2 (2018-06-18)
+==================
+
+- Enable `lock_api/nightly` feature from `parking_lot/nightly` (#79)
+
+0.6.1 (2018-06-08)
+==================
+
+Added missing typedefs for mapped lock guards:
+
+- `MappedMutexGuard`
+- `MappedReentrantMutexGuard`
+- `MappedRwLockReadGuard`
+- `MappedRwLockWriteGuard`
+
+0.6.0 (2018-06-08)
+==================
+
+This release moves most of the code for type-safe `Mutex` and `RwLock` types
+into a separate crate called `lock_api`. This new crate is compatible with
+`no_std` and provides `Mutex` and `RwLock` type-safe wrapper types from a
+raw mutex type which implements the `RawMutex` or `RawRwLock` trait. The API
+provided by the wrapper types can be extended by implementing more traits on the
+raw mutex type which provide more functionality (e.g. `RawMutexTimed`). See the
+crate documentation for more details.
+
+There are also several major changes:
+
+- The minimum required Rust version is bumped to 1.26.
+- All methods on `MutexGuard` (and other guard types) are no longer inherent
+  methods and must be called as `MutexGuard::method(self)`. This avoids
+  conflicts with methods from the inner type.
+- `MutexGuard` (and other guard types) add the `unlocked` method which
+  temporarily unlocks a mutex, runs the given closure, and then re-locks the
+  mutex.
+- `MutexGuard` (and other guard types) add the `bump` method which gives a
+  chance for other threads to acquire the mutex by temporarily unlocking it and
+  re-locking it. However this is optimized for the common case where there are
+  no threads waiting on the lock, in which case no unlocking is performed.
+- `MutexGuard` (and other guard types) add the `map` method which returns a
+  `MappedMutexGuard` which holds only a subset of the original locked type. The
+  `MappedMutexGuard` type is identical to `MutexGuard` except that it does not
+  support the `unlocked` and `bump` methods, and can't be used with `Condvar`.
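+
+A minimal sketch of the new guard `map` API (assuming the `parking_lot::Mutex`
+type built on these wrappers; the tuple layout is illustrative):
+
+```rust
+use parking_lot::{Mutex, MutexGuard};
+
+let mutex = Mutex::new((1, String::from("hello")));
+// Lock the whole tuple, then narrow the guard to the string alone.
+let mut name = MutexGuard::map(mutex.lock(), |pair| &mut pair.1);
+name.push_str(" world");
+assert_eq!(&*name, "hello world");
+```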
--- a/third_party/rust/parking_lot/Cargo.toml
+++ b/third_party/rust/parking_lot/Cargo.toml
@@ -7,29 +7,29 @@
 #
 # If you believe there's an error in this file please file an
 # issue against the rust-lang/cargo repository. If you're
 # editing this file be aware that the upstream Cargo.toml
 # will likely look very different (and much more reasonable)
 
 [package]
 name = "parking_lot"
-version = "0.5.4"
+version = "0.6.3"
 authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
 description = "More compact and efficient implementations of the standard synchronization primitives."
-documentation = "https://amanieu.github.io/parking_lot/parking_lot/index.html"
 readme = "README.md"
 keywords = ["mutex", "condvar", "rwlock", "once", "thread"]
+categories = ["concurrency"]
 license = "Apache-2.0/MIT"
 repository = "https://github.com/Amanieu/parking_lot"
-[dependencies.owning_ref]
-version = "0.3"
-optional = true
+[dependencies.lock_api]
+version = "0.1"
 
 [dependencies.parking_lot_core]
 version = "0.2"
 [dev-dependencies.rand]
-version = "0.4"
+version = "0.5"
 
 [features]
 deadlock_detection = ["parking_lot_core/deadlock_detection"]
 default = ["owning_ref"]
-nightly = ["parking_lot_core/nightly"]
+nightly = ["parking_lot_core/nightly", "lock_api/nightly"]
+owning_ref = ["lock_api/owning_ref"]
--- a/third_party/rust/parking_lot/README.md
+++ b/third_party/rust/parking_lot/README.md
@@ -1,16 +1,18 @@
 parking_lot
 ============
 
 [![Build Status](https://travis-ci.org/Amanieu/parking_lot.svg?branch=master)](https://travis-ci.org/Amanieu/parking_lot) [![Build status](https://ci.appveyor.com/api/projects/status/wppcc32ttpud0a30/branch/master?svg=true)](https://ci.appveyor.com/project/Amanieu/parking-lot/branch/master) [![Crates.io](https://img.shields.io/crates/v/parking_lot.svg)](https://crates.io/crates/parking_lot)
 
-[Documentation (synchronization primitives)](https://amanieu.github.io/parking_lot/parking_lot/index.html)
+[Documentation (synchronization primitives)](https://docs.rs/parking_lot/)
 
-[Documentation (core parking lot API)](https://amanieu.github.io/parking_lot/parking_lot_core/index.html)
+[Documentation (core parking lot API)](https://docs.rs/parking_lot_core/)
+
+[Documentation (type-safe lock API)](https://docs.rs/lock_api/)
 
 This library provides implementations of `Mutex`, `RwLock`, `Condvar` and
 `Once` that are smaller, faster and more flexible than those in the Rust
 standard library, as well as a `ReentrantMutex` type which supports recursive
 locking. It also exposes a low-level API for creating your own efficient
 synchronization primitives.
 
 When tested on x86_64 Linux, `parking_lot::Mutex` was found to be 1.5x
@@ -82,42 +84,40 @@ lock.
 
 There are a few restrictions when using this library on stable Rust:
 
 - `Mutex` and `Once` will use 1 word of space instead of 1 byte.
 - You will have to use `lazy_static!` to statically initialize `Mutex`,
   `Condvar` and `RwLock` types instead of `const fn`.
 - `RwLock` will not be able to take advantage of hardware lock elision for
   readers, which improves performance when there are multiple readers.
-- Slightly less efficient code may be generated for `compare_exchange`
-  operations. This should not affect architectures like x86 though.
 
 To enable nightly-only functionality, you need to enable the `nightly` feature
 in Cargo (see below).
 
 ## Usage
 
 Add this to your `Cargo.toml`:
 
 ```toml
 [dependencies]
-parking_lot = "0.5"
+parking_lot = "0.6"
 ```
 
 and this to your crate root:
 
 ```rust
 extern crate parking_lot;
 ```
 
 To enable nightly-only features, add this to your `Cargo.toml` instead:
 
 ```toml
 [dependencies]
-parking_lot = {version = "0.5", features = ["nightly"]}
+parking_lot = {version = "0.6", features = ["nightly"]}
 ```
 
 The experimental deadlock detector can be enabled with the
 `deadlock_detection` Cargo feature.
 
 The core parking lot API is provided by the `parking_lot_core` crate. It is
 separate from the synchronization primitives in the `parking_lot` crate so that
 changes to the core API do not cause breaking changes for users of `parking_lot`.
--- a/third_party/rust/parking_lot/appveyor.yml
+++ b/third_party/rust/parking_lot/appveyor.yml
@@ -1,20 +1,20 @@
 environment:
   TRAVIS_CARGO_NIGHTLY_FEATURE: nightly
   RUST_TEST_THREADS: 1
   matrix:
   - TARGET: nightly-x86_64-pc-windows-msvc
   - TARGET: nightly-i686-pc-windows-msvc
   - TARGET: nightly-x86_64-pc-windows-gnu
   - TARGET: nightly-i686-pc-windows-gnu
-  - TARGET: 1.18.0-x86_64-pc-windows-msvc
-  - TARGET: 1.18.0-i686-pc-windows-msvc
-  - TARGET: 1.18.0-x86_64-pc-windows-gnu
-  - TARGET: 1.18.0-i686-pc-windows-gnu
+  - TARGET: 1.24.0-x86_64-pc-windows-msvc
+  - TARGET: 1.24.0-i686-pc-windows-msvc
+  - TARGET: 1.24.0-x86_64-pc-windows-gnu
+  - TARGET: 1.24.0-i686-pc-windows-gnu
 
 install:
   - SET PATH=C:\Python27;C:\Python27\Scripts;%PATH%;%APPDATA%\Python\Scripts
   - pip install "travis-cargo<0.2" --user
   - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-${env:TARGET}.exe" -FileName "rust-install.exe"
   - ps: .\rust-install.exe /VERYSILENT /NORESTART /DIR="C:\rust" | Out-Null
   - ps: $env:PATH="$env:PATH;C:\rust\bin"
   - rustc -vV
--- a/third_party/rust/parking_lot/src/condvar.rs
+++ b/third_party/rust/parking_lot/src/condvar.rs
@@ -1,22 +1,23 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
+use deadlock;
+use lock_api::RawMutex as RawMutexTrait;
+use mutex::MutexGuard;
+use parking_lot_core::{self, ParkResult, RequeueOp, UnparkResult, DEFAULT_PARK_TOKEN};
+use raw_mutex::{RawMutex, TOKEN_HANDOFF, TOKEN_NORMAL};
 use std::sync::atomic::{AtomicPtr, Ordering};
 use std::time::{Duration, Instant};
-use std::ptr;
-use parking_lot_core::{self, ParkResult, RequeueOp, UnparkResult, DEFAULT_PARK_TOKEN};
-use mutex::{guard_lock, MutexGuard};
-use raw_mutex::{RawMutex, TOKEN_HANDOFF, TOKEN_NORMAL};
-use deadlock;
+use std::{fmt, ptr};
 
 /// A type indicating whether a timed wait on a condition variable returned
 /// due to a time out or not.
 #[derive(Debug, PartialEq, Eq, Copy, Clone)]
 pub struct WaitTimeoutResult(bool);
 
 impl WaitTimeoutResult {
     /// Returns whether the wait was known to have timed out.
@@ -211,17 +212,17 @@ impl Condvar {
     /// lock specified will have been re-acquired.
     ///
     /// # Panics
     ///
     /// This function will panic if another thread is waiting on the `Condvar`
     /// with a different `Mutex` object.
     #[inline]
     pub fn wait<T: ?Sized>(&self, mutex_guard: &mut MutexGuard<T>) {
-        self.wait_until_internal(guard_lock(mutex_guard), None);
+        self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, None);
     }
 
     /// Waits on this condition variable for a notification, timing out after
     /// the specified time instant.
     ///
     /// The semantics of this function are equivalent to `wait()` except that
     /// the thread will be blocked roughly until `timeout` is reached. This
     /// method should not be used for precise timing due to anomalies such as
@@ -243,22 +244,29 @@ impl Condvar {
     /// This function will panic if another thread is waiting on the `Condvar`
     /// with a different `Mutex` object.
     #[inline]
     pub fn wait_until<T: ?Sized>(
         &self,
         mutex_guard: &mut MutexGuard<T>,
         timeout: Instant,
     ) -> WaitTimeoutResult {
-        self.wait_until_internal(guard_lock(mutex_guard), Some(timeout))
+        self.wait_until_internal(
+            unsafe { MutexGuard::mutex(mutex_guard).raw() },
+            Some(timeout),
+        )
     }
 
     // This is a non-generic function to reduce the monomorphization cost of
     // using `wait_until`.
-    fn wait_until_internal(&self, mutex: &RawMutex, timeout: Option<Instant>) -> WaitTimeoutResult {
+    fn wait_until_internal(
+        &self,
+        mutex: &RawMutex,
+        timeout: Option<Instant>,
+    ) -> WaitTimeoutResult {
         unsafe {
             let result;
             let mut bad_mutex = false;
             let mut requeued = false;
             {
                 let addr = self as *const _ as usize;
                 let lock_addr = mutex as *const _ as *mut _;
                 let validate = || {
@@ -271,17 +279,17 @@ impl Condvar {
                     } else if state != lock_addr {
                         bad_mutex = true;
                         return false;
                     }
                     true
                 };
                 let before_sleep = || {
                     // Unlock the mutex before sleeping...
-                    mutex.unlock(false);
+                    mutex.unlock();
                 };
                 let timed_out = |k, was_last_thread| {
                     // If we were requeued to a mutex, then we did not time out.
                     // We'll just park ourselves on the mutex again when we try
                     // to lock it later.
                     requeued = k != addr;
 
                     // If we were the last thread on the queue then we need to
@@ -349,16 +357,22 @@ impl Condvar {
 
 impl Default for Condvar {
     #[inline]
     fn default() -> Condvar {
         Condvar::new()
     }
 }
 
+impl fmt::Debug for Condvar {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.pad("Condvar { .. }")
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use std::sync::mpsc::channel;
     use std::sync::Arc;
     use std::thread;
     use std::time::{Duration, Instant};
     use {Condvar, Mutex};
 
@@ -505,9 +519,15 @@ mod tests {
             let _g = m2.lock();
             c2.notify_one();
         });
         c.wait(&mut g);
         drop(g);
 
         let _ = c.wait_for(&mut m3.lock(), Duration::from_millis(1));
     }
+
+    #[test]
+    fn test_debug_condvar() {
+        let c = Condvar::new();
+        assert_eq!(format!("{:?}", c), "Condvar { .. }");
+    }
 }
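
To illustrate the `wait` signature exercised by the tests above, here is a minimal, self-contained sketch; the names `pair` and `started` are illustrative only. Note that `wait` takes `&mut MutexGuard` and re-acquires the lock before returning, per the docs in this file.

```rust
extern crate parking_lot;

use parking_lot::{Condvar, Mutex};
use std::sync::Arc;
use std::thread;

fn main() {
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = pair.clone();

    thread::spawn(move || {
        let &(ref lock, ref cvar) = &*pair2;
        // Set the flag while holding the lock, then wake one waiter.
        *lock.lock() = true;
        cvar.notify_one();
    });

    let &(ref lock, ref cvar) = &*pair;
    let mut started = lock.lock();
    while !*started {
        // Atomically releases the mutex and parks; the lock is held
        // again by the time `wait` returns.
        cvar.wait(&mut started);
    }
}
```
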
--- a/third_party/rust/parking_lot/src/deadlock.rs
+++ b/third_party/rust/parking_lot/src/deadlock.rs
@@ -1,9 +1,9 @@
-//! [Experimental] Deadlock detection
+//! \[Experimental\] Deadlock detection
 //!
 //! This feature is optional and can be enabled via the `deadlock_detection` feature flag.
 //!
 //! # Example
 //!
 //! ```
 //! #[cfg(feature = "deadlock_detection")]
 //! { // only for #[cfg]
@@ -35,18 +35,18 @@
 
 #[cfg(feature = "deadlock_detection")]
 pub use parking_lot_core::deadlock::check_deadlock;
 pub(crate) use parking_lot_core::deadlock::{acquire_resource, release_resource};
 
 #[cfg(test)]
 #[cfg(feature = "deadlock_detection")]
 mod tests {
+    use std::sync::{Arc, Barrier};
     use std::thread::{self, sleep};
-    use std::sync::{Arc, Barrier};
     use std::time::Duration;
     use {Mutex, ReentrantMutex, RwLock};
 
     fn check_deadlock() -> bool {
         use parking_lot_core::deadlock::check_deadlock;
         !check_deadlock().is_empty()
     }
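
As a rough sketch of how the re-exported `check_deadlock` might be used (assuming the `deadlock_detection` feature is enabled, since the `deadlock` module is private otherwise; the watchdog pattern below is illustrative, not part of the crate):

```rust
extern crate parking_lot;

use std::thread;
use std::time::Duration;

fn main() {
    // Background watchdog: periodically report any deadlocked thread cycles.
    // Requires building with the `deadlock_detection` Cargo feature.
    thread::spawn(|| loop {
        thread::sleep(Duration::from_secs(10));
        let deadlocks = parking_lot::deadlock::check_deadlock();
        if !deadlocks.is_empty() {
            println!("{} deadlock(s) detected", deadlocks.len());
        }
    });
    // ... rest of the application ...
}
```
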
 
--- a/third_party/rust/parking_lot/src/lib.rs
+++ b/third_party/rust/parking_lot/src/lib.rs
@@ -9,58 +9,36 @@
 //! `Once` that are smaller, faster and more flexible than those in the Rust
 //! standard library. It also provides a `ReentrantMutex` type.
 
 #![warn(missing_docs)]
 #![cfg_attr(feature = "nightly", feature(const_fn))]
 #![cfg_attr(feature = "nightly", feature(integer_atomics))]
 #![cfg_attr(feature = "nightly", feature(asm))]
 
-#[cfg(feature = "owning_ref")]
-extern crate owning_ref;
-
+extern crate lock_api;
 extern crate parking_lot_core;
 
-mod util;
+mod condvar;
 mod elision;
+mod mutex;
+mod once;
 mod raw_mutex;
-mod raw_remutex;
 mod raw_rwlock;
-mod condvar;
-mod mutex;
 mod remutex;
 mod rwlock;
-mod once;
+mod util;
 
 #[cfg(feature = "deadlock_detection")]
 pub mod deadlock;
 #[cfg(not(feature = "deadlock_detection"))]
 mod deadlock;
 
+pub use condvar::{Condvar, WaitTimeoutResult};
+pub use mutex::{MappedMutexGuard, Mutex, MutexGuard};
 pub use once::{Once, OnceState, ONCE_INIT};
-pub use mutex::{Mutex, MutexGuard};
-pub use remutex::{ReentrantMutex, ReentrantMutexGuard};
-pub use condvar::{Condvar, WaitTimeoutResult};
-pub use rwlock::{RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard};
-
-#[cfg(feature = "owning_ref")]
-use owning_ref::OwningRef;
-
-/// Typedef of an owning reference that uses a `MutexGuard` as the owner.
-#[cfg(feature = "owning_ref")]
-pub type MutexGuardRef<'a, T, U = T> = OwningRef<MutexGuard<'a, T>, U>;
-
-/// Typedef of an owning reference that uses a `ReentrantMutexGuard` as the owner.
-#[cfg(feature = "owning_ref")]
-pub type ReentrantMutexGuardRef<'a, T, U = T> = OwningRef<ReentrantMutexGuard<'a, T>, U>;
-
-/// Typedef of an owning reference that uses a `RwLockReadGuard` as the owner.
-#[cfg(feature = "owning_ref")]
-pub type RwLockReadGuardRef<'a, T, U = T> = OwningRef<RwLockReadGuard<'a, T>, U>;
-
-/// Typedef of an owning reference that uses a `RwLockWriteGuard` as the owner.
-#[cfg(feature = "owning_ref")]
-pub type RwLockWriteGuardRef<'a, T, U = T> = OwningRef<RwLockWriteGuard<'a, T>, U>;
-
-/// Typedef of an owning reference that uses a `RwLockUpgradableReadGuard` as the owner.
-#[cfg(feature = "owning_ref")]
-pub type RwLockUpgradableReadGuardRef<'a, T, U = T> =
-    OwningRef<RwLockUpgradableReadGuard<'a, T>, U>;
+pub use raw_mutex::RawMutex;
+pub use raw_rwlock::RawRwLock;
+pub use remutex::{MappedReentrantMutexGuard, RawThreadId, ReentrantMutex, ReentrantMutexGuard};
+pub use rwlock::{
+    MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard,
+    RwLockUpgradableReadGuard, RwLockWriteGuard,
+};
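
The re-exported guard types can be seen in action with a short sketch (assuming the usual `read`/`upgradable_read` methods on `RwLock`, which this diff re-exports but does not show):

```rust
extern crate parking_lot;

use parking_lot::{RwLock, RwLockUpgradableReadGuard};

fn main() {
    let lock = RwLock::new(5);

    // Any number of readers may hold the lock at once.
    {
        let r1 = lock.read();
        let r2 = lock.read();
        assert_eq!(*r1 + *r2, 10);
    }

    // An upgradable read can be promoted to a write lock in place,
    // without releasing the lock in between.
    let readable = lock.upgradable_read();
    let mut writable = RwLockUpgradableReadGuard::upgrade(readable);
    *writable += 1;
    assert_eq!(*writable, 6);
}
```
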
--- a/third_party/rust/parking_lot/src/mutex.rs
+++ b/third_party/rust/parking_lot/src/mutex.rs
@@ -1,26 +1,18 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use std::cell::UnsafeCell;
-use std::ops::{Deref, DerefMut};
-use std::time::{Duration, Instant};
-use std::fmt;
-use std::mem;
-use std::marker::PhantomData;
+use lock_api;
 use raw_mutex::RawMutex;
 
-#[cfg(feature = "owning_ref")]
-use owning_ref::StableAddress;
-
 /// A mutual exclusion primitive useful for protecting shared data
 ///
 /// This mutex will block threads waiting for the lock to become available. The
 /// mutex can also be statically initialized or created via a `new`
 /// constructor. Each mutex has a type parameter which represents the data that
 /// it is protecting. The data can only be accessed through the RAII guards
 /// returned from `lock` and `try_lock`, which guarantees that the data is only
 /// ever accessed when the mutex is locked.
@@ -88,286 +80,39 @@ use owning_ref::StableAddress;
 ///             tx.send(()).unwrap();
 ///         }
 ///         // the lock is unlocked here when `data` goes out of scope.
 ///     });
 /// }
 ///
 /// rx.recv().unwrap();
 /// ```
-pub struct Mutex<T: ?Sized> {
-    raw: RawMutex,
-    data: UnsafeCell<T>,
-}
-
-unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
-unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
+pub type Mutex<T> = lock_api::Mutex<RawMutex, T>;
 
 /// An RAII implementation of a "scoped lock" of a mutex. When this structure is
 /// dropped (falls out of scope), the lock will be unlocked.
 ///
 /// The data protected by the mutex can be accessed through this guard via its
 /// `Deref` and `DerefMut` implementations.
-#[must_use]
-pub struct MutexGuard<'a, T: ?Sized + 'a> {
-    raw: &'a RawMutex,
-    data: *mut T,
-    marker: PhantomData<&'a mut T>,
-}
-
-unsafe impl<'a, T: ?Sized + Sync + 'a> Sync for MutexGuard<'a, T> {}
-
-impl<T> Mutex<T> {
-    /// Creates a new mutex in an unlocked state ready for use.
-    #[cfg(feature = "nightly")]
-    #[inline]
-    pub const fn new(val: T) -> Mutex<T> {
-        Mutex {
-            data: UnsafeCell::new(val),
-            raw: RawMutex::new(),
-        }
-    }
-
-    /// Creates a new mutex in an unlocked state ready for use.
-    #[cfg(not(feature = "nightly"))]
-    #[inline]
-    pub fn new(val: T) -> Mutex<T> {
-        Mutex {
-            data: UnsafeCell::new(val),
-            raw: RawMutex::new(),
-        }
-    }
-
-    /// Consumes this mutex, returning the underlying data.
-    #[inline]
-    pub fn into_inner(self) -> T {
-        unsafe { self.data.into_inner() }
-    }
-}
-
-impl<T: ?Sized> Mutex<T> {
-    #[inline]
-    fn guard(&self) -> MutexGuard<T> {
-        MutexGuard {
-            raw: &self.raw,
-            data: self.data.get(),
-            marker: PhantomData,
-        }
-    }
-
-    /// Acquires a mutex, blocking the current thread until it is able to do so.
-    ///
-    /// This function will block the local thread until it is available to acquire
-    /// the mutex. Upon returning, the thread is the only thread with the mutex
-    /// held. An RAII guard is returned to allow scoped unlock of the lock. When
-    /// the guard goes out of scope, the mutex will be unlocked.
-    ///
-    /// Attempts to lock a mutex in the thread which already holds the lock will
-    /// result in a deadlock.
-    #[inline]
-    pub fn lock(&self) -> MutexGuard<T> {
-        self.raw.lock();
-        self.guard()
-    }
-
-    /// Attempts to acquire this lock.
-    ///
-    /// If the lock could not be acquired at this time, then `None` is returned.
-    /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
-    /// guard is dropped.
-    ///
-    /// This function does not block.
-    #[inline]
-    pub fn try_lock(&self) -> Option<MutexGuard<T>> {
-        if self.raw.try_lock() {
-            Some(self.guard())
-        } else {
-            None
-        }
-    }
-
-    /// Attempts to acquire this lock until a timeout is reached.
-    ///
-    /// If the lock could not be acquired before the timeout expired, then
-    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
-    /// be unlocked when the guard is dropped.
-    #[inline]
-    pub fn try_lock_for(&self, timeout: Duration) -> Option<MutexGuard<T>> {
-        if self.raw.try_lock_for(timeout) {
-            Some(self.guard())
-        } else {
-            None
-        }
-    }
-
-    /// Attempts to acquire this lock until a timeout is reached.
-    ///
-    /// If the lock could not be acquired before the timeout expired, then
-    /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
-    /// be unlocked when the guard is dropped.
-    #[inline]
-    pub fn try_lock_until(&self, timeout: Instant) -> Option<MutexGuard<T>> {
-        if self.raw.try_lock_until(timeout) {
-            Some(self.guard())
-        } else {
-            None
-        }
-    }
-
-    /// Returns a mutable reference to the underlying data.
-    ///
-    /// Since this call borrows the `Mutex` mutably, no actual locking needs to
-    /// take place---the mutable borrow statically guarantees no locks exist.
-    #[inline]
-    pub fn get_mut(&mut self) -> &mut T {
-        unsafe { &mut *self.data.get() }
-    }
+pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, RawMutex, T>;
 
-    /// Releases the mutex.
-    ///
-    /// # Safety
-    ///
-    /// This function must only be called if the mutex was locked using
-    /// `raw_lock` or `raw_try_lock`, or if a `MutexGuard` from this mutex was
-    /// leaked (e.g. with `mem::forget`). The mutex must be locked.
-    #[inline]
-    pub unsafe fn raw_unlock(&self) {
-        self.raw.unlock(false);
-    }
-
-    /// Releases the mutex using a fair unlock protocol.
-    ///
-    /// See `MutexGuard::unlock_fair`.
-    ///
-    /// # Safety
-    ///
-    /// This function must only be called if the mutex was locked using
-    /// `raw_lock` or `raw_try_lock`, or if a `MutexGuard` from this mutex was
-    /// leaked (e.g. with `mem::forget`). The mutex must be locked.
-    #[inline]
-    pub unsafe fn raw_unlock_fair(&self) {
-        self.raw.unlock(true);
-    }
-}
-impl Mutex<()> {
-    /// Acquires a mutex, blocking the current thread until it is able to do so.
-    ///
-    /// This is similar to `lock`, except that a `MutexGuard` is not returned.
-    /// Instead you will need to call `raw_unlock` to release the mutex.
-    #[inline]
-    pub fn raw_lock(&self) {
-        self.raw.lock();
-    }
-
-    /// Attempts to acquire this lock.
-    ///
-    /// This is similar to `try_lock`, except that a `MutexGuard` is not
-    /// returned. Instead you will need to call `raw_unlock` to release the
-    /// mutex.
-    #[inline]
-    pub fn raw_try_lock(&self) -> bool {
-        self.raw.try_lock()
-    }
-}
-
-impl<T: ?Sized + Default> Default for Mutex<T> {
-    #[inline]
-    fn default() -> Mutex<T> {
-        Mutex::new(Default::default())
-    }
-}
-
-impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match self.try_lock() {
-            Some(guard) => write!(f, "Mutex {{ data: {:?} }}", &*guard),
-            None => write!(f, "Mutex {{ <locked> }}"),
-        }
-    }
-}
-
-impl<'a, T: ?Sized + 'a> MutexGuard<'a, T> {
-    /// Unlocks the mutex using a fair unlock protocol.
-    ///
-    /// By default, mutexes are unfair and allow the current thread to re-lock
-    /// the mutex before another has the chance to acquire the lock, even if
-    /// that thread has been blocked on the mutex for a long time. This is the
-    /// default because it allows much higher throughput as it avoids forcing a
-    /// context switch on every mutex unlock. This can result in one thread
-    /// acquiring a mutex many more times than other threads.
-    ///
-    /// However in some cases it can be beneficial to ensure fairness by forcing
-    /// the lock to pass on to a waiting thread if there is one. This is done by
-    /// using this method instead of dropping the `MutexGuard` normally.
-    #[inline]
-    pub fn unlock_fair(self) {
-        self.raw.unlock(true);
-        mem::forget(self);
-    }
-
-    /// Make a new `MutexGuard` for a component of the locked data.
-    ///
-    /// This operation cannot fail as the `MutexGuard` passed
-    /// in already locked the mutex.
-    ///
-    /// This is an associated function that needs to be
-    /// used as `MutexGuard::map(...)`. A method would interfere with methods of
-    /// the same name on the contents of the locked data.
-    #[inline]
-    pub fn map<U: ?Sized, F>(orig: Self, f: F) -> MutexGuard<'a, U>
-    where
-        F: FnOnce(&mut T) -> &mut U,
-    {
-        let raw = orig.raw;
-        let data = f(unsafe { &mut *orig.data });
-        mem::forget(orig);
-        MutexGuard {
-            raw,
-            data,
-            marker: PhantomData,
-        }
-    }
-}
-
-impl<'a, T: ?Sized + 'a> Deref for MutexGuard<'a, T> {
-    type Target = T;
-    #[inline]
-    fn deref(&self) -> &T {
-        unsafe { &*self.data }
-    }
-}
-
-impl<'a, T: ?Sized + 'a> DerefMut for MutexGuard<'a, T> {
-    #[inline]
-    fn deref_mut(&mut self) -> &mut T {
-        unsafe { &mut *self.data }
-    }
-}
-
-impl<'a, T: ?Sized + 'a> Drop for MutexGuard<'a, T> {
-    #[inline]
-    fn drop(&mut self) {
-        self.raw.unlock(false);
-    }
-}
-
-#[cfg(feature = "owning_ref")]
-unsafe impl<'a, T: ?Sized> StableAddress for MutexGuard<'a, T> {}
-
-// Helper function used by Condvar, not publicly exported
-#[inline]
-pub(crate) fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a RawMutex {
-    &guard.raw
-}
+/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a
+/// subfield of the protected data.
+///
+/// The main difference between `MappedMutexGuard` and `MutexGuard` is that the
+/// former doesn't support temporarily unlocking and re-locking, since that
+/// could introduce soundness issues if the locked object is modified by another
+/// thread.
+pub type MappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawMutex, T>;
 
 #[cfg(test)]
 mod tests {
+    use std::sync::atomic::{AtomicUsize, Ordering};
     use std::sync::mpsc::channel;
     use std::sync::Arc;
-    use std::sync::atomic::{AtomicUsize, Ordering};
     use std::thread;
     use {Condvar, Mutex};
 
     struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
 
     #[derive(Eq, PartialEq, Debug)]
     struct NonCopy(i32);
 
@@ -527,9 +272,27 @@ mod tests {
 
     #[test]
     fn test_mutexguard_sync() {
         fn sync<T: Sync>(_: T) {}
 
         let mutex = Mutex::new(());
         sync(mutex.lock());
     }
+
+    #[test]
+    fn test_mutex_debug() {
+        let mutex = Mutex::new(vec![0u8, 10]);
+
+        assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }");
+        assert_eq!(
+            format!("{:#?}", mutex),
+            "Mutex {
+    data: [
+        0,
+        10
+    ]
+}"
+        );
+        let _lock = mutex.lock();
+        assert_eq!(format!("{:?}", mutex), "Mutex { <locked> }");
+    }
 }
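
A short sketch of the `MutexGuard::map` / `MappedMutexGuard` pair described above (the tuple payload is illustrative):

```rust
extern crate parking_lot;

use parking_lot::{Mutex, MutexGuard};

fn main() {
    let mutex = Mutex::new((0u32, String::from("hello")));

    // `map` is an associated function, so it cannot shadow methods on the
    // protected data; it narrows the guard to a single field.
    let guard = mutex.lock();
    let mut name = MutexGuard::map(guard, |pair| &mut pair.1);
    name.push_str(", world");
    assert_eq!(*name, "hello, world");
}
```
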
--- a/third_party/rust/parking_lot/src/once.rs
+++ b/third_party/rust/parking_lot/src/once.rs
@@ -11,19 +11,19 @@ use std::sync::atomic::{ATOMIC_U8_INIT, 
 #[cfg(feature = "nightly")]
 type U8 = u8;
 #[cfg(not(feature = "nightly"))]
 use std::sync::atomic::AtomicUsize as AtomicU8;
 #[cfg(not(feature = "nightly"))]
 use std::sync::atomic::ATOMIC_USIZE_INIT as ATOMIC_U8_INIT;
 #[cfg(not(feature = "nightly"))]
 type U8 = usize;
-use std::mem;
+use parking_lot_core::{self, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
 use std::fmt;
-use parking_lot_core::{self, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
+use std::mem;
 use util::UncheckedOptionExt;
 
 const DONE_BIT: U8 = 1;
 const POISON_BIT: U8 = 2;
 const LOCKED_BIT: U8 = 4;
 const PARKED_BIT: U8 = 8;
 
 /// Current state of a `Once`.
@@ -340,17 +340,19 @@ impl Default for Once {
     #[inline]
     fn default() -> Once {
         Once::new()
     }
 }
 
 impl fmt::Debug for Once {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "Once {{ state: {:?} }}", &self.state())
+        f.debug_struct("Once")
+            .field("state", &self.state())
+            .finish()
     }
 }
 
 #[cfg(test)]
 mod tests {
     #[cfg(feature = "nightly")]
     use std::panic;
     use std::sync::mpsc::channel;
@@ -465,9 +467,22 @@ mod tests {
             assert!(!called);
         });
 
         tx2.send(()).unwrap();
 
         assert!(t1.join().is_ok());
         assert!(t2.join().is_ok());
     }
+
+    #[test]
+    fn test_once_debug() {
+        static O: Once = ONCE_INIT;
+
+        assert_eq!(format!("{:?}", O), "Once { state: New }");
+        assert_eq!(
+            format!("{:#?}", O),
+            "Once {
+    state: New
+}"
+        );
+    }
 }
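
A minimal sketch of `call_once` against the `ONCE_INIT` pattern used in the test above (`call_once` itself is not shown in this hunk, but is the core entry point of `Once`):

```rust
extern crate parking_lot;

use parking_lot::{Once, ONCE_INIT};

static INIT: Once = ONCE_INIT;

fn main() {
    let mut calls = 0;
    INIT.call_once(|| calls += 1);
    INIT.call_once(|| calls += 1); // skipped: the state is already `Done`
    assert_eq!(calls, 1);
}
```
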
--- a/third_party/rust/parking_lot/src/raw_mutex.rs
+++ b/third_party/rust/parking_lot/src/raw_mutex.rs
@@ -11,96 +11,58 @@ use std::sync::atomic::{ATOMIC_U8_INIT, 
 #[cfg(feature = "nightly")]
 type U8 = u8;
 #[cfg(not(feature = "nightly"))]
 use std::sync::atomic::AtomicUsize as AtomicU8;
 #[cfg(not(feature = "nightly"))]
 use std::sync::atomic::ATOMIC_USIZE_INIT as ATOMIC_U8_INIT;
 #[cfg(not(feature = "nightly"))]
 type U8 = usize;
-use std::time::{Duration, Instant};
+use deadlock;
+use lock_api::{GuardNoSend, RawMutex as RawMutexTrait, RawMutexFair, RawMutexTimed};
 use parking_lot_core::{self, ParkResult, SpinWait, UnparkResult, UnparkToken, DEFAULT_PARK_TOKEN};
-use deadlock;
+use std::time::{Duration, Instant};
 
 // UnparkToken used to indicate that the target thread should attempt to
 // lock the mutex again as soon as it is unparked.
-pub const TOKEN_NORMAL: UnparkToken = UnparkToken(0);
+pub(crate) const TOKEN_NORMAL: UnparkToken = UnparkToken(0);
 
 // UnparkToken used to indicate that the mutex is being handed off to the target
 // thread directly without unlocking it.
-pub const TOKEN_HANDOFF: UnparkToken = UnparkToken(1);
+pub(crate) const TOKEN_HANDOFF: UnparkToken = UnparkToken(1);
 
 const LOCKED_BIT: U8 = 1;
 const PARKED_BIT: U8 = 2;
 
+/// Raw mutex type backed by the parking lot.
 pub struct RawMutex {
     state: AtomicU8,
 }
 
-impl RawMutex {
-    #[cfg(feature = "nightly")]
-    #[inline]
-    pub const fn new() -> RawMutex {
-        RawMutex {
-            state: ATOMIC_U8_INIT,
-        }
-    }
-    #[cfg(not(feature = "nightly"))]
-    #[inline]
-    pub fn new() -> RawMutex {
-        RawMutex {
-            state: ATOMIC_U8_INIT,
-        }
-    }
+unsafe impl RawMutexTrait for RawMutex {
+    const INIT: RawMutex = RawMutex {
+        state: ATOMIC_U8_INIT,
+    };
+
+    type GuardMarker = GuardNoSend;
 
     #[inline]
-    pub fn lock(&self) {
-        if self.state
+    fn lock(&self) {
+        if self
+            .state
             .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
             .is_err()
         {
             self.lock_slow(None);
         }
         unsafe { deadlock::acquire_resource(self as *const _ as usize) };
     }
 
     #[inline]
-    pub fn try_lock_until(&self, timeout: Instant) -> bool {
-        let result = if self.state
-            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
-            .is_ok()
-        {
-            true
-        } else {
-            self.lock_slow(Some(timeout))
-        };
-        if result {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
-        }
-        result
-    }
-
-    #[inline]
-    pub fn try_lock_for(&self, timeout: Duration) -> bool {
-        let result = if self.state
-            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
-            .is_ok()
-        {
-            true
-        } else {
-            self.lock_slow(Some(Instant::now() + timeout))
-        };
-        if result {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
-        }
-        result
-    }
-
-    #[inline]
-    pub fn try_lock(&self) -> bool {
+    fn try_lock(&self) -> bool {
         let mut state = self.state.load(Ordering::Relaxed);
         loop {
             if state & LOCKED_BIT != 0 {
                 return false;
             }
             match self.state.compare_exchange_weak(
                 state,
                 state | LOCKED_BIT,
@@ -112,27 +74,91 @@ impl RawMutex {
                     return true;
                 }
                 Err(x) => state = x,
             }
         }
     }
 
     #[inline]
-    pub fn unlock(&self, force_fair: bool) {
+    fn unlock(&self) {
         unsafe { deadlock::release_resource(self as *const _ as usize) };
-        if self.state
+        if self
+            .state
+            .compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
+            .is_ok()
+        {
+            return;
+        }
+        self.unlock_slow(false);
+    }
+}
+
+unsafe impl RawMutexFair for RawMutex {
+    #[inline]
+    fn unlock_fair(&self) {
+        unsafe { deadlock::release_resource(self as *const _ as usize) };
+        if self
+            .state
             .compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
             .is_ok()
         {
             return;
         }
-        self.unlock_slow(force_fair);
+        self.unlock_slow(true);
     }
 
+    #[inline]
+    fn bump(&self) {
+        if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 {
+            self.bump_slow();
+        }
+    }
+}
+
+unsafe impl RawMutexTimed for RawMutex {
+    type Duration = Duration;
+    type Instant = Instant;
+
+    #[inline]
+    fn try_lock_until(&self, timeout: Instant) -> bool {
+        let result = if self
+            .state
+            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
+            .is_ok()
+        {
+            true
+        } else {
+            self.lock_slow(Some(timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        }
+        result
+    }
+
+    #[inline]
+    fn try_lock_for(&self, timeout: Duration) -> bool {
+        let result = if self
+            .state
+            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
+            .is_ok()
+        {
+            true
+        } else {
+            self.lock_slow(Some(Instant::now() + timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        }
+        result
+    }
+}
+
+impl RawMutex {
     // Used by Condvar when requeuing threads to us, must be called while
     // holding the queue lock.
     #[inline]
     pub(crate) fn mark_parked_if_locked(&self) -> bool {
         let mut state = self.state.load(Ordering::Relaxed);
         loop {
             if state & LOCKED_BIT == 0 {
                 return false;
@@ -234,17 +260,18 @@ impl RawMutex {
             state = self.state.load(Ordering::Relaxed);
         }
     }
 
     #[cold]
     #[inline(never)]
     fn unlock_slow(&self, force_fair: bool) {
         // Unlock directly if there are no parked threads
-        if self.state
+        if self
+            .state
             .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
             .is_ok()
         {
             return;
         }
 
         // Unpark one thread and leave the parked bit set if there might
         // still be parked threads on this address.
@@ -269,9 +296,17 @@ impl RawMutex {
                 } else {
                     self.state.store(0, Ordering::Release);
                 }
                 TOKEN_NORMAL
             };
             parking_lot_core::unpark_one(addr, callback);
         }
     }
+
+    #[cold]
+    #[inline(never)]
+    fn bump_slow(&self) {
+        unsafe { deadlock::release_resource(self as *const _ as usize) };
+        self.unlock_slow(true);
+        self.lock();
+    }
 }
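
The payoff of implementing the `lock_api` traits is that the generic guard machinery composes over this raw lock. A minimal sketch mirroring the `pub type Mutex<T> = lock_api::Mutex<RawMutex, T>` alias from `src/mutex.rs` above (`MyMutex` is an illustrative name; an external crate would also depend on `lock_api` directly):

```rust
extern crate lock_api;
extern crate parking_lot;

use parking_lot::RawMutex;

// lock_api supplies the RAII guards; parking_lot supplies the raw lock.
type MyMutex<T> = lock_api::Mutex<RawMutex, T>;

fn main() {
    let m = MyMutex::new(0u32);
    // Each `lock()` returns a guard that unlocks on drop.
    *m.lock() += 1;
    assert_eq!(*m.lock(), 1);
}
```
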
deleted file mode 100644
--- a/third_party/rust/parking_lot/src/raw_remutex.rs
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2016 Amanieu d'Antras
-//
-// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
-// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
-// http://opensource.org/licenses/MIT>, at your option. This file may not be
-// copied, modified, or distributed except according to those terms.
-
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::time::{Duration, Instant};
-use std::cell::Cell;
-use raw_mutex::RawMutex;
-