Bug 1552549 - Update rand dependency to 0.6 r=kats,froydnj,nika,jkt,jcj
(backed out by changeset 25d6d9420195)
author: Dzmitry Malyshau <dmalyshau@mozilla.com>
date: Tue, 21 May 2019 19:36:56 +0000
changeset: 474821 a10cdf32fb5a3fe7474a2e3f6a838215863c3c9e
parent: 474820 723406ecb5f6063834f416606aea688d3cd56a95
child: 474822 622f7bf7f38614a45ad8f47411c7b69c023e1688
push id: 36047
push user: nerli@mozilla.com
push date: Wed, 22 May 2019 03:40:58 +0000
treeherder: mozilla-central@267ddc3595fe
reviewers: kats, froydnj, nika, jkt, jcj
bugs: 1552549
milestone: 69.0a1
Update rand version in u2fhid and xpcom

Differential Revision: https://phabricator.services.mozilla.com/D31669
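
For context, the main source-level effect of moving from rand 0.3 to rand 0.6 shows up in u2fprotocol.rs below: fill_bytes() is now defined on the RngCore trait rather than on Rng, so that trait has to be imported at the call site while the call itself is unchanged. A minimal sketch of the migrated pattern, assuming rand 0.6.5 as pinned in Cargo.lock (the nonce helper is illustrative, not the actual u2fhid code):

    // rand 0.3 spelled this as:
    //     use rand::{thread_rng, Rng};
    //     thread_rng().fill_bytes(&mut nonce);
    // In rand 0.6, fill_bytes() lives on the RngCore trait, so only the
    // import changes.
    use rand::{thread_rng, RngCore};

    fn fresh_nonce() -> [u8; 8] {
        let mut nonce = [0u8; 8];
        thread_rng().fill_bytes(&mut nonce);
        nonce
    }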
Cargo.lock
dom/webauthn/u2f-hid-rs/Cargo.toml
dom/webauthn/u2f-hid-rs/src/u2fprotocol.rs
netwerk/base/mozurl/Cargo.toml
python/mozbuild/mozbuild/vendor_rust.py
servo/components/style/Cargo.toml
servo/ports/geckolib/Cargo.toml
servo/tests/unit/style/Cargo.toml
third_party/rust/lock_api/.cargo-checksum.json
third_party/rust/lock_api/Cargo.toml
third_party/rust/lock_api/src/lib.rs
third_party/rust/lock_api/src/mutex.rs
third_party/rust/lock_api/src/remutex.rs
third_party/rust/lock_api/src/rwlock.rs
third_party/rust/parking_lot/.cargo-checksum.json
third_party/rust/parking_lot/CHANGELOG.md
third_party/rust/parking_lot/Cargo.toml
third_party/rust/parking_lot/README.md
third_party/rust/parking_lot/appveyor.yml
third_party/rust/parking_lot/build.rs
third_party/rust/parking_lot/src/condvar.rs
third_party/rust/parking_lot/src/deadlock.rs
third_party/rust/parking_lot/src/elision.rs
third_party/rust/parking_lot/src/lib.rs
third_party/rust/parking_lot/src/mutex.rs
third_party/rust/parking_lot/src/once.rs
third_party/rust/parking_lot/src/raw_mutex.rs
third_party/rust/parking_lot/src/raw_rwlock.rs
third_party/rust/parking_lot/src/remutex.rs
third_party/rust/parking_lot/src/rwlock.rs
third_party/rust/parking_lot/src/util.rs
third_party/rust/parking_lot_core/.cargo-checksum.json
third_party/rust/parking_lot_core/Cargo.toml
third_party/rust/parking_lot_core/LICENSE-APACHE
third_party/rust/parking_lot_core/LICENSE-MIT
third_party/rust/parking_lot_core/build.rs
third_party/rust/parking_lot_core/src/lib.rs
third_party/rust/parking_lot_core/src/parking_lot.rs
third_party/rust/parking_lot_core/src/spinwait.rs
third_party/rust/parking_lot_core/src/thread_parker/cloudabi.rs
third_party/rust/parking_lot_core/src/thread_parker/generic.rs
third_party/rust/parking_lot_core/src/thread_parker/linux.rs
third_party/rust/parking_lot_core/src/thread_parker/redox.rs
third_party/rust/parking_lot_core/src/thread_parker/sgx.rs
third_party/rust/parking_lot_core/src/thread_parker/unix.rs
third_party/rust/parking_lot_core/src/thread_parker/wasm.rs
third_party/rust/parking_lot_core/src/thread_parker/windows/keyed_event.rs
third_party/rust/parking_lot_core/src/thread_parker/windows/mod.rs
third_party/rust/parking_lot_core/src/thread_parker/windows/waitaddress.rs
third_party/rust/parking_lot_core/src/word_lock.rs
third_party/rust/scopeguard-0.3.2/.cargo-checksum.json
third_party/rust/scopeguard-0.3.2/Cargo.toml
third_party/rust/scopeguard-0.3.2/LICENSE-APACHE
third_party/rust/scopeguard-0.3.2/LICENSE-MIT
third_party/rust/scopeguard-0.3.2/README.rst
third_party/rust/scopeguard-0.3.2/examples/readme.rs
third_party/rust/scopeguard-0.3.2/src/lib.rs
third_party/rust/scopeguard/.cargo-checksum.json
third_party/rust/scopeguard/Cargo.toml
third_party/rust/scopeguard/README.rst
third_party/rust/scopeguard/examples/readme.rs
third_party/rust/scopeguard/src/lib.rs
third_party/rust/uuid/.cargo-checksum.json
third_party/rust/uuid/CONTRIBUTING.md
third_party/rust/uuid/Cargo.toml
third_party/rust/uuid/README.md
third_party/rust/uuid/README.tpl
third_party/rust/uuid/benches/format_str.rs
third_party/rust/uuid/benches/serde_support.rs
third_party/rust/uuid/src/adapter/compact.rs
third_party/rust/uuid/src/adapter/mod.rs
third_party/rust/uuid/src/builder.rs
third_party/rust/uuid/src/core_support.rs
third_party/rust/uuid/src/lib.rs
third_party/rust/uuid/src/parser/core_support.rs
third_party/rust/uuid/src/prelude.rs
third_party/rust/uuid/src/serde_support.rs
third_party/rust/uuid/src/std_support.rs
third_party/rust/uuid/src/v4.rs
third_party/rust/uuid/src/winapi_support.rs
xpcom/rust/gkrust_utils/Cargo.toml
xpcom/rust/gkrust_utils/src/lib.rs
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1206,17 +1206,17 @@ dependencies = [
  "atomic_refcell 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "cssparser 0.25.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "cstr 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "malloc_size_of 0.0.1",
  "nsstring 0.1.0",
  "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "parking_lot 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "selectors 0.21.0",
  "servo_arc 0.1.1",
  "smallvec 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "style 0.0.1",
  "style_traits 0.0.1",
  "to_shmem 0.0.1",
 ]
 
@@ -1275,28 +1275,28 @@ dependencies = [
  "netwerk_helper 0.0.1",
  "nserror 0.1.0",
  "nsstring 0.1.0",
  "prefs_parser 0.0.1",
  "profiler_helper 0.1.0",
  "rsdparsa_capi 0.1.0",
  "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "storage 0.1.0",
- "u2fhid 0.2.3",
+ "u2fhid 0.2.4",
  "webrender_bindings 0.1.0",
  "xpcom 0.1.0",
  "xulstore 0.1.0",
 ]
 
 [[package]]
 name = "gkrust_utils"
 version = "0.1.0"
 dependencies = [
  "nsstring 0.1.0",
- "uuid 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "gl_generator"
 version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "khronos_api 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1643,21 +1643,20 @@ source = "registry+https://github.com/ru
 dependencies = [
  "cc 1.0.34 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
  "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "lock_api"
-version = "0.1.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "log"
 version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1885,17 +1884,17 @@ dependencies = [
 
 [[package]]
 name = "mozurl"
 version = "0.0.1"
 dependencies = [
  "nserror 0.1.0",
  "nsstring 0.1.0",
  "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "uuid 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "xpcom 0.1.0",
 ]
 
 [[package]]
 name = "mozversion"
 version = "0.2.0"
 dependencies = [
  "regex 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2114,30 +2113,35 @@ name = "packed_simd"
 version = "0.3.3"
 source = "git+https://github.com/hsivonen/packed_simd?branch=rust_1_32#3541e3818fdc7c2a24f87e3459151a4ce955a67a"
 dependencies = [
  "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "parking_lot"
-version = "0.6.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "parking_lot_core 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "lock_api 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "parking_lot_core"
-version = "0.2.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "redox_syscall 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "smallvec 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi 0.3.6 (git+https://github.com/froydnj/winapi-rs?branch=aarch64)",
 ]
 
 [[package]]
 name = "peeking_take_while"
 version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2522,17 +2526,17 @@ dependencies = [
  "bincode 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "failure 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "lmdb-rkv 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "ordered-float 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.88 (git+https://github.com/servo/serde?branch=deserialize_from_enums10)",
  "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "uuid 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "ron"
 version = "0.1.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "serde 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2622,16 +2626,21 @@ version = "0.1.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "scopeguard"
 version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
+name = "scopeguard"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
 name = "scroll"
 version = "0.9.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "scroll_derive 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -2884,17 +2893,17 @@ dependencies = [
  "new_debug_unreachable 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "nsstring 0.1.0",
  "num-derive 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)",
  "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "ordered-float 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "parking_lot 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rayon 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "regex 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "selectors 0.21.0",
  "servo_arc 0.1.1",
  "smallbitvec 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "smallvec 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "style_derive 0.0.1",
@@ -3285,26 +3294,26 @@ source = "registry+https://github.com/ru
 
 [[package]]
 name = "typenum"
 version = "1.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "u2fhid"
-version = "0.2.3"
+version = "0.2.4"
 dependencies = [
  "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "boxfnonce 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "core-foundation 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "devd-rs 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
  "libudev 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "runloop 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi 0.3.6 (git+https://github.com/froydnj/winapi-rs?branch=aarch64)",
 ]
 
 [[package]]
 name = "ucd-util"
 version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -3374,18 +3383,21 @@ version = "0.6.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "uuid"
-version = "0.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
 
 [[package]]
 name = "vcpkg"
 version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "vec_map"
@@ -3846,17 +3858,17 @@ dependencies = [
 "checksum lazycell 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f"
 "checksum libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)" = "bedcc7a809076656486ffe045abeeac163da1b558e963a31e29fbfbeba916917"
 "checksum libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3ad660d7cb8c5822cd83d10897b0f1f1526792737a179e73896152f85b88c2"
 "checksum libudev 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ea626d3bdf40a1c5aee3bcd4f40826970cae8d80a8fec934c82a63840094dcfe"
 "checksum libz-sys 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)" = "3fdd64ef8ee652185674455c1d450b83cbc8ad895625d543b5324d923f82e4d8"
 "checksum linked-hash-map 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "70fb39025bc7cdd76305867c4eccf2f2dcf6e9a57f5b21a93e1c2d86cd03ec9e"
 "checksum lmdb-rkv 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1452294309db7977dc75e1e8135a8c654d9e52e04ff0c0bd06c880897a91defd"
 "checksum lmdb-rkv-sys 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1470e0168f1832e35afd6d0931ae60db625685332837b97aa156773ec9c5e393"
-"checksum lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c"
+"checksum lock_api 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ed946d4529956a20f2d63ebe1b69996d5a2137c91913fe3ebbeff957f5bca7ff"
 "checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b"
 "checksum log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c84ec4b527950aa83a329754b01dbe3f58361d1c5efacd1f6d68c494d08a17c6"
 "checksum lzw 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7d947cbb889ed21c2a84be6ffbaebf5b4e0f4340638cba0444907e38b56be084"
 "checksum malloc_size_of_derive 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "35adee9ed962cf7d07d62cb58bc45029f3227f5b5b86246caa8632f06c187bc3"
 "checksum matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "100aabe6b8ff4e4a7e32c1c13523379802df0772b82466207ac25b013f193376"
 "checksum memchr 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "148fab2e51b4f1cfc66da2a7c32981d1d3c083a803978268bb11fe4b86925e7a"
 "checksum memchr 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2efc7bc57c883d4a4d6e3246905283d8dae951bb3bd32f49d6ef297f546e1c39"
 "checksum memmap 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46f3c7359028b31999287dae4e5047ddfe90a23b7dca2282ce759b491080c99b"
@@ -3886,18 +3898,18 @@ dependencies = [
 "checksum num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0b3a5d7cc97d6d30d8b9bc8fa19bf45349ffe46241e8816f50f62f6d6aaabee1"
 "checksum num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "514f0d73e64be53ff320680ca671b64fe3fb91da01e1ae2ddc99eb51d453b20d"
 "checksum object 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6cca6ad89d0801138cb4ef606908ae12d83edc4c790ef5178fc7b4c72d959e90"
 "checksum opaque-debug 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "51ecbcb821e1bd256d456fe858aaa7f380b63863eab2eb86eee1bd9f33dd6682"
 "checksum ordered-float 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2f0015e9e8e28ee20c581cfbfe47c650cedeb9ed0721090e0b7ebb10b9cdbcc2"
 "checksum ordermap 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a86ed3f5f244b372d6b1a00b72ef7f8876d0bc6a78a4c9985c53614041512063"
 "checksum owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "49a4b8ea2179e6a2e27411d3bca09ca6dd630821cf6894c6c7c8467a8ee7ef13"
 "checksum packed_simd 0.3.3 (git+https://github.com/hsivonen/packed_simd?branch=rust_1_32)" = "<none>"
-"checksum parking_lot 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "69376b761943787ebd5cc85a5bc95958651a22609c5c1c2b65de21786baec72b"
-"checksum parking_lot_core 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "4db1a8ccf734a7bce794cc19b3df06ed87ab2f3907036b693c68f56b4d4537fa"
+"checksum parking_lot 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fa7767817701cce701d5585b9c4db3cdd02086398322c1d7e8bf5094a96a2ce7"
+"checksum parking_lot_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cb88cb1cb3790baa6776844f968fea3be44956cf184fa1be5a03341f5491278c"
 "checksum peeking_take_while 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099"
 "checksum percent-encoding 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de154f638187706bde41d9b4738748933d64e6b37bdbffc0b47a97d16a6ae356"
 "checksum petgraph 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3659d1ee90221741f65dd128d9998311b0e40c5d3c23a62445938214abce4f"
 "checksum phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "cb325642290f28ee14d8c6201159949a872f220c62af6e110a56ea914fbe42fc"
 "checksum phf_codegen 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "d62594c0bb54c464f633175d502038177e90309daf2e0158be42ed5f023ce88f"
 "checksum phf_generator 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "6b07ffcc532ccc85e3afc45865469bf5d9e4ef5bfcf9622e3cfe80c2d275ec03"
 "checksum phf_shared 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "07e24b0ca9643bdecd0632f2b3da6b1b89bbb0030e0b992afc1113b23a7bc2f2"
 "checksum pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "3a8b4c6b8165cd1a1cd4b9b120978131389f64bdaf456435caa41e630edba903"
@@ -3942,16 +3954,17 @@ dependencies = [
 "checksum rustc-demangle 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "76d7ba1feafada44f2d38eed812bd2489a03c0f5abb975799251518b68848649"
 "checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
 "checksum ryu 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "fd0568787116e13c652377b6846f5931454a363a8fdf8ae50463ee40935b278b"
 "checksum safemem 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8dca453248a96cb0749e36ccdfe2b0b4e54a61bfef89fb97ec621eb8e0a93dd9"
 "checksum same-file 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "cfb6eded0b06a0b512c8ddbcf04089138c9b4362c2f696f3c3d76039d68f3637"
 "checksum scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f417c22df063e9450888a7561788e9bd46d3bb3c1466435b4eccb903807f147d"
 "checksum scoped_threadpool 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "1d51f5df5af43ab3f1360b429fa5e0152ac5ce8c0bd6485cae490332e96846a8"
 "checksum scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c79eb2c3ac4bc2507cda80e7f3ac5b88bd8eae4c0914d5663e6a8933994be918"
+"checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d"
 "checksum scroll 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2f84d114ef17fd144153d608fba7c446b0145d038985e7a8cc5d08bb0ce20383"
 "checksum scroll_derive 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)" = "8f1aa96c45e7f5a91cb7fabe7b279f02fea7126239fc40b732316e8b6a2d0fcb"
 "checksum semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a3186ec9e65071a2095434b1f5bb24838d4e8e130f584c790f6033c79943537"
 "checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
 "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
 "checksum serde 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)" = "9f301d728f2b94c9a7691c90f07b0b4e8a4517181d9461be94c04bddeb4bd850"
 "checksum serde_bytes 0.10.4 (registry+https://github.com/rust-lang/crates.io-index)" = "adb6e51a6b3696b301bc221d785f898b4457c619b51d7ce195a6d20baecb37b3"
 "checksum serde_derive 1.0.88 (git+https://github.com/servo/serde?branch=deserialize_from_enums10)" = "<none>"
@@ -4007,17 +4020,17 @@ dependencies = [
 "checksum unicode-normalization 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "51ccda9ef9efa3f7ef5d91e8f9b83bbe6955f9bf86aec89d5cce2c874625920f"
 "checksum unicode-segmentation 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "aa6024fc12ddfd1c6dbc14a80fa2324d4568849869b779f6bd37e5e4c03344d1"
 "checksum unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "bf3a113775714a22dcb774d8ea3655c53a32debae63a063acc00a91cc586245f"
 "checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc"
 "checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56"
 "checksum url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a"
 "checksum utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "662fab6525a98beff2921d7f61a39e7d59e0b425ebc7d0d9e66d316e55124122"
 "checksum uuid 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e1436e58182935dcd9ce0add9ea0b558e8a87befe01c1a301e6020aeb0876363"
-"checksum uuid 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dab5c5526c5caa3d106653401a267fed923e7046f35895ffcb5ca42db64942e6"
+"checksum uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)" = "90dbc611eb48397705a6b0f6e917da23ae517e4d127123d2cf7674206627d32a"
 "checksum vcpkg 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9e0a7d8bed3178a8fb112199d466eeca9ed09a14ba8ad67718179b4fd5487d0b"
 "checksum vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "887b5b631c2ad01628bbbaa7dd4c869f80d3186688f8d0b6f58774fbe324988c"
 "checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
 "checksum walkdir 2.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "63636bd0eb3d00ccb8b9036381b526efac53caf112b7783b730ab3f8e44da369"
 "checksum want 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "797464475f30ddb8830cc529aaaae648d581f99e2036a928877dfde027ddf6b3"
 "checksum wasmparser 0.29.2 (registry+https://github.com/rust-lang/crates.io-index)" = "981a8797cf89762e0233ec45fae731cb79a4dfaee12d9f0fe6cee01e4ac58d00"
 "checksum webidl 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d0f807f7488d680893f7188aa09d7672a3a0a8461975a098a2edf0a52e3fee29"
 "checksum which 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4be6cfa54dab45266e98b5d7be2f8ce959ddd49abd141a05d52dce4b07f803bb"
--- a/dom/webauthn/u2f-hid-rs/Cargo.toml
+++ b/dom/webauthn/u2f-hid-rs/Cargo.toml
@@ -1,11 +1,11 @@
 [package]
 name = "u2fhid"
-version = "0.2.3"
+version = "0.2.4"
 authors = ["Kyle Machulis <kyle@nonpolynomial.com>", "J.C. Jones <jc@mozilla.com>", "Tim Taubert <ttaubert@mozilla.com>"]
 
 [target.'cfg(target_os = "linux")'.dependencies]
 libudev = "^0.2"
 
 [target.'cfg(target_os = "freebsd")'.dependencies]
 devd-rs = "0.2.1"
 
@@ -18,17 +18,17 @@ features = [
     "handleapi",
     "hidclass",
     "hidpi",
     "hidusage",
     "setupapi",
 ]
 
 [dependencies]
-rand = "0.3"
+rand = "0.6"
 log = "0.4"
 libc = "^0.2"
 boxfnonce = "0.0.3"
 runloop = "0.1.0"
 bitflags = "1.0"
 
 [dev-dependencies]
 sha2 = "^0.7"
--- a/dom/webauthn/u2f-hid-rs/src/u2fprotocol.rs
+++ b/dom/webauthn/u2f-hid-rs/src/u2fprotocol.rs
@@ -1,17 +1,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #![cfg_attr(feature = "cargo-clippy", allow(needless_lifetimes))]
 
 extern crate std;
 
-use rand::{thread_rng, Rng};
+use rand::{thread_rng, RngCore};
 use std::ffi::CString;
 use std::io;
 use std::io::{Read, Write};
 
 use consts::*;
 use u2ftypes::*;
 use util::io_err;
 
@@ -209,17 +209,17 @@ where
 }
 
 ////////////////////////////////////////////////////////////////////////
 // Tests
 ////////////////////////////////////////////////////////////////////////
 
 #[cfg(test)]
 mod tests {
-    use rand::{thread_rng, Rng};
+    use rand::{thread_rng, RngCore};
 
     use super::{init_device, send_apdu, sendrecv, U2FDevice};
     use consts::{CID_BROADCAST, SW_NO_ERROR, U2FHID_INIT, U2FHID_MSG, U2FHID_PING};
 
     mod platform {
         use std::io;
         use std::io::{Read, Write};
 
--- a/netwerk/base/mozurl/Cargo.toml
+++ b/netwerk/base/mozurl/Cargo.toml
@@ -3,9 +3,9 @@ name = "mozurl"
 version = "0.0.1"
 authors = ["Nika Layzell <nika@thelayzells.com>"]
 
 [dependencies]
 url = "1.7.2"
 nserror = { path = "../../../xpcom/rust/nserror" }
 nsstring = { path = "../../../xpcom/rust/nsstring" }
 xpcom = { path = "../../../xpcom/rust/xpcom" }
-uuid = { version = "0.6", features = ["v4"] }
+uuid = { version = "0.7.2", features = ["v4"] }
--- a/python/mozbuild/mozbuild/vendor_rust.py
+++ b/python/mozbuild/mozbuild/vendor_rust.py
@@ -182,16 +182,18 @@ Please commit or stash these changes bef
     # sha256 hash of the license file that we reviewed.
     #
     # As above, it is insufficient to have additions to this whitelist
     # reviewed solely by a build peer; any additions must be checked by
     # somebody competent to review licensing minutiae.
     RUNTIME_LICENSE_FILE_PACKAGE_WHITELIST = {
         # MIT
         'deque': '6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb',
+        # we're whitelisting this fuchsia crate because it doesn't get built in the final product but has a license-file that needs ignoring
+        'fuchsia-cprng' : '03b114f53e6587a398931762ee11e2395bfdba252a329940e2c8c9e81813845b',
     }
 
     @staticmethod
     def runtime_license(package, license_string):
         """Cargo docs say:
         ---
         https://doc.rust-lang.org/cargo/reference/manifest.html
 
--- a/servo/components/style/Cargo.toml
+++ b/servo/components/style/Cargo.toml
@@ -50,17 +50,17 @@ malloc_size_of_derive = "0.1"
 matches = "0.1"
 nsstring = {path = "../../../xpcom/rust/nsstring/", optional = true}
 num_cpus = {version = "1.1.0"}
 num-integer = "0.1"
 num-traits = "0.2"
 num-derive = "0.2"
 ordered-float = "1.0"
 owning_ref = "0.4"
-parking_lot = "0.6"
+parking_lot = "0.8"
 precomputed-hash = "0.1.1"
 rayon = "1"
 selectors = { path = "../selectors" }
 serde = {version = "1.0", optional = true, features = ["derive"]}
 servo_arc = { path = "../servo_arc" }
 servo_atoms = {path = "../atoms", optional = true}
 servo_config = {path = "../config", optional = true}
 smallbitvec = "2.3.0"
--- a/servo/ports/geckolib/Cargo.toml
+++ b/servo/ports/geckolib/Cargo.toml
@@ -17,15 +17,15 @@ gecko_profiler = ["style/gecko_profiler"
 atomic_refcell = "0.1"
 cssparser = "0.25"
 cstr = "0.1.2"
 libc = "0.2"
 log = {version = "0.4", features = ["release_max_level_info"]}
 malloc_size_of = {path = "../../components/malloc_size_of"}
 nsstring = {path = "../../../xpcom/rust/nsstring/"}
 num-traits = "0.2"
-parking_lot = "0.6"
+parking_lot = "0.8"
 selectors = {path = "../../components/selectors"}
 servo_arc = {path = "../../components/servo_arc"}
 smallvec = "0.6"
 style = {path = "../../components/style", features = ["gecko"]}
 style_traits = {path = "../../components/style_traits"}
 to_shmem = {path = "../../components/to_shmem"}
--- a/servo/tests/unit/style/Cargo.toml
+++ b/servo/tests/unit/style/Cargo.toml
@@ -10,17 +10,17 @@ path = "lib.rs"
 doctest = false
 
 [dependencies]
 byteorder = "1.0"
 app_units = "0.7"
 cssparser = "0.25"
 euclid = "0.19"
 html5ever = "0.22"
-parking_lot = "0.6"
+parking_lot = "0.8"
 rayon = "1"
 serde_json = "1.0"
 selectors = {path = "../../../components/selectors"}
 servo_arc = {path = "../../../components/servo_arc"}
 servo_atoms = {path = "../../../components/atoms"}
 servo_config = {path = "../../../components/config"}
 servo_url = {path = "../../../components/url"}
 size_of_test = {path = "../../../components/size_of_test"}
--- a/third_party/rust/lock_api/.cargo-checksum.json
+++ b/third_party/rust/lock_api/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{"Cargo.toml":"ab2a7a96105e15de46900fb0da37edbab44e5513a9818672153dae44ed318f7e","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","src/lib.rs":"4a16128f58e3380b22b26b137ee1096732995b7e401f3d227dd7b0738b6bd604","src/mutex.rs":"fee397f72325621812c5f78c7a6b9369ea7ec14e71bb0049678a50349519c0c7","src/remutex.rs":"ed76d7b93a56b6248d79676de2aaa66b607b64f1b773c9dd7326b8324e2bc71a","src/rwlock.rs":"5ab1aab614358cfdaf23e8ff8a0ac5e0c7656b777f385aca2e5422f0aa8f0985"},"package":"62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c"}
\ No newline at end of file
+{"files":{"Cargo.toml":"4e6804e66f9429156bfe15d0d796baceb73a3f06d358608afcbea95cdf0086ba","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","src/lib.rs":"d9ed1f911f058d066ebfd024940da8a5c1ebbab6cfd65a633dfbc613573dd823","src/mutex.rs":"eeaab6ce6e50aed906bebe598c1b151258327e101eec08b0ff9ccd9c87daddfb","src/remutex.rs":"24cbd5b5b77dd746b065c6d3494dcb2095e81a062341052003b96210a1297ba8","src/rwlock.rs":"a3789a7e820f5c22c8661c4c9e279510a3db50e24894fb380e49dde6b110ddb1"},"package":"ed946d4529956a20f2d63ebe1b69996d5a2137c91913fe3ebbeff957f5bca7ff"}
\ No newline at end of file
--- a/third_party/rust/lock_api/Cargo.toml
+++ b/third_party/rust/lock_api/Cargo.toml
@@ -1,31 +1,37 @@
 # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
 #
 # When uploading crates to the registry Cargo will automatically
 # "normalize" Cargo.toml files for maximal compatibility
 # with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g. crates.io) dependencies
+# to registry (e.g., crates.io) dependencies
 #
 # If you believe there's an error in this file please file an
 # issue against the rust-lang/cargo repository. If you're
 # editing this file be aware that the upstream Cargo.toml
 # will likely look very different (and much more reasonable)
 
 [package]
+edition = "2018"
 name = "lock_api"
-version = "0.1.5"
+version = "0.2.0"
 authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
 description = "Wrappers to create fully-featured Mutex and RwLock types. Compatible with no_std."
 keywords = ["mutex", "rwlock", "lock", "no_std"]
 categories = ["concurrency", "no-std"]
 license = "Apache-2.0/MIT"
 repository = "https://github.com/Amanieu/parking_lot"
 [dependencies.owning_ref]
 version = "0.4"
 optional = true
 
 [dependencies.scopeguard]
-version = "0.3"
+version = "1.0"
+default-features = false
+
+[dependencies.serde]
+version = "1.0.90"
+optional = true
 default-features = false
 
 [features]
 nightly = []
--- a/third_party/rust/lock_api/src/lib.rs
+++ b/third_party/rust/lock_api/src/lib.rs
@@ -23,24 +23,24 @@
 //! This process is similar for RwLocks, except that two guards need to be
 //! exported instead of one. (Or 3 guards if your type supports upgradable read
 //! locks, see [extension traits](#extension-traits) below for details)
 //!
 //! # Example
 //!
 //! ```
 //! use lock_api::{RawMutex, Mutex, GuardSend};
-//! use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT};
+//! use std::sync::atomic::{AtomicBool, Ordering};
 //!
 //! // 1. Define our raw lock type
 //! pub struct RawSpinlock(AtomicBool);
 //!
 //! // 2. Implement RawMutex for this type
 //! unsafe impl RawMutex for RawSpinlock {
-//!     const INIT: RawSpinlock = RawSpinlock(ATOMIC_BOOL_INIT);
+//!     const INIT: RawSpinlock = RawSpinlock(AtomicBool::new(false));
 //!
 //!     // A spinlock guard can be sent to another thread and unlocked there
 //!     type GuardMarker = GuardSend;
 //!
 //!     fn lock(&self) {
 //!         // Note: This isn't the best way of implementing a spinlock, but it
 //!         // suffices for the sake of this example.
 //!         while !self.try_lock() {}
@@ -80,30 +80,28 @@
 //! This crate supports two cargo features:
 //!
 //! - `owning_ref`: Allows your lock types to be used with the `owning_ref` crate.
 //! - `nightly`: Enables nightly-only features. At the moment the only such
 //!   feature is `const fn` constructors for lock types.
 
 #![no_std]
 #![warn(missing_docs)]
+#![warn(rust_2018_idioms)]
 #![cfg_attr(feature = "nightly", feature(const_fn))]
 
 #[macro_use]
 extern crate scopeguard;
 
-#[cfg(feature = "owning_ref")]
-extern crate owning_ref;
-
 /// Marker type which indicates that the Guard type for a lock is `Send`.
 pub struct GuardSend(());
 
 /// Marker type which indicates that the Guard type for a lock is not `Send`.
 pub struct GuardNoSend(*mut ());
 
 mod mutex;
-pub use mutex::*;
+pub use crate::mutex::*;
 
 mod remutex;
-pub use remutex::*;
+pub use crate::remutex::*;
 
 mod rwlock;
-pub use rwlock::*;
+pub use crate::rwlock::*;
--- a/third_party/rust/lock_api/src/mutex.rs
+++ b/third_party/rust/lock_api/src/mutex.rs
@@ -9,16 +9,19 @@ use core::cell::UnsafeCell;
 use core::fmt;
 use core::marker::PhantomData;
 use core::mem;
 use core::ops::{Deref, DerefMut};
 
 #[cfg(feature = "owning_ref")]
 use owning_ref::StableAddress;
 
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
 /// Basic operations for a mutex.
 ///
 /// Types implementing this trait can be used by `Mutex` to form a safe and
 /// fully-functioning mutex type.
 ///
 /// # Safety
 ///
 /// Implementations of this trait must ensure that the mutex is actually
@@ -88,16 +91,45 @@ pub unsafe trait RawMutexTimed: RawMutex
 /// it is protecting. The data can only be accessed through the RAII guards
 /// returned from `lock` and `try_lock`, which guarantees that the data is only
 /// ever accessed when the mutex is locked.
 pub struct Mutex<R: RawMutex, T: ?Sized> {
     raw: R,
     data: UnsafeCell<T>,
 }
 
+// Copied and modified from serde
+#[cfg(feature = "serde")]
+impl<R, T> Serialize for Mutex<R, T>
+where
+    R: RawMutex,
+    T: Serialize + ?Sized,
+{
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        self.lock().serialize(serializer)
+    }
+}
+
+#[cfg(feature = "serde")]
+impl<'de, R, T> Deserialize<'de> for Mutex<R, T>
+where
+    R: RawMutex,
+    T: Deserialize<'de> + ?Sized,
+{
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        Deserialize::deserialize(deserializer).map(Mutex::new)
+    }
+}
+
 unsafe impl<R: RawMutex + Send, T: ?Sized + Send> Send for Mutex<R, T> {}
 unsafe impl<R: RawMutex + Sync, T: ?Sized + Send> Sync for Mutex<R, T> {}
 
 impl<R: RawMutex, T> Mutex<R, T> {
     /// Creates a new mutex in an unlocked state ready for use.
     #[cfg(feature = "nightly")]
     #[inline]
     pub const fn new(val: T) -> Mutex<R, T> {
@@ -122,47 +154,47 @@ impl<R: RawMutex, T> Mutex<R, T> {
     #[allow(unused_unsafe)]
     pub fn into_inner(self) -> T {
         unsafe { self.data.into_inner() }
     }
 }
 
 impl<R: RawMutex, T: ?Sized> Mutex<R, T> {
     #[inline]
-    fn guard(&self) -> MutexGuard<R, T> {
+    fn guard(&self) -> MutexGuard<'_, R, T> {
         MutexGuard {
             mutex: self,
             marker: PhantomData,
         }
     }
 
     /// Acquires a mutex, blocking the current thread until it is able to do so.
     ///
     /// This function will block the local thread until it is available to acquire
     /// the mutex. Upon returning, the thread is the only thread with the mutex
     /// held. An RAII guard is returned to allow scoped unlock of the lock. When
     /// the guard goes out of scope, the mutex will be unlocked.
     ///
     /// Attempts to lock a mutex in the thread which already holds the lock will
     /// result in a deadlock.
     #[inline]
-    pub fn lock(&self) -> MutexGuard<R, T> {
+    pub fn lock(&self) -> MutexGuard<'_, R, T> {
         self.raw.lock();
         self.guard()
     }
 
     /// Attempts to acquire this lock.
     ///
     /// If the lock could not be acquired at this time, then `None` is returned.
     /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
     /// guard is dropped.
     ///
     /// This function does not block.
     #[inline]
-    pub fn try_lock(&self) -> Option<MutexGuard<R, T>> {
+    pub fn try_lock(&self) -> Option<MutexGuard<'_, R, T>> {
         if self.raw.try_lock() {
             Some(self.guard())
         } else {
             None
         }
     }
 
     /// Returns a mutable reference to the underlying data.
@@ -225,31 +257,31 @@ impl<R: RawMutexFair, T: ?Sized> Mutex<R
 
 impl<R: RawMutexTimed, T: ?Sized> Mutex<R, T> {
     /// Attempts to acquire this lock until a timeout is reached.
     ///
     /// If the lock could not be acquired before the timeout expired, then
     /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
     /// be unlocked when the guard is dropped.
     #[inline]
-    pub fn try_lock_for(&self, timeout: R::Duration) -> Option<MutexGuard<R, T>> {
+    pub fn try_lock_for(&self, timeout: R::Duration) -> Option<MutexGuard<'_, R, T>> {
         if self.raw.try_lock_for(timeout) {
             Some(self.guard())
         } else {
             None
         }
     }
 
     /// Attempts to acquire this lock until a timeout is reached.
     ///
     /// If the lock could not be acquired before the timeout expired, then
     /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
     /// be unlocked when the guard is dropped.
     #[inline]
-    pub fn try_lock_until(&self, timeout: R::Instant) -> Option<MutexGuard<R, T>> {
+    pub fn try_lock_until(&self, timeout: R::Instant) -> Option<MutexGuard<'_, R, T>> {
         if self.raw.try_lock_until(timeout) {
             Some(self.guard())
         } else {
             None
         }
     }
 }
 
@@ -263,31 +295,42 @@ impl<R: RawMutex, T: ?Sized + Default> D
 impl<R: RawMutex, T> From<T> for Mutex<R, T> {
     #[inline]
     fn from(t: T) -> Mutex<R, T> {
         Mutex::new(t)
     }
 }
 
 impl<R: RawMutex, T: ?Sized + fmt::Debug> fmt::Debug for Mutex<R, T> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self.try_lock() {
             Some(guard) => f.debug_struct("Mutex").field("data", &&*guard).finish(),
-            None => f.pad("Mutex { <locked> }"),
+            None => {
+                struct LockedPlaceholder;
+                impl fmt::Debug for LockedPlaceholder {
+                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                        f.write_str("<locked>")
+                    }
+                }
+
+                f.debug_struct("Mutex")
+                    .field("data", &LockedPlaceholder)
+                    .finish()
+            }
         }
     }
 }
 
 /// An RAII implementation of a "scoped lock" of a mutex. When this structure is
 /// dropped (falls out of scope), the lock will be unlocked.
 ///
 /// The data protected by the mutex can be accessed through this guard via its
 /// `Deref` and `DerefMut` implementations.
-#[must_use]
-pub struct MutexGuard<'a, R: RawMutex + 'a, T: ?Sized + 'a> {
+#[must_use = "if unused the Mutex will immediately unlock"]
+pub struct MutexGuard<'a, R: RawMutex, T: ?Sized> {
     mutex: &'a Mutex<R, T>,
     marker: PhantomData<(&'a mut T, R::GuardMarker)>,
 }
 
 unsafe impl<'a, R: RawMutex + Sync + 'a, T: ?Sized + Sync + 'a> Sync for MutexGuard<'a, R, T> {}
 
 impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
     /// Returns a reference to the original `Mutex` object.
@@ -423,39 +466,53 @@ impl<'a, R: RawMutex + 'a, T: ?Sized + '
 
 impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MutexGuard<'a, R, T> {
     #[inline]
     fn drop(&mut self) {
         self.mutex.raw.unlock();
     }
 }
 
+impl<'a, R: RawMutex + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for MutexGuard<'a, R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display for MutexGuard<'a, R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> StableAddress for MutexGuard<'a, R, T> {}
 
 /// An RAII mutex guard returned by `MutexGuard::map`, which can point to a
 /// subfield of the protected data.
 ///
 /// The main difference between `MappedMutexGuard` and `MutexGuard` is that the
 /// former doesn't support temporarily unlocking and re-locking, since that
 /// could introduce soundness issues if the locked object is modified by another
 /// thread.
-#[must_use]
-pub struct MappedMutexGuard<'a, R: RawMutex + 'a, T: ?Sized + 'a> {
+#[must_use = "if unused the Mutex will immediately unlock"]
+pub struct MappedMutexGuard<'a, R: RawMutex, T: ?Sized> {
     raw: &'a R,
     data: *mut T,
     marker: PhantomData<&'a mut T>,
 }
 
 unsafe impl<'a, R: RawMutex + Sync + 'a, T: ?Sized + Sync + 'a> Sync
     for MappedMutexGuard<'a, R, T>
-{}
+{
+}
 unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Send for MappedMutexGuard<'a, R, T> where
     R::GuardMarker: Send
-{}
+{
+}
 
 impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> {
     /// Makes a new `MappedMutexGuard` for a component of the locked data.
     ///
     /// This operation cannot fail as the `MappedMutexGuard` passed
     /// in already locked the mutex.
     ///
     /// This is an associated function that needs to be
@@ -541,10 +598,24 @@ impl<'a, R: RawMutex + 'a, T: ?Sized + '
 
 impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MappedMutexGuard<'a, R, T> {
     #[inline]
     fn drop(&mut self) {
         self.raw.unlock();
     }
 }
 
+impl<'a, R: RawMutex + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for MappedMutexGuard<'a, R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawMutex + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for MappedMutexGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> StableAddress for MappedMutexGuard<'a, R, T> {}
--- a/third_party/rust/lock_api/src/remutex.rs
+++ b/third_party/rust/lock_api/src/remutex.rs
@@ -1,27 +1,30 @@
 // Copyright 2018 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
+use crate::mutex::{RawMutex, RawMutexFair, RawMutexTimed};
+use crate::GuardNoSend;
 use core::cell::{Cell, UnsafeCell};
 use core::fmt;
 use core::marker::PhantomData;
 use core::mem;
 use core::ops::Deref;
 use core::sync::atomic::{AtomicUsize, Ordering};
-use mutex::{RawMutex, RawMutexFair, RawMutexTimed};
-use GuardNoSend;
 
 #[cfg(feature = "owning_ref")]
 use owning_ref::StableAddress;
 
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
 /// Helper trait which returns a non-zero thread ID.
 ///
 /// The simplest way to implement this trait is to return the address of a
 /// thread-local variable.
 ///
 /// # Safety
 ///
 /// Implementations of this trait must ensure that no two active threads share
@@ -135,22 +138,55 @@ impl<R: RawMutexTimed, G: GetThreadId> R
 ///
 /// See [`Mutex`](struct.Mutex.html) for more details about the underlying mutex
 /// primitive.
 pub struct ReentrantMutex<R: RawMutex, G: GetThreadId, T: ?Sized> {
     raw: RawReentrantMutex<R, G>,
     data: UnsafeCell<T>,
 }
 
+// Copied and modified from serde
+#[cfg(feature = "serde")]
+impl<R, G, T> Serialize for ReentrantMutex<R, G, T>
+where
+    R: RawMutex,
+    G: GetThreadId,
+    T: Serialize + ?Sized,
+{
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        self.lock().serialize(serializer)
+    }
+}
+
+#[cfg(feature = "serde")]
+impl<'de, R, G, T> Deserialize<'de> for ReentrantMutex<R, G, T>
+where
+    R: RawMutex,
+    G: GetThreadId,
+    T: Deserialize<'de> + ?Sized,
+{
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        Deserialize::deserialize(deserializer).map(ReentrantMutex::new)
+    }
+}
+
 unsafe impl<R: RawMutex + Send, G: GetThreadId + Send, T: ?Sized + Send> Send
     for ReentrantMutex<R, G, T>
-{}
+{
+}
 unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync, T: ?Sized + Send> Sync
     for ReentrantMutex<R, G, T>
-{}
+{
+}
 
 impl<R: RawMutex, G: GetThreadId, T> ReentrantMutex<R, G, T> {
     /// Creates a new reentrant mutex in an unlocked state ready for use.
     #[cfg(feature = "nightly")]
     #[inline]
     pub const fn new(val: T) -> ReentrantMutex<R, G, T> {
         ReentrantMutex {
             data: UnsafeCell::new(val),
@@ -183,17 +219,17 @@ impl<R: RawMutex, G: GetThreadId, T> Ree
     #[allow(unused_unsafe)]
     pub fn into_inner(self) -> T {
         unsafe { self.data.into_inner() }
     }
 }
 
 impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
     #[inline]
-    fn guard(&self) -> ReentrantMutexGuard<R, G, T> {
+    fn guard(&self) -> ReentrantMutexGuard<'_, R, G, T> {
         ReentrantMutexGuard {
             remutex: &self,
             marker: PhantomData,
         }
     }
 
     /// Acquires a reentrant mutex, blocking the current thread until it is able
     /// to do so.
@@ -201,30 +237,30 @@ impl<R: RawMutex, G: GetThreadId, T: ?Si
     /// If the mutex is held by another thread then this function will block the
     /// local thread until it is available to acquire the mutex. If the mutex is
     /// already held by the current thread then this function will increment the
     /// lock reference count and return immediately. Upon returning,
     /// the thread is the only thread with the mutex held. An RAII guard is
     /// returned to allow scoped unlock of the lock. When the guard goes out of
     /// scope, the mutex will be unlocked.
     #[inline]
-    pub fn lock(&self) -> ReentrantMutexGuard<R, G, T> {
+    pub fn lock(&self) -> ReentrantMutexGuard<'_, R, G, T> {
         self.raw.lock();
         self.guard()
     }
 
     /// Attempts to acquire this lock.
     ///
     /// If the lock could not be acquired at this time, then `None` is returned.
     /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
     /// guard is dropped.
     ///
     /// This function does not block.
     #[inline]
-    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<R, G, T>> {
+    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
         if self.raw.try_lock() {
             Some(self.guard())
         } else {
             None
         }
     }
 
     /// Returns a mutable reference to the underlying data.
@@ -287,31 +323,31 @@ impl<R: RawMutexFair, G: GetThreadId, T:
 
 impl<R: RawMutexTimed, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
     /// Attempts to acquire this lock until a timeout is reached.
     ///
     /// If the lock could not be acquired before the timeout expired, then
     /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
     /// be unlocked when the guard is dropped.
     #[inline]
-    pub fn try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<R, G, T>> {
+    pub fn try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
         if self.raw.try_lock_for(timeout) {
             Some(self.guard())
         } else {
             None
         }
     }
 
     /// Attempts to acquire this lock until a timeout is reached.
     ///
     /// If the lock could not be acquired before the timeout expired, then
     /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
     /// be unlocked when the guard is dropped.
     #[inline]
-    pub fn try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<R, G, T>> {
+    pub fn try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
         if self.raw.try_lock_until(timeout) {
             Some(self.guard())
         } else {
             None
         }
     }
 }
 
@@ -325,41 +361,53 @@ impl<R: RawMutex, G: GetThreadId, T: ?Si
 impl<R: RawMutex, G: GetThreadId, T> From<T> for ReentrantMutex<R, G, T> {
     #[inline]
     fn from(t: T) -> ReentrantMutex<R, G, T> {
         ReentrantMutex::new(t)
     }
 }
 
 impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for ReentrantMutex<R, G, T> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self.try_lock() {
             Some(guard) => f
                 .debug_struct("ReentrantMutex")
                 .field("data", &&*guard)
                 .finish(),
-            None => f.pad("ReentrantMutex { <locked> }"),
+            None => {
+                struct LockedPlaceholder;
+                impl fmt::Debug for LockedPlaceholder {
+                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                        f.write_str("<locked>")
+                    }
+                }
+
+                f.debug_struct("ReentrantMutex")
+                    .field("data", &LockedPlaceholder)
+                    .finish()
+            }
         }
     }
 }
 
 /// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure
 /// is dropped (falls out of scope), the lock will be unlocked.
 ///
 /// The data protected by the mutex can be accessed through this guard via its
 /// `Deref` implementation.
-#[must_use]
-pub struct ReentrantMutexGuard<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> {
+#[must_use = "if unused the ReentrantMutex will immediately unlock"]
+pub struct ReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
     remutex: &'a ReentrantMutex<R, G, T>,
     marker: PhantomData<(&'a T, GuardNoSend)>,
 }
 
 unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
     for ReentrantMutexGuard<'a, R, G, T>
-{}
+{
+}
 
 impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> {
     /// Returns a reference to the original `ReentrantMutex` object.
     pub fn remutex(s: &Self) -> &'a ReentrantMutex<R, G, T> {
         s.remutex
     }
 
     /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
@@ -390,17 +438,20 @@ impl<'a, R: RawMutex + 'a, G: GetThreadI
     ///
     /// This operation cannot fail as the `ReentrantMutexGuard` passed
     /// in already locked the mutex.
     ///
     /// This is an associated function that needs to be
     /// used as `ReentrantMutexGuard::map(...)`. A method would interfere with methods of
     /// the same name on the contents of the locked data.
     #[inline]
-    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
+    pub fn try_map<U: ?Sized, F>(
+        s: Self,
+        f: F,
+    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
     where
         F: FnOnce(&mut T) -> Option<&mut U>,
     {
         let raw = &s.remutex.raw;
         let data = match f(unsafe { &mut *s.remutex.data.get() }) {
             Some(data) => data,
             None => return Err(s),
         };
@@ -489,38 +540,56 @@ impl<'a, R: RawMutex + 'a, G: GetThreadI
     for ReentrantMutexGuard<'a, R, G, T>
 {
     #[inline]
     fn drop(&mut self) {
         self.remutex.raw.unlock();
     }
 }
 
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
+    for ReentrantMutexGuard<'a, R, G, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for ReentrantMutexGuard<'a, R, G, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
     for ReentrantMutexGuard<'a, R, G, T>
-{}
+{
+}
 
 /// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a
 /// subfield of the protected data.
 ///
 /// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the
 /// former doesn't support temporarily unlocking and re-locking, since that
 /// could introduce soundness issues if the locked object is modified by another
 /// thread.
-#[must_use]
-pub struct MappedReentrantMutexGuard<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> {
+#[must_use = "if unused the ReentrantMutex will immediately unlock"]
+pub struct MappedReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
     raw: &'a RawReentrantMutex<R, G>,
     data: *const T,
     marker: PhantomData<&'a T>,
 }
 
 unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
     for MappedReentrantMutexGuard<'a, R, G, T>
-{}
+{
+}
 
 impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
     MappedReentrantMutexGuard<'a, R, G, T>
 {
     /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
     ///
     /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
     /// in already locked the mutex.
@@ -548,17 +617,20 @@ impl<'a, R: RawMutex + 'a, G: GetThreadI
     ///
     /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
     /// in already locked the mutex.
     ///
     /// This is an associated function that needs to be
     /// used as `MappedReentrantMutexGuard::try_map(...)`. A method would interfere with methods of
     /// the same name on the contents of the locked data.
     #[inline]
-    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
+    pub fn try_map<U: ?Sized, F>(
+        s: Self,
+        f: F,
+    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
     where
         F: FnOnce(&T) -> Option<&U>,
     {
         let raw = s.raw;
         let data = match f(unsafe { &*s.data }) {
             Some(data) => data,
             None => return Err(s),
         };
@@ -607,12 +679,29 @@ impl<'a, R: RawMutex + 'a, G: GetThreadI
     for MappedReentrantMutexGuard<'a, R, G, T>
 {
     #[inline]
     fn drop(&mut self) {
         self.raw.unlock();
     }
 }
 
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
+    for MappedReentrantMutexGuard<'a, R, G, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for MappedReentrantMutexGuard<'a, R, G, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
     for MappedReentrantMutexGuard<'a, R, G, T>
-{}
+{
+}
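For orientation, here is a minimal usage sketch of the reentrant-locking behaviour documented above. It is illustrative only and not part of this changeset; it assumes the parking_lot crate as the concrete front end for these lock_api types.

```rust
// Illustrative sketch (assumes the parking_lot front end; not part of this
// changeset): the same thread may lock a ReentrantMutex more than once, and
// each ReentrantMutexGuard gives shared (&T) access to the protected data.
use parking_lot::ReentrantMutex;

fn main() {
    let lock = ReentrantMutex::new(5u32);
    let outer = lock.lock();
    let inner = lock.lock(); // re-locking on the same thread does not deadlock
    assert_eq!(*outer + *inner, 10);
    // Both guards unlock on drop; the mutex is free once the last one is gone.
}
```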
--- a/third_party/rust/lock_api/src/rwlock.rs
+++ b/third_party/rust/lock_api/src/rwlock.rs
@@ -9,16 +9,19 @@ use core::cell::UnsafeCell;
 use core::fmt;
 use core::marker::PhantomData;
 use core::mem;
 use core::ops::{Deref, DerefMut};
 
 #[cfg(feature = "owning_ref")]
 use owning_ref::StableAddress;
 
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
 /// Basic operations for a reader-writer lock.
 ///
 /// Types implementing this trait can be used by `RwLock` to form a safe and
 /// fully-functioning `RwLock` type.
 ///
 /// # Safety
 ///
 /// Implementations of this trait must ensure that the `RwLock` is actually
@@ -225,16 +228,45 @@ pub unsafe trait RawRwLockUpgradeTimed: 
 /// allow concurrent access through readers. The RAII guards returned from the
 /// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
 /// to allow access to the contents of the lock.
 pub struct RwLock<R: RawRwLock, T: ?Sized> {
     raw: R,
     data: UnsafeCell<T>,
 }
 
+// Copied and modified from serde
+#[cfg(feature = "serde")]
+impl<R, T> Serialize for RwLock<R, T>
+where
+    R: RawRwLock,
+    T: Serialize + ?Sized,
+{
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        self.read().serialize(serializer)
+    }
+}
+
+#[cfg(feature = "serde")]
+impl<'de, R, T> Deserialize<'de> for RwLock<R, T>
+where
+    R: RawRwLock,
+    T: Deserialize<'de> + ?Sized,
+{
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        Deserialize::deserialize(deserializer).map(RwLock::new)
+    }
+}
+
 unsafe impl<R: RawRwLock + Send, T: ?Sized + Send> Send for RwLock<R, T> {}
 unsafe impl<R: RawRwLock + Sync, T: ?Sized + Send + Sync> Sync for RwLock<R, T> {}
 
 impl<R: RawRwLock, T> RwLock<R, T> {
     /// Creates a new instance of an `RwLock<T>` which is unlocked.
     #[cfg(feature = "nightly")]
     #[inline]
     pub const fn new(val: T) -> RwLock<R, T> {
@@ -259,25 +291,25 @@ impl<R: RawRwLock, T> RwLock<R, T> {
     #[allow(unused_unsafe)]
     pub fn into_inner(self) -> T {
         unsafe { self.data.into_inner() }
     }
 }
 
 impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
     #[inline]
-    fn read_guard(&self) -> RwLockReadGuard<R, T> {
+    fn read_guard(&self) -> RwLockReadGuard<'_, R, T> {
         RwLockReadGuard {
             rwlock: self,
             marker: PhantomData,
         }
     }
 
     #[inline]
-    fn write_guard(&self) -> RwLockWriteGuard<R, T> {
+    fn write_guard(&self) -> RwLockWriteGuard<'_, R, T> {
         RwLockWriteGuard {
             rwlock: self,
             marker: PhantomData,
         }
     }
 
     /// Locks this `RwLock` with shared read access, blocking the current thread
     /// until it can be acquired.
@@ -287,60 +319,60 @@ impl<R: RawRwLock, T: ?Sized> RwLock<R, 
     /// this method returns.
     ///
     /// Note that attempts to recursively acquire a read lock on a `RwLock` when
     /// the current thread already holds one may result in a deadlock.
     ///
     /// Returns an RAII guard which will release this thread's shared access
     /// once it is dropped.
     #[inline]
-    pub fn read(&self) -> RwLockReadGuard<R, T> {
+    pub fn read(&self) -> RwLockReadGuard<'_, R, T> {
         self.raw.lock_shared();
         self.read_guard()
     }
 
     /// Attempts to acquire this `RwLock` with shared read access.
     ///
     /// If the access could not be granted at this time, then `None` is returned.
     /// Otherwise, an RAII guard is returned which will release the shared access
     /// when it is dropped.
     ///
     /// This function does not block.
     #[inline]
-    pub fn try_read(&self) -> Option<RwLockReadGuard<R, T>> {
+    pub fn try_read(&self) -> Option<RwLockReadGuard<'_, R, T>> {
         if self.raw.try_lock_shared() {
             Some(self.read_guard())
         } else {
             None
         }
     }
 
     /// Locks this `RwLock` with exclusive write access, blocking the current
     /// thread until it can be acquired.
     ///
     /// This function will not return while other writers or other readers
     /// currently have access to the lock.
     ///
     /// Returns an RAII guard which will drop the write access of this `RwLock`
     /// when dropped.
     #[inline]
-    pub fn write(&self) -> RwLockWriteGuard<R, T> {
+    pub fn write(&self) -> RwLockWriteGuard<'_, R, T> {
         self.raw.lock_exclusive();
         self.write_guard()
     }
 
     /// Attempts to lock this `RwLock` with exclusive write access.
     ///
     /// If the lock could not be acquired at this time, then `None` is returned.
     /// Otherwise, an RAII guard is returned which will release the lock when
     /// it is dropped.
     ///
     /// This function does not block.
     #[inline]
-    pub fn try_write(&self) -> Option<RwLockWriteGuard<R, T>> {
+    pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, R, T>> {
         if self.raw.try_lock_exclusive() {
             Some(self.write_guard())
         } else {
             None
         }
     }
 
     /// Returns a mutable reference to the underlying data.
@@ -436,62 +468,62 @@ impl<R: RawRwLockFair, T: ?Sized> RwLock
 impl<R: RawRwLockTimed, T: ?Sized> RwLock<R, T> {
     /// Attempts to acquire this `RwLock` with shared read access until a timeout
     /// is reached.
     ///
     /// If the access could not be granted before the timeout expires, then
     /// `None` is returned. Otherwise, an RAII guard is returned which will
     /// release the shared access when it is dropped.
     #[inline]
-    pub fn try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<R, T>> {
+    pub fn try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<'_, R, T>> {
         if self.raw.try_lock_shared_for(timeout) {
             Some(self.read_guard())
         } else {
             None
         }
     }
 
     /// Attempts to acquire this `RwLock` with shared read access until a timeout
     /// is reached.
     ///
     /// If the access could not be granted before the timeout expires, then
     /// `None` is returned. Otherwise, an RAII guard is returned which will
     /// release the shared access when it is dropped.
     #[inline]
-    pub fn try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<R, T>> {
+    pub fn try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<'_, R, T>> {
         if self.raw.try_lock_shared_until(timeout) {
             Some(self.read_guard())
         } else {
             None
         }
     }
 
     /// Attempts to acquire this `RwLock` with exclusive write access until a
     /// timeout is reached.
     ///
     /// If the access could not be granted before the timeout expires, then
     /// `None` is returned. Otherwise, an RAII guard is returned which will
     /// release the exclusive access when it is dropped.
     #[inline]
-    pub fn try_write_for(&self, timeout: R::Duration) -> Option<RwLockWriteGuard<R, T>> {
+    pub fn try_write_for(&self, timeout: R::Duration) -> Option<RwLockWriteGuard<'_, R, T>> {
         if self.raw.try_lock_exclusive_for(timeout) {
             Some(self.write_guard())
         } else {
             None
         }
     }
 
     /// Attempts to acquire this `RwLock` with exclusive write access until a
     /// timeout is reached.
     ///
     /// If the access could not be granted before the timeout expires, then
     /// `None` is returned. Otherwise, an RAII guard is returned which will
     /// release the exclusive access when it is dropped.
     #[inline]
-    pub fn try_write_until(&self, timeout: R::Instant) -> Option<RwLockWriteGuard<R, T>> {
+    pub fn try_write_until(&self, timeout: R::Instant) -> Option<RwLockWriteGuard<'_, R, T>> {
         if self.raw.try_lock_exclusive_until(timeout) {
             Some(self.write_guard())
         } else {
             None
         }
     }
 }
 
@@ -507,33 +539,33 @@ impl<R: RawRwLockRecursive, T: ?Sized> R
     /// another read lock is held at the time of the call. This allows a thread
     /// to recursively lock a `RwLock`. However using this method can cause
     /// writers to starve since readers no longer block if a writer is waiting
     /// for the lock.
     ///
     /// Returns an RAII guard which will release this thread's shared access
     /// once it is dropped.
     #[inline]
-    pub fn read_recursive(&self) -> RwLockReadGuard<R, T> {
+    pub fn read_recursive(&self) -> RwLockReadGuard<'_, R, T> {
         self.raw.lock_shared_recursive();
         self.read_guard()
     }
 
     /// Attempts to acquire this `RwLock` with shared read access.
     ///
     /// If the access could not be granted at this time, then `None` is returned.
     /// Otherwise, an RAII guard is returned which will release the shared access
     /// when it is dropped.
     ///
     /// This method is guaranteed to succeed if another read lock is held at the
     /// time of the call. See the documentation for `read_recursive` for details.
     ///
     /// This function does not block.
     #[inline]
-    pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<R, T>> {
+    pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<'_, R, T>> {
         if self.raw.try_lock_shared_recursive() {
             Some(self.read_guard())
         } else {
             None
         }
     }
 }
 
@@ -544,73 +576,79 @@ impl<R: RawRwLockRecursiveTimed, T: ?Siz
     /// If the access could not be granted before the timeout expires, then
     /// `None` is returned. Otherwise, an RAII guard is returned which will
     /// release the shared access when it is dropped.
     ///
     /// This method is guaranteed to succeed without blocking if another read
     /// lock is held at the time of the call. See the documentation for
     /// `read_recursive` for details.
     #[inline]
-    pub fn try_read_recursive_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<R, T>> {
+    pub fn try_read_recursive_for(
+        &self,
+        timeout: R::Duration,
+    ) -> Option<RwLockReadGuard<'_, R, T>> {
         if self.raw.try_lock_shared_recursive_for(timeout) {
             Some(self.read_guard())
         } else {
             None
         }
     }
 
     /// Attempts to acquire this `RwLock` with shared read access until a timeout
     /// is reached.
     ///
     /// If the access could not be granted before the timeout expires, then
     /// `None` is returned. Otherwise, an RAII guard is returned which will
     /// release the shared access when it is dropped.
     #[inline]
-    pub fn try_read_recursive_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<R, T>> {
+    pub fn try_read_recursive_until(
+        &self,
+        timeout: R::Instant,
+    ) -> Option<RwLockReadGuard<'_, R, T>> {
         if self.raw.try_lock_shared_recursive_until(timeout) {
             Some(self.read_guard())
         } else {
             None
         }
     }
 }
 
 impl<R: RawRwLockUpgrade, T: ?Sized> RwLock<R, T> {
     #[inline]
-    fn upgradable_guard(&self) -> RwLockUpgradableReadGuard<R, T> {
+    fn upgradable_guard(&self) -> RwLockUpgradableReadGuard<'_, R, T> {
         RwLockUpgradableReadGuard {
             rwlock: self,
             marker: PhantomData,
         }
     }
 
     /// Locks this `RwLock` with upgradable read access, blocking the current thread
     /// until it can be acquired.
     ///
     /// The calling thread will be blocked until there are no more writers or other
     /// upgradable reads which hold the lock. There may be other readers currently
     /// inside the lock when this method returns.
     ///
     /// Returns an RAII guard which will release this thread's shared access
     /// once it is dropped.
     #[inline]
-    pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<R, T> {
+    pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<'_, R, T> {
         self.raw.lock_upgradable();
         self.upgradable_guard()
     }
 
     /// Attempts to acquire this `RwLock` with upgradable read access.
     ///
     /// If the access could not be granted at this time, then `None` is returned.
     /// Otherwise, an RAII guard is returned which will release the shared access
     /// when it is dropped.
     ///
     /// This function does not block.
     #[inline]
-    pub fn try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<R, T>> {
+    pub fn try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
         if self.raw.try_lock_upgradable() {
             Some(self.upgradable_guard())
         } else {
             None
         }
     }
 }
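To illustrate the upgradable-read API documented above, a short sketch follows. It is not part of this file and assumes parking_lot's `RwLock` as the concrete lock type.

```rust
// Illustrative sketch (assumes the parking_lot crate): an upgradable read
// coexists with plain readers and can later be upgraded to a write lock
// atomically, without releasing the lock in between.
use parking_lot::{RwLock, RwLockUpgradableReadGuard};

fn bump_if_small(lock: &RwLock<u32>) {
    let guard = lock.upgradable_read();
    if *guard < 10 {
        // upgrade() waits for remaining readers to leave, then grants write access.
        let mut writer = RwLockUpgradableReadGuard::upgrade(guard);
        *writer += 1;
    }
}

fn main() {
    let lock = RwLock::new(3);
    bump_if_small(&lock);
    assert_eq!(*lock.read(), 4);
}
```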
 
@@ -620,17 +658,17 @@ impl<R: RawRwLockUpgradeTimed, T: ?Sized
     ///
     /// If the access could not be granted before the timeout expires, then
     /// `None` is returned. Otherwise, an RAII guard is returned which will
     /// release the shared access when it is dropped.
     #[inline]
     pub fn try_upgradable_read_for(
         &self,
         timeout: R::Duration,
-    ) -> Option<RwLockUpgradableReadGuard<R, T>> {
+    ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
         if self.raw.try_lock_upgradable_for(timeout) {
             Some(self.upgradable_guard())
         } else {
             None
         }
     }
 
     /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
@@ -638,17 +676,17 @@ impl<R: RawRwLockUpgradeTimed, T: ?Sized
     ///
     /// If the access could not be granted before the timeout expires, then
     /// `None` is returned. Otherwise, an RAII guard is returned which will
     /// release the shared access when it is dropped.
     #[inline]
     pub fn try_upgradable_read_until(
         &self,
         timeout: R::Instant,
-    ) -> Option<RwLockUpgradableReadGuard<R, T>> {
+    ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
         if self.raw.try_lock_upgradable_until(timeout) {
             Some(self.upgradable_guard())
         } else {
             None
         }
     }
 }
 
@@ -662,28 +700,39 @@ impl<R: RawRwLock, T: ?Sized + Default> 
 impl<R: RawRwLock, T> From<T> for RwLock<R, T> {
     #[inline]
     fn from(t: T) -> RwLock<R, T> {
         RwLock::new(t)
     }
 }
 
 impl<R: RawRwLock, T: ?Sized + fmt::Debug> fmt::Debug for RwLock<R, T> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self.try_read() {
             Some(guard) => f.debug_struct("RwLock").field("data", &&*guard).finish(),
-            None => f.pad("RwLock { <locked> }"),
+            None => {
+                struct LockedPlaceholder;
+                impl fmt::Debug for LockedPlaceholder {
+                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                        f.write_str("<locked>")
+                    }
+                }
+
+                f.debug_struct("RwLock")
+                    .field("data", &LockedPlaceholder)
+                    .finish()
+            }
         }
     }
 }
 
 /// RAII structure used to release the shared read access of a lock when
 /// dropped.
-#[must_use]
-pub struct RwLockReadGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
+#[must_use = "if unused the RwLock will immediately unlock"]
+pub struct RwLockReadGuard<'a, R: RawRwLock, T: ?Sized> {
     rwlock: &'a RwLock<R, T>,
     marker: PhantomData<(&'a T, R::GuardMarker)>,
 }
 
 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for RwLockReadGuard<'a, R, T> {}
 
 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
     /// Returns a reference to the original reader-writer lock object.
@@ -814,23 +863,37 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 
 
 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T> {
     #[inline]
     fn drop(&mut self) {
         self.rwlock.raw.unlock_shared();
     }
 }
 
+impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockReadGuard<'a, R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for RwLockReadGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockReadGuard<'a, R, T> {}
 
 /// RAII structure used to release the exclusive write access of a lock when
 /// dropped.
-#[must_use]
-pub struct RwLockWriteGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
+#[must_use = "if unused the RwLock will immediately unlock"]
+pub struct RwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
     rwlock: &'a RwLock<R, T>,
     marker: PhantomData<(&'a mut T, R::GuardMarker)>,
 }
 
 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for RwLockWriteGuard<'a, R, T> {}
 
 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
     /// Returns a reference to the original reader-writer lock object.
@@ -1002,30 +1065,45 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 
 
 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T> {
     #[inline]
     fn drop(&mut self) {
         self.rwlock.raw.unlock_exclusive();
     }
 }
 
+impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockWriteGuard<'a, R, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for RwLockWriteGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockWriteGuard<'a, R, T> {}
 
 /// RAII structure used to release the upgradable read access of a lock when
 /// dropped.
-#[must_use]
-pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> {
+#[must_use = "if unused the RwLock will immediately unlock"]
+pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade, T: ?Sized> {
     rwlock: &'a RwLock<R, T>,
     marker: PhantomData<(&'a T, R::GuardMarker)>,
 }
 
 unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + Sync + 'a> Sync
     for RwLockUpgradableReadGuard<'a, R, T>
-{}
+{
+}
 
 impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
     /// Returns a reference to the original reader-writer lock object.
     pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
         s.rwlock
     }
 
     /// Temporarily unlocks the `RwLock` to execute the given function.
@@ -1191,39 +1269,57 @@ impl<'a, R: RawRwLockUpgrade + 'a, T: ?S
 
 impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, R, T> {
     #[inline]
     fn drop(&mut self) {
         self.rwlock.raw.unlock_upgradable();
     }
 }
 
+impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
+    for RwLockUpgradableReadGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for RwLockUpgradableReadGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress
     for RwLockUpgradableReadGuard<'a, R, T>
-{}
+{
+}
 
 /// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
 /// subfield of the protected data.
 ///
 /// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the
 /// former doesn't support temporarily unlocking and re-locking, since that
 /// could introduce soundness issues if the locked object is modified by another
 /// thread.
-#[must_use]
-pub struct MappedRwLockReadGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
+#[must_use = "if unused the RwLock will immediately unlock"]
+pub struct MappedRwLockReadGuard<'a, R: RawRwLock, T: ?Sized> {
     raw: &'a R,
     data: *const T,
     marker: PhantomData<&'a T>,
 }
 
 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockReadGuard<'a, R, T> {}
 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Send for MappedRwLockReadGuard<'a, R, T> where
     R::GuardMarker: Send
-{}
+{
+}
 
 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
     /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
     ///
     /// This operation cannot fail as the `MappedRwLockReadGuard` passed
     /// in already locked the data.
     ///
     /// This is an associated function that needs to be
@@ -1302,41 +1398,60 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 
 
 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R, T> {
     #[inline]
     fn drop(&mut self) {
         self.raw.unlock_shared();
     }
 }
 
+impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
+    for MappedRwLockReadGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for MappedRwLockReadGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
     for MappedRwLockReadGuard<'a, R, T>
-{}
+{
+}
 
 /// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a
 /// subfield of the protected data.
 ///
 /// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the
 /// former doesn't support temporarily unlocking and re-locking, since that
 /// could introduce soundness issues if the locked object is modified by another
 /// thread.
-#[must_use]
-pub struct MappedRwLockWriteGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
+#[must_use = "if unused the RwLock will immediately unlock"]
+pub struct MappedRwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
     raw: &'a R,
     data: *mut T,
     marker: PhantomData<&'a mut T>,
 }
 
 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync
     for MappedRwLockWriteGuard<'a, R, T>
-{}
+{
+}
 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Send for MappedRwLockWriteGuard<'a, R, T> where
     R::GuardMarker: Send
-{}
+{
+}
 
 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
     /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
     ///
     /// This operation cannot fail as the `MappedRwLockWriteGuard` passed
     /// in already locked the data.
     ///
     /// This is an associated function that needs to be
@@ -1442,12 +1557,29 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 
 
 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, R, T> {
     #[inline]
     fn drop(&mut self) {
         self.raw.unlock_exclusive();
     }
 }
 
+impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
+    for MappedRwLockWriteGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
+    for MappedRwLockWriteGuard<'a, R, T>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (**self).fmt(f)
+    }
+}
+
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
     for MappedRwLockWriteGuard<'a, R, T>
-{}
+{
+}
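Summarizing the reader/writer semantics spelled out in the doc comments above, a small usage sketch (again assuming the parking_lot front end rather than raw lock_api types; not part of this changeset):

```rust
// Illustrative sketch: shared readers coexist, writers are exclusive.
use parking_lot::RwLock;

fn main() {
    let lock = RwLock::new(0u32);
    {
        let r1 = lock.read();
        let r2 = lock.read();                 // multiple readers may hold the lock
        assert_eq!(*r1 + *r2, 0);
        assert!(lock.try_write().is_none());  // a writer cannot enter while readers exist
    }
    *lock.write() += 1;                       // exclusive access once the readers are gone
    assert_eq!(*lock.read(), 1);
}
```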
--- a/third_party/rust/parking_lot/.cargo-checksum.json
+++ b/third_party/rust/parking_lot/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{"CHANGELOG.md":"e254fac6600c725edb746f31f41b1b2ceeb9cfc85f4f9a3e6af874c70b020823","Cargo.toml":"215d5b3a2c18f556b5c66ac6d27eea71d7dd7e6b4857ecd6966c2e5cc03270ea","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"a52cf38f796e7f12215662e8a3a23aa9802c170a09ecba0e4be766c88f95a9c5","appveyor.yml":"cb1d02316926d88e174976bfc6781194569ca27f386c50e3091d8e52587d30a2","src/condvar.rs":"ce127f75bad5c175abb8147aac4b5be78aabdb599c5f8f3aad77f6bc3705274d","src/deadlock.rs":"8916c2e2820bfd3a55860ddb9f1b907888406b68cdae2b7a2093c825d28f3b99","src/elision.rs":"89072fe0aca87d53abc0f56490ae77bcf9d77e28e291bd13e861b1924bbb079f","src/lib.rs":"3e259bf3421f10c3e920daca511a4880b2620145a1fcb070a37548835c4f429a","src/mutex.rs":"0ac3e654e4aa2c3078a6aa22c83428d604e7f3f8ed4c261c40d030d232ca7b64","src/once.rs":"606e0e88d6c1ff82b69bda56e7409ec3a1aefa66b45b7fa42b88cba07ae70598","src/raw_mutex.rs":"881e75a843d76399d01c4ae0f09cd23b93b137b5035a47bd7886505132e58165","src/raw_rwlock.rs":"2e3c13e80cd06be53118ae2bcc7bdec708dda8c139c371ee12885f48903cf69c","src/remutex.rs":"bad8022610344086010b0661998a416db4b458c222e671b67df03fc4795c0298","src/rwlock.rs":"fc826cbcf2d7862ecb184b657a82bb8794a9e26ac329c8f87b589fa09f15d245","src/util.rs":"2d07c0c010a857790ae2ed6a1215eeed8af76859e076797ea1ba8dec82169e84"},"package":"69376b761943787ebd5cc85a5bc95958651a22609c5c1c2b65de21786baec72b"}
\ No newline at end of file
+{"files":{"CHANGELOG.md":"f9a9c82373818d32816c42e0f127f6f14a64d37925f02041c10c66a528e0d454","Cargo.toml":"ef3558536eff060103a0c35e6e9ecfe723240c4a37429cf3d7d84d1eb4fda5e3","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"61723e013019e0254522485795be4ff3f1cb4f580ebf4a8daa8fabeb4b9b9e6b","appveyor.yml":"fd584e381a2eb990c8d5eb44998d9c91ff4d538f4b9c62acc018a7bb94cb1fe7","build.rs":"4ed00d73d71057bcdf6c186559468927fc130fd65cfd806ee5d46d28540bc653","src/condvar.rs":"d7cf8af884d577a726f40ed043cbbf2a24424df6e20e1cc4718f4ae390cbb861","src/deadlock.rs":"081dbf009539b113f67ad0a1abd7af889dad684a47aa1a7dc00ae91f08975ef6","src/elision.rs":"00f7af80021fd602879fb7205befb6ff941cd8dc932a5c0a534b430fefe421ea","src/lib.rs":"acfb6cd0d6e69ab49325defc2d9dd624088d442c9c0dae71e20dd8eced84cae3","src/mutex.rs":"e3a48933b7e19d26eab4b5f44ed4e9bcb069b57cdd4a0569d1e65f6c3839b766","src/once.rs":"3b0c1254acbcff840048c722220066988df69f9d9487ac188356f64b7bcad54f","src/raw_mutex.rs":"9eeccbe797116f8c3f1a19e4803ac1bb57c6c5ec9b2d2770fb42ee5aee5a1002","src/raw_rwlock.rs":"5bb1d74a90a52f0f573d49776a2a68f00a2301c25c8400af2934d3e018728e79","src/remutex.rs":"85b3cff3aaa0ca4c644fcb7cd06447128e8e6065d6a632c436085841ac244022","src/rwlock.rs":"63be04f2af7eda7aa33f704846eb413a2ffd76135d248cb250dc91bd20d7dd66","src/util.rs":"8bd40151fea0a7ffb2fdcb751a5dfd868d8d4d275b0f1b04a7fc5d2a0ba41766"},"package":"fa7767817701cce701d5585b9c4db3cdd02086398322c1d7e8bf5094a96a2ce7"}
\ No newline at end of file
--- a/third_party/rust/parking_lot/CHANGELOG.md
+++ b/third_party/rust/parking_lot/CHANGELOG.md
@@ -1,8 +1,19 @@
+0.7.1 (2019-01-01)
+==================
+
+- Fixed potential deadlock when upgrading a RwLock.
+- Fixed overflow panic on very long timeouts (#111).
+
+0.7.0 (2018-11-20)
+==================
+
+- Return if or how many threads were notified from `Condvar::notify_*`
+
 0.6.3 (2018-07-18)
 ==================
 
 - Export `RawMutex`, `RawRwLock` and `RawThreadId`.
 
 0.6.2 (2018-06-18)
 ==================
 
--- a/third_party/rust/parking_lot/Cargo.toml
+++ b/third_party/rust/parking_lot/Cargo.toml
@@ -1,35 +1,45 @@
 # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
 #
 # When uploading crates to the registry Cargo will automatically
 # "normalize" Cargo.toml files for maximal compatibility
 # with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g. crates.io) dependencies
+# to registry (e.g., crates.io) dependencies
 #
 # If you believe there's an error in this file please file an
 # issue against the rust-lang/cargo repository. If you're
 # editing this file be aware that the upstream Cargo.toml
 # will likely look very different (and much more reasonable)
 
 [package]
+edition = "2018"
 name = "parking_lot"
-version = "0.6.3"
+version = "0.8.0"
 authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
 description = "More compact and efficient implementations of the standard synchronization primitives."
 readme = "README.md"
 keywords = ["mutex", "condvar", "rwlock", "once", "thread"]
 categories = ["concurrency"]
 license = "Apache-2.0/MIT"
 repository = "https://github.com/Amanieu/parking_lot"
 [dependencies.lock_api]
-version = "0.1"
+version = "0.2"
 
 [dependencies.parking_lot_core]
-version = "0.2"
+version = "0.5"
+[dev-dependencies.bincode]
+version = "1.1.3"
+
+[dev-dependencies.lazy_static]
+version = "1.0"
+
 [dev-dependencies.rand]
-version = "0.5"
+version = "0.6"
+[build-dependencies.rustc_version]
+version = "0.2"
 
 [features]
 deadlock_detection = ["parking_lot_core/deadlock_detection"]
-default = ["owning_ref"]
+default = []
 nightly = ["parking_lot_core/nightly", "lock_api/nightly"]
 owning_ref = ["lock_api/owning_ref"]
+serde = ["lock_api/serde"]
--- a/third_party/rust/parking_lot/README.md
+++ b/third_party/rust/parking_lot/README.md
@@ -63,16 +63,19 @@ 13. `Mutex<()>` and `RwLock<()>` allow r
 14. `Mutex` and `RwLock` support [eventual fairness](https://trac.webkit.org/changeset/203350)
     which allows them to be fair on average without sacrificing performance.
 15. A `ReentrantMutex` type which supports recursive locking.
 16. An *experimental* deadlock detector that works for `Mutex`,
     `RwLock` and `ReentrantMutex`. This feature is disabled by default and
     can be enabled via the `deadlock_detection` feature.
 17. `RwLock` supports atomically upgrading an "upgradable" read lock into a
     write lock.
+18. Optional support for [serde](https://docs.serde.rs/serde/). Enable via the
+    `serde` feature. **NOTE!** This support covers `Mutex`, `ReentrantMutex`,
+    and `RwLock` only; `Condvar` and `Once` are not currently supported (see
+    the sketch below).
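An illustrative sketch of the serde support (not part of the upstream README): it assumes the `serde` feature is enabled and uses `serde_json` purely as an example dependency.

```rust
// Illustrative sketch: serializing and deserializing a parking_lot RwLock
// through the optional `serde` feature. serde_json is an assumed extra
// dependency used only for this example.
use parking_lot::RwLock;

fn main() {
    let lock = RwLock::new(vec![1, 2, 3]);
    // Serialization takes a read lock internally.
    let json = serde_json::to_string(&lock).unwrap();
    assert_eq!(json, "[1,2,3]");

    // Deserialization builds a fresh, unlocked RwLock around the decoded value.
    let decoded: RwLock<Vec<u32>> = serde_json::from_str(&json).unwrap();
    assert_eq!(*decoded.read(), vec![1, 2, 3]);
}
```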
 
 ## The parking lot
 
 To keep these primitives small, all thread queuing and suspending
 functionality is offloaded to the *parking lot*. The idea behind this is
 based on the Webkit [`WTF::ParkingLot`](https://webkit.org/blog/6161/locking-in-webkit/)
 class, which essentially consists of a hash table mapping of lock addresses
 to queues of parked (sleeping) threads. The Webkit parking lot was itself
@@ -94,39 +97,44 @@ To enable nightly-only functionality, yo
 in Cargo (see below).
 
 ## Usage
 
 Add this to your `Cargo.toml`:
 
 ```toml
 [dependencies]
-parking_lot = "0.6"
+parking_lot = "0.8"
 ```
 
 and this to your crate root:
 
 ```rust
 extern crate parking_lot;
 ```
 
 To enable nightly-only features, add this to your `Cargo.toml` instead:
 
 ```toml
 [dependencies]
-parking_lot = {version = "0.6", features = ["nightly"]}
+parking_lot = {version = "0.8", features = ["nightly"]}
 ```
 
 The experimental deadlock detector can be enabled with the
 `deadlock_detection` Cargo feature.
 
 The core parking lot API is provided by the `parking_lot_core` crate. It is
 separate from the synchronization primitives in the `parking_lot` crate so that
 changes to the core API do not cause breaking changes for users of `parking_lot`.
 
+## Minimum Rust version
+
+The current minimum required Rust version is 1.31. Any change to this is
+considered a breaking change and will require a major version bump.
+
 ## License
 
 Licensed under either of
 
  * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
  * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
 
 at your option.
--- a/third_party/rust/parking_lot/appveyor.yml
+++ b/third_party/rust/parking_lot/appveyor.yml
@@ -1,29 +1,29 @@
 environment:
   TRAVIS_CARGO_NIGHTLY_FEATURE: nightly
   RUST_TEST_THREADS: 1
   matrix:
   - TARGET: nightly-x86_64-pc-windows-msvc
   - TARGET: nightly-i686-pc-windows-msvc
   - TARGET: nightly-x86_64-pc-windows-gnu
   - TARGET: nightly-i686-pc-windows-gnu
-  - TARGET: 1.24.0-x86_64-pc-windows-msvc
-  - TARGET: 1.24.0-i686-pc-windows-msvc
-  - TARGET: 1.24.0-x86_64-pc-windows-gnu
-  - TARGET: 1.24.0-i686-pc-windows-gnu
+  - TARGET: 1.31.0-x86_64-pc-windows-msvc
+  - TARGET: 1.31.0-i686-pc-windows-msvc
+  - TARGET: 1.31.0-x86_64-pc-windows-gnu
+  - TARGET: 1.31.0-i686-pc-windows-gnu
 
 install:
   - SET PATH=C:\Python27;C:\Python27\Scripts;%PATH%;%APPDATA%\Python\Scripts
   - pip install "travis-cargo<0.2" --user
   - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-${env:TARGET}.exe" -FileName "rust-install.exe"
   - ps: .\rust-install.exe /VERYSILENT /NORESTART /DIR="C:\rust" | Out-Null
   - ps: $env:PATH="$env:PATH;C:\rust\bin"
   - rustc -vV
   - cargo -vV
 
 build_script:
   - travis-cargo build
 
 test_script:
   - travis-cargo test
-  - travis-cargo test -- --features=deadlock_detection
+  - travis-cargo --only nightly test -- --features=deadlock_detection
   - travis-cargo doc
new file mode 100644
--- /dev/null
+++ b/third_party/rust/parking_lot/build.rs
@@ -0,0 +1,8 @@
+use rustc_version::{version, Version};
+
+fn main() {
+    if version().unwrap() >= Version::parse("1.34.0").unwrap() {
+        println!("cargo:rustc-cfg=has_sized_atomics");
+        println!("cargo:rustc-cfg=has_checked_instant");
+    }
+}
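The cfg flags emitted by this build script are consumed elsewhere in the crate; a hypothetical illustration of the pattern follows (the function name `add_timeout` exists only for this sketch, while `has_checked_instant` is the cfg printed above; the real call sites are elsewhere in the vendored sources).

```rust
// Hypothetical illustration of consuming a version-gated cfg from build.rs.
use std::time::{Duration, Instant};

#[cfg(has_checked_instant)]
fn add_timeout(now: Instant, timeout: Duration) -> Option<Instant> {
    now.checked_add(timeout) // Instant::checked_add is available on rustc >= 1.34
}

#[cfg(not(has_checked_instant))]
fn add_timeout(now: Instant, timeout: Duration) -> Option<Instant> {
    Some(now + timeout) // may panic on overflow with older compilers
}
```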
--- a/third_party/rust/parking_lot/src/condvar.rs
+++ b/third_party/rust/parking_lot/src/condvar.rs
@@ -1,23 +1,25 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use deadlock;
+use crate::mutex::MutexGuard;
+use crate::raw_mutex::{RawMutex, TOKEN_HANDOFF, TOKEN_NORMAL};
+use crate::{deadlock, util};
+use core::{
+    fmt, ptr,
+    sync::atomic::{AtomicPtr, Ordering},
+};
 use lock_api::RawMutex as RawMutexTrait;
-use mutex::MutexGuard;
 use parking_lot_core::{self, ParkResult, RequeueOp, UnparkResult, DEFAULT_PARK_TOKEN};
-use raw_mutex::{RawMutex, TOKEN_HANDOFF, TOKEN_NORMAL};
-use std::sync::atomic::{AtomicPtr, Ordering};
 use std::time::{Duration, Instant};
-use std::{fmt, ptr};
 
 /// A type indicating whether a timed wait on a condition variable returned
 /// due to a time out or not.
 #[derive(Debug, PartialEq, Eq, Copy, Clone)]
 pub struct WaitTimeoutResult(bool);
 
 impl WaitTimeoutResult {
     /// Returns whether the wait was known to have timed out.
@@ -82,89 +84,122 @@ impl WaitTimeoutResult {
 /// ```
 pub struct Condvar {
     state: AtomicPtr<RawMutex>,
 }
 
 impl Condvar {
     /// Creates a new condition variable which is ready to be waited on and
     /// notified.
-    #[cfg(feature = "nightly")]
     #[inline]
     pub const fn new() -> Condvar {
         Condvar {
             state: AtomicPtr::new(ptr::null_mut()),
         }
     }
 
-    /// Creates a new condition variable which is ready to be waited on and
-    /// notified.
-    #[cfg(not(feature = "nightly"))]
-    #[inline]
-    pub fn new() -> Condvar {
-        Condvar {
-            state: AtomicPtr::new(ptr::null_mut()),
-        }
-    }
-
     /// Wakes up one blocked thread on this condvar.
     ///
+    /// Returns whether a thread was woken up.
+    ///
     /// If there is a blocked thread on this condition variable, then it will
     /// be woken up from its call to `wait` or `wait_timeout`. Calls to
     /// `notify_one` are not buffered in any way.
     ///
     /// To wake up all threads, see `notify_all()`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use parking_lot::Condvar;
+    ///
+    /// let condvar = Condvar::new();
+    ///
+    /// // do something with condvar, share it with other threads
+    ///
+    /// if !condvar.notify_one() {
+    ///     println!("Nobody was listening for this.");
+    /// }
+    /// ```
     #[inline]
-    pub fn notify_one(&self) {
+    pub fn notify_one(&self) -> bool {
         // Nothing to do if there are no waiting threads
-        if self.state.load(Ordering::Relaxed).is_null() {
-            return;
+        let state = self.state.load(Ordering::Relaxed);
+        if state.is_null() {
+            return false;
         }
 
-        self.notify_one_slow();
+        self.notify_one_slow(state)
     }
 
     #[cold]
     #[inline(never)]
-    fn notify_one_slow(&self) {
+    fn notify_one_slow(&self, mutex: *mut RawMutex) -> bool {
         unsafe {
-            // Unpark one thread
-            let addr = self as *const _ as usize;
-            let callback = |result: UnparkResult| {
+            // Unpark one thread and requeue the rest onto the mutex
+            let from = self as *const _ as usize;
+            let to = mutex as usize;
+            let validate = || {
+                // Make sure that our atomic state still points to the same
+                // mutex. If not then it means that all threads on the current
+                // mutex were woken up and a new waiting thread switched to a
+                // different mutex. In that case we can get away with doing
+                // nothing.
+                if self.state.load(Ordering::Relaxed) != mutex {
+                    return RequeueOp::Abort;
+                }
+
+                // Unpark one thread if the mutex is unlocked, otherwise just
+                // requeue everything to the mutex. This is safe to do here
+                // since unlocking the mutex when the parked bit is set requires
+                // locking the queue. There is the possibility of a race if the
+                // mutex gets locked after we check, but that doesn't matter in
+                // this case.
+                if (*mutex).mark_parked_if_locked() {
+                    RequeueOp::RequeueOne
+                } else {
+                    RequeueOp::UnparkOne
+                }
+            };
+            let callback = |_op, result: UnparkResult| {
                 // Clear our state if there are no more waiting threads
                 if !result.have_more_threads {
                     self.state.store(ptr::null_mut(), Ordering::Relaxed);
                 }
                 TOKEN_NORMAL
             };
-            parking_lot_core::unpark_one(addr, callback);
+            let res = parking_lot_core::unpark_requeue(from, to, validate, callback);
+
+            res.unparked_threads + res.requeued_threads != 0
         }
     }
 
     /// Wakes up all blocked threads on this condvar.
     ///
+    /// Returns the number of threads woken up.
+    ///
     /// This method will ensure that any current waiters on the condition
     /// variable are awoken. Calls to `notify_all()` are not buffered in any
     /// way.
     ///
     /// To wake up only one thread, see `notify_one()`.
     #[inline]
-    pub fn notify_all(&self) {
+    pub fn notify_all(&self) -> usize {
         // Nothing to do if there are no waiting threads
         let state = self.state.load(Ordering::Relaxed);
         if state.is_null() {
-            return;
+            return 0;
         }
 
-        self.notify_all_slow(state);
+        self.notify_all_slow(state)
     }
 
     #[cold]
     #[inline(never)]
-    fn notify_all_slow(&self, mutex: *mut RawMutex) {
+    fn notify_all_slow(&self, mutex: *mut RawMutex) -> usize {
         unsafe {
             // Unpark one thread and requeue the rest onto the mutex
             let from = self as *const _ as usize;
             let to = mutex as usize;
             let validate = || {
                 // Make sure that our atomic state still points to the same
                 // mutex. If not then it means that all threads on the current
                 // mutex were woken up and a new waiting thread switched to a
@@ -188,22 +223,24 @@ impl Condvar {
                     RequeueOp::RequeueAll
                 } else {
                     RequeueOp::UnparkOneRequeueRest
                 }
             };
             let callback = |op, result: UnparkResult| {
                 // If we requeued threads to the mutex, mark it as having
                 // parked threads. The RequeueAll case is already handled above.
-                if op == RequeueOp::UnparkOneRequeueRest && result.have_more_threads {
+                if op == RequeueOp::UnparkOneRequeueRest && result.requeued_threads != 0 {
                     (*mutex).mark_parked();
                 }
                 TOKEN_NORMAL
             };
-            parking_lot_core::unpark_requeue(from, to, validate, callback);
+            let res = parking_lot_core::unpark_requeue(from, to, validate, callback);
+
+            res.unparked_threads + res.requeued_threads
         }
     }
 
     /// Blocks the current thread until this condition variable receives a
     /// notification.
     ///
     /// This function will atomically unlock the mutex specified (represented by
     /// `mutex_guard`) and block the current thread. This means that any calls
@@ -211,17 +248,17 @@ impl Condvar {
     /// candidates to wake this thread up. When this function call returns, the
     /// lock specified will have been re-acquired.
     ///
     /// # Panics
     ///
     /// This function will panic if another thread is waiting on the `Condvar`
     /// with a different `Mutex` object.
     #[inline]
-    pub fn wait<T: ?Sized>(&self, mutex_guard: &mut MutexGuard<T>) {
+    pub fn wait<T: ?Sized>(&self, mutex_guard: &mut MutexGuard<'_, T>) {
         self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, None);
     }
 
     /// Waits on this condition variable for a notification, timing out after
     /// the specified time instant.
     ///
     /// The semantics of this function are equivalent to `wait()` except that
     /// the thread will be blocked roughly until `timeout` is reached. This
@@ -241,32 +278,28 @@ impl Condvar {
     ///
     /// # Panics
     ///
     /// This function will panic if another thread is waiting on the `Condvar`
     /// with a different `Mutex` object.
     #[inline]
     pub fn wait_until<T: ?Sized>(
         &self,
-        mutex_guard: &mut MutexGuard<T>,
+        mutex_guard: &mut MutexGuard<'_, T>,
         timeout: Instant,
     ) -> WaitTimeoutResult {
         self.wait_until_internal(
             unsafe { MutexGuard::mutex(mutex_guard).raw() },
             Some(timeout),
         )
     }
 
     // This is a non-generic function to reduce the monomorphization cost of
     // using `wait_until`.
-    fn wait_until_internal(
-        &self,
-        mutex: &RawMutex,
-        timeout: Option<Instant>,
-    ) -> WaitTimeoutResult {
+    fn wait_until_internal(&self, mutex: &RawMutex, timeout: Option<Instant>) -> WaitTimeoutResult {
         unsafe {
             let result;
             let mut bad_mutex = false;
             let mut requeued = false;
             {
                 let addr = self as *const _ as usize;
                 let lock_addr = mutex as *const _ as *mut _;
                 let validate = || {
@@ -340,46 +373,53 @@ impl Condvar {
     /// measured with a monotonic clock, and not affected by the changes made to
     /// the system time.
     ///
     /// The returned `WaitTimeoutResult` value indicates if the timeout is
     /// known to have elapsed.
     ///
     /// Like `wait`, the lock specified will be re-acquired when this function
     /// returns, regardless of whether the timeout elapsed or not.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the given `timeout` is so large that it can't be added to the current time.
+    /// This panic is not possible if the crate is built with the `nightly` feature; in that
+    /// case, a too-large `timeout` becomes equivalent to just calling `wait`.
     #[inline]
     pub fn wait_for<T: ?Sized>(
         &self,
-        guard: &mut MutexGuard<T>,
+        mutex_guard: &mut MutexGuard<'_, T>,
         timeout: Duration,
     ) -> WaitTimeoutResult {
-        self.wait_until(guard, Instant::now() + timeout)
+        let deadline = util::to_deadline(timeout);
+        self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, deadline)
     }
 }
 
 impl Default for Condvar {
     #[inline]
     fn default() -> Condvar {
         Condvar::new()
     }
 }
 
 impl fmt::Debug for Condvar {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.pad("Condvar { .. }")
     }
 }
 
 #[cfg(test)]
 mod tests {
+    use crate::{Condvar, Mutex, MutexGuard};
     use std::sync::mpsc::channel;
     use std::sync::Arc;
     use std::thread;
     use std::time::{Duration, Instant};
-    use {Condvar, Mutex};
 
     #[test]
     fn smoke() {
         let c = Condvar::new();
         c.notify_one();
         c.notify_all();
     }
 
@@ -430,31 +470,104 @@ mod tests {
         drop(cnt);
 
         for _ in 0..N {
             rx.recv().unwrap();
         }
     }
 
     #[test]
+    fn notify_one_return_true() {
+        let m = Arc::new(Mutex::new(()));
+        let m2 = m.clone();
+        let c = Arc::new(Condvar::new());
+        let c2 = c.clone();
+
+        let mut g = m.lock();
+        let _t = thread::spawn(move || {
+            let _g = m2.lock();
+            assert!(c2.notify_one());
+        });
+        c.wait(&mut g);
+    }
+
+    #[test]
+    fn notify_one_return_false() {
+        let m = Arc::new(Mutex::new(()));
+        let c = Arc::new(Condvar::new());
+
+        let _t = thread::spawn(move || {
+            let _g = m.lock();
+            assert!(!c.notify_one());
+        });
+    }
+
+    #[test]
+    fn notify_all_return() {
+        const N: usize = 10;
+
+        let data = Arc::new((Mutex::new(0), Condvar::new()));
+        let (tx, rx) = channel();
+        for _ in 0..N {
+            let data = data.clone();
+            let tx = tx.clone();
+            thread::spawn(move || {
+                let &(ref lock, ref cond) = &*data;
+                let mut cnt = lock.lock();
+                *cnt += 1;
+                if *cnt == N {
+                    tx.send(()).unwrap();
+                }
+                while *cnt != 0 {
+                    cond.wait(&mut cnt);
+                }
+                tx.send(()).unwrap();
+            });
+        }
+        drop(tx);
+
+        let &(ref lock, ref cond) = &*data;
+        rx.recv().unwrap();
+        let mut cnt = lock.lock();
+        *cnt = 0;
+        assert_eq!(cond.notify_all(), N);
+        drop(cnt);
+
+        for _ in 0..N {
+            rx.recv().unwrap();
+        }
+
+        assert_eq!(cond.notify_all(), 0);
+    }
+
+    #[test]
     fn wait_for() {
         let m = Arc::new(Mutex::new(()));
         let m2 = m.clone();
         let c = Arc::new(Condvar::new());
         let c2 = c.clone();
 
         let mut g = m.lock();
         let no_timeout = c.wait_for(&mut g, Duration::from_millis(1));
         assert!(no_timeout.timed_out());
+
         let _t = thread::spawn(move || {
             let _g = m2.lock();
             c2.notify_one();
         });
-        let timeout_res = c.wait_for(&mut g, Duration::from_millis(u32::max_value() as u64));
+        // Non-nightly panics on too large timeouts. Nightly treats it as indefinite wait.
+        let very_long_timeout = if cfg!(feature = "nightly") {
+            Duration::from_secs(u64::max_value())
+        } else {
+            Duration::from_millis(u32::max_value() as u64)
+        };
+
+        let timeout_res = c.wait_for(&mut g, very_long_timeout);
         assert!(!timeout_res.timed_out());
+
         drop(g);
     }
 
     #[test]
     fn wait_until() {
         let m = Arc::new(Mutex::new(()));
         let m2 = m.clone();
         let c = Arc::new(Condvar::new());
@@ -525,9 +638,55 @@ mod tests {
         let _ = c.wait_for(&mut m3.lock(), Duration::from_millis(1));
     }
 
     #[test]
     fn test_debug_condvar() {
         let c = Condvar::new();
         assert_eq!(format!("{:?}", c), "Condvar { .. }");
     }
+
+    #[test]
+    fn test_condvar_requeue() {
+        let m = Arc::new(Mutex::new(()));
+        let m2 = m.clone();
+        let c = Arc::new(Condvar::new());
+        let c2 = c.clone();
+        let t = thread::spawn(move || {
+            let mut g = m2.lock();
+            c2.wait(&mut g);
+        });
+
+        let mut g = m.lock();
+        while !c.notify_one() {
+            // Wait for the thread to get into wait()
+            MutexGuard::bump(&mut g);
+        }
+        // The thread should have been requeued to the mutex, which we wake up now.
+        drop(g);
+        t.join().unwrap();
+    }
+
+    #[test]
+    fn test_issue_129() {
+        let locks = Arc::new((Mutex::new(()), Condvar::new()));
+
+        let (tx, rx) = channel();
+        for _ in 0..4 {
+            let locks = locks.clone();
+            let tx = tx.clone();
+            thread::spawn(move || {
+                let mut guard = locks.0.lock();
+                locks.1.wait(&mut guard);
+                locks.1.wait_for(&mut guard, Duration::from_millis(1));
+                locks.1.notify_one();
+                tx.send(()).unwrap();
+            });
+        }
+
+        thread::sleep(Duration::from_millis(100));
+        locks.1.notify_one();
+
+        for _ in 0..4 {
+            assert_eq!(rx.recv_timeout(Duration::from_millis(500)), Ok(()));
+        }
+    }
 }
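For context on the notify_* changes in this file, a minimal wait-loop sketch as it would appear in a crate depending on parking_lot 0.8 (illustrative only; not part of this changeset):

```rust
// Illustrative sketch: the classic Mutex + Condvar wait loop, using the
// boolean return value that notify_one() now provides.
use parking_lot::{Condvar, Mutex};
use std::sync::Arc;
use std::thread;

fn main() {
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = Arc::clone(&pair);

    let handle = thread::spawn(move || {
        let (lock, cvar) = &*pair2;
        *lock.lock() = true;
        // Reports whether a waiting thread was actually woken (or requeued).
        let _woke_someone: bool = cvar.notify_one();
    });

    let (lock, cvar) = &*pair;
    let mut ready = lock.lock();
    while !*ready {
        cvar.wait(&mut ready);
    }
    drop(ready);
    handle.join().unwrap();
}
```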
--- a/third_party/rust/parking_lot/src/deadlock.rs
+++ b/third_party/rust/parking_lot/src/deadlock.rs
@@ -35,28 +35,35 @@
 
 #[cfg(feature = "deadlock_detection")]
 pub use parking_lot_core::deadlock::check_deadlock;
 pub(crate) use parking_lot_core::deadlock::{acquire_resource, release_resource};
 
 #[cfg(test)]
 #[cfg(feature = "deadlock_detection")]
 mod tests {
+    use crate::{Mutex, ReentrantMutex, RwLock};
     use std::sync::{Arc, Barrier};
     use std::thread::{self, sleep};
     use std::time::Duration;
-    use {Mutex, ReentrantMutex, RwLock};
+
+    // We need to serialize these tests since deadlock detection uses global state
+    lazy_static::lazy_static! {
+        static ref DEADLOCK_DETECTION_LOCK: Mutex<()> = Mutex::new(());
+    }
 
     fn check_deadlock() -> bool {
         use parking_lot_core::deadlock::check_deadlock;
         !check_deadlock().is_empty()
     }
 
     #[test]
     fn test_mutex_deadlock() {
+        let _guard = DEADLOCK_DETECTION_LOCK.lock();
+
         let m1: Arc<Mutex<()>> = Default::default();
         let m2: Arc<Mutex<()>> = Default::default();
         let m3: Arc<Mutex<()>> = Default::default();
         let b = Arc::new(Barrier::new(4));
 
         let m1_ = m1.clone();
         let m2_ = m2.clone();
         let m3_ = m3.clone();
@@ -90,16 +97,18 @@ mod tests {
         sleep(Duration::from_millis(50));
         assert!(check_deadlock());
 
         assert!(!check_deadlock());
     }
 
     #[test]
     fn test_mutex_deadlock_reentrant() {
+        let _guard = DEADLOCK_DETECTION_LOCK.lock();
+
         let m1: Arc<Mutex<()>> = Default::default();
 
         assert!(!check_deadlock());
 
         let _t1 = thread::spawn(move || {
             let _g = m1.lock();
             let _ = m1.lock();
         });
@@ -107,16 +116,18 @@ mod tests {
         sleep(Duration::from_millis(50));
         assert!(check_deadlock());
 
         assert!(!check_deadlock());
     }
 
     #[test]
     fn test_remutex_deadlock() {
+        let _guard = DEADLOCK_DETECTION_LOCK.lock();
+
         let m1: Arc<ReentrantMutex<()>> = Default::default();
         let m2: Arc<ReentrantMutex<()>> = Default::default();
         let m3: Arc<ReentrantMutex<()>> = Default::default();
         let b = Arc::new(Barrier::new(4));
 
         let m1_ = m1.clone();
         let m2_ = m2.clone();
         let m3_ = m3.clone();
@@ -153,16 +164,18 @@ mod tests {
         sleep(Duration::from_millis(50));
         assert!(check_deadlock());
 
         assert!(!check_deadlock());
     }
 
     #[test]
     fn test_rwlock_deadlock() {
+        let _guard = DEADLOCK_DETECTION_LOCK.lock();
+
         let m1: Arc<RwLock<()>> = Default::default();
         let m2: Arc<RwLock<()>> = Default::default();
         let m3: Arc<RwLock<()>> = Default::default();
         let b = Arc::new(Barrier::new(4));
 
         let m1_ = m1.clone();
         let m2_ = m2.clone();
         let m3_ = m3.clone();
@@ -194,18 +207,21 @@ mod tests {
 
         b.wait();
         sleep(Duration::from_millis(50));
         assert!(check_deadlock());
 
         assert!(!check_deadlock());
     }
 
+    #[cfg(rwlock_deadlock_detection_not_supported)]
     #[test]
     fn test_rwlock_deadlock_reentrant() {
+        let _guard = DEADLOCK_DETECTION_LOCK.lock();
+
         let m1: Arc<RwLock<()>> = Default::default();
 
         assert!(!check_deadlock());
 
         let _t1 = thread::spawn(move || {
             let _g = m1.read();
             let _ = m1.write();
         });
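The tests above drive the experimental detector directly; in an application it is typically polled from a watchdog thread. A hedged sketch of that pattern (requires building parking_lot with the `deadlock_detection` feature; not part of this changeset):

```rust
// Illustrative sketch: poll the deadlock detector periodically from a
// background thread (only compiles with the `deadlock_detection` feature).
use std::thread;
use std::time::Duration;

fn spawn_deadlock_watchdog() {
    thread::spawn(|| loop {
        thread::sleep(Duration::from_secs(10));
        let deadlocks = parking_lot::deadlock::check_deadlock();
        if !deadlocks.is_empty() {
            eprintln!("{} deadlock cycle(s) detected", deadlocks.len());
        }
    });
}

fn main() {
    spawn_deadlock_watchdog();
    // ... rest of the application ...
}
```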
--- a/third_party/rust/parking_lot/src/elision.rs
+++ b/third_party/rust/parking_lot/src/elision.rs
@@ -7,27 +7,24 @@
 
 use std::sync::atomic::AtomicUsize;
 
 // Extension trait to add lock elision primitives to atomic types
 pub trait AtomicElisionExt {
     type IntType;
 
     // Perform a compare_exchange and start a transaction
-    fn elision_acquire(
+    fn elision_compare_exchange_acquire(
         &self,
         current: Self::IntType,
         new: Self::IntType,
     ) -> Result<Self::IntType, Self::IntType>;
-    // Perform a compare_exchange and end a transaction
-    fn elision_release(
-        &self,
-        current: Self::IntType,
-        new: Self::IntType,
-    ) -> Result<Self::IntType, Self::IntType>;
+
+    // Perform a fetch_sub and end a transaction
+    fn elision_fetch_sub_release(&self, val: Self::IntType) -> Self::IntType;
 }
 
 // Indicates whether the target architecture supports lock elision
 #[inline]
 pub fn have_elision() -> bool {
     cfg!(all(
         feature = "nightly",
         any(target_arch = "x86", target_arch = "x86_64"),
@@ -36,134 +33,84 @@ pub fn have_elision() -> bool {
 
 // This implementation is never actually called because it is guarded by
 // have_elision().
 #[cfg(not(all(feature = "nightly", any(target_arch = "x86", target_arch = "x86_64"))))]
 impl AtomicElisionExt for AtomicUsize {
     type IntType = usize;
 
     #[inline]
-    fn elision_acquire(&self, _: usize, _: usize) -> Result<usize, usize> {
+    fn elision_compare_exchange_acquire(&self, _: usize, _: usize) -> Result<usize, usize> {
         unreachable!();
     }
 
     #[inline]
-    fn elision_release(&self, _: usize, _: usize) -> Result<usize, usize> {
+    fn elision_fetch_sub_release(&self, _: usize) -> usize {
         unreachable!();
     }
 }
 
-#[cfg(all(feature = "nightly", target_arch = "x86"))]
+#[cfg(all(feature = "nightly", any(target_arch = "x86", target_arch = "x86_64")))]
 impl AtomicElisionExt for AtomicUsize {
     type IntType = usize;
 
+    #[cfg(target_pointer_width = "32")]
     #[inline]
-    fn elision_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
+    fn elision_compare_exchange_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
         unsafe {
             let prev: usize;
             asm!("xacquire; lock; cmpxchgl $2, $1"
                  : "={eax}" (prev), "+*m" (self)
                  : "r" (new), "{eax}" (current)
                  : "memory"
                  : "volatile");
             if prev == current {
                 Ok(prev)
             } else {
                 Err(prev)
             }
         }
     }
-
-    #[inline]
-    fn elision_release(&self, current: usize, new: usize) -> Result<usize, usize> {
-        unsafe {
-            let prev: usize;
-            asm!("xrelease; lock; cmpxchgl $2, $1"
-                 : "={eax}" (prev), "+*m" (self)
-                 : "r" (new), "{eax}" (current)
-                 : "memory"
-                 : "volatile");
-            if prev == current {
-                Ok(prev)
-            } else {
-                Err(prev)
-            }
-        }
-    }
-}
-
-#[cfg(all(feature = "nightly", target_arch = "x86_64", target_pointer_width = "32"))]
-impl AtomicElisionExt for AtomicUsize {
-    type IntType = usize;
-
+    #[cfg(target_pointer_width = "64")]
     #[inline]
-    fn elision_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
-        unsafe {
-            let prev: usize;
-            asm!("xacquire; lock; cmpxchgl $2, $1"
-                 : "={rax}" (prev), "+*m" (self)
-                 : "r" (new), "{rax}" (current)
-                 : "memory"
-                 : "volatile");
-            if prev == current {
-                Ok(prev)
-            } else {
-                Err(prev)
-            }
-        }
-    }
-
-    #[inline]
-    fn elision_release(&self, current: usize, new: usize) -> Result<usize, usize> {
-        unsafe {
-            let prev: usize;
-            asm!("xrelease; lock; cmpxchgl $2, $1"
-                 : "={rax}" (prev), "+*m" (self)
-                 : "r" (new), "{rax}" (current)
-                 : "memory"
-                 : "volatile");
-            if prev == current {
-                Ok(prev)
-            } else {
-                Err(prev)
-            }
-        }
-    }
-}
-
-#[cfg(all(feature = "nightly", target_arch = "x86_64", target_pointer_width = "64"))]
-impl AtomicElisionExt for AtomicUsize {
-    type IntType = usize;
-
-    #[inline]
-    fn elision_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
+    fn elision_compare_exchange_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
         unsafe {
             let prev: usize;
             asm!("xacquire; lock; cmpxchgq $2, $1"
                  : "={rax}" (prev), "+*m" (self)
                  : "r" (new), "{rax}" (current)
                  : "memory"
                  : "volatile");
             if prev == current {
                 Ok(prev)
             } else {
                 Err(prev)
             }
         }
     }
 
+    #[cfg(target_pointer_width = "32")]
     #[inline]
-    fn elision_release(&self, current: usize, new: usize) -> Result<usize, usize> {
+    fn elision_fetch_sub_release(&self, val: usize) -> usize {
         unsafe {
             let prev: usize;
-            asm!("xrelease; lock; cmpxchgq $2, $1"
-                 : "={rax}" (prev), "+*m" (self)
-                 : "r" (new), "{rax}" (current)
+            asm!("xrelease; lock; xaddl $2, $1"
+                 : "=r" (prev), "+*m" (self)
+                 : "0" (val.wrapping_neg())
                  : "memory"
                  : "volatile");
-            if prev == current {
-                Ok(prev)
-            } else {
-                Err(prev)
-            }
+            prev
+        }
+    }
+    #[cfg(target_pointer_width = "64")]
+    #[inline]
+    fn elision_fetch_sub_release(&self, val: usize) -> usize {
+        unsafe {
+            let prev: usize;
+            asm!("xrelease; lock; xaddq $2, $1"
+                 : "=r" (prev), "+*m" (self)
+                 : "0" (val.wrapping_neg())
+                 : "memory"
+                 : "volatile");
+            prev
         }
     }
 }
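
The upshot of the elision change is that releasing an elided shared lock no longer needs a retryable compare-exchange: an xadd, like fetch_sub, cannot fail, so a reader can be dropped with one unconditional atomic operation that returns the previous state. A rough, elision-free sketch of the shape this gives the shared-unlock path (ONE_READER copied from raw_rwlock.rs below; illustration only, not the crate's code):

use std::sync::atomic::{AtomicUsize, Ordering};

// Reader-count unit from raw_rwlock.rs; the low four bits of the state word
// are flag bits, so each reader adds 0b10000.
const ONE_READER: usize = 0b10000;

// With elision_fetch_sub_release, both the elided path and the plain path
// collapse to a single subtract that yields the previous state, instead of
// the old elision_release CAS that could fail and had to be retried.
fn unlock_one_reader(state: &AtomicUsize) -> usize {
    state.fetch_sub(ONE_READER, Ordering::Release)
}
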
--- a/third_party/rust/parking_lot/src/lib.rs
+++ b/third_party/rust/parking_lot/src/lib.rs
@@ -5,40 +5,38 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
 //! This library provides implementations of `Mutex`, `RwLock`, `Condvar` and
 //! `Once` that are smaller, faster and more flexible than those in the Rust
 //! standard library. It also provides a `ReentrantMutex` type.
 
 #![warn(missing_docs)]
-#![cfg_attr(feature = "nightly", feature(const_fn))]
-#![cfg_attr(feature = "nightly", feature(integer_atomics))]
+#![warn(rust_2018_idioms)]
 #![cfg_attr(feature = "nightly", feature(asm))]
 
-extern crate lock_api;
-extern crate parking_lot_core;
-
 mod condvar;
 mod elision;
 mod mutex;
 mod once;
 mod raw_mutex;
 mod raw_rwlock;
 mod remutex;
 mod rwlock;
 mod util;
 
 #[cfg(feature = "deadlock_detection")]
 pub mod deadlock;
 #[cfg(not(feature = "deadlock_detection"))]
 mod deadlock;
 
-pub use condvar::{Condvar, WaitTimeoutResult};
-pub use mutex::{MappedMutexGuard, Mutex, MutexGuard};
-pub use once::{Once, OnceState, ONCE_INIT};
-pub use raw_mutex::RawMutex;
-pub use raw_rwlock::RawRwLock;
-pub use remutex::{MappedReentrantMutexGuard, RawThreadId, ReentrantMutex, ReentrantMutexGuard};
-pub use rwlock::{
+pub use self::condvar::{Condvar, WaitTimeoutResult};
+pub use self::mutex::{MappedMutexGuard, Mutex, MutexGuard};
+pub use self::once::{Once, OnceState};
+pub use self::raw_mutex::RawMutex;
+pub use self::raw_rwlock::RawRwLock;
+pub use self::remutex::{
+    MappedReentrantMutexGuard, RawThreadId, ReentrantMutex, ReentrantMutexGuard,
+};
+pub use self::rwlock::{
     MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard,
     RwLockUpgradableReadGuard, RwLockWriteGuard,
 };
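
Besides the 2018-edition path changes, the public surface loses ONCE_INIT: as the once.rs hunks below show, Once::new() is now a const fn on all configurations, so statics are initialized directly. A minimal caller-side sketch of the migration (hypothetical downstream code):

use parking_lot::Once;

// Previously: `static START: Once = ONCE_INIT;`
// Now the const constructor replaces the initializer constant.
static START: Once = Once::new();

fn ensure_initialized() {
    START.call_once(|| {
        // one-time setup goes here
    });
}
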
--- a/third_party/rust/parking_lot/src/mutex.rs
+++ b/third_party/rust/parking_lot/src/mutex.rs
@@ -1,17 +1,17 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
+use crate::raw_mutex::RawMutex;
 use lock_api;
-use raw_mutex::RawMutex;
 
 /// A mutual exclusion primitive useful for protecting shared data
 ///
 /// This mutex will block threads waiting for the lock to become available. The
 /// mutex can also be statically initialized or created via a `new`
 /// constructor. Each mutex has a type parameter which represents the data that
 /// it is protecting. The data can only be accessed through the RAII guards
 /// returned from `lock` and `try_lock`, which guarantees that the data is only
@@ -64,17 +64,17 @@ use raw_mutex::RawMutex;
 /// // let the main thread know once all increments are done.
 /// //
 /// // Here we're using an Arc to share memory among threads, and the data inside
 /// // the Arc is protected with a mutex.
 /// let data = Arc::new(Mutex::new(0));
 ///
 /// let (tx, rx) = channel();
 /// for _ in 0..10 {
-///     let (data, tx) = (data.clone(), tx.clone());
+///     let (data, tx) = (Arc::clone(&data), tx.clone());
 ///     thread::spawn(move || {
 ///         // The shared state can only be accessed once the lock is held.
 ///         // Our non-atomic increment is safe because we're the only thread
 ///         // which can access the shared state when the lock is held.
 ///         let mut data = data.lock();
 ///         *data += 1;
 ///         if *data == N {
 ///             tx.send(()).unwrap();
@@ -100,21 +100,24 @@ pub type MutexGuard<'a, T> = lock_api::M
 /// The main difference between `MappedMutexGuard` and `MutexGuard` is that the
 /// former doesn't support temporarily unlocking and re-locking, since that
 /// could introduce soundness issues if the locked object is modified by another
 /// thread.
 pub type MappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawMutex, T>;
 
 #[cfg(test)]
 mod tests {
+    use crate::{Condvar, Mutex};
     use std::sync::atomic::{AtomicUsize, Ordering};
     use std::sync::mpsc::channel;
     use std::sync::Arc;
     use std::thread;
-    use {Condvar, Mutex};
+
+    #[cfg(feature = "serde")]
+    use bincode::{deserialize, serialize};
 
     struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
 
     #[derive(Eq, PartialEq, Debug)]
     struct NonCopy(i32);
 
     unsafe impl<T: Send> Send for Packet<T> {}
     unsafe impl<T> Sync for Packet<T> {}
@@ -248,17 +251,18 @@ mod tests {
             }
             impl Drop for Unwinder {
                 fn drop(&mut self) {
                     *self.i.lock() += 1;
                 }
             }
             let _u = Unwinder { i: arc2 };
             panic!();
-        }).join();
+        })
+        .join();
         let lock = arc.lock();
         assert_eq!(*lock, 2);
     }
 
     #[test]
     fn test_mutex_unsized() {
         let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
         {
@@ -278,21 +282,25 @@ mod tests {
         sync(mutex.lock());
     }
 
     #[test]
     fn test_mutex_debug() {
         let mutex = Mutex::new(vec![0u8, 10]);
 
         assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }");
-        assert_eq!(
-            format!("{:#?}", mutex),
-            "Mutex {
-    data: [
-        0,
-        10
-    ]
-}"
-        );
         let _lock = mutex.lock();
-        assert_eq!(format!("{:?}", mutex), "Mutex { <locked> }");
+        assert_eq!(format!("{:?}", mutex), "Mutex { data: <locked> }");
+    }
+
+    #[cfg(feature = "serde")]
+    #[test]
+    fn test_serde() {
+        let contents: Vec<u8> = vec![0, 1, 2];
+        let mutex = Mutex::new(contents.clone());
+
+        let serialized = serialize(&mutex).unwrap();
+        let deserialized: Mutex<Vec<u8>> = deserialize(&serialized).unwrap();
+
+        assert_eq!(*(mutex.lock()), *(deserialized.lock()));
+        assert_eq!(contents, *(deserialized.lock()));
     }
 }
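
The MappedMutexGuard alias documented above is obtained by projecting an existing guard onto part of the protected value; a short sketch, assuming the lock_api MutexGuard::map constructor that these aliases wrap:

use parking_lot::{MappedMutexGuard, Mutex, MutexGuard};

struct Config {
    name: String,
    retries: u32,
}

// Hand out a guard that exposes only one field. The mutex stays locked until
// the mapped guard is dropped, and (as the doc comment above notes) a mapped
// guard cannot be temporarily unlocked and re-locked.
fn name_mut(config: &Mutex<Config>) -> MappedMutexGuard<'_, String> {
    MutexGuard::map(config.lock(), |c| &mut c.name)
}
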
--- a/third_party/rust/parking_lot/src/once.rs
+++ b/third_party/rust/parking_lot/src/once.rs
@@ -1,30 +1,30 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use std::sync::atomic::{fence, Ordering};
-#[cfg(feature = "nightly")]
-use std::sync::atomic::{ATOMIC_U8_INIT, AtomicU8};
-#[cfg(feature = "nightly")]
+use crate::util::UncheckedOptionExt;
+#[cfg(has_sized_atomics)]
+use core::sync::atomic::AtomicU8;
+#[cfg(not(has_sized_atomics))]
+use core::sync::atomic::AtomicUsize as AtomicU8;
+use core::{
+    fmt, mem,
+    sync::atomic::{fence, Ordering},
+};
+use parking_lot_core::{self, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
+
+#[cfg(has_sized_atomics)]
 type U8 = u8;
-#[cfg(not(feature = "nightly"))]
-use std::sync::atomic::AtomicUsize as AtomicU8;
-#[cfg(not(feature = "nightly"))]
-use std::sync::atomic::ATOMIC_USIZE_INIT as ATOMIC_U8_INIT;
-#[cfg(not(feature = "nightly"))]
+#[cfg(not(has_sized_atomics))]
 type U8 = usize;
-use parking_lot_core::{self, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
-use std::fmt;
-use std::mem;
-use util::UncheckedOptionExt;
 
 const DONE_BIT: U8 = 1;
 const POISON_BIT: U8 = 2;
 const LOCKED_BIT: U8 = 4;
 const PARKED_BIT: U8 = 8;
 
 /// Current state of a `Once`.
 #[derive(Copy, Clone, Eq, PartialEq, Debug)]
@@ -33,34 +33,34 @@ pub enum OnceState {
     New,
 
     /// A closure was executed but panicked.
     Poisoned,
 
     /// A thread is currently executing a closure.
     InProgress,
 
-    /// A closure has completed sucessfully.
+    /// A closure has completed successfully.
     Done,
 }
 
 impl OnceState {
     /// Returns whether the associated `Once` has been poisoned.
     ///
-    /// Once an initalization routine for a `Once` has panicked it will forever
+    /// Once an initialization routine for a `Once` has panicked it will forever
     /// indicate to future forced initialization routines that it is poisoned.
     #[inline]
     pub fn poisoned(&self) -> bool {
         match *self {
             OnceState::Poisoned => true,
             _ => false,
         }
     }
 
-    /// Returns whether the associated `Once` has successfullly executed a
+    /// Returns whether the associated `Once` has successfully executed a
     /// closure.
     #[inline]
     pub fn done(&self) -> bool {
         match *self {
             OnceState::Done => true,
             _ => false,
         }
     }
@@ -76,42 +76,31 @@ impl OnceState {
 /// - Not required to be `'static`.
 /// - Relaxed memory barriers in the fast path, which can significantly improve
 ///   performance on some architectures.
 /// - Efficient handling of micro-contention using adaptive spinning.
 ///
 /// # Examples
 ///
 /// ```
-/// use parking_lot::{Once, ONCE_INIT};
+/// use parking_lot::Once;
 ///
-/// static START: Once = ONCE_INIT;
+/// static START: Once = Once::new();
 ///
 /// START.call_once(|| {
 ///     // run initialization here
 /// });
 /// ```
 pub struct Once(AtomicU8);
 
-/// Initialization value for static `Once` values.
-pub const ONCE_INIT: Once = Once(ATOMIC_U8_INIT);
-
 impl Once {
     /// Creates a new `Once` value.
-    #[cfg(feature = "nightly")]
     #[inline]
     pub const fn new() -> Once {
-        Once(ATOMIC_U8_INIT)
-    }
-
-    /// Creates a new `Once` value.
-    #[cfg(not(feature = "nightly"))]
-    #[inline]
-    pub fn new() -> Once {
-        Once(ATOMIC_U8_INIT)
+        Once(AtomicU8::new(0))
     }
 
     /// Returns the current state of this `Once`.
     #[inline]
     pub fn state(&self) -> OnceState {
         let state = self.0.load(Ordering::Acquire);
         if state & DONE_BIT != 0 {
             OnceState::Done
@@ -136,20 +125,20 @@ impl Once {
     /// guaranteed that any memory writes performed by the executed closure can
     /// be reliably observed by other threads at this point (there is a
     /// happens-before relation between the closure and code executing after the
     /// return).
     ///
     /// # Examples
     ///
     /// ```
-    /// use parking_lot::{Once, ONCE_INIT};
+    /// use parking_lot::Once;
     ///
     /// static mut VAL: usize = 0;
-    /// static INIT: Once = ONCE_INIT;
+    /// static INIT: Once = Once::new();
     ///
     /// // Accessing a `static mut` is unsafe much of the time, but if we do so
     /// // in a synchronized fashion (e.g. write once or read all) then we're
     /// // good to go!
     /// //
     /// // This function will only call `expensive_computation` once, and will
     /// // otherwise always return the value returned from the first invocation.
     /// fn get_cached_val() -> usize {
@@ -218,17 +207,17 @@ impl Once {
     // fast path. Essentially, this should help generate more straight line code
     // in LLVM.
     //
     // Finally, this takes an `FnMut` instead of a `FnOnce` because there's
     // currently no way to take an `FnOnce` and call it via virtual dispatch
     // without some allocation overhead.
     #[cold]
     #[inline(never)]
-    fn call_once_slow(&self, ignore_poison: bool, f: &mut FnMut(OnceState)) {
+    fn call_once_slow(&self, ignore_poison: bool, f: &mut dyn FnMut(OnceState)) {
         let mut spinwait = SpinWait::new();
         let mut state = self.0.load(Ordering::Relaxed);
         loop {
             // If another thread called the closure, we're done
             if state & DONE_BIT != 0 {
                 // An acquire fence is needed here since we didn't load the
                 // state with Ordering::Acquire.
                 fence(Ordering::Acquire);
@@ -339,44 +328,43 @@ impl Once {
 impl Default for Once {
     #[inline]
     fn default() -> Once {
         Once::new()
     }
 }
 
 impl fmt::Debug for Once {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("Once")
             .field("state", &self.state())
             .finish()
     }
 }
 
 #[cfg(test)]
 mod tests {
-    #[cfg(feature = "nightly")]
+    use crate::Once;
     use std::panic;
     use std::sync::mpsc::channel;
     use std::thread;
-    use {Once, ONCE_INIT};
 
     #[test]
     fn smoke_once() {
-        static O: Once = ONCE_INIT;
+        static O: Once = Once::new();
         let mut a = 0;
         O.call_once(|| a += 1);
         assert_eq!(a, 1);
         O.call_once(|| a += 1);
         assert_eq!(a, 1);
     }
 
     #[test]
     fn stampede_once() {
-        static O: Once = ONCE_INIT;
+        static O: Once = Once::new();
         static mut RUN: bool = false;
 
         let (tx, rx) = channel();
         for _ in 0..10 {
             let tx = tx.clone();
             thread::spawn(move || {
                 for _ in 0..4 {
                     thread::yield_now()
@@ -400,20 +388,19 @@ mod tests {
             assert!(RUN);
         }
 
         for _ in 0..10 {
             rx.recv().unwrap();
         }
     }
 
-    #[cfg(feature = "nightly")]
     #[test]
     fn poison_bad() {
-        static O: Once = ONCE_INIT;
+        static O: Once = Once::new();
 
         // poison the once
         let t = panic::catch_unwind(|| {
             O.call_once(|| panic!());
         });
         assert!(t.is_err());
 
         // poisoning propagates
@@ -429,20 +416,19 @@ mod tests {
             assert!(p.poisoned())
         });
         assert!(called);
 
         // once any success happens, we stop propagating the poison
         O.call_once(|| {});
     }
 
-    #[cfg(feature = "nightly")]
     #[test]
     fn wait_for_force_to_finish() {
-        static O: Once = ONCE_INIT;
+        static O: Once = Once::new();
 
         // poison the once
         let t = panic::catch_unwind(|| {
             O.call_once(|| panic!());
         });
         assert!(t.is_err());
 
         // make sure someone's waiting inside the once via a force
@@ -470,19 +456,13 @@ mod tests {
         tx2.send(()).unwrap();
 
         assert!(t1.join().is_ok());
         assert!(t2.join().is_ok());
     }
 
     #[test]
     fn test_once_debug() {
-        static O: Once = ONCE_INIT;
+        static O: Once = Once::new();
 
         assert_eq!(format!("{:?}", O), "Once { state: New }");
-        assert_eq!(
-            format!("{:#?}", O),
-            "Once {
-    state: New
-}"
-        );
     }
 }
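
The comment on call_once_slow explains why the slow path takes &mut dyn FnMut(OnceState) rather than an FnOnce: an FnOnce cannot be invoked through a trait object without boxing. The usual workaround, which the generic call_once wrapper presumably applies (the UncheckedOptionExt import above points the same way), is to park the FnOnce in an Option and take it out exactly once. A stand-alone illustration of the same trick (names here are not the crate's internals):

// Drive an FnOnce through a `&mut dyn FnMut` without allocating: the closure
// is moved into an Option and taken out on the single call. The crate
// apparently uses its UncheckedOptionExt helper to skip the unwrap check.
fn call_through_dyn<F: FnOnce()>(f: F) {
    let mut slot = Some(f);
    let mut shim = || slot.take().expect("closure called twice")();
    dispatch(&mut shim);
}

fn dispatch(f: &mut dyn FnMut()) {
    f();
}
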
--- a/third_party/rust/parking_lot/src/raw_mutex.rs
+++ b/third_party/rust/parking_lot/src/raw_mutex.rs
@@ -1,30 +1,29 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use std::sync::atomic::Ordering;
-#[cfg(feature = "nightly")]
-use std::sync::atomic::{ATOMIC_U8_INIT, AtomicU8};
-#[cfg(feature = "nightly")]
-type U8 = u8;
-#[cfg(not(feature = "nightly"))]
-use std::sync::atomic::AtomicUsize as AtomicU8;
-#[cfg(not(feature = "nightly"))]
-use std::sync::atomic::ATOMIC_USIZE_INIT as ATOMIC_U8_INIT;
-#[cfg(not(feature = "nightly"))]
-type U8 = usize;
-use deadlock;
+use crate::{deadlock, util};
+#[cfg(has_sized_atomics)]
+use core::sync::atomic::AtomicU8;
+#[cfg(not(has_sized_atomics))]
+use core::sync::atomic::AtomicUsize as AtomicU8;
+use core::{sync::atomic::Ordering, time::Duration};
 use lock_api::{GuardNoSend, RawMutex as RawMutexTrait, RawMutexFair, RawMutexTimed};
 use parking_lot_core::{self, ParkResult, SpinWait, UnparkResult, UnparkToken, DEFAULT_PARK_TOKEN};
-use std::time::{Duration, Instant};
+use std::time::Instant;
+
+#[cfg(has_sized_atomics)]
+type U8 = u8;
+#[cfg(not(has_sized_atomics))]
+type U8 = usize;
 
 // UnparkToken used to indicate that the target thread should attempt to

 // lock the mutex again as soon as it is unparked.
 pub(crate) const TOKEN_NORMAL: UnparkToken = UnparkToken(0);
 
 // UnparkToken used to indicate that the mutex is being handed off to the target
 // thread directly without unlocking it.
 pub(crate) const TOKEN_HANDOFF: UnparkToken = UnparkToken(1);
@@ -34,17 +33,17 @@ const PARKED_BIT: U8 = 2;
 
 /// Raw mutex type backed by the parking lot.
 pub struct RawMutex {
     state: AtomicU8,
 }
 
 unsafe impl RawMutexTrait for RawMutex {
     const INIT: RawMutex = RawMutex {
-        state: ATOMIC_U8_INIT,
+        state: AtomicU8::new(0),
     };
 
     type GuardMarker = GuardNoSend;
 
     #[inline]
     fn lock(&self) {
         if self
             .state
@@ -78,32 +77,32 @@ unsafe impl RawMutexTrait for RawMutex {
         }
     }
 
     #[inline]
     fn unlock(&self) {
         unsafe { deadlock::release_resource(self as *const _ as usize) };
         if self
             .state
-            .compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
+            .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
             .is_ok()
         {
             return;
         }
         self.unlock_slow(false);
     }
 }
 
 unsafe impl RawMutexFair for RawMutex {
     #[inline]
     fn unlock_fair(&self) {
         unsafe { deadlock::release_resource(self as *const _ as usize) };
         if self
             .state
-            .compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
+            .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
             .is_ok()
         {
             return;
         }
         self.unlock_slow(true);
     }
 
     #[inline]
@@ -139,17 +138,17 @@ unsafe impl RawMutexTimed for RawMutex {
     fn try_lock_for(&self, timeout: Duration) -> bool {
         let result = if self
             .state
             .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
             .is_ok()
         {
             true
         } else {
-            self.lock_slow(Some(Instant::now() + timeout))
+            self.lock_slow(util::to_deadline(timeout))
         };
         if result {
             unsafe { deadlock::acquire_resource(self as *const _ as usize) };
         }
         result
     }
 }
 
@@ -259,25 +258,16 @@ impl RawMutex {
             spinwait.reset();
             state = self.state.load(Ordering::Relaxed);
         }
     }
 
     #[cold]
     #[inline(never)]
     fn unlock_slow(&self, force_fair: bool) {
-        // Unlock directly if there are no parked threads
-        if self
-            .state
-            .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
-            .is_ok()
-        {
-            return;
-        }
-
         // Unpark one thread and leave the parked bit set if there might
         // still be parked threads on this address.
         unsafe {
             let addr = self as *const _ as usize;
             let callback = |result: UnparkResult| {
                 // If we are using a fair unlock then we should keep the
                 // mutex locked and hand it off to the unparked thread.
                 if result.unparked_threads != 0 && (force_fair || result.be_fair) {
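
Two behavioural tweaks in this file are worth noting: the unlock fast path now uses a strong compare_exchange (so unlock_slow no longer repeats that check itself), and the timed paths build their deadline via util::to_deadline(timeout) instead of Instant::now() + timeout. A plausible shape for such a helper, which would avoid panicking when the addition overflows (the real helper lives in the crate's util module, outside this diff):

use std::time::{Duration, Instant};

// Hedged sketch: turn a relative timeout into an absolute deadline, treating
// overflow as "no deadline" (wait forever) rather than panicking.
fn to_deadline(timeout: Duration) -> Option<Instant> {
    Instant::now().checked_add(timeout)
}
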
--- a/third_party/rust/parking_lot/src/raw_rwlock.rs
+++ b/third_party/rust/parking_lot/src/raw_rwlock.rs
@@ -1,205 +1,178 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use deadlock;
-use elision::{have_elision, AtomicElisionExt};
+use crate::elision::{have_elision, AtomicElisionExt};
+use crate::raw_mutex::{TOKEN_HANDOFF, TOKEN_NORMAL};
+use crate::util;
+use core::{
+    cell::Cell,
+    sync::atomic::{AtomicUsize, Ordering},
+};
 use lock_api::{
     GuardNoSend, RawRwLock as RawRwLockTrait, RawRwLockDowngrade, RawRwLockFair,
     RawRwLockRecursive, RawRwLockRecursiveTimed, RawRwLockTimed, RawRwLockUpgrade,
     RawRwLockUpgradeDowngrade, RawRwLockUpgradeFair, RawRwLockUpgradeTimed,
 };
-use parking_lot_core::{self, FilterOp, ParkResult, ParkToken, SpinWait, UnparkResult};
-use raw_mutex::{TOKEN_HANDOFF, TOKEN_NORMAL};
-use std::cell::Cell;
-use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
+use parking_lot_core::{
+    self, deadlock, FilterOp, ParkResult, ParkToken, SpinWait, UnparkResult, UnparkToken,
+};
 use std::time::{Duration, Instant};
 
-const PARKED_BIT: usize = 0b001;
-const UPGRADING_BIT: usize = 0b010;
-// A shared guard acquires a single guard resource
-const SHARED_GUARD: usize = 0b100;
-const GUARD_COUNT_MASK: usize = !(SHARED_GUARD - 1);
-// An exclusive lock acquires all of guard resource (i.e. it is exclusive)
-const EXCLUSIVE_GUARD: usize = GUARD_COUNT_MASK;
-// An upgradable lock acquires just over half of the guard resource
-// This should be (GUARD_COUNT_MASK + SHARED_GUARD) >> 1, however this might
-// overflow, so we shift before adding (which is okay since the least
-// significant bit is zero for both GUARD_COUNT_MASK and SHARED_GUARD)
-const UPGRADABLE_GUARD: usize = (GUARD_COUNT_MASK >> 1) + (SHARED_GUARD >> 1);
+// This reader-writer lock implementation is based on Boost's upgrade_mutex:
+// https://github.com/boostorg/thread/blob/fc08c1fe2840baeeee143440fba31ef9e9a813c8/include/boost/thread/v2/shared_mutex.hpp#L432
+//
+// This implementation uses 2 wait queues, one at key [addr] and one at key
+// [addr + 1]. The primary queue is used for all new waiting threads, and the
+// secondary queue is used by the thread which has acquired WRITER_BIT but is
+// waiting for the remaining readers to exit the lock.
+//
+// This implementation is fair between readers and writers since it uses the
+// order in which threads first started queuing to alternate between read phases
+// and write phases. In particular it is not vulnerable to write starvation
+// since readers will block if there is a pending writer.
 
-// Token indicating what type of lock queued threads are trying to acquire
-const TOKEN_SHARED: ParkToken = ParkToken(SHARED_GUARD);
-const TOKEN_EXCLUSIVE: ParkToken = ParkToken(EXCLUSIVE_GUARD);
-const TOKEN_UPGRADABLE: ParkToken = ParkToken(UPGRADABLE_GUARD);
-const TOKEN_UPGRADING: ParkToken = ParkToken((EXCLUSIVE_GUARD - UPGRADABLE_GUARD) | UPGRADING_BIT);
+// There is at least one thread in the main queue.
+const PARKED_BIT: usize = 0b0001;
+// There is a parked thread holding WRITER_BIT. WRITER_BIT must be set.
+const WRITER_PARKED_BIT: usize = 0b0010;
+// A reader is holding an upgradable lock. The reader count must be non-zero and
+// WRITER_BIT must not be set.
+const UPGRADABLE_BIT: usize = 0b0100;
+// If the reader count is zero: a writer is currently holding an exclusive lock.
+// Otherwise: a writer is waiting for the remaining readers to exit the lock.
+const WRITER_BIT: usize = 0b1000;
+// Mask of bits used to count readers.
+const READERS_MASK: usize = !0b1111;
+// Base unit for counting readers.
+const ONE_READER: usize = 0b10000;
+
+// Token indicating what type of lock a queued thread is trying to acquire
+const TOKEN_SHARED: ParkToken = ParkToken(ONE_READER);
+const TOKEN_EXCLUSIVE: ParkToken = ParkToken(WRITER_BIT);
+const TOKEN_UPGRADABLE: ParkToken = ParkToken(ONE_READER | UPGRADABLE_BIT);
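
The constants above pack the whole lock into a single word: the low four bits are flags and everything above them is the reader count. A small decoding helper makes the layout concrete (constants restated from the diff just above so the sketch stands alone; the helper itself is only illustrative):

const PARKED_BIT: usize = 0b0001;
const WRITER_PARKED_BIT: usize = 0b0010;
const UPGRADABLE_BIT: usize = 0b0100;
const WRITER_BIT: usize = 0b1000;
const READERS_MASK: usize = !0b1111;
const ONE_READER: usize = 0b10000;

// Decode a state word for inspection. For example, a lone upgradable reader
// is ONE_READER | UPGRADABLE_BIT, and WRITER_BIT together with a non-zero
// reader count means a writer is waiting for the readers to drain.
fn describe(state: usize) -> String {
    format!(
        "readers={}, upgradable={}, writer={}, parked={}, writer_parked={}",
        (state & READERS_MASK) / ONE_READER,
        state & UPGRADABLE_BIT != 0,
        state & WRITER_BIT != 0,
        state & PARKED_BIT != 0,
        state & WRITER_PARKED_BIT != 0,
    )
}
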
 
 /// Raw reader-writer lock type backed by the parking lot.
 pub struct RawRwLock {
     state: AtomicUsize,
 }
 
 unsafe impl RawRwLockTrait for RawRwLock {
     const INIT: RawRwLock = RawRwLock {
-        state: ATOMIC_USIZE_INIT,
+        state: AtomicUsize::new(0),
     };
 
     type GuardMarker = GuardNoSend;
 
     #[inline]
     fn lock_exclusive(&self) {
         if self
             .state
-            .compare_exchange_weak(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed)
+            .compare_exchange_weak(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed)
             .is_err()
         {
             let result = self.lock_exclusive_slow(None);
             debug_assert!(result);
         }
-        unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        self.deadlock_acquire();
     }
 
     #[inline]
     fn try_lock_exclusive(&self) -> bool {
         if self
             .state
-            .compare_exchange(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed)
+            .compare_exchange(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed)
             .is_ok()
         {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+            self.deadlock_acquire();
             true
         } else {
             false
         }
     }
 
     #[inline]
     fn unlock_exclusive(&self) {
-        unsafe { deadlock::release_resource(self as *const _ as usize) };
+        self.deadlock_release();
         if self
             .state
-            .compare_exchange_weak(EXCLUSIVE_GUARD, 0, Ordering::Release, Ordering::Relaxed)
+            .compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed)
             .is_ok()
         {
             return;
         }
         self.unlock_exclusive_slow(false);
     }
 
     #[inline]
     fn lock_shared(&self) {
         if !self.try_lock_shared_fast(false) {
             let result = self.lock_shared_slow(false, None);
             debug_assert!(result);
         }
-        unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        self.deadlock_acquire();
     }
 
     #[inline]
     fn try_lock_shared(&self) -> bool {
         let result = if self.try_lock_shared_fast(false) {
             true
         } else {
             self.try_lock_shared_slow(false)
         };
         if result {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+            self.deadlock_acquire();
         }
         result
     }
 
     #[inline]
     fn unlock_shared(&self) {
-        unsafe { deadlock::release_resource(self as *const _ as usize) };
-        let state = self.state.load(Ordering::Relaxed);
-        if state & PARKED_BIT == 0
-            || (state & UPGRADING_BIT == 0 && state & GUARD_COUNT_MASK != SHARED_GUARD)
-        {
-            if have_elision() {
-                if self
-                    .state
-                    .elision_release(state, state - SHARED_GUARD)
-                    .is_ok()
-                {
-                    return;
-                }
-            } else {
-                if self
-                    .state
-                    .compare_exchange_weak(
-                        state,
-                        state - SHARED_GUARD,
-                        Ordering::Release,
-                        Ordering::Relaxed,
-                    )
-                    .is_ok()
-                {
-                    return;
-                }
-            }
+        self.deadlock_release();
+        let state = if have_elision() {
+            self.state.elision_fetch_sub_release(ONE_READER)
+        } else {
+            self.state.fetch_sub(ONE_READER, Ordering::Release)
+        };
+        if state & (READERS_MASK | WRITER_PARKED_BIT) == (ONE_READER | WRITER_PARKED_BIT) {
+            self.unlock_shared_slow();
         }
-        self.unlock_shared_slow(false);
     }
 }
 
 unsafe impl RawRwLockFair for RawRwLock {
     #[inline]
     fn unlock_shared_fair(&self) {
-        unsafe { deadlock::release_resource(self as *const _ as usize) };
-        let state = self.state.load(Ordering::Relaxed);
-        if state & PARKED_BIT == 0
-            || (state & UPGRADING_BIT == 0 && state & GUARD_COUNT_MASK != SHARED_GUARD)
-        {
-            if have_elision() {
-                if self
-                    .state
-                    .elision_release(state, state - SHARED_GUARD)
-                    .is_ok()
-                {
-                    return;
-                }
-            } else {
-                if self
-                    .state
-                    .compare_exchange_weak(
-                        state,
-                        state - SHARED_GUARD,
-                        Ordering::Release,
-                        Ordering::Relaxed,
-                    )
-                    .is_ok()
-                {
-                    return;
-                }
-            }
-        }
-        self.unlock_shared_slow(true);
+        // Shared unlocking is always fair in this implementation.
+        self.unlock_shared();
     }
 
     #[inline]
     fn unlock_exclusive_fair(&self) {
-        unsafe { deadlock::release_resource(self as *const _ as usize) };
+        self.deadlock_release();
         if self
             .state
-            .compare_exchange_weak(EXCLUSIVE_GUARD, 0, Ordering::Release, Ordering::Relaxed)
+            .compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed)
             .is_ok()
         {
             return;
         }
         self.unlock_exclusive_slow(true);
     }
 
     #[inline]
     fn bump_shared(&self) {
-        if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 {
+        if self.state.load(Ordering::Relaxed) & (READERS_MASK | WRITER_BIT)
+            == ONE_READER | WRITER_BIT
+        {
             self.bump_shared_slow();
         }
     }
 
     #[inline]
     fn bump_exclusive(&self) {
         if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 {
             self.bump_exclusive_slow();
@@ -207,17 +180,17 @@ unsafe impl RawRwLockFair for RawRwLock 
     }
 }
 
 unsafe impl RawRwLockDowngrade for RawRwLock {
     #[inline]
     fn downgrade(&self) {
         let state = self
             .state
-            .fetch_sub(EXCLUSIVE_GUARD - SHARED_GUARD, Ordering::Release);
+            .fetch_add(ONE_READER - WRITER_BIT, Ordering::Release);
 
         // Wake up parked shared and upgradable threads if there are any
         if state & PARKED_BIT != 0 {
             self.downgrade_slow();
         }
     }
 }
 
@@ -225,235 +198,246 @@ unsafe impl RawRwLockTimed for RawRwLock
     type Duration = Duration;
     type Instant = Instant;
 
     #[inline]
     fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool {
         let result = if self.try_lock_shared_fast(false) {
             true
         } else {
-            self.lock_shared_slow(false, Some(Instant::now() + timeout))
+            self.lock_shared_slow(false, util::to_deadline(timeout))
         };
         if result {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+            self.deadlock_acquire();
         }
         result
     }
 
     #[inline]
     fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool {
         let result = if self.try_lock_shared_fast(false) {
             true
         } else {
             self.lock_shared_slow(false, Some(timeout))
         };
         if result {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+            self.deadlock_acquire();
         }
         result
     }
 
     #[inline]
     fn try_lock_exclusive_for(&self, timeout: Duration) -> bool {
         let result = if self
             .state
-            .compare_exchange_weak(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed)
+            .compare_exchange_weak(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed)
             .is_ok()
         {
             true
         } else {
-            self.lock_exclusive_slow(Some(Instant::now() + timeout))
+            self.lock_exclusive_slow(util::to_deadline(timeout))
         };
         if result {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+            self.deadlock_acquire();
         }
         result
     }
 
     #[inline]
     fn try_lock_exclusive_until(&self, timeout: Instant) -> bool {
         let result = if self
             .state
-            .compare_exchange_weak(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed)
+            .compare_exchange_weak(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed)
             .is_ok()
         {
             true
         } else {
             self.lock_exclusive_slow(Some(timeout))
         };
         if result {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+            self.deadlock_acquire();
         }
         result
     }
 }
 
 unsafe impl RawRwLockRecursive for RawRwLock {
     #[inline]
     fn lock_shared_recursive(&self) {
         if !self.try_lock_shared_fast(true) {
             let result = self.lock_shared_slow(true, None);
             debug_assert!(result);
         }
-        unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        self.deadlock_acquire();
     }
 
     #[inline]
     fn try_lock_shared_recursive(&self) -> bool {
         let result = if self.try_lock_shared_fast(true) {
             true
         } else {
             self.try_lock_shared_slow(true)
         };
         if result {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+            self.deadlock_acquire();
         }
         result
     }
 }
 
 unsafe impl RawRwLockRecursiveTimed for RawRwLock {
     #[inline]
     fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool {
         let result = if self.try_lock_shared_fast(true) {
             true
         } else {
-            self.lock_shared_slow(true, Some(Instant::now() + timeout))
+            self.lock_shared_slow(true, util::to_deadline(timeout))
         };
         if result {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+            self.deadlock_acquire();
         }
         result
     }
 
     #[inline]
     fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool {
         let result = if self.try_lock_shared_fast(true) {
             true
         } else {
             self.lock_shared_slow(true, Some(timeout))
         };
         if result {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+            self.deadlock_acquire();
         }
         result
     }
 }
 
 unsafe impl RawRwLockUpgrade for RawRwLock {
     #[inline]
     fn lock_upgradable(&self) {
         if !self.try_lock_upgradable_fast() {
             let result = self.lock_upgradable_slow(None);
             debug_assert!(result);
         }
-        unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        self.deadlock_acquire();
     }
 
     #[inline]
     fn try_lock_upgradable(&self) -> bool {
         let result = if self.try_lock_upgradable_fast() {
             true
         } else {
             self.try_lock_upgradable_slow()
         };
         if result {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+            self.deadlock_acquire();
         }
         result
     }
 
     #[inline]
     fn unlock_upgradable(&self) {
-        unsafe { deadlock::release_resource(self as *const _ as usize) };
-        if self
-            .state
-            .compare_exchange_weak(UPGRADABLE_GUARD, 0, Ordering::Release, Ordering::Relaxed)
-            .is_ok()
-        {
-            return;
+        self.deadlock_release();
+        let state = self.state.load(Ordering::Relaxed);
+        if state & PARKED_BIT == 0 {
+            if self
+                .state
+                .compare_exchange_weak(
+                    state,
+                    state - (ONE_READER | UPGRADABLE_BIT),
+                    Ordering::Release,
+                    Ordering::Relaxed,
+                )
+                .is_ok()
+            {
+                return;
+            }
         }
         self.unlock_upgradable_slow(false);
     }
 
     #[inline]
     fn upgrade(&self) {
-        if self
-            .state
-            .compare_exchange_weak(
-                UPGRADABLE_GUARD,
-                EXCLUSIVE_GUARD,
-                Ordering::Relaxed,
-                Ordering::Relaxed,
-            )
-            .is_err()
-        {
+        let state = self.state.fetch_sub(
+            (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT,
+            Ordering::Relaxed,
+        );
+        if state & READERS_MASK != ONE_READER {
             let result = self.upgrade_slow(None);
             debug_assert!(result);
         }
     }
 
+    #[inline]
     fn try_upgrade(&self) -> bool {
         if self
             .state
             .compare_exchange_weak(
-                UPGRADABLE_GUARD,
-                EXCLUSIVE_GUARD,
+                ONE_READER | UPGRADABLE_BIT,
+                WRITER_BIT,
                 Ordering::Relaxed,
                 Ordering::Relaxed,
             )
             .is_ok()
         {
             true
         } else {
             self.try_upgrade_slow()
         }
     }
 }
 
 unsafe impl RawRwLockUpgradeFair for RawRwLock {
     #[inline]
     fn unlock_upgradable_fair(&self) {
-        unsafe { deadlock::release_resource(self as *const _ as usize) };
-        if self
-            .state
-            .compare_exchange_weak(UPGRADABLE_GUARD, 0, Ordering::Release, Ordering::Relaxed)
-            .is_ok()
-        {
-            return;
+        self.deadlock_release();
+        let state = self.state.load(Ordering::Relaxed);
+        if state & PARKED_BIT == 0 {
+            if self
+                .state
+                .compare_exchange_weak(
+                    state,
+                    state - (ONE_READER | UPGRADABLE_BIT),
+                    Ordering::Release,
+                    Ordering::Relaxed,
+                )
+                .is_ok()
+            {
+                return;
+            }
         }
-        self.unlock_upgradable_slow(true);
+        self.unlock_upgradable_slow(false);
     }
 
     #[inline]
     fn bump_upgradable(&self) {
-        if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 {
+        if self.state.load(Ordering::Relaxed) == ONE_READER | UPGRADABLE_BIT | PARKED_BIT {
             self.bump_upgradable_slow();
         }
     }
 }
 
 unsafe impl RawRwLockUpgradeDowngrade for RawRwLock {
     #[inline]
     fn downgrade_upgradable(&self) {
-        let state = self
-            .state
-            .fetch_sub(UPGRADABLE_GUARD - SHARED_GUARD, Ordering::Relaxed);
+        let state = self.state.fetch_sub(UPGRADABLE_BIT, Ordering::Relaxed);
 
-        // Wake up parked shared and upgradable threads if there are any
+        // Wake up parked upgradable threads if there are any
         if state & PARKED_BIT != 0 {
-            self.downgrade_upgradable_slow(state);
+            self.downgrade_slow();
         }
     }
 
     #[inline]
     fn downgrade_to_upgradable(&self) {
-        let state = self
-            .state
-            .fetch_sub(EXCLUSIVE_GUARD - UPGRADABLE_GUARD, Ordering::Release);
+        let state = self.state.fetch_add(
+            (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT,
+            Ordering::Release,
+        );
 
         // Wake up parked shared threads if there are any
         if state & PARKED_BIT != 0 {
             self.downgrade_to_upgradable_slow();
         }
     }
 }
 
@@ -461,187 +445,661 @@ unsafe impl RawRwLockUpgradeTimed for Ra
     #[inline]
     fn try_lock_upgradable_until(&self, timeout: Instant) -> bool {
         let result = if self.try_lock_upgradable_fast() {
             true
         } else {
             self.lock_upgradable_slow(Some(timeout))
         };
         if result {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+            self.deadlock_acquire();
         }
         result
     }
 
     #[inline]
     fn try_lock_upgradable_for(&self, timeout: Duration) -> bool {
         let result = if self.try_lock_upgradable_fast() {
             true
         } else {
-            self.lock_upgradable_slow(Some(Instant::now() + timeout))
+            self.lock_upgradable_slow(util::to_deadline(timeout))
         };
         if result {
-            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+            self.deadlock_acquire();
         }
         result
     }
 
     #[inline]
     fn try_upgrade_until(&self, timeout: Instant) -> bool {
-        if self
-            .state
-            .compare_exchange_weak(
-                UPGRADABLE_GUARD,
-                EXCLUSIVE_GUARD,
-                Ordering::Relaxed,
-                Ordering::Relaxed,
-            )
-            .is_ok()
-        {
+        let state = self.state.fetch_sub(
+            (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT,
+            Ordering::Relaxed,
+        );
+        if state & READERS_MASK == ONE_READER {
             true
         } else {
             self.upgrade_slow(Some(timeout))
         }
     }
 
     #[inline]
     fn try_upgrade_for(&self, timeout: Duration) -> bool {
-        if self
-            .state
-            .compare_exchange_weak(
-                UPGRADABLE_GUARD,
-                EXCLUSIVE_GUARD,
-                Ordering::Relaxed,
-                Ordering::Relaxed,
-            )
-            .is_ok()
-        {
+        let state = self.state.fetch_sub(
+            (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT,
+            Ordering::Relaxed,
+        );
+        if state & READERS_MASK == ONE_READER {
             true
         } else {
-            self.upgrade_slow(Some(Instant::now() + timeout))
+            self.upgrade_slow(util::to_deadline(timeout))
         }
     }
 }
 
 impl RawRwLock {
     #[inline(always)]
     fn try_lock_shared_fast(&self, recursive: bool) -> bool {
         let state = self.state.load(Ordering::Relaxed);
 
-        // We can't allow grabbing a shared lock while there are parked threads
-        // since that could lead to writer starvation.
-        if !recursive && state & PARKED_BIT != 0 {
-            return false;
+        // We can't allow grabbing a shared lock if there is a writer, even if
+        // the writer is still waiting for the remaining readers to exit.
+        if state & WRITER_BIT != 0 {
+            // To allow recursive locks, we make an exception and allow readers
+            // to skip ahead of a pending writer to avoid deadlocking, at the
+            // cost of breaking the fairness guarantees.
+            if !recursive || state & READERS_MASK == 0 {
+                return false;
+            }
         }
 
         // Use hardware lock elision to avoid cache conflicts when multiple
         // readers try to acquire the lock. We only do this if the lock is
         // completely empty since elision handles conflicts poorly.
         if have_elision() && state == 0 {
-            self.state.elision_acquire(0, SHARED_GUARD).is_ok()
-        } else if let Some(new_state) = state.checked_add(SHARED_GUARD) {
+            self.state
+                .elision_compare_exchange_acquire(0, ONE_READER)
+                .is_ok()
+        } else if let Some(new_state) = state.checked_add(ONE_READER) {
             self.state
                 .compare_exchange_weak(state, new_state, Ordering::Acquire, Ordering::Relaxed)
                 .is_ok()
         } else {
             false
         }
     }
 
+    #[cold]
+    #[inline(never)]
+    fn try_lock_shared_slow(&self, recursive: bool) -> bool {
+        let mut state = self.state.load(Ordering::Relaxed);
+        loop {
+            // This mirrors the condition in try_lock_shared_fast
+            if state & WRITER_BIT != 0 {
+                if !recursive || state & READERS_MASK == 0 {
+                    return false;
+                }
+            }
+            if have_elision() && state == 0 {
+                match self.state.elision_compare_exchange_acquire(0, ONE_READER) {
+                    Ok(_) => return true,
+                    Err(x) => state = x,
+                }
+            } else {
+                match self.state.compare_exchange_weak(
+                    state,
+                    state
+                        .checked_add(ONE_READER)
+                        .expect("RwLock reader count overflow"),
+                    Ordering::Acquire,
+                    Ordering::Relaxed,
+                ) {
+                    Ok(_) => return true,
+                    Err(x) => state = x,
+                }
+            }
+        }
+    }
+
     #[inline(always)]
     fn try_lock_upgradable_fast(&self) -> bool {
         let state = self.state.load(Ordering::Relaxed);
 
-        // We can't allow grabbing an upgradable lock while there are parked threads
-        // since that could lead to writer starvation.
-        if state & PARKED_BIT != 0 {
+        // We can't grab an upgradable lock if there is already a writer or
+        // upgradable reader.
+        if state & (WRITER_BIT | UPGRADABLE_BIT) != 0 {
             return false;
         }
 
-        if let Some(new_state) = state.checked_add(UPGRADABLE_GUARD) {
+        if let Some(new_state) = state.checked_add(ONE_READER | UPGRADABLE_BIT) {
             self.state
                 .compare_exchange_weak(state, new_state, Ordering::Acquire, Ordering::Relaxed)
                 .is_ok()
         } else {
             false
         }
     }
 
     #[cold]
     #[inline(never)]
-    fn lock_exclusive_slow(&self, timeout: Option<Instant>) -> bool {
-        let mut spinwait = SpinWait::new();
+    fn try_lock_upgradable_slow(&self) -> bool {
         let mut state = self.state.load(Ordering::Relaxed);
         loop {
-            // Grab the lock if it isn't locked, even if there are other
-            // threads parked.
-            if let Some(new_state) = state.checked_add(EXCLUSIVE_GUARD) {
+            // This mirrors the condition in try_lock_upgradable_fast
+            if state & (WRITER_BIT | UPGRADABLE_BIT) != 0 {
+                return false;
+            }
+
+            match self.state.compare_exchange_weak(
+                state,
+                state
+                    .checked_add(ONE_READER | UPGRADABLE_BIT)
+                    .expect("RwLock reader count overflow"),
+                Ordering::Acquire,
+                Ordering::Relaxed,
+            ) {
+                Ok(_) => return true,
+                Err(x) => state = x,
+            }
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn lock_exclusive_slow(&self, timeout: Option<Instant>) -> bool {
+        // Step 1: grab exclusive ownership of WRITER_BIT
+        let timed_out = !self.lock_common(
+            timeout,
+            TOKEN_EXCLUSIVE,
+            |state| {
+                loop {
+                    if *state & (WRITER_BIT | UPGRADABLE_BIT) != 0 {
+                        return false;
+                    }
+
+                    // Grab WRITER_BIT if it isn't set, even if there are parked threads.
+                    match self.state.compare_exchange_weak(
+                        *state,
+                        *state | WRITER_BIT,
+                        Ordering::Acquire,
+                        Ordering::Relaxed,
+                    ) {
+                        Ok(_) => return true,
+                        Err(x) => *state = x,
+                    }
+                }
+            },
+            |state| state & (WRITER_BIT | UPGRADABLE_BIT) != 0,
+        );
+        if timed_out {
+            return false;
+        }
+
+        // Step 2: wait for all remaining readers to exit the lock.
+        self.wait_for_readers(timeout, 0)
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn unlock_exclusive_slow(&self, force_fair: bool) {
+        // There are threads to unpark. Try to unpark as many as we can.
+        let callback = |mut new_state, result: UnparkResult| {
+            // If we are using a fair unlock then we should keep the
+            // rwlock locked and hand it off to the unparked threads.
+            if result.unparked_threads != 0 && (force_fair || result.be_fair) {
+                if result.have_more_threads {
+                    new_state |= PARKED_BIT;
+                }
+                self.state.store(new_state, Ordering::Release);
+                TOKEN_HANDOFF
+            } else {
+                // Clear the parked bit if there are no more parked threads.
+                if result.have_more_threads {
+                    self.state.store(PARKED_BIT, Ordering::Release);
+                } else {
+                    self.state.store(0, Ordering::Release);
+                }
+                TOKEN_NORMAL
+            }
+        };
+        self.wake_parked_threads(0, callback);
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn lock_shared_slow(&self, recursive: bool, timeout: Option<Instant>) -> bool {
+        self.lock_common(
+            timeout,
+            TOKEN_SHARED,
+            |state| {
+                let mut spinwait_shared = SpinWait::new();
+                loop {
+                    // Use hardware lock elision to avoid cache conflicts when multiple
+                    // readers try to acquire the lock. We only do this if the lock is
+                    // completely empty since elision handles conflicts poorly.
+                    if have_elision() && *state == 0 {
+                        match self.state.elision_compare_exchange_acquire(0, ONE_READER) {
+                            Ok(_) => return true,
+                            Err(x) => *state = x,
+                        }
+                    }
+
+                    // This is the same condition as try_lock_shared_fast
+                    if *state & WRITER_BIT != 0 {
+                        if !recursive || *state & READERS_MASK == 0 {
+                            return false;
+                        }
+                    }
+
+                    if self
+                        .state
+                        .compare_exchange_weak(
+                            *state,
+                            state
+                                .checked_add(ONE_READER)
+                                .expect("RwLock reader count overflow"),
+                            Ordering::Acquire,
+                            Ordering::Relaxed,
+                        )
+                        .is_ok()
+                    {
+                        return true;
+                    }
+
+                    // If there is high contention on the reader count then we want
+                    // to leave some time between attempts to acquire the lock to
+                    // let other threads make progress.
+                    spinwait_shared.spin_no_yield();
+                    *state = self.state.load(Ordering::Relaxed);
+                }
+            },
+            |state| state & WRITER_BIT != 0,
+        )
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn unlock_shared_slow(&self) {
+        // At this point WRITER_PARKED_BIT is set and READERS_MASK is empty. We
+        // just need to wake up a potentially sleeping pending writer.
+        unsafe {
+            // Using the 2nd key at addr + 1
+            let addr = self as *const _ as usize + 1;
+            let callback = |result: UnparkResult| {
+                // Clear the WRITER_PARKED_BIT here since there can only be one
+                // parked writer thread.
+                debug_assert!(!result.have_more_threads);
+                self.state.fetch_and(!WRITER_PARKED_BIT, Ordering::Relaxed);
+                TOKEN_NORMAL
+            };
+            parking_lot_core::unpark_one(addr, callback);
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn lock_upgradable_slow(&self, timeout: Option<Instant>) -> bool {
+        self.lock_common(
+            timeout,
+            TOKEN_UPGRADABLE,
+            |state| {
+                let mut spinwait_shared = SpinWait::new();
+                loop {
+                    if *state & (WRITER_BIT | UPGRADABLE_BIT) != 0 {
+                        return false;
+                    }
+
+                    if self
+                        .state
+                        .compare_exchange_weak(
+                            *state,
+                            state
+                                .checked_add(ONE_READER | UPGRADABLE_BIT)
+                                .expect("RwLock reader count overflow"),
+                            Ordering::Acquire,
+                            Ordering::Relaxed,
+                        )
+                        .is_ok()
+                    {
+                        return true;
+                    }
+
+                    // If there is high contention on the reader count then we want
+                    // to leave some time between attempts to acquire the lock to
+                    // let other threads make progress.
+                    spinwait_shared.spin_no_yield();
+                    *state = self.state.load(Ordering::Relaxed);
+                }
+            },
+            |state| state & (WRITER_BIT | UPGRADABLE_BIT) != 0,
+        )
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn unlock_upgradable_slow(&self, force_fair: bool) {
+        // Just release the lock if there are no parked threads.
+        let mut state = self.state.load(Ordering::Relaxed);
+        while state & PARKED_BIT == 0 {
+            match self.state.compare_exchange_weak(
+                state,
+                state - (ONE_READER | UPGRADABLE_BIT),
+                Ordering::Release,
+                Ordering::Relaxed,
+            ) {
+                Ok(_) => return,
+                Err(x) => state = x,
+            }
+        }
+
+        // There are threads to unpark. Try to unpark as many as we can.
+        let callback = |new_state, result: UnparkResult| {
+            // If we are using a fair unlock then we should keep the
+            // rwlock locked and hand it off to the unparked threads.
+            let mut state = self.state.load(Ordering::Relaxed);
+            if force_fair || result.be_fair {
+                // Fall back to normal unpark on overflow. Panicking is
+                // not allowed in parking_lot callbacks.
+                while let Some(mut new_state) =
+                    (state - (ONE_READER | UPGRADABLE_BIT)).checked_add(new_state)
+                {
+                    if result.have_more_threads {
+                        new_state |= PARKED_BIT;
+                    } else {
+                        new_state &= !PARKED_BIT;
+                    }
+                    match self.state.compare_exchange_weak(
+                        state,
+                        new_state,
+                        Ordering::Relaxed,
+                        Ordering::Relaxed,
+                    ) {
+                        Ok(_) => return TOKEN_HANDOFF,
+                        Err(x) => state = x,
+                    }
+                }
+            }
+
+            // Otherwise just release the upgradable lock and update PARKED_BIT.
+            loop {
+                let mut new_state = state - (ONE_READER | UPGRADABLE_BIT);
+                if result.have_more_threads {
+                    new_state |= PARKED_BIT;
+                } else {
+                    new_state &= !PARKED_BIT;
+                }
                 match self.state.compare_exchange_weak(
                     state,
                     new_state,
-                    Ordering::Acquire,
+                    Ordering::Relaxed,
                     Ordering::Relaxed,
                 ) {
-                    Ok(_) => return true,
+                    Ok(_) => return TOKEN_NORMAL,
                     Err(x) => state = x,
                 }
+            }
+        };
+        self.wake_parked_threads(0, callback);
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn try_upgrade_slow(&self) -> bool {
+        let mut state = self.state.load(Ordering::Relaxed);
+        loop {
+            if state & READERS_MASK != ONE_READER {
+                return false;
+            }
+            match self.state.compare_exchange_weak(
+                state,
+                state - (ONE_READER | UPGRADABLE_BIT) + WRITER_BIT,
+                Ordering::Relaxed,
+                Ordering::Relaxed,
+            ) {
+                Ok(_) => return true,
+                Err(x) => state = x,
+            }
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn upgrade_slow(&self, timeout: Option<Instant>) -> bool {
+        self.wait_for_readers(timeout, ONE_READER | UPGRADABLE_BIT)
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn downgrade_slow(&self) {
+        // We only reach this point if PARKED_BIT is set.
+        let callback = |_, result: UnparkResult| {
+            // Clear the parked bit if there are no more parked threads
+            if !result.have_more_threads {
+                self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
+            }
+            TOKEN_NORMAL
+        };
+        self.wake_parked_threads(ONE_READER, callback);
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn downgrade_to_upgradable_slow(&self) {
+        // We only reach this point if PARKED_BIT is set.
+        let callback = |_, result: UnparkResult| {
+            // Clear the parked bit if there are no more parked threads
+            if !result.have_more_threads {
+                self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
+            }
+            TOKEN_NORMAL
+        };
+        self.wake_parked_threads(ONE_READER | UPGRADABLE_BIT, callback);
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn bump_shared_slow(&self) {
+        self.unlock_shared();
+        self.lock_shared();
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn bump_exclusive_slow(&self) {
+        self.deadlock_release();
+        self.unlock_exclusive_slow(true);
+        self.lock_exclusive();
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn bump_upgradable_slow(&self) {
+        self.deadlock_release();
+        self.unlock_upgradable_slow(true);
+        self.lock_upgradable();
+    }
+
+    // Common code for waking up parked threads after releasing WRITER_BIT or
+    // UPGRADABLE_BIT.
+    #[inline]
+    fn wake_parked_threads<C>(&self, new_state: usize, callback: C)
+    where
+        C: FnOnce(usize, UnparkResult) -> UnparkToken,
+    {
+        // We must wake up at least one upgrader or writer if there is one,
+        // otherwise they may end up parked indefinitely since unlock_shared
+        // does not call wake_parked_threads.
+        let new_state = Cell::new(new_state);
+        unsafe {
+            let addr = self as *const _ as usize;
+            let filter = |ParkToken(token)| {
+                let s = new_state.get();
+
+                // If we are waking up a writer, don't wake anything else.
+                if s & WRITER_BIT != 0 {
+                    return FilterOp::Stop;
+                }
+
+                // Otherwise wake *all* readers and one upgrader/writer.
+                if token & (UPGRADABLE_BIT | WRITER_BIT) != 0 && s & UPGRADABLE_BIT != 0 {
+                    // Skip writers and upgradable readers if we already have
+                    // a writer/upgradable reader.
+                    FilterOp::Skip
+                } else {
+                    new_state.set(s + token);
+                    FilterOp::Unpark
+                }
+            };
+            parking_lot_core::unpark_filter(addr, filter, |result| {
+                callback(new_state.get(), result)
+            });
+        }
+    }
+
+    // Common code for waiting for readers to exit the lock after acquiring
+    // WRITER_BIT.
+    #[inline]
+    fn wait_for_readers(&self, timeout: Option<Instant>, prev_value: usize) -> bool {
+        // At this point WRITER_BIT is already set, we just need to wait for the
+        // remaining readers to exit the lock.
+        let mut spinwait = SpinWait::new();
+        let mut state = self.state.load(Ordering::Relaxed);
+        while state & READERS_MASK != 0 {
+            // Spin a few times to wait for readers to exit
+            if spinwait.spin() {
+                state = self.state.load(Ordering::Relaxed);
                 continue;
             }
 
-            // If there are no parked threads and only one reader or writer, try
-            // spinning a few times.
-            if (state == EXCLUSIVE_GUARD || state == SHARED_GUARD || state == UPGRADABLE_GUARD)
-                && spinwait.spin()
-            {
+            // Set the parked bit
+            if state & WRITER_PARKED_BIT == 0 {
+                if let Err(x) = self.state.compare_exchange_weak(
+                    state,
+                    state | WRITER_PARKED_BIT,
+                    Ordering::Relaxed,
+                    Ordering::Relaxed,
+                ) {
+                    state = x;
+                    continue;
+                }
+            }
+
+            // Park our thread until we are woken up by an unlock
+            unsafe {
+                // Using the 2nd key at addr + 1
+                let addr = self as *const _ as usize + 1;
+                let validate = || {
+                    let state = self.state.load(Ordering::Relaxed);
+                    state & READERS_MASK != 0 && state & WRITER_PARKED_BIT != 0
+                };
+                let before_sleep = || {};
+                let timed_out = |_, _| {};
+                match parking_lot_core::park(
+                    addr,
+                    validate,
+                    before_sleep,
+                    timed_out,
+                    TOKEN_EXCLUSIVE,
+                    timeout,
+                ) {
+                    // We still need to re-check the state if we are unparked
+                    // since a previous writer timing-out could have allowed
+                    // another reader to sneak in before we parked.
+                    ParkResult::Unparked(_) | ParkResult::Invalid => {
+                        state = self.state.load(Ordering::Relaxed);
+                        continue;
+                    }
+
+                    // Timeout expired
+                    ParkResult::TimedOut => {
+                        // We need to release WRITER_BIT and revert back to
+                        // our previous value. We also wake up any threads that
+                        // might be waiting on WRITER_BIT.
+                        let state = self.state.fetch_add(
+                            prev_value.wrapping_sub(WRITER_BIT | WRITER_PARKED_BIT),
+                            Ordering::Relaxed,
+                        );
+                        if state & PARKED_BIT != 0 {
+                            let callback = |_, result: UnparkResult| {
+                                // Clear the parked bit if there are no more parked threads
+                                if !result.have_more_threads {
+                                    self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
+                                }
+                                TOKEN_NORMAL
+                            };
+                            self.wake_parked_threads(ONE_READER | UPGRADABLE_BIT, callback);
+                        }
+                        return false;
+                    }
+                }
+            }
+        }
+        true
+    }
+
+    // Common code for acquiring a lock
+    #[inline]
+    fn lock_common<F, V>(
+        &self,
+        timeout: Option<Instant>,
+        token: ParkToken,
+        mut try_lock: F,
+        validate: V,
+    ) -> bool
+    where
+        F: FnMut(&mut usize) -> bool,
+        V: Fn(usize) -> bool,
+    {
+        let mut spinwait = SpinWait::new();
+        let mut state = self.state.load(Ordering::Relaxed);
+        loop {
+            // Attempt to grab the lock
+            if try_lock(&mut state) {
+                return true;
+            }
+
+            // If there are no parked threads, try spinning a few times.
+            if state & (PARKED_BIT | WRITER_PARKED_BIT) == 0 && spinwait.spin() {
                 state = self.state.load(Ordering::Relaxed);
                 continue;
             }
 
+            // Set the parked bit
+            if state & PARKED_BIT == 0 {
+                if let Err(x) = self.state.compare_exchange_weak(
+                    state,
+                    state | PARKED_BIT,
+                    Ordering::Relaxed,
+                    Ordering::Relaxed,
+                ) {
+                    state = x;
+                    continue;
+                }
+            }
+
             // Park our thread until we are woken up by an unlock
             unsafe {
                 let addr = self as *const _ as usize;
                 let validate = || {
-                    let mut state = self.state.load(Ordering::Relaxed);
-                    loop {
-                        // If the rwlock is free, abort the park and try to grab
-                        // it immediately.
-                        if state & GUARD_COUNT_MASK == 0 {
-                            return false;
-                        }
-
-                        // Nothing to do if the parked bit is already set
-                        if state & PARKED_BIT != 0 {
-                            return true;
-                        }
-
-                        // Set the parked bit
-                        match self.state.compare_exchange_weak(
-                            state,
-                            state | PARKED_BIT,
-                            Ordering::Relaxed,
-                            Ordering::Relaxed,
-                        ) {
-                            Ok(_) => return true,
-                            Err(x) => state = x,
-                        }
-                    }
+                    let state = self.state.load(Ordering::Relaxed);
+                    state & PARKED_BIT != 0 && validate(state)
                 };
                 let before_sleep = || {};
                 let timed_out = |_, was_last_thread| {
                     // Clear the parked bit if we were the last parked thread
                     if was_last_thread {
                         self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
                     }
                 };
                 match parking_lot_core::park(
                     addr,
                     validate,
                     before_sleep,
                     timed_out,
-                    TOKEN_EXCLUSIVE,
+                    token,
                     timeout,
                 ) {
                     // The thread that unparked us passed the lock on to us
                     // directly without unlocking it.
                     ParkResult::Unparked(TOKEN_HANDOFF) => return true,
 
                     // We were unparked normally, try acquiring the lock again
                     ParkResult::Unparked(_) => (),
@@ -655,748 +1113,20 @@ impl RawRwLock {
             }
 
             // Loop back and try locking again
             spinwait.reset();
             state = self.state.load(Ordering::Relaxed);
         }
     }
 
-    #[cold]
-    #[inline(never)]
-    fn unlock_exclusive_slow(&self, force_fair: bool) {
-        // Unlock directly if there are no parked threads
-        if self
-            .state
-            .compare_exchange(EXCLUSIVE_GUARD, 0, Ordering::Release, Ordering::Relaxed)
-            .is_ok()
-        {
-            return;
-        };
-
-        // There are threads to unpark. We unpark threads up to the guard capacity.
-        let guard_count = Cell::new(0usize);
-        unsafe {
-            let addr = self as *const _ as usize;
-            let filter = |ParkToken(token)| -> FilterOp {
-                match guard_count.get().checked_add(token) {
-                    Some(new_guard_count) => {
-                        guard_count.set(new_guard_count);
-                        FilterOp::Unpark
-                    }
-                    None => FilterOp::Stop,
-                }
-            };
-            let callback = |result: UnparkResult| {
-                // If we are using a fair unlock then we should keep the
-                // rwlock locked and hand it off to the unparked threads.
-                if result.unparked_threads != 0 && (force_fair || result.be_fair) {
-                    // We need to set the guard count accordingly.
-                    let mut new_state = guard_count.get();
-
-                    if result.have_more_threads {
-                        new_state |= PARKED_BIT;
-                    }
-
-                    self.state.store(new_state, Ordering::Release);
-                    TOKEN_HANDOFF
-                } else {
-                    // Clear the parked bit if there are no more parked threads.
-                    if result.have_more_threads {
-                        self.state.store(PARKED_BIT, Ordering::Release);
-                    } else {
-                        self.state.store(0, Ordering::Release);
-                    }
-                    TOKEN_NORMAL
-                }
-            };
-            parking_lot_core::unpark_filter(addr, filter, callback);
-        }
-    }
-
-    #[cold]
-    #[inline(never)]
-    fn downgrade_slow(&self) {
-        unsafe {
-            let addr = self as *const _ as usize;
-            let mut guard_count = SHARED_GUARD;
-            let filter = |ParkToken(token)| -> FilterOp {
-                match guard_count.checked_add(token) {
-                    Some(new_guard_count) => {
-                        guard_count = new_guard_count;
-                        FilterOp::Unpark
-                    }
-                    None => FilterOp::Stop,
-                }
-            };
-            let callback = |result: UnparkResult| {
-                // Clear the parked bit if there no more parked threads
-                if !result.have_more_threads {
-                    self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
-                }
-                TOKEN_NORMAL
-            };
-            parking_lot_core::unpark_filter(addr, filter, callback);
-        }
-    }
-
-    #[cold]
-    #[inline(never)]
-    fn downgrade_to_upgradable_slow(&self) {
-        unsafe {
-            let addr = self as *const _ as usize;
-            let mut guard_count = UPGRADABLE_GUARD;
-            let filter = |ParkToken(token)| -> FilterOp {
-                match guard_count.checked_add(token) {
-                    Some(new_guard_count) => {
-                        guard_count = new_guard_count;
-                        FilterOp::Unpark
-                    }
-                    None => FilterOp::Stop,
-                }
-            };
-            let callback = |result: UnparkResult| {
-                // Clear the parked bit if there no more parked threads
-                if !result.have_more_threads {
-                    self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
-                }
-                TOKEN_NORMAL
-            };
-            parking_lot_core::unpark_filter(addr, filter, callback);
-        }
-    }
-
-    #[cold]
-    #[inline(never)]
-    fn lock_shared_slow(&self, recursive: bool, timeout: Option<Instant>) -> bool {
-        let mut spinwait = SpinWait::new();
-        let mut spinwait_shared = SpinWait::new();
-        let mut state = self.state.load(Ordering::Relaxed);
-        let mut unparked = false;
-        loop {
-            // Use hardware lock elision to avoid cache conflicts when multiple
-            // readers try to acquire the lock. We only do this if the lock is
-            // completely empty since elision handles conflicts poorly.
-            if have_elision() && state == 0 {
-                match self.state.elision_acquire(0, SHARED_GUARD) {
-                    Ok(_) => return true,
-                    Err(x) => state = x,
-                }
-            }
-
-            // Grab the lock if there are no exclusive threads locked or
-            // waiting. However if we were unparked then we are allowed to grab
-            // the lock even if there are pending exclusive threads.
-            if unparked || recursive || state & PARKED_BIT == 0 {
-                if let Some(new_state) = state.checked_add(SHARED_GUARD) {
-                    if self
-                        .state
-                        .compare_exchange_weak(
-                            state,
-                            new_state,
-                            Ordering::Acquire,
-                            Ordering::Relaxed,
-                        )
-                        .is_ok()
-                    {
-                        return true;
-                    }
-
-                    // If there is high contention on the reader count then we want
-                    // to leave some time between attempts to acquire the lock to
-                    // let other threads make progress.
-                    spinwait_shared.spin_no_yield();
-                    state = self.state.load(Ordering::Relaxed);
-                    continue;
-                } else {
-                    // We were unparked spuriously, reset unparked flag.
-                    unparked = false;
-                }
-            }
-
-            // If there are no parked threads, try spinning a few times
-            if state & PARKED_BIT == 0 && spinwait.spin() {
-                state = self.state.load(Ordering::Relaxed);
-                continue;
-            }
-
-            // Park our thread until we are woken up by an unlock
-            unsafe {
-                let addr = self as *const _ as usize;
-                let validate = || {
-                    let mut state = self.state.load(Ordering::Relaxed);
-                    loop {
-                        // Nothing to do if the parked bit is already set
-                        if state & PARKED_BIT != 0 {
-                            return true;
-                        }
-
-                        // If the parked bit is not set then it means we are at
-                        // the front of the queue. If there is space for another
-                        // lock then we should abort the park and try acquiring
-                        // the lock again.
-                        if state & GUARD_COUNT_MASK != GUARD_COUNT_MASK {
-                            return false;
-                        }
-
-                        // Set the parked bit
-                        match self.state.compare_exchange_weak(
-                            state,
-                            state | PARKED_BIT,
-                            Ordering::Relaxed,
-                            Ordering::Relaxed,
-                        ) {
-                            Ok(_) => return true,
-                            Err(x) => state = x,
-                        }
-                    }
-                };
-                let before_sleep = || {};
-                let timed_out = |_, was_last_thread| {
-                    // Clear the parked bit if we were the last parked thread
-                    if was_last_thread {
-                        self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
-                    }
-                };
-                match parking_lot_core::park(
-                    addr,
-                    validate,
-                    before_sleep,
-                    timed_out,
-                    TOKEN_SHARED,
-                    timeout,
-                ) {
-                    // The thread that unparked us passed the lock on to us
-                    // directly without unlocking it.
-                    ParkResult::Unparked(TOKEN_HANDOFF) => return true,
-
-                    // We were unparked normally, try acquiring the lock again
-                    ParkResult::Unparked(_) => (),
-
-                    // The validation function failed, try locking again
-                    ParkResult::Invalid => (),
-
-                    // Timeout expired
-                    ParkResult::TimedOut => return false,
-                }
-            }
-
-            // Loop back and try locking again
-            spinwait.reset();
-            spinwait_shared.reset();
-            state = self.state.load(Ordering::Relaxed);
-            unparked = true;
-        }
-    }
-
-    #[cold]
-    #[inline(never)]
-    fn try_lock_shared_slow(&self, recursive: bool) -> bool {
-        let mut state = self.state.load(Ordering::Relaxed);
-        loop {
-            if !recursive && state & PARKED_BIT != 0 {
-                return false;
-            }
-            if have_elision() && state == 0 {
-                match self.state.elision_acquire(0, SHARED_GUARD) {
-                    Ok(_) => return true,
-                    Err(x) => state = x,
-                }
-            } else {
-                match state.checked_add(SHARED_GUARD) {
-                    Some(new_state) => match self.state.compare_exchange_weak(
-                        state,
-                        new_state,
-                        Ordering::Acquire,
-                        Ordering::Relaxed,
-                    ) {
-                        Ok(_) => return true,
-                        Err(x) => state = x,
-                    },
-                    None => return false,
-                }
-            }
-        }
-    }
-
-    #[cold]
-    #[inline(never)]
-    fn unlock_shared_slow(&self, force_fair: bool) {
-        let mut state = self.state.load(Ordering::Relaxed);
-        loop {
-            // Just release the lock if there are no parked thread or if we are
-            // not the last shared thread.
-            if state & PARKED_BIT == 0
-                || (state & UPGRADING_BIT == 0 && state & GUARD_COUNT_MASK != SHARED_GUARD)
-                || (state & UPGRADING_BIT != 0
-                    && state & GUARD_COUNT_MASK != UPGRADABLE_GUARD + SHARED_GUARD)
-            {
-                match self.state.compare_exchange_weak(
-                    state,
-                    state - SHARED_GUARD,
-                    Ordering::Release,
-                    Ordering::Relaxed,
-                ) {
-                    Ok(_) => return,
-                    Err(x) => state = x,
-                }
-                continue;
-            }
-
-            break;
-        }
-
-        // There are threads to unpark. If there is a thread waiting to be
-        // upgraded, we find that thread and let it upgrade, otherwise we
-        // unpark threads up to the guard capacity. Note that there is a
-        // potential race condition here: another thread might grab a shared
-        // lock between now and when we actually release our lock.
-        let additional_guards = Cell::new(0usize);
-        let has_upgraded = Cell::new(if state & UPGRADING_BIT == 0 {
-            None
-        } else {
-            Some(false)
-        });
-        unsafe {
-            let addr = self as *const _ as usize;
-            let filter = |ParkToken(token)| -> FilterOp {
-                match has_upgraded.get() {
-                    None => match additional_guards.get().checked_add(token) {
-                        Some(x) => {
-                            additional_guards.set(x);
-                            FilterOp::Unpark
-                        }
-                        None => FilterOp::Stop,
-                    },
-                    Some(false) => if token & UPGRADING_BIT != 0 {
-                        additional_guards.set(token & !UPGRADING_BIT);
-                        has_upgraded.set(Some(true));
-                        FilterOp::Unpark
-                    } else {
-                        FilterOp::Skip
-                    },
-                    Some(true) => FilterOp::Stop,
-                }
-            };
-            let callback = |result: UnparkResult| {
-                let mut state = self.state.load(Ordering::Relaxed);
-                loop {
-                    // Release our shared lock
-                    let mut new_state = state - SHARED_GUARD;
-
-                    // Clear the parked bit if there are no more threads in
-                    // the queue.
-                    if !result.have_more_threads {
-                        new_state &= !PARKED_BIT;
-                    }
-
-                    // Clear the upgrading bit if we are upgrading a thread.
-                    if let Some(true) = has_upgraded.get() {
-                        new_state &= !UPGRADING_BIT;
-                    }
-
-                    // Consider using fair unlocking. If we are, then we should set
-                    // the state to the new value and tell the threads that we are
-                    // handing the lock directly.
-                    let token = if result.unparked_threads != 0 && (force_fair || result.be_fair) {
-                        match new_state.checked_add(additional_guards.get()) {
-                            Some(x) => {
-                                new_state = x;
-                                TOKEN_HANDOFF
-                            }
-                            None => TOKEN_NORMAL,
-                        }
-                    } else {
-                        TOKEN_NORMAL
-                    };
-
-                    match self.state.compare_exchange_weak(
-                        state,
-                        new_state,
-                        Ordering::Release,
-                        Ordering::Relaxed,
-                    ) {
-                        Ok(_) => return token,
-                        Err(x) => state = x,
-                    }
-                }
-            };
-            parking_lot_core::unpark_filter(addr, filter, callback);
-        }
+    #[inline]
+    fn deadlock_acquire(&self) {
+        unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        unsafe { deadlock::acquire_resource(self as *const _ as usize + 1) };
     }
 
-    #[cold]
-    #[inline(never)]
-    fn lock_upgradable_slow(&self, timeout: Option<Instant>) -> bool {
-        let mut spinwait = SpinWait::new();
-        let mut spinwait_shared = SpinWait::new();
-        let mut state = self.state.load(Ordering::Relaxed);
-        let mut unparked = false;
-        loop {
-            // Grab the lock if there are no exclusive or upgradable threads
-            // locked or waiting. However if we were unparked then we are
-            // allowed to grab the lock even if there are pending exclusive threads.
-            if unparked || state & PARKED_BIT == 0 {
-                if let Some(new_state) = state.checked_add(UPGRADABLE_GUARD) {
-                    if self
-                        .state
-                        .compare_exchange_weak(
-                            state,
-                            new_state,
-                            Ordering::Acquire,
-                            Ordering::Relaxed,
-                        )
-                        .is_ok()
-                    {
-                        return true;
-                    }
-
-                    // If there is high contention on the reader count then we want
-                    // to leave some time between attempts to acquire the lock to
-                    // let other threads make progress.
-                    spinwait_shared.spin_no_yield();
-                    state = self.state.load(Ordering::Relaxed);
-                    continue;
-                } else {
-                    // We were unparked spuriously, reset unparked flag.
-                    unparked = false;
-                }
-            }
-
-            // If there are no parked threads, try spinning a few times
-            if state & PARKED_BIT == 0 && spinwait.spin() {
-                state = self.state.load(Ordering::Relaxed);
-                continue;
-            }
-
-            // Park our thread until we are woken up by an unlock
-            unsafe {
-                let addr = self as *const _ as usize;
-                let validate = || {
-                    let mut state = self.state.load(Ordering::Relaxed);
-                    loop {
-                        // Nothing to do if the parked bit is already set
-                        if state & PARKED_BIT != 0 {
-                            return true;
-                        }
-
-                        // If the parked bit is not set then it means we are at
-                        // the front of the queue. If there is space for an
-                        // upgradable lock then we should abort the park and try
-                        // acquiring the lock again.
-                        if state & UPGRADABLE_GUARD != UPGRADABLE_GUARD {
-                            return false;
-                        }
-
-                        // Set the parked bit
-                        match self.state.compare_exchange_weak(
-                            state,
-                            state | PARKED_BIT,
-                            Ordering::Relaxed,
-                            Ordering::Relaxed,
-                        ) {
-                            Ok(_) => return true,
-                            Err(x) => state = x,
-                        }
-                    }
-                };
-                let before_sleep = || {};
-                let timed_out = |_, was_last_thread| {
-                    // Clear the parked bit if we were the last parked thread
-                    if was_last_thread {
-                        self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
-                    }
-                };
-                match parking_lot_core::park(
-                    addr,
-                    validate,
-                    before_sleep,
-                    timed_out,
-                    TOKEN_UPGRADABLE,
-                    timeout,
-                ) {
-                    // The thread that unparked us passed the lock on to us
-                    // directly without unlocking it.
-                    ParkResult::Unparked(TOKEN_HANDOFF) => return true,
-
-                    // We were unparked normally, try acquiring the lock again
-                    ParkResult::Unparked(_) => (),
-
-                    // The validation function failed, try locking again
-                    ParkResult::Invalid => (),
-
-                    // Timeout expired
-                    ParkResult::TimedOut => return false,
-                }
-            }
-
-            // Loop back and try locking again
-            spinwait.reset();
-            spinwait_shared.reset();
-            state = self.state.load(Ordering::Relaxed);
-            unparked = true;
-        }
-    }
-
-    #[cold]
-    #[inline(never)]
-    fn try_lock_upgradable_slow(&self) -> bool {
-        let mut state = self.state.load(Ordering::Relaxed);
-        loop {
-            if state & PARKED_BIT != 0 {
-                return false;
-            }
-
-            match state.checked_add(UPGRADABLE_GUARD) {
-                Some(new_state) => match self.state.compare_exchange_weak(
-                    state,
-                    new_state,
-                    Ordering::Acquire,
-                    Ordering::Relaxed,
-                ) {
-                    Ok(_) => return true,
-                    Err(x) => state = x,
-                },
-                None => return false,
-            }
-        }
-    }
-
-    #[cold]
-    #[inline(never)]
-    fn unlock_upgradable_slow(&self, force_fair: bool) {
-        let mut state = self.state.load(Ordering::Relaxed);
-        loop {
-            // Just release the lock if there are no parked threads.
-            if state & PARKED_BIT == 0 {
-                match self.state.compare_exchange_weak(
-                    state,
-                    state - UPGRADABLE_GUARD,
-                    Ordering::Release,
-                    Ordering::Relaxed,
-                ) {
-                    Ok(_) => return,
-                    Err(x) => state = x,
-                }
-                continue;
-            }
-
-            break;
-        }
-
-        // There are threads to unpark. We unpark threads up to the guard capacity.
-        let additional_guards = Cell::new(0usize);
-        unsafe {
-            let addr = self as *const _ as usize;
-            let filter = |ParkToken(token)| -> FilterOp {
-                match additional_guards.get().checked_add(token) {
-                    Some(x) => {
-                        additional_guards.set(x);
-                        FilterOp::Unpark
-                    }
-                    None => FilterOp::Stop,
-                }
-            };
-            let callback = |result: UnparkResult| {
-                let mut state = self.state.load(Ordering::Relaxed);
-                loop {
-                    // Release our upgradable lock
-                    let mut new_state = state - UPGRADABLE_GUARD;
-
-                    // Clear the parked bit if there are no more threads in
-                    // the queue
-                    if !result.have_more_threads {
-                        new_state &= !PARKED_BIT;
-                    }
-
-                    // Consider using fair unlocking. If we are, then we should set
-                    // the state to the new value and tell the threads that we are
-                    // handing the lock directly.
-                    let token = if result.unparked_threads != 0 && (force_fair || result.be_fair) {
-                        match new_state.checked_add(additional_guards.get()) {
-                            Some(x) => {
-                                new_state = x;
-                                TOKEN_HANDOFF
-                            }
-                            None => TOKEN_NORMAL,
-                        }
-                    } else {
-                        TOKEN_NORMAL
-                    };
-
-                    match self.state.compare_exchange_weak(
-                        state,
-                        new_state,
-                        Ordering::Release,
-                        Ordering::Relaxed,
-                    ) {
-                        Ok(_) => return token,
-                        Err(x) => state = x,
-                    }
-                }
-            };
-            parking_lot_core::unpark_filter(addr, filter, callback);
-        }
-    }
-
-    #[cold]
-    #[inline(never)]
-    fn downgrade_upgradable_slow(&self, state: usize) {
-        unsafe {
-            let addr = self as *const _ as usize;
-            let mut guard_count = (state & GUARD_COUNT_MASK) - UPGRADABLE_GUARD;
-            let filter = |ParkToken(token)| -> FilterOp {
-                match guard_count.checked_add(token) {
-                    Some(x) => {
-                        guard_count = x;
-                        FilterOp::Unpark
-                    }
-                    None => FilterOp::Stop,
-                }
-            };
-            let callback = |result: UnparkResult| {
-                // Clear the parked bit if there no more parked threads
-                if !result.have_more_threads {
-                    self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
-                }
-                TOKEN_NORMAL
-            };
-            parking_lot_core::unpark_filter(addr, filter, callback);
-        }
-    }
-
-    #[cold]
-    #[inline(never)]
-    fn try_upgrade_slow(&self) -> bool {
-        let mut state = self.state.load(Ordering::Relaxed);
-        loop {
-            match state.checked_add(EXCLUSIVE_GUARD - SHARED_GUARD) {
-                Some(new_state) => match self.state.compare_exchange_weak(
-                    state,
-                    new_state,
-                    Ordering::Relaxed,
-                    Ordering::Relaxed,
-                ) {
-                    Ok(_) => return true,
-                    Err(x) => state = x,
-                },
-                None => return false,
-            }
-        }
-    }
-
-    #[cold]
-    #[inline(never)]
-    fn upgrade_slow(&self, timeout: Option<Instant>) -> bool {
-        let mut spinwait = SpinWait::new();
-        let mut state = self.state.load(Ordering::Relaxed);
-        loop {
-            // Grab the lock if it isn't locked, even if there are other
-            // threads parked.
-            if let Some(new_state) = state.checked_add(EXCLUSIVE_GUARD - UPGRADABLE_GUARD) {
-                match self.state.compare_exchange_weak(
-                    state,
-                    new_state,
-                    Ordering::Acquire,
-                    Ordering::Relaxed,
-                ) {
-                    Ok(_) => return true,
-                    Err(x) => state = x,
-                }
-                continue;
-            }
-
-            // If there are no parked threads and only one other reader, try
-            // spinning a few times.
-            if state == UPGRADABLE_GUARD | SHARED_GUARD && spinwait.spin() {
-                state = self.state.load(Ordering::Relaxed);
-                continue;
-            }
-
-            // Park our thread until we are woken up by an unlock
-            unsafe {
-                let addr = self as *const _ as usize;
-                let validate = || {
-                    let mut state = self.state.load(Ordering::Relaxed);
-                    loop {
-                        // If the rwlock is free, abort the park and try to grab
-                        // it immediately.
-                        if state & GUARD_COUNT_MASK == UPGRADABLE_GUARD {
-                            return false;
-                        }
-
-                        // Set the upgrading and parked bits
-                        match self.state.compare_exchange_weak(
-                            state,
-                            state | (UPGRADING_BIT | PARKED_BIT),
-                            Ordering::Relaxed,
-                            Ordering::Relaxed,
-                        ) {
-                            Ok(_) => return true,
-                            Err(x) => state = x,
-                        }
-                    }
-                };
-                let before_sleep = || {};
-                let timed_out = |_, was_last_thread| {
-                    // Clear the upgrading bit
-                    let mut flags = UPGRADING_BIT;
-
-                    // Clear the parked bit if we were the last parked thread
-                    if was_last_thread {
-                        flags |= PARKED_BIT;
-                    }
-
-                    self.state.fetch_and(!flags, Ordering::Relaxed);
-                };
-                match parking_lot_core::park(
-                    addr,
-                    validate,
-                    before_sleep,
-                    timed_out,
-                    TOKEN_UPGRADING,
-                    timeout,
-                ) {
-                    // The thread that unparked us passed the lock on to us
-                    // directly without unlocking it.
-                    ParkResult::Unparked(TOKEN_HANDOFF) => return true,
-
-                    // We were unparked normally, try acquiring the lock again
-                    ParkResult::Unparked(_) => (),
-
-                    // The validation function failed, try locking again
-                    ParkResult::Invalid => (),
-
-                    // Timeout expired
-                    ParkResult::TimedOut => return false,
-                }
-            }
-
-            // Loop back and try locking again
-            spinwait.reset();
-            state = self.state.load(Ordering::Relaxed);
-        }
-    }
-
-    #[cold]
-    #[inline(never)]
-    fn bump_shared_slow(&self) {
+    #[inline]
+    fn deadlock_release(&self) {
         unsafe { deadlock::release_resource(self as *const _ as usize) };
-        self.unlock_shared_slow(true);
-        self.lock_shared();
-    }
-
-    #[cold]
-    #[inline(never)]
-    fn bump_exclusive_slow(&self) {
-        unsafe { deadlock::release_resource(self as *const _ as usize) };
-        self.unlock_exclusive_slow(true);
-        self.lock_exclusive();
-    }
-
-    #[cold]
-    #[inline(never)]
-    fn bump_upgradable_slow(&self) {
-        unsafe { deadlock::release_resource(self as *const _ as usize) };
-        self.unlock_upgradable_slow(true);
-        self.lock_upgradable();
+        unsafe { deadlock::release_resource(self as *const _ as usize + 1) };
     }
 }
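
The rewritten RawRwLock packs the reader count and the WRITER_BIT / UPGRADABLE_BIT / PARKED_BIT / WRITER_PARKED_BIT flags into a single atomic word, and parks a writer that is waiting for readers on a second queue keyed at `addr + 1`. A minimal sketch of how the upgradable path above is reached through the public parking_lot API; `bump_if_small` is an illustrative helper, not part of this patch:

    use parking_lot::{RwLock, RwLockUpgradableReadGuard};

    fn bump_if_small(lock: &RwLock<u32>) {
        // At most one upgradable/writer holder at a time (UPGRADABLE_BIT),
        // but plain readers may still coexist with it.
        let upgradable = lock.upgradable_read();
        if *upgradable < 10 {
            // upgrade() waits for the remaining readers to drain; that is
            // the path wait_for_readers() services, parked on addr + 1.
            let mut writer = RwLockUpgradableReadGuard::upgrade(upgradable);
            *writer += 1;
        }
    }

    fn main() {
        let lock = RwLock::new(3u32);
        bump_if_small(&lock);
        assert_eq!(*lock.read(), 4);
    }

`RwLockUpgradableReadGuard::upgrade` is the call that ends up in `upgrade_slow` when other readers are still active.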
--- a/third_party/rust/parking_lot/src/remutex.rs
+++ b/third_party/rust/parking_lot/src/remutex.rs
@@ -1,17 +1,17 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
+use crate::raw_mutex::RawMutex;
 use lock_api::{self, GetThreadId};
-use raw_mutex::RawMutex;
 
 /// Implementation of the `GetThreadId` trait for `lock_api::ReentrantMutex`.
 pub struct RawThreadId;
 
 unsafe impl GetThreadId for RawThreadId {
     const INIT: RawThreadId = RawThreadId;
 
     fn nonzero_thread_id(&self) -> usize {
@@ -35,35 +35,37 @@ unsafe impl GetThreadId for RawThreadId 
 /// primitive.
 pub type ReentrantMutex<T> = lock_api::ReentrantMutex<RawMutex, RawThreadId, T>;
 
 /// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure
 /// is dropped (falls out of scope), the lock will be unlocked.
 ///
 /// The data protected by the mutex can be accessed through this guard via its
 /// `Deref` implementation.
-pub type ReentrantMutexGuard<'a, T> =
-    lock_api::ReentrantMutexGuard<'a, RawMutex, RawThreadId, T>;
+pub type ReentrantMutexGuard<'a, T> = lock_api::ReentrantMutexGuard<'a, RawMutex, RawThreadId, T>;
 
 /// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a
 /// subfield of the protected data.
 ///
 /// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the
 /// former doesn't support temporarily unlocking and re-locking, since that
 /// could introduce soundness issues if the locked object is modified by another
 /// thread.
 pub type MappedReentrantMutexGuard<'a, T> =
     lock_api::MappedReentrantMutexGuard<'a, RawMutex, RawThreadId, T>;
 
 #[cfg(test)]
 mod tests {
+    use crate::ReentrantMutex;
     use std::cell::RefCell;
     use std::sync::Arc;
     use std::thread;
-    use ReentrantMutex;
+
+    #[cfg(feature = "serde")]
+    use bincode::{deserialize, serialize};
 
     #[test]
     fn smoke() {
         let m = ReentrantMutex::new(());
         {
             let a = m.lock();
             {
                 let b = m.lock();
@@ -98,29 +100,34 @@ mod tests {
     fn trylock_works() {
         let m = Arc::new(ReentrantMutex::new(()));
         let m2 = m.clone();
         let _lock = m.try_lock();
         let _lock2 = m.try_lock();
         thread::spawn(move || {
             let lock = m2.try_lock();
             assert!(lock.is_none());
-        }).join()
-            .unwrap();
+        })
+        .join()
+        .unwrap();
         let _lock3 = m.try_lock();
     }
 
     #[test]
     fn test_reentrant_mutex_debug() {
         let mutex = ReentrantMutex::new(vec![0u8, 10]);
 
         assert_eq!(format!("{:?}", mutex), "ReentrantMutex { data: [0, 10] }");
-        assert_eq!(
-            format!("{:#?}", mutex),
-            "ReentrantMutex {
-    data: [
-        0,
-        10
-    ]
-}"
-        );
+    }
+
+    #[cfg(feature = "serde")]
+    #[test]
+    fn test_serde() {
+        let contents: Vec<u8> = vec![0, 1, 2];
+        let mutex = ReentrantMutex::new(contents.clone());
+
+        let serialized = serialize(&mutex).unwrap();
+        let deserialized: ReentrantMutex<Vec<u8>> = deserialize(&serialized).unwrap();
+
+        assert_eq!(*(mutex.lock()), *(deserialized.lock()));
+        assert_eq!(contents, *(deserialized.lock()));
     }
 }
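
The remutex tests now import through `crate::` paths and add a bincode round-trip gated on the optional `serde` feature. A small sketch of the reentrancy guarantee the smoke test above exercises; illustrative only, not part of this patch:

    use parking_lot::ReentrantMutex;

    fn main() {
        let m = ReentrantMutex::new(5u32);
        let outer = m.lock();
        // Re-locking on the thread that already owns the mutex succeeds
        // immediately instead of deadlocking; ownership is tracked per
        // thread via RawThreadId.
        let inner = m.lock();
        assert_eq!(*outer, *inner);
    }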
--- a/third_party/rust/parking_lot/src/rwlock.rs
+++ b/third_party/rust/parking_lot/src/rwlock.rs
@@ -1,17 +1,17 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
+use crate::raw_rwlock::RawRwLock;
 use lock_api;
-use raw_rwlock::RawRwLock;
 
 /// A reader-writer lock
 ///
 /// This type of lock allows a number of readers or at most one writer at any
 /// point in time. The write portion of this lock typically allows modification
 /// of the underlying data (exclusive access) and the read portion of this lock
 /// typically allows for read-only access (shared access).
 ///
@@ -111,29 +111,30 @@ pub type MappedRwLockReadGuard<'a, T> = 
 /// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the
 /// former doesn't support temporarily unlocking and re-locking, since that
 /// could introduce soundness issues if the locked object is modified by another
 /// thread.
 pub type MappedRwLockWriteGuard<'a, T> = lock_api::MappedRwLockWriteGuard<'a, RawRwLock, T>;
 
 /// RAII structure used to release the upgradable read access of a lock when
 /// dropped.
-pub type RwLockUpgradableReadGuard<'a, T> =
-    lock_api::RwLockUpgradableReadGuard<'a, RawRwLock, T>;
+pub type RwLockUpgradableReadGuard<'a, T> = lock_api::RwLockUpgradableReadGuard<'a, RawRwLock, T>;
 
 #[cfg(test)]
 mod tests {
-    extern crate rand;
-    use self::rand::Rng;
+    use crate::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
+    use rand::Rng;
     use std::sync::atomic::{AtomicUsize, Ordering};
     use std::sync::mpsc::channel;
     use std::sync::Arc;
     use std::thread;
     use std::time::Duration;
-    use {RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
+
+    #[cfg(feature = "serde")]
+    use bincode::{deserialize, serialize};
 
     #[derive(Eq, PartialEq, Debug)]
     struct NonCopy(i32);
 
     #[test]
     fn smoke() {
         let l = RwLock::new(());
         drop(l.read());
@@ -173,53 +174,57 @@ mod tests {
 
     #[test]
     fn test_rw_arc_no_poison_wr() {
         let arc = Arc::new(RwLock::new(1));
         let arc2 = arc.clone();
         let _: Result<(), _> = thread::spawn(move || {
             let _lock = arc2.write();
             panic!();
-        }).join();
+        })
+        .join();
         let lock = arc.read();
         assert_eq!(*lock, 1);
     }
 
     #[test]
     fn test_rw_arc_no_poison_ww() {
         let arc = Arc::new(RwLock::new(1));
         let arc2 = arc.clone();
         let _: Result<(), _> = thread::spawn(move || {
             let _lock = arc2.write();
             panic!();
-        }).join();
+        })
+        .join();
         let lock = arc.write();
         assert_eq!(*lock, 1);
     }
 
     #[test]
     fn test_rw_arc_no_poison_rr() {
         let arc = Arc::new(RwLock::new(1));
         let arc2 = arc.clone();
         let _: Result<(), _> = thread::spawn(move || {
             let _lock = arc2.read();
             panic!();
-        }).join();
+        })
+        .join();
         let lock = arc.read();
         assert_eq!(*lock, 1);
     }
 
     #[test]
     fn test_rw_arc_no_poison_rw() {
         let arc = Arc::new(RwLock::new(1));
         let arc2 = arc.clone();
         let _: Result<(), _> = thread::spawn(move || {
             let _lock = arc2.read();
             panic!()
-        }).join();
+        })
+        .join();
         let lock = arc.write();
         assert_eq!(*lock, 1);
     }
 
     #[test]
     fn test_ruw_arc() {
         let arc = Arc::new(RwLock::new(0));
         let arc2 = arc.clone();
@@ -324,17 +329,18 @@ mod tests {
             impl Drop for Unwinder {
                 fn drop(&mut self) {
                     let mut lock = self.i.write();
                     *lock += 1;
                 }
             }
             let _u = Unwinder { i: arc2 };
             panic!();
-        }).join();
+        })
+        .join();
         let lock = arc.read();
         assert_eq!(*lock, 2);
     }
 
     #[test]
     fn test_rwlock_unsized() {
         let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
         {
@@ -525,40 +531,52 @@ mod tests {
     #[test]
     fn test_rwlock_recursive() {
         let arc = Arc::new(RwLock::new(1));
         let arc2 = arc.clone();
         let _lock1 = arc.read();
         thread::spawn(move || {
             let _lock = arc2.write();
         });
-        thread::sleep(Duration::from_millis(100));
+
+        if cfg!(not(all(target_env = "sgx", target_vendor = "fortanix"))) {
+            thread::sleep(Duration::from_millis(100));
+        } else {
+            // FIXME: https://github.com/fortanix/rust-sgx/issues/31
+            for _ in 0..100 {
+                thread::yield_now();
+            }
+        }
 
         // A normal read would block here since there is a pending writer
         let _lock2 = arc.read_recursive();
     }
 
     #[test]
     fn test_rwlock_debug() {
         let x = RwLock::new(vec![0u8, 10]);
 
         assert_eq!(format!("{:?}", x), "RwLock { data: [0, 10] }");
-        assert_eq!(
-            format!("{:#?}", x),
-            "RwLock {
-    data: [
-        0,
-        10
-    ]
-}"
-        );
         let _lock = x.write();
-        assert_eq!(format!("{:?}", x), "RwLock { <locked> }");
+        assert_eq!(format!("{:?}", x), "RwLock { data: <locked> }");
     }
 
     #[test]
     fn test_clone() {
         let rwlock = RwLock::new(Arc::new(1));
         let a = rwlock.read_recursive();
         let b = a.clone();
         assert_eq!(Arc::strong_count(&b), 2);
     }
+
+    #[cfg(feature = "serde")]
+    #[test]
+    fn test_serde() {
+        let contents: Vec<u8> = vec![0, 1, 2];
+        let mutex = RwLock::new(contents.clone());
+
+        let serialized = serialize(&mutex).unwrap();
+        let deserialized: RwLock<Vec<u8>> = deserialize(&serialized).unwrap();
+
+        assert_eq!(*(mutex.read()), *(deserialized.read()));
+        assert_eq!(contents, *(deserialized.read()));
+    }
 }
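
Besides the `serde` round-trip and the SGX-friendly yield loop, the Debug output for a held lock changes to `RwLock { data: <locked> }`. A hedged sketch of how the `Option<Instant>` deadline threaded through `lock_common` surfaces in the public timed API; illustrative only, not part of this patch:

    use parking_lot::RwLock;
    use std::time::Duration;

    fn main() {
        let lock = RwLock::new(0u32);
        let _reader = lock.read();

        // With a reader still active the writer cannot get in, so the timed
        // attempt gives up once the deadline computed from the Duration passes.
        assert!(lock.try_write_for(Duration::from_millis(10)).is_none());
    }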
--- a/third_party/rust/parking_lot/src/util.rs
+++ b/third_party/rust/parking_lot/src/util.rs
@@ -1,15 +1,17 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
+use std::time::{Duration, Instant};
+
 // Option::unchecked_unwrap
 pub trait UncheckedOptionExt<T> {
     unsafe fn unchecked_unwrap(self) -> T;
 }
 
 impl<T> UncheckedOptionExt<T> for Option<T> {
     #[inline]
     unsafe fn unchecked_unwrap(self) -> T {
@@ -25,8 +27,18 @@ impl<T> UncheckedOptionExt<T> for Option
 unsafe fn unreachable() -> ! {
     if cfg!(debug_assertions) {
         unreachable!();
     } else {
         enum Void {}
         match *(1 as *const Void) {}
     }
 }
+
+#[inline]
+pub fn to_deadline(timeout: Duration) -> Option<Instant> {
+    #[cfg(has_checked_instant)]
+    let deadline = Instant::now().checked_add(timeout);
+    #[cfg(not(has_checked_instant))]
+    let deadline = Some(Instant::now() + timeout);
+
+    deadline
+}
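
`to_deadline` converts a relative `Duration` into an absolute `Option<Instant>`, preferring `Instant::checked_add` when the `has_checked_instant` cfg is available (presumably emitted by the new build.rs). A standalone sketch of the same conversion with only the checked branch, showing why overflow is mapped to "no deadline"; illustrative only:

    use std::time::{Duration, Instant};

    // Same shape as util::to_deadline, checked branch only.
    fn to_deadline(timeout: Duration) -> Option<Instant> {
        Instant::now().checked_add(timeout)
    }

    fn main() {
        assert!(to_deadline(Duration::from_secs(1)).is_some());
        // An absurdly large timeout overflows Instant; returning None instead
        // of panicking lets the parking code treat it as waiting without a deadline.
        assert!(to_deadline(Duration::from_secs(u64::MAX)).is_none());
    }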
--- a/third_party/rust/parking_lot_core/.cargo-checksum.json
+++ b/third_party/rust/parking_lot_core/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{"Cargo.toml":"220144666e4c0a4b3b3235e7d3b10f4f34cb3b8ca292ee19437f23c9a15758de","src/lib.rs":"e80f927665ef24660878e5e4a4ea3c26892c2849889d59aacee6beb59d02020d","src/parking_lot.rs":"2da388ff4c13003fc30531bb6110e4feedac30ad3ce905912e657711a6b0fdad","src/spinwait.rs":"cbd2d2464ef6fa5fb05109bdb3ca588467949dcd4ee9194deafef6004d10215e","src/thread_parker/generic.rs":"0c30db3d1c96bd5ef284a4761a829aba8d21fc813b3d1d70b2baf5f00744e006","src/thread_parker/linux.rs":"1c4c023ebb58fcc16451683c6c8b68311e87ab34537dc17a060ddf5aad02a215","src/thread_parker/unix.rs":"dc6f4af965618cc2d87d3bef6455ba78b44ffe5b38dff9d41fb86e1526cbbcd1","src/thread_parker/windows/keyed_event.rs":"efe64f7bcdfe03049a7b901d2573bc7db1bb73b8ab4a040245423d95c8f9514f","src/thread_parker/windows/mod.rs":"f31eed53f3e402477d80a70a7c6d474c01ba4c9ad952bbe562509448cd3cc1ad","src/thread_parker/windows/waitaddress.rs":"09d1e6a5a6c3f23f375ae4beee946290f7c66d183e69d476ce69b21a4a5aa7af","src/util.rs":"2d07c0c010a857790ae2ed6a1215eeed8af76859e076797ea1ba8dec82169e84","src/word_lock.rs":"692f443c52672c6e88c0cad259cf7c89dc2a1b54aa95eeeea582401b2a7d058d"},"package":"4db1a8ccf734a7bce794cc19b3df06ed87ab2f3907036b693c68f56b4d4537fa"}
\ No newline at end of file
+{"files":{"Cargo.toml":"99e468923e11bcd61cd9961fd5c0a8e0151fae5b6695c1aaaa802a9ee790b91b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","build.rs":"d6aa24b67fdcacf238778c5efaf1f622ec7f7a7ec27fa051f415a1e2d31f3532","src/lib.rs":"4b754002784224bf94136139eadc30136c638feacdc0d25dab44834a29805e60","src/parking_lot.rs":"ecda8f1230f796d4f8014699f13436b42a3e3b02edf2694cff5d02497d0d8f19","src/spinwait.rs":"d568d8a81f9144ec4c4a139dc934d7d04ee1656a4a221eb548742fe7aba09ab1","src/thread_parker/cloudabi.rs":"8096eefdf3a7b6fe1af223b548eabae067e4a838e49f1834b3dbb92c6c10169f","src/thread_parker/generic.rs":"fb89e50fba40956e2322a4aa8bd409cf14186c757a6a525cca3e71215b814e59","src/thread_parker/linux.rs":"d52fc55e2c17e9111d5d5a00efe58a87d0e72def22f18f1f34f5364744c79ff6","src/thread_parker/redox.rs":"4fa0ac04dcc740ebab57653dc685853d9fb950af545bbba93dbe61d985788a8e","src/thread_parker/sgx.rs":"0e30172ecf48c56bc85e26d976661c493142eeb71bd7713e21465067256ded90","src/thread_parker/unix.rs":"09418fec4845d0d6cc3039c4196cec7c96d53e65d552eb1b0c0a88fb6f72dd3e","src/thread_parker/wasm.rs":"29f5c518184a73f83d00097c7f3747406b008dc112937520414e5d41e50f2779","src/thread_parker/windows/keyed_event.rs":"e0c2ed647e0550bffa003160405b5f4ddd40500134c2eb15c3eb598792c30e84","src/thread_parker/windows/mod.rs":"7252790b6d1126d773f17760692e3664c140abebea9930058c84113bedd3b48d","src/thread_parker/windows/waitaddress.rs":"06d994633006e237dc940f377432ea00cf1609e56096d69d46f7bb3b80eeb857","src/util.rs":"2d07c0c010a857790ae2ed6a1215eeed8af76859e076797ea1ba8dec82169e84","src/word_lock.rs":"471b4fdf7877da693d26f5b80c732120af752f6eaffc65d4ca316bca3601e44a"},"package":"cb88cb1cb3790baa6776844f968fea3be44956cf184fa1be5a03341f5491278c"}
\ No newline at end of file
--- a/third_party/rust/parking_lot_core/Cargo.toml
+++ b/third_party/rust/parking_lot_core/Cargo.toml
@@ -1,47 +1,57 @@
 # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
 #
 # When uploading crates to the registry Cargo will automatically
 # "normalize" Cargo.toml files for maximal compatibility
 # with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g. crates.io) dependencies
+# to registry (e.g., crates.io) dependencies
 #
 # If you believe there's an error in this file please file an
 # issue against the rust-lang/cargo repository. If you're
 # editing this file be aware that the upstream Cargo.toml
 # will likely look very different (and much more reasonable)
 
 [package]
+edition = "2018"
 name = "parking_lot_core"
-version = "0.2.14"
+version = "0.5.0"
 authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
 description = "An advanced API for creating custom synchronization primitives."
-documentation = "https://amanieu.github.io/parking_lot/parking_lot_core/index.html"
 keywords = ["mutex", "condvar", "rwlock", "once", "thread"]
+categories = ["concurrency"]
 license = "Apache-2.0/MIT"
 repository = "https://github.com/Amanieu/parking_lot"
 [dependencies.backtrace]
 version = "0.3.2"
 optional = true
 
+[dependencies.cfg-if]
+version = "0.1"
+
 [dependencies.petgraph]
 version = "0.4.5"
 optional = true
 
 [dependencies.rand]
-version = "0.4"
+version = "0.6"
 
 [dependencies.smallvec]
 version = "0.6"
 
 [dependencies.thread-id]
 version = "3.2.0"
 optional = true
+[build-dependencies.rustc_version]
+version = "0.2"
 
 [features]
 deadlock_detection = ["petgraph", "thread-id", "backtrace"]
 nightly = []
+[target."cfg(target_os = \"cloudabi\")".dependencies.cloudabi]
+version = "0.0.3"
+[target."cfg(target_os = \"redox\")".dependencies.redox_syscall]
+version = "0.1"
 [target."cfg(unix)".dependencies.libc]
 version = "0.2.27"
 [target."cfg(windows)".dependencies.winapi]
 version = "0.3"
 features = ["winnt", "ntstatus", "minwindef", "winerror", "winbase", "errhandlingapi", "handleapi"]
new file mode 100644
--- /dev/null
+++ b/third_party/rust/parking_lot_core/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+
+   "Legal Entity" shall mean the union of the acting entity and all
+   other entities that control, are controlled by, or are under common
+   control with that entity. For the purposes of this definition,
+   "control" means (i) the power, direct or indirect, to cause the
+   direction or management of such entity, whether by contract or
+   otherwise, or (ii) ownership of fifty percent (50%) or more of the
+   outstanding shares, or (iii) beneficial ownership of such entity.
+
+   "You" (or "Your") shall mean an individual or Legal Entity
+   exercising permissions granted by this License.
+
+   "Source" form shall mean the preferred form for making modifications,
+   including but not limited to software source code, documentation
+   source, and configuration files.
+
+   "Object" form shall mean any form resulting from mechanical
+   transformation or translation of a Source form, including but
+   not limited to compiled object code, generated documentation,
+   and conversions to other media types.
+
+   "Work" shall mean the work of authorship, whether in Source or
+   Object form, made available under the License, as indicated by a
+   copyright notice that is included in or attached to the work
+   (an example is provided in the Appendix below).
+
+   "Derivative Works" shall mean any work, whether in Source or Object
+   form, that is based on (or derived from) the Work and for which the
+   editorial revisions, annotations, elaborations, or other modifications
+   represent, as a whole, an original work of authorship. For the purposes
+   of this License, Derivative Works shall not include works that remain
+   separable from, or merely link (or bind by name) to the interfaces of,
+   the Work and Derivative Works thereof.
+
+   "Contribution" shall mean any work of authorship, including
+   the original version of the Work and any modifications or additions
+   to that Work or Derivative Works thereof, that is intentionally
+   submitted to Licensor for inclusion in the Work by the copyright owner
+   or by an individual or Legal Entity authorized to submit on behalf of
+   the copyright owner. For the purposes of this definition, "submitted"
+   means any form of electronic, verbal, or written communication sent
+   to the Licensor or its representatives, including but not limited to
+   communication on electronic mailing lists, source code control systems,
+   and issue tracking systems that are managed by, or on behalf of, the
+   Licensor for the purpose of discussing and improving the Work, but
+   excluding communication that is conspicuously marked or otherwise
+   designated in writing by the copyright owner as "Not a Contribution."
+
+   "Contributor" shall mean Licensor and any individual or Legal Entity
+   on behalf of whom a Contribution has been received by Licensor and
+   subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   copyright license to reproduce, prepare Derivative Works of,
+   publicly display, publicly perform, sublicense, and distribute the
+   Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+   this License, each Contributor hereby grants to You a perpetual,
+   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+   (except as stated in this section) patent license to make, have made,
+   use, offer to sell, sell, import, and otherwise transfer the Work,
+   where such license applies only to those patent claims licensable
+   by such Contributor that are necessarily infringed by their
+   Contribution(s) alone or by combination of their Contribution(s)
+   with the Work to which such Contribution(s) was submitted. If You
+   institute patent litigation against any entity (including a
+   cross-claim or counterclaim in a lawsuit) alleging that the Work
+   or a Contribution incorporated within the Work constitutes direct
+   or contributory patent infringement, then any patent licenses
+   granted to You under this License for that Work shall terminate
+   as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+   Work or Derivative Works thereof in any medium, with or without
+   modifications, and in Source or Object form, provided that You
+   meet the following conditions:
+
+   (a) You must give any other recipients of the Work or
+       Derivative Works a copy of this License; and
+
+   (b) You must cause any modified files to carry prominent notices
+       stating that You changed the files; and
+
+   (c) You must retain, in the Source form of any Derivative Works
+       that You distribute, all copyright, patent, trademark, and
+       attribution notices from the Source form of the Work,
+       excluding those notices that do not pertain to any part of
+       the Derivative Works; and
+
+   (d) If the Work includes a "NOTICE" text file as part of its
+       distribution, then any Derivative Works that You distribute must
+       include a readable copy of the attribution notices contained
+       within such NOTICE file, excluding those notices that do not
+       pertain to any part of the Derivative Works, in at least one
+       of the following places: within a NOTICE text file distributed
+       as part of the Derivative Works; within the Source form or
+       documentation, if provided along with the Derivative Works; or,
+       within a display generated by the Derivative Works, if and
+       wherever such third-party notices normally appear. The contents
+       of the NOTICE file are for informational purposes only and
+       do not modify the License. You may add Your own attribution
+       notices within Derivative Works that You distribute, alongside
+       or as an addendum to the NOTICE text from the Work, provided
+       that such additional attribution notices cannot be construed
+       as modifying the License.
+
+   You may add Your own copyright statement to Your modifications and
+   may provide additional or different license terms and conditions
+   for use, reproduction, or distribution of Your modifications, or
+   for any such Derivative Works as a whole, provided Your use,
+   reproduction, and distribution of the Work otherwise complies with
+   the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+   any Contribution intentionally submitted for inclusion in the Work
+   by You to the Licensor shall be under the terms and conditions of
+   this License, without any additional terms or conditions.
+   Notwithstanding the above, nothing herein shall supersede or modify
+   the terms of any separate license agreement you may have executed
+   with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+   names, trademarks, service marks, or product names of the Licensor,
+   except as required for reasonable and customary use in describing the
+   origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+   agreed to in writing, Licensor provides the Work (and each
+   Contributor provides its Contributions) on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied, including, without limitation, any warranties or conditions
+   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+   PARTICULAR PURPOSE. You are solely responsible for determining the
+   appropriateness of using or redistributing the Work and assume any
+   risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!)  The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/parking_lot_core/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2016 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
new file mode 100644
--- /dev/null
+++ b/third_party/rust/parking_lot_core/build.rs
@@ -0,0 +1,7 @@
+use rustc_version::{version, Version};
+
+fn main() {
+    if version().unwrap() >= Version::parse("1.34.0").unwrap() {
+        println!("cargo:rustc-cfg=has_sized_atomics");
+    }
+}
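
The build script above probes the toolchain with rustc_version and prints cargo:rustc-cfg=has_sized_atomics on Rust 1.34 or newer, the release that stabilized the sized integer atomics (AtomicU8 through AtomicU64); lib.rs below gates its thread-parker selection on that flag. A minimal sketch of the consuming side, with a hypothetical function name:

// Hypothetical consumer of the emitted cfg: prefer a futex-sized 32-bit
// atomic when the toolchain supports it, otherwise fall back to usize.
#[cfg(has_sized_atomics)]
fn park_word_bits() -> u32 {
    use std::sync::atomic::{AtomicU32, Ordering};
    static WORD: AtomicU32 = AtomicU32::new(0);
    WORD.load(Ordering::Relaxed)
}

#[cfg(not(has_sized_atomics))]
fn park_word_bits() -> u32 {
    use std::sync::atomic::{AtomicUsize, Ordering};
    static WORD: AtomicUsize = AtomicUsize::new(0);
    WORD.load(Ordering::Relaxed) as u32
}

fn main() {
    assert_eq!(park_word_bits(), 0);
}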
--- a/third_party/rust/parking_lot_core/src/lib.rs
+++ b/third_party/rust/parking_lot_core/src/lib.rs
@@ -33,49 +33,72 @@
 //! Building custom synchronization primitives is very simple since the parking
 //! lot takes care of all the hard parts for you. A simple example for a
 //! custom primitive would be to integrate a `Mutex` inside another data type.
 //! Since a mutex only requires 2 bits, it can share space with other data.
 //! For example, one could create an `ArcMutex` type that combines the atomic
 //! reference count and the two mutex bits in the same atomic word.
 
 #![warn(missing_docs)]
-#![cfg_attr(all(feature = "nightly", target_os = "linux"), feature(integer_atomics))]
-
-extern crate rand;
-extern crate smallvec;
+#![warn(rust_2018_idioms)]
+#![cfg_attr(
+    all(target_env = "sgx", target_vendor = "fortanix"),
+    feature(sgx_platform)
+)]
+#![cfg_attr(
+    all(
+        feature = "nightly",
+        target_arch = "wasm32",
+        target_feature = "atomics"
+    ),
+    feature(checked_duration_since, stdsimd)
+)]
+#![cfg_attr(
+    all(feature = "nightly", target_os = "cloudabi",),
+    feature(thread_local, checked_duration_since)
+)]
 
-#[cfg(feature = "deadlock_detection")]
-extern crate backtrace;
-#[cfg(feature = "deadlock_detection")]
-extern crate petgraph;
-#[cfg(feature = "deadlock_detection")]
-extern crate thread_id;
-
-#[cfg(unix)]
-extern crate libc;
-
-#[cfg(windows)]
-extern crate winapi;
+use cfg_if::cfg_if;
 
-#[cfg(all(feature = "nightly", target_os = "linux"))]
-#[path = "thread_parker/linux.rs"]
-mod thread_parker;
-#[cfg(all(unix, not(all(feature = "nightly", target_os = "linux"))))]
-#[path = "thread_parker/unix.rs"]
-mod thread_parker;
-#[cfg(windows)]
-#[path = "thread_parker/windows/mod.rs"]
-mod thread_parker;
-#[cfg(not(any(windows, unix)))]
-#[path = "thread_parker/generic.rs"]
-mod thread_parker;
+cfg_if! {
+    if #[cfg(all(has_sized_atomics, target_os = "linux"))] {
+        #[path = "thread_parker/linux.rs"]
+        mod thread_parker;
+    } else if #[cfg(unix)] {
+        #[path = "thread_parker/unix.rs"]
+        mod thread_parker;
+    } else if #[cfg(windows)] {
+        #[path = "thread_parker/windows/mod.rs"]
+        mod thread_parker;
+    } else if #[cfg(all(has_sized_atomics, target_os = "redox"))] {
+        #[path = "thread_parker/redox.rs"]
+        mod thread_parker;
+    } else if #[cfg(all(target_env = "sgx", target_vendor = "fortanix"))] {
+        #[path = "thread_parker/sgx.rs"]
+        mod thread_parker;
+    } else if #[cfg(all(
+        feature = "nightly",
+        target_arch = "wasm32",
+        target_feature = "atomics"
+    ))] {
+        #[path = "thread_parker/wasm.rs"]
+        mod thread_parker;
+    } else if #[cfg(all(feature = "nightly", target_os = "cloudabi"))] {
+        #[path = "thread_parker/cloudabi.rs"]
+        mod thread_parker;
+    } else {
+        #[path = "thread_parker/generic.rs"]
+        mod thread_parker;
+    }
+}
 
-mod util;
+mod parking_lot;
 mod spinwait;
+mod util;
 mod word_lock;
-mod parking_lot;
 
-pub use parking_lot::{FilterOp, ParkResult, ParkToken, RequeueOp, UnparkResult, UnparkToken};
-pub use parking_lot::{DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
-pub use parking_lot::{park, unpark_all, unpark_filter, unpark_one, unpark_requeue};
-pub use spinwait::SpinWait;
-pub use parking_lot::deadlock;
+pub use self::parking_lot::deadlock;
+pub use self::parking_lot::{park, unpark_all, unpark_filter, unpark_one, unpark_requeue};
+pub use self::parking_lot::{
+    FilterOp, ParkResult, ParkToken, RequeueOp, UnparkResult, UnparkToken,
+};
+pub use self::parking_lot::{DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
+pub use self::spinwait::SpinWait;
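
lib.rs replaces the old chain of mutually exclusive #[cfg(...)] #[path = ...] mod declarations with a single cfg_if! block from the new cfg-if dependency, so the first matching branch wins and the generic parker is the guaranteed fallback. A minimal self-contained sketch of the cfg-if 0.1 macro (illustrative items only, not the real thread_parker modules):

use cfg_if::cfg_if;

cfg_if! {
    if #[cfg(windows)] {
        fn backend() -> &'static str { "windows" }
    } else if #[cfg(unix)] {
        fn backend() -> &'static str { "unix" }
    } else {
        fn backend() -> &'static str { "generic" }
    }
}

fn main() {
    // Exactly one branch above is compiled; the rest are discarded.
    println!("selected parker backend: {}", backend());
}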
--- a/third_party/rust/parking_lot_core/src/parking_lot.rs
+++ b/third_party/rust/parking_lot_core/src/parking_lot.rs
@@ -1,31 +1,29 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
+use crate::thread_parker::ThreadParker;
+use crate::util::UncheckedOptionExt;
+use crate::word_lock::WordLock;
+use core::{
+    cell::{Cell, UnsafeCell},
+    ptr,
+    sync::atomic::{AtomicPtr, AtomicUsize, Ordering},
+};
+use rand::{rngs::SmallRng, FromEntropy, Rng};
+use smallvec::SmallVec;
 use std::time::{Duration, Instant};
-use std::cell::{Cell, UnsafeCell};
-use std::ptr;
-use std::mem;
-use std::thread::LocalKey;
-#[cfg(not(feature = "nightly"))]
-use std::panic;
-use smallvec::SmallVec;
-use rand::{self, Rng, XorShiftRng};
-use thread_parker::ThreadParker;
-use word_lock::WordLock;
-use util::UncheckedOptionExt;
 
-static NUM_THREADS: AtomicUsize = ATOMIC_USIZE_INIT;
-static HASHTABLE: AtomicUsize = ATOMIC_USIZE_INIT;
+static NUM_THREADS: AtomicUsize = AtomicUsize::new(0);
+static HASHTABLE: AtomicPtr<HashTable> = AtomicPtr::new(ptr::null_mut());
 
 // Even with 3x more buckets than threads, the memory overhead per thread is
 // still only a few hundred bytes per thread.
 const LOAD_FACTOR: usize = 3;
 
 struct HashTable {
     // Hash buckets for the table
     entries: Box<[Bucket]>,
@@ -33,81 +31,79 @@ struct HashTable {
     // Number of bits used for the hash function
     hash_bits: u32,
 
     // Previous table. This is only kept to keep leak detectors happy.
     _prev: *const HashTable,
 }
 
 impl HashTable {
+    #[inline]
     fn new(num_threads: usize, prev: *const HashTable) -> Box<HashTable> {
         let new_size = (num_threads * LOAD_FACTOR).next_power_of_two();
         let hash_bits = 0usize.leading_zeros() - new_size.leading_zeros() - 1;
-        let bucket = Bucket {
-            mutex: WordLock::new(),
-            queue_head: Cell::new(ptr::null()),
-            queue_tail: Cell::new(ptr::null()),
-            fair_timeout: UnsafeCell::new(FairTimeout::new()),
-            _padding: unsafe { mem::uninitialized() },
-        };
+
+        let now = Instant::now();
+        let mut entries = Vec::with_capacity(new_size);
+        for _ in 0..new_size {
+            entries.push(Bucket::new(now));
+        }
+
         Box::new(HashTable {
-            entries: vec![bucket; new_size].into_boxed_slice(),
-            hash_bits: hash_bits,
+            entries: entries.into_boxed_slice(),
+            hash_bits,
             _prev: prev,
         })
     }
 }
 
+#[repr(align(64))]
 struct Bucket {
     // Lock protecting the queue
     mutex: WordLock,
 
     // Linked list of threads waiting on this bucket
     queue_head: Cell<*const ThreadData>,
     queue_tail: Cell<*const ThreadData>,
 
     // Next time at which point be_fair should be set
     fair_timeout: UnsafeCell<FairTimeout>,
-
-    // Padding to avoid false sharing between buckets. Ideally we would just
-    // align the bucket structure to 64 bytes, but Rust doesn't support that
-    // yet.
-    _padding: [u8; 64],
 }
 
-// Implementation of Clone for Bucket, needed to make vec![] work
-impl Clone for Bucket {
-    fn clone(&self) -> Bucket {
-        Bucket {
-            mutex: WordLock::new(),
+impl Bucket {
+    #[inline]
+    pub fn new(timeout: Instant) -> Self {
+        Self {
+            mutex: WordLock::INIT,
             queue_head: Cell::new(ptr::null()),
             queue_tail: Cell::new(ptr::null()),
-            fair_timeout: UnsafeCell::new(FairTimeout::new()),
-            _padding: unsafe { mem::uninitialized() },
+            fair_timeout: UnsafeCell::new(FairTimeout::new(timeout)),
         }
     }
 }
 
 struct FairTimeout {
     // Next time at which point be_fair should be set
     timeout: Instant,
 
     // Random number generator for calculating the next timeout
-    rng: XorShiftRng,
+    rng: SmallRng,
 }
 
 impl FairTimeout {
-    fn new() -> FairTimeout {
+    #[inline]
+    fn new(timeout: Instant) -> FairTimeout {
         FairTimeout {
-            timeout: Instant::now(),
-            rng: rand::weak_rng(),
+            timeout,
+            rng: SmallRng::from_entropy(),
         }
     }
 
     // Determine whether we should force a fair unlock, and update the timeout
+    #[inline]
     fn should_timeout(&mut self) -> bool {
         let now = Instant::now();
         if now > self.timeout {
             self.timeout = now + Duration::new(0, self.rng.gen_range(0, 1000000));
             true
         } else {
             false
         }
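
This hunk is where the rand 0.6 bump from the commit message lands in parking_lot_core: rand::weak_rng() and XorShiftRng are gone, so FairTimeout now seeds a SmallRng through the FromEntropy trait and keeps using the two-argument gen_range. A small sketch of the rand 0.6 calls in the same shape:

use rand::{rngs::SmallRng, FromEntropy, Rng};
use std::time::{Duration, Instant};

// Same jitter as FairTimeout::should_timeout: up to 1ms of random delay.
fn next_fair_deadline(rng: &mut SmallRng) -> Instant {
    Instant::now() + Duration::new(0, rng.gen_range(0, 1_000_000))
}

fn main() {
    let mut rng = SmallRng::from_entropy();
    println!("next fairness deadline: {:?}", next_fair_deadline(&mut rng));
}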
@@ -129,18 +125,18 @@ struct ThreadData {
 
     // ParkToken value set by the thread when it was parked
     park_token: Cell<ParkToken>,
 
     // Is the thread parked with a timeout?
     parked_with_timeout: Cell<bool>,
 
     // Extra data for deadlock detection
-    // TODO: once supported in stable replace with #[cfg...] & remove dummy struct/impl
-    #[allow(dead_code)] deadlock_data: deadlock::DeadlockData,
+    #[cfg(feature = "deadlock_detection")]
+    deadlock_data: deadlock::DeadlockData,
 }
 
 impl ThreadData {
     fn new() -> ThreadData {
         // Keep track of the total number of live ThreadData objects and resize
         // the hash table accordingly.
         let num_threads = NUM_THREADS.fetch_add(1, Ordering::Relaxed) + 1;
         unsafe {
@@ -149,118 +145,128 @@ impl ThreadData {
 
         ThreadData {
             parker: ThreadParker::new(),
             key: AtomicUsize::new(0),
             next_in_queue: Cell::new(ptr::null()),
             unpark_token: Cell::new(DEFAULT_UNPARK_TOKEN),
             park_token: Cell::new(DEFAULT_PARK_TOKEN),
             parked_with_timeout: Cell::new(false),
+            #[cfg(feature = "deadlock_detection")]
             deadlock_data: deadlock::DeadlockData::new(),
         }
     }
 }
 
-// Returns a ThreadData structure for the current thread
-unsafe fn get_thread_data(local: &mut Option<ThreadData>) -> &ThreadData {
-    // Try to read from thread-local storage, but return None if the TLS has
-    // already been destroyed.
-    #[cfg(feature = "nightly")]
-    fn try_get_tls(key: &'static LocalKey<ThreadData>) -> Option<*const ThreadData> {
-        key.try_with(|x| x as *const ThreadData).ok()
-    }
-    #[cfg(not(feature = "nightly"))]
-    fn try_get_tls(key: &'static LocalKey<ThreadData>) -> Option<*const ThreadData> {
-        panic::catch_unwind(|| key.with(|x| x as *const ThreadData)).ok()
-    }
+// Invokes the given closure with a reference to the current thread `ThreadData`.
+#[inline(always)]
+fn with_thread_data<F, T>(f: F) -> T
+where
+    F: FnOnce(&ThreadData) -> T,
+{
+    // Unlike word_lock::ThreadData, parking_lot::ThreadData is always expensive
+    // to construct. Try to use a thread-local version if possible. Otherwise just
+    // create a ThreadData on the stack
+    let mut thread_data_storage = None;
+    thread_local!(static THREAD_DATA: ThreadData = ThreadData::new());
+    let thread_data_ptr = THREAD_DATA
+        .try_with(|x| x as *const ThreadData)
+        .unwrap_or_else(|_| thread_data_storage.get_or_insert_with(ThreadData::new));
 
-    // Unlike word_lock::ThreadData, parking_lot::ThreadData is always expensive
-    // to construct. Try to use a thread-local version if possible.
-    thread_local!(static THREAD_DATA: ThreadData = ThreadData::new());
-    if let Some(tls) = try_get_tls(&THREAD_DATA) {
-        return &*tls;
-    }
-
-    // Otherwise just create a ThreadData on the stack
-    *local = Some(ThreadData::new());
-    local.as_ref().unwrap()
+    f(unsafe { &*thread_data_ptr })
 }
 
 impl Drop for ThreadData {
     fn drop(&mut self) {
         NUM_THREADS.fetch_sub(1, Ordering::Relaxed);
     }
 }
 
 // Get a pointer to the latest hash table, creating one if it doesn't exist yet.
-unsafe fn get_hashtable() -> *const HashTable {
-    let mut table = HASHTABLE.load(Ordering::Acquire);
+#[inline]
+fn get_hashtable() -> *mut HashTable {
+    let table = HASHTABLE.load(Ordering::Acquire);
 
     // If there is no table, create one
-    if table == 0 {
-        let new_table = Box::into_raw(HashTable::new(LOAD_FACTOR, ptr::null()));
+    if table.is_null() {
+        create_hashtable()
+    } else {
+        table
+    }
+}
+
+// Get a pointer to the latest hash table, creating one if it doesn't exist yet.
+#[cold]
+#[inline(never)]
+fn create_hashtable() -> *mut HashTable {
+    let new_table = Box::into_raw(HashTable::new(LOAD_FACTOR, ptr::null()));
 
-        // If this fails then it means some other thread created the hash
-        // table first.
-        match HASHTABLE.compare_exchange(
-            0,
-            new_table as usize,
-            Ordering::Release,
-            Ordering::Relaxed,
-        ) {
-            Ok(_) => return new_table,
-            Err(x) => table = x,
+    // If this fails then it means some other thread created the hash
+    // table first.
+    match HASHTABLE.compare_exchange(
+        ptr::null_mut(),
+        new_table,
+        Ordering::Release,
+        Ordering::Relaxed,
+    ) {
+        Ok(_) => new_table,
+        Err(old_table) => {
+            // Free the table we created
+            unsafe {
+                Box::from_raw(new_table);
+            }
+            old_table
         }
-
-        // Free the table we created
-        Box::from_raw(new_table);
     }
-
-    table as *const HashTable
 }
 
 // Grow the hash table so that it is big enough for the given number of threads.
 // This isn't performance-critical since it is only done when a ThreadData is
 // created, which only happens once per thread.
 unsafe fn grow_hashtable(num_threads: usize) {
     // If there is no table, create one
-    if HASHTABLE.load(Ordering::Relaxed) == 0 {
+    if HASHTABLE.load(Ordering::Relaxed).is_null() {
         let new_table = Box::into_raw(HashTable::new(num_threads, ptr::null()));
 
         // If this fails then it means some other thread created the hash
         // table first.
         if HASHTABLE
-            .compare_exchange(0, new_table as usize, Ordering::Release, Ordering::Relaxed)
+            .compare_exchange(
+                ptr::null_mut(),
+                new_table,
+                Ordering::Release,
+                Ordering::Relaxed,
+            )
             .is_ok()
         {
             return;
         }
 
         // Free the table we created
         Box::from_raw(new_table);
     }
 
     let mut old_table;
     loop {
-        old_table = HASHTABLE.load(Ordering::Acquire) as *mut HashTable;
+        old_table = HASHTABLE.load(Ordering::Acquire);
 
         // Check if we need to resize the existing table
         if (*old_table).entries.len() >= LOAD_FACTOR * num_threads {
             return;
         }
 
         // Lock all buckets in the old table
         for b in &(*old_table).entries[..] {
             b.mutex.lock();
         }
 
         // Now check if our table is still the latest one. Another thread could
         // have grown the hash table between us reading HASHTABLE and locking
         // the buckets.
-        if HASHTABLE.load(Ordering::Relaxed) == old_table as usize {
+        if HASHTABLE.load(Ordering::Relaxed) == old_table {
             break;
         }
 
         // Unlock buckets and try again
         for b in &(*old_table).entries[..] {
             b.mutex.unlock();
         }
     }
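
Earlier in this hunk, HASHTABLE becomes an AtomicPtr<HashTable> instead of an AtomicUsize holding a casted pointer, and lazy initialization is split into a cold create_hashtable path: the loser of the compare_exchange frees its freshly boxed table and adopts the winner's. A reduced sketch of that publish-once pattern, using a plain u64 payload instead of the real hash table:

use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

static GLOBAL: AtomicPtr<u64> = AtomicPtr::new(ptr::null_mut());

fn get_or_init() -> *mut u64 {
    let p = GLOBAL.load(Ordering::Acquire);
    if !p.is_null() {
        return p;
    }
    // Slow path: try to publish a new value; on failure, free ours and
    // use the winner's. Orderings mirror the diff above.
    let new = Box::into_raw(Box::new(42u64));
    match GLOBAL.compare_exchange(ptr::null_mut(), new, Ordering::Release, Ordering::Relaxed) {
        Ok(_) => new,
        Err(existing) => {
            unsafe { drop(Box::from_raw(new)) };
            existing
        }
    }
}

fn main() {
    let p = get_or_init();
    assert!(!p.is_null());
    assert_eq!(p, get_or_init()); // subsequent calls return the same pointer
}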
@@ -285,86 +291,91 @@ unsafe fn grow_hashtable(num_threads: us
             (*current).next_in_queue.set(ptr::null());
             current = next;
         }
     }
 
     // Publish the new table. No races are possible at this point because
     // any other thread trying to grow the hash table is blocked on the bucket
     // locks in the old table.
-    HASHTABLE.store(Box::into_raw(new_table) as usize, Ordering::Release);
+    HASHTABLE.store(Box::into_raw(new_table), Ordering::Release);
 
     // Unlock all buckets in the old table
     for b in &(*old_table).entries[..] {
         b.mutex.unlock();
     }
 }
 
 // Hash function for addresses
 #[cfg(target_pointer_width = "32")]
+#[inline]
 fn hash(key: usize, bits: u32) -> usize {
     key.wrapping_mul(0x9E3779B9) >> (32 - bits)
 }
 #[cfg(target_pointer_width = "64")]
+#[inline]
 fn hash(key: usize, bits: u32) -> usize {
     key.wrapping_mul(0x9E3779B97F4A7C15) >> (64 - bits)
 }
 
 // Lock the bucket for the given key
+#[inline]
 unsafe fn lock_bucket<'a>(key: usize) -> &'a Bucket {
     let mut bucket;
     loop {
         let hashtable = get_hashtable();
 
         let hash = hash(key, (*hashtable).hash_bits);
         bucket = &(*hashtable).entries[hash];
 
         // Lock the bucket
         bucket.mutex.lock();
 
         // If no other thread has rehashed the table before we grabbed the lock
         // then we are good to go! The lock we grabbed prevents any rehashes.
-        if HASHTABLE.load(Ordering::Relaxed) == hashtable as usize {
+        if HASHTABLE.load(Ordering::Relaxed) == hashtable {
             return bucket;
         }
 
         // Unlock the bucket and try again
         bucket.mutex.unlock();
     }
 }
 
 // Lock the bucket for the given key, but check that the key hasn't been changed
 // in the meantime due to a requeue.
+#[inline]
 unsafe fn lock_bucket_checked<'a>(key: &AtomicUsize) -> (usize, &'a Bucket) {
     let mut bucket;
     loop {
         let hashtable = get_hashtable();
         let current_key = key.load(Ordering::Relaxed);
 
         let hash = hash(current_key, (*hashtable).hash_bits);
         bucket = &(*hashtable).entries[hash];
 
         // Lock the bucket
         bucket.mutex.lock();
 
         // Check that both the hash table and key are correct while the bucket
         // is locked. Note that the key can't change once we locked the proper
         // bucket for it, so we just keep trying until we have the correct key.
-        if HASHTABLE.load(Ordering::Relaxed) == hashtable as usize
+        if HASHTABLE.load(Ordering::Relaxed) == hashtable
             && key.load(Ordering::Relaxed) == current_key
         {
             return (current_key, bucket);
         }
 
         // Unlock the bucket and try again
         bucket.mutex.unlock();
     }
 }
 
 // Lock the two buckets for the given pair of keys
+#[inline]
 unsafe fn lock_bucket_pair<'a>(key1: usize, key2: usize) -> (&'a Bucket, &'a Bucket) {
     let mut bucket1;
     loop {
         let hashtable = get_hashtable();
 
         // Get the lowest bucket first
         let hash1 = hash(key1, (*hashtable).hash_bits);
         let hash2 = hash(key2, (*hashtable).hash_bits);
@@ -374,17 +385,17 @@ unsafe fn lock_bucket_pair<'a>(key1: usi
             bucket1 = &(*hashtable).entries[hash2];
         }
 
         // Lock the first bucket
         bucket1.mutex.lock();
 
         // If no other thread has rehashed the table before we grabbed the lock
         // then we are good to go! The lock we grabbed prevents any rehashes.
-        if HASHTABLE.load(Ordering::Relaxed) == hashtable as usize {
+        if HASHTABLE.load(Ordering::Relaxed) == hashtable {
             // Now lock the second bucket and return the two buckets
             if hash1 == hash2 {
                 return (bucket1, bucket1);
             } else if hash1 < hash2 {
                 let bucket2 = &(*hashtable).entries[hash2];
                 bucket2.mutex.lock();
                 return (bucket1, bucket2);
             } else {
@@ -395,16 +406,17 @@ unsafe fn lock_bucket_pair<'a>(key1: usi
         }
 
         // Unlock the bucket and try again
         bucket1.mutex.unlock();
     }
 }
 
 // Unlock a pair of buckets
+#[inline]
 unsafe fn unlock_bucket_pair(bucket1: &Bucket, bucket2: &Bucket) {
     if bucket1 as *const _ == bucket2 as *const _ {
         bucket1.mutex.unlock();
     } else if bucket1 as *const _ < bucket2 as *const _ {
         bucket2.mutex.unlock();
         bucket1.mutex.unlock();
     } else {
         bucket1.mutex.unlock();
@@ -422,52 +434,65 @@ pub enum ParkResult {
     Invalid,
 
     /// The timeout expired.
     TimedOut,
 }
 
 impl ParkResult {
     /// Returns true if we were unparked by another thread.
+    #[inline]
     pub fn is_unparked(self) -> bool {
         if let ParkResult::Unparked(_) = self {
             true
         } else {
             false
         }
     }
 }
 
 /// Result of an unpark operation.
-#[derive(Copy, Clone, Eq, PartialEq, Debug)]
+#[derive(Copy, Clone, Default, Eq, PartialEq, Debug)]
 pub struct UnparkResult {
     /// The number of threads that were unparked.
     pub unparked_threads: usize,
 
+    /// The number of threads that were requeued.
+    pub requeued_threads: usize,
+
     /// Whether there are any threads remaining in the queue. This only returns
     /// true if a thread was unparked.
     pub have_more_threads: bool,
 
     /// This is set to true on average once every 0.5ms for any given key. It
     /// should be used to switch to a fair unlocking mechanism for a particular
     /// unlock.
     pub be_fair: bool,
+
+    /// Private field so new fields can be added without breakage.
+    _sealed: (),
 }
 
 /// Operation that `unpark_requeue` should perform.
 #[derive(Copy, Clone, Eq, PartialEq, Debug)]
 pub enum RequeueOp {
     /// Abort the operation without doing anything.
     Abort,
 
     /// Unpark one thread and requeue the rest onto the target queue.
     UnparkOneRequeueRest,
 
     /// Requeue all threads onto the target queue.
     RequeueAll,
+
+    /// Unpark one thread and leave the rest parked. No requeuing is done.
+    UnparkOne,
+
+    /// Requeue one thread and leave the rest parked on the original queue.
+    RequeueOne,
 }
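
Two API-evolution details in this hunk: UnparkResult derives Default and gains a private _sealed: () field, so the crate can keep adding fields (requeued_threads here) without breaking callers that used to build it with a struct literal, and RequeueOp grows the UnparkOne and RequeueOne variants above. A hedged sketch of the sealed-struct half of that, with a hypothetical type:

mod api {
    // Hypothetical result type that can grow new fields compatibly.
    #[derive(Copy, Clone, Default, Debug)]
    pub struct Outcome {
        pub handled: usize,
        pub more_pending: bool,
        _sealed: (), // private field: no struct literals outside this module
    }

    pub fn run() -> Outcome {
        let mut result = Outcome::default();
        result.handled = 1;
        result
    }
}

fn main() {
    // `api::Outcome { handled: 1, more_pending: false, _sealed: () }` would not
    // compile here, so adding fields to Outcome later is not a breaking change.
    println!("{:?}", api::run());
}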
 
 /// Operation that `unpark_filter` should perform for each thread.
 #[derive(Copy, Clone, Eq, PartialEq, Debug)]
 pub enum FilterOp {
     /// Unpark the thread and continue scanning the list of parked threads.
     Unpark,
 
@@ -529,139 +554,119 @@ pub unsafe fn park<V, B, T>(
     park_token: ParkToken,
     timeout: Option<Instant>,
 ) -> ParkResult
 where
     V: FnOnce() -> bool,
     B: FnOnce(),
     T: FnOnce(usize, bool),
 {
-    let mut v = Some(validate);
-    let mut b = Some(before_sleep);
-    let mut t = Some(timed_out);
-    park_internal(
-        key,
-        &mut || v.take().unchecked_unwrap()(),
-        &mut || b.take().unchecked_unwrap()(),
-        &mut |key, was_last_thread| t.take().unchecked_unwrap()(key, was_last_thread),
-        park_token,
-        timeout,
-    )
-}
+    // Grab our thread data, this also ensures that the hash table exists
+    with_thread_data(|thread_data| {
+        // Lock the bucket for the given key
+        let bucket = lock_bucket(key);
+
+        // If the validation function fails, just return
+        if !validate() {
+            bucket.mutex.unlock();
+            return ParkResult::Invalid;
+        }
 
-// Non-generic version to reduce monomorphization cost
-unsafe fn park_internal(
-    key: usize,
-    validate: &mut FnMut() -> bool,
-    before_sleep: &mut FnMut(),
-    timed_out: &mut FnMut(usize, bool),
-    park_token: ParkToken,
-    timeout: Option<Instant>,
-) -> ParkResult {
-    // Grab our thread data, this also ensures that the hash table exists
-    let mut thread_data = None;
-    let thread_data = get_thread_data(&mut thread_data);
+        // Append our thread data to the queue and unlock the bucket
+        thread_data.parked_with_timeout.set(timeout.is_some());
+        thread_data.next_in_queue.set(ptr::null());
+        thread_data.key.store(key, Ordering::Relaxed);
+        thread_data.park_token.set(park_token);
+        thread_data.parker.prepare_park();
+        if !bucket.queue_head.get().is_null() {
+            (*bucket.queue_tail.get()).next_in_queue.set(thread_data);
+        } else {
+            bucket.queue_head.set(thread_data);
+        }
+        bucket.queue_tail.set(thread_data);
+        bucket.mutex.unlock();
 
-    // Lock the bucket for the given key
-    let bucket = lock_bucket(key);
-
-    // If the validation function fails, just return
-    if !validate() {
-        bucket.mutex.unlock();
-        return ParkResult::Invalid;
-    }
+        // Invoke the pre-sleep callback
+        before_sleep();
 
-    // Append our thread data to the queue and unlock the bucket
-    thread_data.parked_with_timeout.set(timeout.is_some());
-    thread_data.next_in_queue.set(ptr::null());
-    thread_data.key.store(key, Ordering::Relaxed);
-    thread_data.park_token.set(park_token);
-    thread_data.parker.prepare_park();
-    if !bucket.queue_head.get().is_null() {
-        (*bucket.queue_tail.get()).next_in_queue.set(thread_data);
-    } else {
-        bucket.queue_head.set(thread_data);
-    }
-    bucket.queue_tail.set(thread_data);
-    bucket.mutex.unlock();
+        // Park our thread and determine whether we were woken up by an unpark or by
+        // our timeout. Note that this isn't precise: we can still be unparked since
+        // we are still in the queue.
+        let unparked = match timeout {
+            Some(timeout) => thread_data.parker.park_until(timeout),
+            None => {
+                thread_data.parker.park();
+                // call deadlock detection on_unpark hook
+                deadlock::on_unpark(thread_data);
+                true
+            }
+        };
 
-    // Invoke the pre-sleep callback
-    before_sleep();
+        // If we were unparked, return now
+        if unparked {
+            return ParkResult::Unparked(thread_data.unpark_token.get());
+        }
+
+        // Lock our bucket again. Note that the hashtable may have been rehashed in
+        // the meantime. Our key may also have changed if we were requeued.
+        let (key, bucket) = lock_bucket_checked(&thread_data.key);
 
-    // Park our thread and determine whether we were woken up by an unpark or by
-    // our timeout. Note that this isn't precise: we can still be unparked since
-    // we are still in the queue.
-    let unparked = match timeout {
-        Some(timeout) => thread_data.parker.park_until(timeout),
-        None => {
-            thread_data.parker.park();
-            // call deadlock detection on_unpark hook
-            deadlock::on_unpark(thread_data);
-            true
+        // Now we need to check again if we were unparked or timed out. Unlike the
+        // last check this is precise because we hold the bucket lock.
+        if !thread_data.parker.timed_out() {
+            bucket.mutex.unlock();
+            return ParkResult::Unparked(thread_data.unpark_token.get());
         }
-    };
-
-    // If we were unparked, return now
-    if unparked {
-        return ParkResult::Unparked(thread_data.unpark_token.get());
-    }
-
-    // Lock our bucket again. Note that the hashtable may have been rehashed in
-    // the meantime. Our key may also have changed if we were requeued.
-    let (key, bucket) = lock_bucket_checked(&thread_data.key);
-
-    // Now we need to check again if we were unparked or timed out. Unlike the
-    // last check this is precise because we hold the bucket lock.
-    if !thread_data.parker.timed_out() {
-        bucket.mutex.unlock();
-        return ParkResult::Unparked(thread_data.unpark_token.get());
-    }
 
-    // We timed out, so we now need to remove our thread from the queue
-    let mut link = &bucket.queue_head;
-    let mut current = bucket.queue_head.get();
-    let mut previous = ptr::null();
-    while !current.is_null() {
-        if current == thread_data {
-            let next = (*current).next_in_queue.get();
-            link.set(next);
-            let mut was_last_thread = true;
-            if bucket.queue_tail.get() == current {
-                bucket.queue_tail.set(previous);
-            } else {
-                // Scan the rest of the queue to see if there are any other
-                // entries with the given key.
-                let mut scan = next;
-                while !scan.is_null() {
-                    if (*scan).key.load(Ordering::Relaxed) == key {
-                        was_last_thread = false;
-                        break;
+        // We timed out, so we now need to remove our thread from the queue
+        let mut link = &bucket.queue_head;
+        let mut current = bucket.queue_head.get();
+        let mut previous = ptr::null();
+        let mut was_last_thread = true;
+        while !current.is_null() {
+            if current == thread_data {
+                let next = (*current).next_in_queue.get();
+                link.set(next);
+                if bucket.queue_tail.get() == current {
+                    bucket.queue_tail.set(previous);
+                } else {
+                    // Scan the rest of the queue to see if there are any other
+                    // entries with the given key.
+                    let mut scan = next;
+                    while !scan.is_null() {
+                        if (*scan).key.load(Ordering::Relaxed) == key {
+                            was_last_thread = false;
+                            break;
+                        }
+                        scan = (*scan).next_in_queue.get();
                     }
-                    scan = (*scan).next_in_queue.get();
                 }
-            }
 
-            // Callback to indicate that we timed out, and whether we were the
-            // last thread on the queue.
-            timed_out(key, was_last_thread);
-            break;
-        } else {
-            link = &(*current).next_in_queue;
-            previous = current;
-            current = link.get();
+                // Callback to indicate that we timed out, and whether we were the
+                // last thread on the queue.
+                timed_out(key, was_last_thread);
+                break;
+            } else {
+                if (*current).key.load(Ordering::Relaxed) == key {
+                    was_last_thread = false;
+                }
+                link = &(*current).next_in_queue;
+                previous = current;
+                current = link.get();
+            }
         }
-    }
+
+        // There should be no way for our thread to have been removed from the queue
+        // if we timed out.
+        debug_assert!(!current.is_null());
 
-    // There should be no way for our thread to have been removed from the queue
-    // if we timed out.
-    debug_assert!(!current.is_null());
-
-    // Unlock the bucket, we are done
-    bucket.mutex.unlock();
-    ParkResult::TimedOut
+        // Unlock the bucket, we are done
+        bucket.mutex.unlock();
+        ParkResult::TimedOut
+    })
 }
 
 /// Unparks one thread from the queue associated with the given key.
 ///
 /// The `callback` function is called while the queue is locked and before the
 /// target thread is woken up. The `UnparkResult` argument to the function
 /// indicates whether a thread was found in the queue and whether this was the
 /// last thread in the queue. This value is also returned by `unpark_one`.
@@ -678,37 +683,24 @@ unsafe fn park_internal(
 ///
 /// The `callback` function is called while the queue is locked and must not
 /// panic or call into any function in `parking_lot`.
 #[inline]
 pub unsafe fn unpark_one<C>(key: usize, callback: C) -> UnparkResult
 where
     C: FnOnce(UnparkResult) -> UnparkToken,
 {
-    let mut c = Some(callback);
-    unpark_one_internal(key, &mut |result| c.take().unchecked_unwrap()(result))
-}
-
-// Non-generic version to reduce monomorphization cost
-unsafe fn unpark_one_internal(
-    key: usize,
-    callback: &mut FnMut(UnparkResult) -> UnparkToken,
-) -> UnparkResult {
     // Lock the bucket for the given key
     let bucket = lock_bucket(key);
 
     // Find a thread with a matching key and remove it from the queue
     let mut link = &bucket.queue_head;
     let mut current = bucket.queue_head.get();
     let mut previous = ptr::null();
-    let mut result = UnparkResult {
-        unparked_threads: 0,
-        have_more_threads: false,
-        be_fair: false,
-    };
+    let mut result = UnparkResult::default();
     while !current.is_null() {
         if (*current).key.load(Ordering::Relaxed) == key {
             // Remove the thread from the queue
             let next = (*current).next_in_queue.get();
             link.set(next);
             if bucket.queue_tail.get() == current {
                 bucket.queue_tail.set(previous);
             } else {
@@ -761,16 +753,17 @@ unsafe fn unpark_one_internal(
 ///
 /// This function returns the number of threads that were unparked.
 ///
 /// # Safety
 ///
 /// You should only call this function with an address that you control, since
 /// you could otherwise interfere with the operation of other synchronization
 /// primitives.
+#[inline]
 pub unsafe fn unpark_all(key: usize, unpark_token: UnparkToken) -> usize {
     // Lock the bucket for the given key
     let bucket = lock_bucket(key);
 
     // Remove all threads with the given key in the bucket
     let mut link = &bucket.queue_head;
     let mut current = bucket.queue_head.get();
     let mut previous = ptr::null();
@@ -811,21 +804,20 @@ pub unsafe fn unpark_all(key: usize, unp
 
     num_threads
 }
 
 /// Removes all threads from the queue associated with `key_from`, optionally
 /// unparks the first one and requeues the rest onto the queue associated with
 /// `key_to`.
 ///
-/// The `validate` function is called while both queues are locked and can abort
-/// the operation by returning `RequeueOp::Abort`. It can also choose to
-/// unpark the first thread in the source queue while moving the rest by
-/// returning `RequeueOp::UnparkFirstRequeueRest`. Returning
-/// `RequeueOp::RequeueAll` will move all threads to the destination queue.
+/// The `validate` function is called while both queues are locked. Its return
+/// value will determine which operation is performed, or whether the operation
+/// should be aborted. See `RequeueOp` for details about the different possible
+/// return values.
 ///
 /// The `callback` function is also called while both queues are locked. It is
 /// passed the `RequeueOp` returned by `validate` and an `UnparkResult`
 /// indicating whether a thread was unparked and whether there are threads still
 /// parked in the new queue. This `UnparkResult` value is also returned by
 /// `unpark_requeue`.
 ///
 /// The `callback` function should return an `UnparkToken` value which will be
@@ -846,42 +838,21 @@ pub unsafe fn unpark_requeue<V, C>(
     key_to: usize,
     validate: V,
     callback: C,
 ) -> UnparkResult
 where
     V: FnOnce() -> RequeueOp,
     C: FnOnce(RequeueOp, UnparkResult) -> UnparkToken,
 {
-    let mut v = Some(validate);
-    let mut c = Some(callback);
-    unpark_requeue_internal(
-        key_from,
-        key_to,
-        &mut || v.take().unchecked_unwrap()(),
-        &mut |op, r| c.take().unchecked_unwrap()(op, r),
-    )
-}
-
-// Non-generic version to reduce monomorphization cost
-unsafe fn unpark_requeue_internal(
-    key_from: usize,
-    key_to: usize,
-    validate: &mut FnMut() -> RequeueOp,
-    callback: &mut FnMut(RequeueOp, UnparkResult) -> UnparkToken,
-) -> UnparkResult {
     // Lock the two buckets for the given key
     let (bucket_from, bucket_to) = lock_bucket_pair(key_from, key_to);
 
     // If the validation function fails, just return
-    let mut result = UnparkResult {
-        unparked_threads: 0,
-        have_more_threads: false,
-        be_fair: false,
-    };
+    let mut result = UnparkResult::default();
     let op = validate();
     if op == RequeueOp::Abort {
         unlock_bucket_pair(bucket_from, bucket_to);
         return result;
     }
 
     // Remove all threads with the given key in the source bucket
     let mut link = &bucket_from.queue_head;
@@ -895,28 +866,43 @@ unsafe fn unpark_requeue_internal(
             // Remove the thread from the queue
             let next = (*current).next_in_queue.get();
             link.set(next);
             if bucket_from.queue_tail.get() == current {
                 bucket_from.queue_tail.set(previous);
             }
 
             // Prepare the first thread for wakeup and requeue the rest.
-            if op == RequeueOp::UnparkOneRequeueRest && wakeup_thread.is_none() {
+            if (op == RequeueOp::UnparkOneRequeueRest || op == RequeueOp::UnparkOne)
+                && wakeup_thread.is_none()
+            {
                 wakeup_thread = Some(current);
                 result.unparked_threads = 1;
             } else {
                 if !requeue_threads.is_null() {
                     (*requeue_threads_tail).next_in_queue.set(current);
                 } else {
                     requeue_threads = current;
                 }
                 requeue_threads_tail = current;
                 (*current).key.store(key_to, Ordering::Relaxed);
-                result.have_more_threads = true;
+                result.requeued_threads += 1;
+            }
+            if op == RequeueOp::UnparkOne || op == RequeueOp::RequeueOne {
+                // Scan the rest of the queue to see if there are any other
+                // entries with the given key.
+                let mut scan = next;
+                while !scan.is_null() {
+                    if (*scan).key.load(Ordering::Relaxed) == key_from {
+                        result.have_more_threads = true;
+                        break;
+                    }
+                    scan = (*scan).next_in_queue.get();
+                }
+                break;
             }
             current = next;
         } else {
             link = &(*current).next_in_queue;
             previous = current;
             current = link.get();
         }
     }
@@ -980,39 +966,25 @@ unsafe fn unpark_requeue_internal(
 /// The `filter` and `callback` functions are called while the queue is locked
 /// and must not panic or call into any function in `parking_lot`.
 #[inline]
 pub unsafe fn unpark_filter<F, C>(key: usize, mut filter: F, callback: C) -> UnparkResult
 where
     F: FnMut(ParkToken) -> FilterOp,
     C: FnOnce(UnparkResult) -> UnparkToken,
 {
-    let mut c = Some(callback);
-    unpark_filter_internal(key, &mut filter, &mut |r| c.take().unchecked_unwrap()(r))
-}
-
-// Non-generic version to reduce monomorphization cost
-unsafe fn unpark_filter_internal(
-    key: usize,
-    filter: &mut FnMut(ParkToken) -> FilterOp,
-    callback: &mut FnMut(UnparkResult) -> UnparkToken,
-) -> UnparkResult {
     // Lock the bucket for the given key
     let bucket = lock_bucket(key);
 
     // Go through the queue looking for threads with a matching key
     let mut link = &bucket.queue_head;
     let mut current = bucket.queue_head.get();
     let mut previous = ptr::null();
     let mut threads = SmallVec::<[_; 8]>::new();
-    let mut result = UnparkResult {
-        unparked_threads: 0,
-        have_more_threads: false,
-        be_fair: false,
-    };
+    let mut result = UnparkResult::default();
     while !current.is_null() {
         if (*current).key.load(Ordering::Relaxed) == key {
             // Call the filter function with the thread's ParkToken
             let next = (*current).next_in_queue.get();
             match filter((*current).park_token.get()) {
                 FilterOp::Unpark => {
                     // Remove the thread from the queue
                     link.set(next);
@@ -1063,36 +1035,26 @@ unsafe fn unpark_filter_internal(
     // from the queue.
     for (_, handle) in threads.into_iter() {
         handle.unchecked_unwrap().unpark();
     }
 
     result
 }
 
-/// [Experimental] Deadlock detection
+/// \[Experimental\] Deadlock detection
 ///
 /// Enabled via the `deadlock_detection` feature flag.
 pub mod deadlock {
     #[cfg(feature = "deadlock_detection")]
     use super::deadlock_impl;
 
     #[cfg(feature = "deadlock_detection")]
     pub(super) use super::deadlock_impl::DeadlockData;
 
-    #[cfg(not(feature = "deadlock_detection"))]
-    pub(super) struct DeadlockData {}
-
-    #[cfg(not(feature = "deadlock_detection"))]
-    impl DeadlockData {
-        pub(super) fn new() -> Self {
-            DeadlockData {}
-        }
-    }
-
     /// Acquire a resource identified by key in the deadlock detector
     /// Noop if deadlock_detection feature isn't enabled.
     /// Note: Call after the resource is acquired
     #[inline]
     pub unsafe fn acquire_resource(_key: usize) {
         #[cfg(feature = "deadlock_detection")]
         deadlock_impl::acquire_resource(_key);
     }
@@ -1120,25 +1082,26 @@ pub mod deadlock {
     pub(super) unsafe fn on_unpark(_td: &super::ThreadData) {
         #[cfg(feature = "deadlock_detection")]
         deadlock_impl::on_unpark(_td);
     }
 }
 
 #[cfg(feature = "deadlock_detection")]
 mod deadlock_impl {
-    use super::{get_hashtable, get_thread_data, lock_bucket, ThreadData, NUM_THREADS};
-    use std::cell::{Cell, UnsafeCell};
-    use std::sync::mpsc;
-    use std::sync::atomic::Ordering;
-    use std::collections::HashSet;
-    use thread_id;
+    use super::{get_hashtable, lock_bucket, with_thread_data, ThreadData, NUM_THREADS};
+    use crate::word_lock::WordLock;
     use backtrace::Backtrace;
     use petgraph;
     use petgraph::graphmap::DiGraphMap;
+    use std::cell::{Cell, UnsafeCell};
+    use std::collections::HashSet;
+    use std::sync::atomic::Ordering;
+    use std::sync::mpsc;
+    use thread_id;
 
     /// Representation of a deadlocked thread
     pub struct DeadlockedThread {
         thread_id: usize,
         backtrace: Backtrace,
     }
 
     impl DeadlockedThread {
@@ -1193,29 +1156,29 @@ mod deadlock_impl {
             // park until the end of the time
             td.parker.prepare_park();
             td.parker.park();
             unreachable!("unparked deadlocked thread!");
         }
     }
 
     pub unsafe fn acquire_resource(key: usize) {
-        let mut thread_data = None;
-        let thread_data = get_thread_data(&mut thread_data);
-        (*thread_data.deadlock_data.resources.get()).push(key);
+        with_thread_data(|thread_data| {
+            (*thread_data.deadlock_data.resources.get()).push(key);
+        });
     }
 
     pub unsafe fn release_resource(key: usize) {
-        let mut thread_data = None;
-        let thread_data = get_thread_data(&mut thread_data);
-        let resources = &mut (*thread_data.deadlock_data.resources.get());
-        match resources.iter().rposition(|x| *x == key) {
-            Some(p) => resources.swap_remove(p),
-            None => panic!("key {} not found in thread resources", key),
-        };
+        with_thread_data(|thread_data| {
+            let resources = &mut (*thread_data.deadlock_data.resources.get());
+            match resources.iter().rposition(|x| *x == key) {
+                Some(p) => resources.swap_remove(p),
+                None => panic!("key {} not found in thread resources", key),
+            };
+        });
     }
 
     pub fn check_deadlock() -> Vec<Vec<DeadlockedThread>> {
         unsafe {
             // fast pass
             if check_wait_graph_fast() {
                 // double check
                 check_wait_graph_slow()
@@ -1258,20 +1221,23 @@ mod deadlock_impl {
     #[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
     enum WaitGraphNode {
         Thread(*const ThreadData),
         Resource(usize),
     }
 
     use self::WaitGraphNode::*;
 
-    // Contrary to the _fast variant this locks the entrie table before looking for cycles.
+    // Contrary to the _fast variant this locks the entries table before looking for cycles.
     // Returns all detected thread wait cycles.
     // Note that once a cycle is reported it's never reported again.
     unsafe fn check_wait_graph_slow() -> Vec<Vec<DeadlockedThread>> {
+        static DEADLOCK_DETECTION_LOCK: WordLock = WordLock::INIT;
+        DEADLOCK_DETECTION_LOCK.lock();
+
         let mut table = get_hashtable();
         loop {
             // Lock all buckets in the old table
             for b in &(*table).entries[..] {
                 b.mutex.lock();
             }
 
             // Now check if our table is still the latest one. Another thread could
@@ -1335,16 +1301,18 @@ mod deadlock_impl {
                 // on unpark it'll notice the deadlocked flag and report back
                 handle.unpark();
             }
             // make sure to drop our sender before collecting results
             drop(sender);
             results.push(receiver.iter().collect());
         }
 
+        DEADLOCK_DETECTION_LOCK.unlock();
+
         results
     }
 
     // normalize a cycle to start with the "smallest" node
     fn normalize_cycle<T: Ord + Copy + Clone>(input: &[T]) -> Vec<T> {
         let min_pos = input
             .iter()
             .enumerate()
@@ -1357,24 +1325,25 @@ mod deadlock_impl {
             .skip(min_pos)
             .take(input.len())
             .cloned()
             .collect()
     }
 
     // returns all thread cycles in the wait graph
     fn graph_cycles(g: &DiGraphMap<WaitGraphNode, ()>) -> Vec<Vec<*const ThreadData>> {
-        use petgraph::visit::NodeIndexable;
         use petgraph::visit::depth_first_search;
         use petgraph::visit::DfsEvent;
+        use petgraph::visit::NodeIndexable;
 
         let mut cycles = HashSet::new();
         let mut path = Vec::with_capacity(g.node_bound());
         // start from threads to get the correct threads cycle
-        let threads = g.nodes()
+        let threads = g
+            .nodes()
             .filter(|n| if let &Thread(_) = n { true } else { false });
 
         depth_first_search(g, threads, |e| match e {
             DfsEvent::Discover(Thread(n), _) => path.push(n),
             DfsEvent::Finish(Thread(_), _) => {
                 path.pop();
             }
             DfsEvent::BackEdge(_, Thread(n)) => {
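
The deadlock detector models parked threads and the resources they hold as a wait-for graph and reports any cycle discovered during a depth-first search (the DfsEvent::BackEdge arm above). As an illustration only, here is a minimal standard-library sketch of the same back-edge idea on a hypothetical thread-to-thread wait map; the vendored code uses petgraph's depth_first_search over a DiGraphMap instead, and every name below is invented for the example.

use std::collections::{HashMap, HashSet};

// Hypothetical wait-for graph: each thread id maps to the thread ids it waits on.
type WaitGraph = HashMap<usize, Vec<usize>>;

// Depth-first search that reports a wait cycle (a back edge) if one exists.
fn find_cycle(graph: &WaitGraph) -> Option<Vec<usize>> {
    let mut visited = HashSet::new();
    let mut path = Vec::new();
    for &start in graph.keys() {
        if dfs(graph, start, &mut visited, &mut path) {
            return Some(path);
        }
    }
    None
}

fn dfs(graph: &WaitGraph, node: usize, visited: &mut HashSet<usize>, path: &mut Vec<usize>) -> bool {
    if let Some(pos) = path.iter().position(|&n| n == node) {
        // Back edge: the suffix of the path starting at `pos` is a cycle.
        path.drain(..pos);
        return true;
    }
    if !visited.insert(node) {
        // Already fully explored without finding a cycle.
        return false;
    }
    path.push(node);
    for &next in graph.get(&node).into_iter().flatten() {
        if dfs(graph, next, visited, path) {
            return true;
        }
    }
    path.pop();
    false
}

fn main() {
    let mut g = WaitGraph::new();
    g.insert(1, vec![2]);
    g.insert(2, vec![3]);
    g.insert(3, vec![1]); // 1 -> 2 -> 3 -> 1 is a deadlock cycle
    assert!(find_cycle(&g).is_some());
}
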
--- a/third_party/rust/parking_lot_core/src/spinwait.rs
+++ b/third_party/rust/parking_lot_core/src/spinwait.rs
@@ -1,82 +1,38 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-#[cfg(unix)]
-use libc;
-#[cfg(windows)]
-use winapi;
-#[cfg(not(any(windows, unix)))]
-use std::thread;
+use crate::thread_parker;
 use std::sync::atomic::spin_loop_hint;
 
-// Yields the rest of the current timeslice to the OS
-#[cfg(windows)]
-#[inline]
-fn thread_yield() {
-    // Note that this is manually defined here rather than using the definition
-    // through `winapi`. The `winapi` definition comes from the `synchapi`
-    // header which enables the "synchronization.lib" library. It turns out,
-    // however that `Sleep` comes from `kernel32.dll` so this activation isn't
-    // necessary.
-    //
-    // This was originally identified in rust-lang/rust where on MinGW the
-    // libsynchronization.a library pulls in a dependency on a newer DLL not
-    // present in older versions of Windows. (see rust-lang/rust#49438)
-    //
-    // This is a bit of a hack for now and ideally we'd fix MinGW's own import
-    // libraries, but that'll probably take a lot longer than patching this here
-    // and avoiding the `synchapi` feature entirely.
-    extern "system" {
-        fn Sleep(a: winapi::shared::minwindef::DWORD);
-    }
-    unsafe {
-        // We don't use SwitchToThread here because it doesn't consider all
-        // threads in the system and the thread we are waiting for may not get
-        // selected.
-        Sleep(0);
-    }
-}
-#[cfg(unix)]
-#[inline]
-fn thread_yield() {
-    unsafe {
-        libc::sched_yield();
-    }
-}
-#[cfg(not(any(windows, unix)))]
-#[inline]
-fn thread_yield() {
-    thread::yield_now();
-}
-
 // Wastes some CPU time for the given number of iterations,
 // using a hint to indicate to the CPU that we are spinning.
 #[inline]
 fn cpu_relax(iterations: u32) {
     for _ in 0..iterations {
         spin_loop_hint()
     }
 }
 
 /// A counter used to perform exponential backoff in spin loops.
+#[derive(Default)]
 pub struct SpinWait {
     counter: u32,
 }
 
 impl SpinWait {
     /// Creates a new `SpinWait`.
     #[inline]
-    pub fn new() -> SpinWait {
-        SpinWait { counter: 0 }
+    pub fn new() -> Self {
+        Self::default()
     }
 
     /// Resets a `SpinWait` to its initial state.
     #[inline]
     pub fn reset(&mut self) {
         self.counter = 0;
     }
 
@@ -85,41 +41,34 @@ impl SpinWait {
     /// This function returns whether the sleep threshold has been reached, at
     /// which point further spinning has diminishing returns and the thread
     /// should be parked instead.
     ///
     /// The spin strategy will initially use a CPU-bound loop but will fall back
     /// to yielding the CPU to the OS after a few iterations.
     #[inline]
     pub fn spin(&mut self) -> bool {
-        if self.counter >= 20 {
+        if self.counter >= 10 {
             return false;
         }
         self.counter += 1;
-        if self.counter <= 10 {
-            cpu_relax(4 << self.counter);
+        if self.counter <= 3 {
+            cpu_relax(1 << self.counter);
         } else {
-            thread_yield();
+            thread_parker::thread_yield();
         }
         true
     }
 
     /// Spins without yielding the thread to the OS.
     ///
     /// Instead, the backoff is simply capped at a maximum value. This can be
     /// used to improve throughput in `compare_exchange` loops that have high
     /// contention.
     #[inline]
     pub fn spin_no_yield(&mut self) {
         self.counter += 1;
         if self.counter > 10 {
             self.counter = 10;
         }
-        cpu_relax(4 << self.counter);
+        cpu_relax(1 << self.counter);
     }
 }
-
-impl Default for SpinWait {
-    #[inline]
-    fn default() -> SpinWait {
-        SpinWait::new()
-    }
-}
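
The updated spin() caps backoff at 10 iterations, busy-waits with 1 << counter spin hints for the first three, and then yields through the platform thread_parker::thread_yield. The standalone sketch below (not part of the patch) mirrors that shape using only the standard library: std::hint::spin_loop is the current name for spin_loop_hint, and the Backoff type and acquire function are made up for illustration.

use std::hint::spin_loop;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;

// Minimal backoff helper: exponential busy-waiting first, then OS yields.
struct Backoff {
    counter: u32,
}

impl Backoff {
    fn new() -> Self {
        Backoff { counter: 0 }
    }

    // Returns false once further spinning is unlikely to help and the caller
    // should park (or otherwise block) instead.
    fn spin(&mut self) -> bool {
        if self.counter >= 10 {
            return false;
        }
        self.counter += 1;
        if self.counter <= 3 {
            for _ in 0..(1u32 << self.counter) {
                spin_loop();
            }
        } else {
            thread::yield_now();
        }
        true
    }
}

// Example use in a toy test-and-set spinlock acquire loop.
fn acquire(flag: &AtomicBool) {
    let mut backoff = Backoff::new();
    while flag
        .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
        .is_err()
    {
        if !backoff.spin() {
            // A real implementation would park the thread here.
            thread::yield_now();
        }
    }
}

fn main() {
    let flag = AtomicBool::new(false);
    acquire(&flag);
    assert!(flag.load(Ordering::Relaxed));
}
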
new file mode 100644
--- /dev/null
+++ b/third_party/rust/parking_lot_core/src/thread_parker/cloudabi.rs
@@ -0,0 +1,325 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use cloudabi as abi;
+use core::{
+    cell::Cell,
+    mem,
+    sync::atomic::{AtomicU32, Ordering},
+};
+use std::{convert::TryFrom, thread, time::Instant};
+
+extern "C" {
+    #[thread_local]
+    static __pthread_thread_id: abi::tid;
+}
+
+struct Lock {
+    lock: AtomicU32,
+}
+
+impl Lock {
+    #[inline]
+    pub fn new() -> Self {
+        Lock {
+            lock: AtomicU32::new(abi::LOCK_UNLOCKED.0),
+        }
+    }
+
+    #[inline]
+    fn try_lock(&self) -> Option<LockGuard<'_>> {
+        // Attempt to acquire the lock.
+        if let Err(old) = self.lock.compare_exchange(
+            abi::LOCK_UNLOCKED.0,
+            unsafe { __pthread_thread_id.0 } | abi::LOCK_WRLOCKED.0,
+            Ordering::Acquire,
+            Ordering::Relaxed,
+        ) {
+            // Failure. Crash upon recursive acquisition.
+            debug_assert_ne!(
+                old & !abi::LOCK_KERNEL_MANAGED.0,
+                unsafe { __pthread_thread_id.0 } | abi::LOCK_WRLOCKED.0,
+                "Attempted to recursive write-lock a lock",
+            );
+            None
+        } else {
+            Some(LockGuard { inner: &self })
+        }
+    }
+
+    #[inline]
+    pub fn lock(&self) -> LockGuard<'_> {
+        self.try_lock().unwrap_or_else(|| {
+            // Call into the kernel to acquire a write lock.
+            unsafe {
+                let subscription = abi::subscription {
+                    type_: abi::eventtype::LOCK_WRLOCK,
+                    union: abi::subscription_union {
+                        lock: abi::subscription_lock {
+                            lock: self.ptr(),
+                            lock_scope: abi::scope::PRIVATE,
+                        },
+                    },
+                    ..mem::zeroed()
+                };
+                let mut event: abi::event = mem::uninitialized();
+                let mut nevents: usize = mem::uninitialized();
+                let ret = abi::poll(&subscription, &mut event, 1, &mut nevents);
+                debug_assert_eq!(ret, abi::errno::SUCCESS);
+                debug_assert_eq!(event.error, abi::errno::SUCCESS);
+            }
+            LockGuard { inner: &self }
+        })
+    }
+
+    #[inline]
+    fn ptr(&self) -> *mut abi::lock {
+        &self.lock as *const AtomicU32 as *mut abi::lock
+    }
+}
+
+struct LockGuard<'a> {
+    inner: &'a Lock,
+}
+
+impl LockGuard<'_> {
+    #[inline]
+    fn ptr(&self) -> *mut abi::lock {
+        &self.inner.lock as *const AtomicU32 as *mut abi::lock
+    }
+}
+
+impl Drop for LockGuard<'_> {
+    fn drop(&mut self) {
+        debug_assert_eq!(
+            self.inner.lock.load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
+            unsafe { __pthread_thread_id.0 } | abi::LOCK_WRLOCKED.0,
+            "This lock is not write-locked by this thread"
+        );
+
+        if !self
+            .inner
+            .lock
+            .compare_exchange(
+                unsafe { __pthread_thread_id.0 } | abi::LOCK_WRLOCKED.0,
+                abi::LOCK_UNLOCKED.0,
+                Ordering::Release,
+                Ordering::Relaxed,
+            )
+            .is_ok()
+        {
+            // Lock is managed by kernelspace. Call into the kernel
+            // to unblock waiting threads.
+            let ret = unsafe { abi::lock_unlock(self.ptr(), abi::scope::PRIVATE) };
+            debug_assert_eq!(ret, abi::errno::SUCCESS);
+        }
+    }
+}
+
+struct Condvar {
+    condvar: AtomicU32,
+}
+
+impl Condvar {
+    #[inline]
+    pub fn new() -> Self {
+        Condvar {
+            condvar: AtomicU32::new(abi::CONDVAR_HAS_NO_WAITERS.0),
+        }
+    }
+
+    #[inline]
+    pub fn wait(&self, lock: &LockGuard<'_>) {
+        unsafe {
+            let subscription = abi::subscription {
+                type_: abi::eventtype::CONDVAR,
+                union: abi::subscription_union {
+                    condvar: abi::subscription_condvar {
+                        condvar: self.ptr(),
+                        condvar_scope: abi::scope::PRIVATE,
+                        lock: lock.ptr(),
+                        lock_scope: abi::scope::PRIVATE,
+                    },
+                },
+                ..mem::zeroed()
+            };
+            let mut event: abi::event = mem::uninitialized();
+            let mut nevents: usize = mem::uninitialized();
+
+            let ret = abi::poll(&subscription, &mut event, 1, &mut nevents);
+            debug_assert_eq!(ret, abi::errno::SUCCESS);
+            debug_assert_eq!(event.error, abi::errno::SUCCESS);
+        }
+    }
+
+    /// Waits for a signal on the condvar.
+    /// Returns false if it times out before anyone notified us.
+    #[inline]
+    pub fn wait_timeout(&self, lock: &LockGuard<'_>, timeout: abi::timestamp) -> bool {
+        unsafe {
+            let subscriptions = [
+                abi::subscription {
+                    type_: abi::eventtype::CONDVAR,
+                    union: abi::subscription_union {
+                        condvar: abi::subscription_condvar {
+                            condvar: self.ptr(),
+                            condvar_scope: abi::scope::PRIVATE,
+                            lock: lock.ptr(),
+                            lock_scope: abi::scope::PRIVATE,
+                        },
+                    },
+                    ..mem::zeroed()
+                },
+                abi::subscription {
+                    type_: abi::eventtype::CLOCK,
+                    union: abi::subscription_union {
+                        clock: abi::subscription_clock {
+                            clock_id: abi::clockid::MONOTONIC,
+                            timeout,
+                            ..mem::zeroed()
+                        },
+                    },
+                    ..mem::zeroed()
+                },
+            ];
+            let mut events: [abi::event; 2] = mem::uninitialized();
+            let mut nevents: usize = mem::uninitialized();
+
+            let ret = abi::poll(subscriptions.as_ptr(), events.as_mut_ptr(), 2, &mut nevents);
+            debug_assert_eq!(ret, abi::errno::SUCCESS);
+            for i in 0..nevents {
+                debug_assert_eq!(events[i].error, abi::errno::SUCCESS);
+                if events[i].type_ == abi::eventtype::CONDVAR {
+                    return true;
+                }
+            }
+        }
+        false
+    }
+
+    #[inline]
+    pub fn notify(&self) {
+        let ret = unsafe { abi::condvar_signal(self.ptr(), abi::scope::PRIVATE, 1) };
+        debug_assert_eq!(ret, abi::errno::SUCCESS);
+    }
+
+    #[inline]
+    fn ptr(&self) -> *mut abi::condvar {
+        &self.condvar as *const AtomicU32 as *mut abi::condvar
+    }
+}
+
+// Helper type for putting a thread to sleep until some other thread wakes it up
+pub struct ThreadParker {
+    should_park: Cell<bool>,
+    lock: Lock,
+    condvar: Condvar,
+}
+
+impl ThreadParker {
+    pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
+
+    #[inline]
+    pub fn new() -> ThreadParker {
+        ThreadParker {
+            should_park: Cell::new(false),
+            lock: Lock::new(),
+            condvar: Condvar::new(),
+        }
+    }
+
+    // Prepares the parker. This should be called before adding it to the queue.
+    #[inline]
+    pub fn prepare_park(&self) {
+        self.should_park.set(true);
+    }
+
+    // Checks if the park timed out. This should be called while holding the
+    // queue lock after park_until has returned false.
+    #[inline]
+    pub fn timed_out(&self) -> bool {
+        // We need to grab the lock here because another thread may be
+        // concurrently executing UnparkHandle::unpark, which is done without
+        // holding the queue lock.
+        let _guard = self.lock.lock();
+        self.should_park.get()
+    }
+
+    // Parks the thread until it is unparked. This should be called after it has
+    // been added to the queue, after unlocking the queue.
+    #[inline]
+    pub fn park(&self) {
+        let guard = self.lock.lock();
+        while self.should_park.get() {
+            self.condvar.wait(&guard);
+        }
+    }
+
+    // Parks the thread until it is unparked or the timeout is reached. This
+    // should be called after it has been added to the queue, after unlocking
+    // the queue. Returns true if we were unparked and false if we timed out.
+    #[inline]
+    pub fn park_until(&self, timeout: Instant) -> bool {
+        let guard = self.lock.lock();
+        while self.should_park.get() {
+            if let Some(duration_left) = timeout.checked_duration_since(Instant::now()) {
+                if let Ok(nanos_left) = abi::timestamp::try_from(duration_left.as_nanos()) {
+                    self.condvar.wait_timeout(&guard, nanos_left);
+                } else {
+                    // remaining timeout overflows an abi::timestamp. Sleep indefinitely
+                    self.condvar.wait(&guard);
+                }
+            } else {
+                // We timed out
+                return false;
+            }
+        }
+        true
+    }
+
+    // Locks the parker to prevent the target thread from exiting. This is
+    // necessary to ensure that thread-local ThreadData objects remain valid.
+    // This should be called while holding the queue lock.
+    #[inline]
+    pub fn unpark_lock(&self) -> UnparkHandle<'_> {
+        let _lock_guard = self.lock.lock();
+
+        UnparkHandle {
+            thread_parker: self,
+            _lock_guard,
+        }
+    }
+}
+
+// Handle for a thread that is about to be unparked. We need to mark the thread
+// as unparked while holding the queue lock, but we delay the actual unparking
+// until after the queue lock is released.
+pub struct UnparkHandle<'a> {
+    thread_parker: *const ThreadParker,
+    _lock_guard: LockGuard<'a>,
+}
+
+impl UnparkHandle<'_> {
+    // Wakes up the parked thread. This should be called after the queue lock is
+    // released to avoid blocking the queue for too long.
+    #[inline]
+    pub fn unpark(self) {
+        unsafe {
+            (*self.thread_parker).should_park.set(false);
+
+            // We notify while holding the lock here to avoid races with the target
+            // thread. In particular, the thread could exit after we unlock the
+            // mutex, which would make the condvar access invalid memory.
+            (*self.thread_parker).condvar.notify();
+        }
+    }
+}
+
+#[inline]
+pub fn thread_yield() {
+    thread::yield_now();
+}
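
The CloudABI parker above wraps its lock word in an RAII LockGuard whose Drop impl releases the lock, calling into the kernel only when the word is kernel-managed. Below is a minimal, hypothetical sketch of just the userspace fast path and the guard-on-drop pattern on a plain AtomicU32; the kernel fallback and thread-id tagging are omitted, and the names are invented for the example.

use std::sync::atomic::{AtomicU32, Ordering};

const UNLOCKED: u32 = 0;
const LOCKED: u32 = 1;

struct Lock {
    word: AtomicU32,
}

// Guard that releases the lock word when it goes out of scope.
struct LockGuard<'a> {
    inner: &'a Lock,
}

impl Lock {
    fn new() -> Self {
        Lock { word: AtomicU32::new(UNLOCKED) }
    }

    // Userspace fast path: a single compare_exchange.
    fn try_lock(&self) -> Option<LockGuard<'_>> {
        self.word
            .compare_exchange(UNLOCKED, LOCKED, Ordering::Acquire, Ordering::Relaxed)
            .ok()
            .map(|_| LockGuard { inner: self })
    }
}

impl Drop for LockGuard<'_> {
    fn drop(&mut self) {
        self.inner.word.store(UNLOCKED, Ordering::Release);
    }
}

fn main() {
    let lock = Lock::new();
    let guard = lock.try_lock().expect("uncontended lock");
    assert!(lock.try_lock().is_none()); // second acquisition fails while held
    drop(guard);
    assert!(lock.try_lock().is_some());
}
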
--- a/third_party/rust/parking_lot_core/src/thread_parker/generic.rs
+++ b/third_party/rust/parking_lot_core/src/thread_parker/generic.rs
@@ -1,98 +1,91 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use std::sync::{Condvar, Mutex, MutexGuard};
-use std::cell::Cell;
-use std::time::Instant;
+//! A simple spin lock based thread parker. Used on platforms without better
+//! parking facilities available.
+
+use core::sync::atomic::{spin_loop_hint, AtomicBool, Ordering};
+use std::{thread, time::Instant};
 
 // Helper type for putting a thread to sleep until some other thread wakes it up
 pub struct ThreadParker {
-    should_park: Cell<bool>,
-    mutex: Mutex<()>,
-    condvar: Condvar,
+    parked: AtomicBool,
 }
 
 impl ThreadParker {
+    pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
+
+    #[inline]
     pub fn new() -> ThreadParker {
         ThreadParker {
-            should_park: Cell::new(false),
-            mutex: Mutex::new(()),
-            condvar: Condvar::new(),
+            parked: AtomicBool::new(false),
         }
     }
 
     // Prepares the parker. This should be called before adding it to the queue.
-    pub unsafe fn prepare_park(&self) {
-        self.should_park.set(true);
+    #[inline]
+    pub fn prepare_park(&self) {
+        self.parked.store(true, Ordering::Relaxed);
     }
 
     // Checks if the park timed out. This should be called while holding the
     // queue lock after park_until has returned false.
-    pub unsafe fn timed_out(&self) -> bool {
-        // We need to grab the mutex here because another thread may be
-        // concurrently executing UnparkHandle::unpark, which is done without
-        // holding the queue lock.
-        let _lock = self.mutex.lock().unwrap();
-        self.should_park.get()
+    #[inline]
+    pub fn timed_out(&self) -> bool {
+        self.parked.load(Ordering::Relaxed) != false
     }
 
     // Parks the thread until it is unparked. This should be called after it has
     // been added to the queue, after unlocking the queue.
-    pub unsafe fn park(&self) {
-        let mut lock = self.mutex.lock().unwrap();
-        while self.should_park.get() {
-            lock = self.condvar.wait(lock).unwrap();
+    #[inline]
+    pub fn park(&self) {
+        while self.parked.load(Ordering::Acquire) != false {
+            spin_loop_hint();
         }
     }
 
     // Parks the thread until it is unparked or the timeout is reached. This
     // should be called after it has been added to the queue, after unlocking
     // the queue. Returns true if we were unparked and false if we timed out.
-    pub unsafe fn park_until(&self, timeout: Instant) -> bool {
-        let mut lock = self.mutex.lock().unwrap();
-        while self.should_park.get() {
-            let now = Instant::now();
-            if timeout <= now {
+    #[inline]
+    pub fn park_until(&self, timeout: Instant) -> bool {
+        while self.parked.load(Ordering::Acquire) != false {
+            if Instant::now() >= timeout {
                 return false;
             }
-            let (new_lock, _) = self.condvar.wait_timeout(lock, timeout - now).unwrap();
-            lock = new_lock;
+            spin_loop_hint();
         }
         true
     }
 
     // Locks the parker to prevent the target thread from exiting. This is
     // necessary to ensure that thread-local ThreadData objects remain valid.
     // This should be called while holding the queue lock.
-    pub unsafe fn unpark_lock(&self) -> UnparkHandle {
-        UnparkHandle {
-            thread_parker: self,
-            _guard: self.mutex.lock().unwrap(),
-        }
+    #[inline]
+    pub fn unpark_lock(&self) -> UnparkHandle {
+        // We don't need to lock anything, just clear the state
+        self.parked.store(false, Ordering::Release);
+        UnparkHandle(())
     }
 }
 
 // Handle for a thread that is about to be unparked. We need to mark the thread
 // as unparked while holding the queue lock, but we delay the actual unparking
 // until after the queue lock is released.
-pub struct UnparkHandle<'a> {
-    thread_parker: *const ThreadParker,
-    _guard: MutexGuard<'a, ()>,
-}
+pub struct UnparkHandle(());
 
-impl<'a> UnparkHandle<'a> {
+impl UnparkHandle {
     // Wakes up the parked thread. This should be called after the queue lock is
     // released to avoid blocking the queue for too long.
-    pub unsafe fn unpark(self) {
-        (*self.thread_parker).should_park.set(false);
+    #[inline]
+    pub fn unpark(self) {}
+}
 
-        // We notify while holding the lock here to avoid races with the target
-        // thread. In particular, the thread could exit after we unlock the
-        // mutex, which would make the condvar access invalid memory.
-        (*self.thread_parker).condvar.notify_one();
-    }
+#[inline]
+pub fn thread_yield() {
+    thread::yield_now();
 }
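
The rewritten generic parker drops the Mutex/Condvar pair in favour of spinning on a single AtomicBool: park loops on an Acquire load while unpark_lock clears the flag with a Release store. A self-contained sketch of that handshake (names and timing invented for the example):

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

fn main() {
    // The parker spins on the flag with Acquire loads; the unparker clears it
    // with a Release store, matching the generic ThreadParker above.
    let parked = Arc::new(AtomicBool::new(true));

    let waiter = {
        let parked = Arc::clone(&parked);
        thread::spawn(move || {
            while parked.load(Ordering::Acquire) {
                std::hint::spin_loop();
            }
            println!("unparked");
        })
    };

    thread::sleep(Duration::from_millis(10));
    parked.store(false, Ordering::Release); // corresponds to unpark_lock()
    waiter.join().unwrap();
}
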
--- a/third_party/rust/parking_lot_core/src/thread_parker/linux.rs
+++ b/third_party/rust/parking_lot_core/src/thread_parker/linux.rs
@@ -1,18 +1,21 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use std::sync::atomic::{AtomicI32, Ordering};
-use std::time::Instant;
+use core::{
+    ptr,
+    sync::atomic::{AtomicI32, Ordering},
+};
 use libc;
+use std::{thread, time::Instant};
 
 const FUTEX_WAIT: i32 = 0;
 const FUTEX_WAKE: i32 = 1;
 const FUTEX_PRIVATE: i32 = 128;
 
 // x32 Linux uses a non-standard type for tv_nsec in timespec.
 // See https://sourceware.org/bugzilla/show_bug.cgi?id=16437
 #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))]
@@ -23,96 +26,104 @@ type tv_nsec_t = i64;
 type tv_nsec_t = libc::c_long;
 
 // Helper type for putting a thread to sleep until some other thread wakes it up
 pub struct ThreadParker {
     futex: AtomicI32,
 }
 
 impl ThreadParker {
+    pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
+
+    #[inline]
     pub fn new() -> ThreadParker {
         ThreadParker {
             futex: AtomicI32::new(0),
         }
     }
 
     // Prepares the parker. This should be called before adding it to the queue.
-    pub unsafe fn prepare_park(&self) {
+    #[inline]
+    pub fn prepare_park(&self) {
         self.futex.store(1, Ordering::Relaxed);
     }
 
     // Checks if the park timed out. This should be called while holding the
     // queue lock after park_until has returned false.
-    pub unsafe fn timed_out(&self) -> bool {
+    #[inline]
+    pub fn timed_out(&self) -> bool {
         self.futex.load(Ordering::Relaxed) != 0
     }
 
     // Parks the thread until it is unparked. This should be called after it has
     // been added to the queue, after unlocking the queue.
-    pub unsafe fn park(&self) {
+    #[inline]
+    pub fn park(&self) {
         while self.futex.load(Ordering::Acquire) != 0 {
-            let r = libc::syscall(
-                libc::SYS_futex,
-                &self.futex,
-                FUTEX_WAIT | FUTEX_PRIVATE,
-                1,
-                0,
-            );
-            debug_assert!(r == 0 || r == -1);
-            if r == -1 {
-                debug_assert!(
-                    *libc::__errno_location() == libc::EINTR
-                        || *libc::__errno_location() == libc::EAGAIN
-                );
-            }
+            self.futex_wait(None);
         }
     }
 
     // Parks the thread until it is unparked or the timeout is reached. This
     // should be called after it has been added to the queue, after unlocking
     // the queue. Returns true if we were unparked and false if we timed out.
-    pub unsafe fn park_until(&self, timeout: Instant) -> bool {
+    #[inline]
+    pub fn park_until(&self, timeout: Instant) -> bool {
         while self.futex.load(Ordering::Acquire) != 0 {
             let now = Instant::now();
             if timeout <= now {
                 return false;
             }
             let diff = timeout - now;
             if diff.as_secs() as libc::time_t as u64 != diff.as_secs() {
                 // Timeout overflowed, just sleep indefinitely
                 self.park();
                 return true;
             }
             let ts = libc::timespec {
                 tv_sec: diff.as_secs() as libc::time_t,
                 tv_nsec: diff.subsec_nanos() as tv_nsec_t,
             };
-            let r = libc::syscall(
+            self.futex_wait(Some(ts));
+        }
+        true
+    }
+
+    #[inline]
+    fn futex_wait(&self, ts: Option<libc::timespec>) {
+        let ts_ptr = ts
+            .as_ref()
+            .map(|ts_ref| ts_ref as *const _)
+            .unwrap_or(ptr::null());
+        let r = unsafe {
+            libc::syscall(
                 libc::SYS_futex,
                 &self.futex,
                 FUTEX_WAIT | FUTEX_PRIVATE,
                 1,
-                &ts,
-            );
-            debug_assert!(r == 0 || r == -1);
-            if r == -1 {
+                ts_ptr,
+            )
+        };
+        debug_assert!(r == 0 || r == -1);
+        if r == -1 {
+            unsafe {
                 debug_assert!(
                     *libc::__errno_location() == libc::EINTR
                         || *libc::__errno_location() == libc::EAGAIN
-                        || *libc::__errno_location() == libc::ETIMEDOUT
+                        || (ts.is_some() && *libc::__errno_location() == libc::ETIMEDOUT)
                 );
             }
         }
-        true
     }
 
     // Locks the parker to prevent the target thread from exiting. This is
     // necessary to ensure that thread-local ThreadData objects remain valid.
     // This should be called while holding the queue lock.
-    pub unsafe fn unpark_lock(&self) -> UnparkHandle {
+    #[inline]
+    pub fn unpark_lock(&self) -> UnparkHandle {
         // We don't need to lock anything, just clear the state
         self.futex.store(0, Ordering::Release);
 
         UnparkHandle { futex: &self.futex }
     }
 }
 
 // Handle for a thread that is about to be unparked. We need to mark the thread
@@ -120,18 +131,25 @@ impl ThreadParker {
 // until after the queue lock is released.
 pub struct UnparkHandle {
     futex: *const AtomicI32,
 }
 
 impl UnparkHandle {
     // Wakes up the parked thread. This should be called after the queue lock is
     // released to avoid blocking the queue for too long.
-    pub unsafe fn unpark(self) {
+    #[inline]
+    pub fn unpark(self) {
         // The thread data may have been freed at this point, but it doesn't
         // matter since the syscall will just return EFAULT in that case.
-        let r = libc::syscall(libc::SYS_futex, self.futex, FUTEX_WAKE | FUTEX_PRIVATE, 1);
+        let r =
+            unsafe { libc::syscall(libc::SYS_futex, self.futex, FUTEX_WAKE | FUTEX_PRIVATE, 1) };
         debug_assert!(r == 0 || r == 1 || r == -1);
         if r == -1 {
-            debug_assert_eq!(*libc::__errno_location(), libc::EFAULT);
+            debug_assert_eq!(unsafe { *libc::__errno_location() }, libc::EFAULT);
         }
     }
 }
+
+#[inline]
+pub fn thread_yield() {
+    thread::yield_now();
+}
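
The Linux parker now funnels both the timed and untimed paths through one futex_wait(Option<timespec>) helper, passing a null timeout pointer for indefinite waits. As a rough, Linux-only sketch of the underlying FUTEX_WAIT/FUTEX_WAKE calls: it assumes the libc crate as a dependency, copies the constants from the file above, and elides return-value checking; the futex_demo module is invented for the example.

#[cfg(target_os = "linux")]
mod futex_demo {
    use std::ptr;
    use std::sync::atomic::AtomicI32;

    const FUTEX_WAIT: i32 = 0;
    const FUTEX_WAKE: i32 = 1;
    const FUTEX_PRIVATE: i32 = 128;

    // Block while *futex still holds `expected`; return immediately on a
    // mismatch (EAGAIN), a wake-up, or a spurious EINTR.
    pub fn wait(futex: &AtomicI32, expected: i32) {
        unsafe {
            libc::syscall(
                libc::SYS_futex,
                futex as *const AtomicI32,
                FUTEX_WAIT | FUTEX_PRIVATE,
                expected,
                ptr::null::<libc::timespec>(),
            );
        }
    }

    // Wake at most one thread currently blocked on the futex word.
    pub fn wake_one(futex: &AtomicI32) {
        unsafe {
            libc::syscall(
                libc::SYS_futex,
                futex as *const AtomicI32,
                FUTEX_WAKE | FUTEX_PRIVATE,
                1,
            );
        }
    }
}

#[cfg(target_os = "linux")]
fn main() {
    use std::sync::atomic::AtomicI32;
    let futex = AtomicI32::new(0);
    // Expected value 1 does not match the stored 0, so FUTEX_WAIT returns
    // immediately with EAGAIN rather than blocking.
    futex_demo::wait(&futex, 1);
    futex_demo::wake_one(&futex);
}

#[cfg(not(target_os = "linux"))]
fn main() {}
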
new file mode 100644
--- /dev/null
+++ b/third_party/rust/parking_lot_core/src/thread_parker/redox.rs
@@ -0,0 +1,150 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::{
+    ptr,
+    sync::atomic::{AtomicI32, Ordering},
+};
+use std::{thread, time::Instant};
+use syscall::{
+    call::futex,
+    data::TimeSpec,
+    error::{Error, EAGAIN, EFAULT, EINTR, ETIMEDOUT},
+    flag::{FUTEX_WAIT, FUTEX_WAKE},
+};
+
+const UNPARKED: i32 = 0;
+const PARKED: i32 = 1;
+
+// Helper type for putting a thread to sleep until some other thread wakes it up
+pub struct ThreadParker {
+    futex: AtomicI32,
+}
+
+impl ThreadParker {
+    pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
+
+    #[inline]
+    pub fn new() -> ThreadParker {
+        ThreadParker {
+            futex: AtomicI32::new(UNPARKED),
+        }
+    }
+
+    // Prepares the parker. This should be called before adding it to the queue.
+    #[inline]
+    pub fn prepare_park(&self) {
+        self.futex.store(PARKED, Ordering::Relaxed);
+    }
+
+    // Checks if the park timed out. This should be called while holding the
+    // queue lock after park_until has returned false.
+    #[inline]
+    pub fn timed_out(&self) -> bool {
+        self.futex.load(Ordering::Relaxed) != UNPARKED
+    }
+
+    // Parks the thread until it is unparked. This should be called after it has
+    // been added to the queue, after unlocking the queue.
+    #[inline]
+    pub fn park(&self) {
+        while self.futex.load(Ordering::Acquire) != UNPARKED {
+            self.futex_wait(None);
+        }
+    }
+
+    // Parks the thread until it is unparked or the timeout is reached. This
+    // should be called after it has been added to the queue, after unlocking
+    // the queue. Returns true if we were unparked and false if we timed out.
+    #[inline]
+    pub fn park_until(&self, timeout: Instant) -> bool {
+        while self.futex.load(Ordering::Acquire) != UNPARKED {
+            let now = Instant::now();
+            if timeout <= now {
+                return false;
+            }
+            let diff = timeout - now;
+            if diff.as_secs() > i64::max_value() as u64 {
+                // Timeout overflowed, just sleep indefinitely
+                self.park();
+                return true;
+            }
+            let ts = TimeSpec {
+                tv_sec: diff.as_secs() as i64,
+                tv_nsec: diff.subsec_nanos() as i32,
+            };
+            self.futex_wait(Some(ts));
+        }
+        true
+    }
+
+    #[inline]
+    fn futex_wait(&self, ts: Option<TimeSpec>) {
+        let ts_ptr = ts
+            .as_ref()
+            .map(|ts_ref| ts_ref as *const _)
+            .unwrap_or(ptr::null());
+        let r = unsafe {
+            futex(
+                self.ptr(),
+                FUTEX_WAIT,
+                PARKED,
+                ts_ptr as usize,
+                ptr::null_mut(),
+            )
+        };
+        match r {
+            Ok(r) => debug_assert_eq!(r, 0),
+            Err(Error { errno }) => {
+                debug_assert!(errno == EINTR || errno == EAGAIN || errno == ETIMEDOUT);
+            }
+        }
+    }
+
+    // Locks the parker to prevent the target thread from exiting. This is
+    // necessary to ensure that thread-local ThreadData objects remain valid.
+    // This should be called while holding the queue lock.
+    #[inline]
+    pub fn unpark_lock(&self) -> UnparkHandle {
+        // We don't need to lock anything, just clear the state
+        self.futex.store(UNPARKED, Ordering::Release);
+
+        UnparkHandle { futex: self.ptr() }
+    }
+
+    #[inline]
+    fn ptr(&self) -> *mut i32 {
+        &self.futex as *const AtomicI32 as *mut i32
+    }
+}
+
+// Handle for a thread that is about to be unparked. We need to mark the thread
+// as unparked while holding the queue lock, but we delay the actual unparking
+// until after the queue lock is released.
+pub struct UnparkHandle {
+    futex: *mut i32,
+}
+
+impl UnparkHandle {
+    // Wakes up the parked thread. This should be called after the queue lock is
+    // released to avoid blocking the queue for too long.
+    #[inline]
+    pub fn unpark(self) {
+        // The thread data may have been freed at this point, but it doesn't
+        // matter since the syscall will just return EFAULT in that case.
+        let r = unsafe { futex(self.futex, FUTEX_WAKE, PARKED, 0, ptr::null_mut()) };
+        match r {
+            Ok(num_woken) => debug_assert!(num_woken == 0 || num_woken == 1),
+            Err(Error { errno }) => debug_assert_eq!(errno, EFAULT),
+        }
+    }
+}
+
+#[inline]
+pub fn thread_yield() {
+    thread::yield_now();
+}
new file mode 100644
--- /dev/null
+++ b/third_party/rust/parking_lot_core/src/thread_parker/sgx.rs
@@ -0,0 +1,108 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::sync::atomic::{AtomicBool, Ordering};
+use std::{
+    io,
+    os::fortanix_sgx::{
+        thread::current as current_tcs,
+        usercalls::{
+            self,
+            raw::{Tcs, EV_UNPARK, WAIT_INDEFINITE},
+        },
+    },
+    thread,
+    time::Instant,
+};
+
+// Helper type for putting a thread to sleep until some other thread wakes it up
+pub struct ThreadParker {
+    parked: AtomicBool,
+    tcs: Tcs,
+}
+
+impl ThreadParker {
+    pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
+
+    #[inline]
+    pub fn new() -> ThreadParker {
+        ThreadParker {
+            parked: AtomicBool::new(false),
+            tcs: current_tcs(),
+        }
+    }
+
+    // Prepares the parker. This should be called before adding it to the queue.
+    #[inline]
+    pub fn prepare_park(&self) {
+        self.parked.store(true, Ordering::Relaxed);
+    }
+
+    // Checks if the park timed out. This should be called while holding the
+    // queue lock after park_until has returned false.
+    #[inline]
+    pub fn timed_out(&self) -> bool {
+        self.parked.load(Ordering::Relaxed)
+    }
+
+    // Parks the thread until it is unparked. This should be called after it has
+    // been added to the queue, after unlocking the queue.
+    #[inline]
+    pub fn park(&self) {
+        while self.parked.load(Ordering::Acquire) {
+            let result = usercalls::wait(EV_UNPARK, WAIT_INDEFINITE);
+            debug_assert_eq!(result.expect("wait returned error") & EV_UNPARK, EV_UNPARK);
+        }
+    }
+
+    // Parks the thread until it is unparked or the timeout is reached. This
+    // should be called after it has been added to the queue, after unlocking
+    // the queue. Returns true if we were unparked and false if we timed out.
+    #[inline]
+    pub fn park_until(&self, _timeout: Instant) -> bool {
+        // FIXME: https://github.com/fortanix/rust-sgx/issues/31
+        panic!("timeout not supported in SGX");
+    }
+
+    // Locks the parker to prevent the target thread from exiting. This is
+    // necessary to ensure that thread-local ThreadData objects remain valid.
+    // This should be called while holding the queue lock.
+    #[inline]
+    pub fn unpark_lock(&self) -> UnparkHandle {
+        // We don't need to lock anything, just clear the state
+        self.parked.store(false, Ordering::Release);
+        UnparkHandle(self.tcs)
+    }
+}
+
+// Handle for a thread that is about to be unparked. We need to mark the thread
+// as unparked while holding the queue lock, but we delay the actual unparking
+// until after the queue lock is released.
+pub struct UnparkHandle(Tcs);
+
+impl UnparkHandle {
+    // Wakes up the parked thread. This should be called after the queue lock is
+    // released to avoid blocking the queue for too long.
+    #[inline]
+    pub fn unpark(self) {
+        let result = usercalls::send(EV_UNPARK, Some(self.0));
+        if cfg!(debug_assertions) {
+            if let Err(error) = result {
+                // `InvalidInput` may be returned if the thread we send to has
+                // already been unparked and exited.
+                if error.kind() != io::ErrorKind::InvalidInput {
+                    panic!("send returned an unexpected error: {:?}", error);
+                }
+            }
+        }
+    }
+}
+
+#[inline]
+pub fn thread_yield() {
+    thread::yield_now();
+}
--- a/third_party/rust/parking_lot_core/src/thread_parker/unix.rs
+++ b/third_party/rust/parking_lot_core/src/thread_parker/unix.rs
@@ -1,95 +1,118 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use std::cell::{Cell, UnsafeCell};
-use std::time::{Duration, Instant};
+#[cfg(any(target_os = "macos", target_os = "ios"))]
+use core::ptr;
+use core::{
+    cell::{Cell, UnsafeCell},
+    mem,
+};
 use libc;
-use std::mem;
-#[cfg(any(target_os = "macos", target_os = "ios"))]
-use std::ptr;
+use std::{
+    thread,
+    time::{Duration, Instant},
+};
+
+// x32 Linux uses a non-standard type for tv_nsec in timespec.
+// See https://sourceware.org/bugzilla/show_bug.cgi?id=16437
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))]
+#[allow(non_camel_case_types)]
+type tv_nsec_t = i64;
+#[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))]
+#[allow(non_camel_case_types)]
+type tv_nsec_t = libc::c_long;
 
 // Helper type for putting a thread to sleep until some other thread wakes it up
 pub struct ThreadParker {
     should_park: Cell<bool>,
     mutex: UnsafeCell<libc::pthread_mutex_t>,
     condvar: UnsafeCell<libc::pthread_cond_t>,
     initialized: Cell<bool>,
 }
 
 impl ThreadParker {
+    pub const IS_CHEAP_TO_CONSTRUCT: bool = false;
+
+    #[inline]
     pub fn new() -> ThreadParker {
         ThreadParker {
             should_park: Cell::new(false),
             mutex: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER),
             condvar: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER),
             initialized: Cell::new(false),
         }
     }
 
     // Initializes the condvar to use CLOCK_MONOTONIC instead of CLOCK_REALTIME.
     #[cfg(any(target_os = "macos", target_os = "ios", target_os = "android"))]
+    #[inline]
     unsafe fn init(&self) {}
     #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "android")))]
+    #[inline]
     unsafe fn init(&self) {
         let mut attr: libc::pthread_condattr_t = mem::uninitialized();
         let r = libc::pthread_condattr_init(&mut attr);
         debug_assert_eq!(r, 0);
         let r = libc::pthread_condattr_setclock(&mut attr, libc::CLOCK_MONOTONIC);
         debug_assert_eq!(r, 0);
         let r = libc::pthread_cond_init(self.condvar.get(), &attr);
         debug_assert_eq!(r, 0);
         let r = libc::pthread_condattr_destroy(&mut attr);
         debug_assert_eq!(r, 0);
     }
 
     // Prepares the parker. This should be called before adding it to the queue.
+    #[inline]
     pub unsafe fn prepare_park(&self) {
         self.should_park.set(true);
         if !self.initialized.get() {
             self.init();
             self.initialized.set(true);
         }
     }
 
     // Checks if the park timed out. This should be called while holding the
     // queue lock after park_until has returned false.
+    #[inline]
     pub unsafe fn timed_out(&self) -> bool {
         // We need to grab the mutex here because another thread may be
         // concurrently executing UnparkHandle::unpark, which is done without
         // holding the queue lock.
         let r = libc::pthread_mutex_lock(self.mutex.get());
         debug_assert_eq!(r, 0);
         let should_park = self.should_park.get();
         let r = libc::pthread_mutex_unlock(self.mutex.get());
         debug_assert_eq!(r, 0);
         should_park
     }
 
     // Parks the thread until it is unparked. This should be called after it has
     // been added to the queue, after unlocking the queue.
+    #[inline]
     pub unsafe fn park(&self) {
         let r = libc::pthread_mutex_lock(self.mutex.get());
         debug_assert_eq!(r, 0);
         while self.should_park.get() {
             let r = libc::pthread_cond_wait(self.condvar.get(), self.mutex.get());
             debug_assert_eq!(r, 0);
         }
         let r = libc::pthread_mutex_unlock(self.mutex.get());
         debug_assert_eq!(r, 0);
     }
 
     // Parks the thread until it is unparked or the timeout is reached. This
     // should be called after it has been added to the queue, after unlocking
     // the queue. Returns true if we were unparked and false if we timed out.
+    #[inline]
     pub unsafe fn park_until(&self, timeout: Instant) -> bool {
         let r = libc::pthread_mutex_lock(self.mutex.get());
         debug_assert_eq!(r, 0);
         while self.should_park.get() {
             let now = Instant::now();
             if timeout <= now {
                 let r = libc::pthread_mutex_unlock(self.mutex.get());
                 debug_assert_eq!(r, 0);
@@ -115,27 +138,29 @@ impl ThreadParker {
         let r = libc::pthread_mutex_unlock(self.mutex.get());
         debug_assert_eq!(r, 0);
         true
     }
 
     // Locks the parker to prevent the target thread from exiting. This is
     // necessary to ensure that thread-local ThreadData objects remain valid.
     // This should be called while holding the queue lock.
+    #[inline]
     pub unsafe fn unpark_lock(&self) -> UnparkHandle {
         let r = libc::pthread_mutex_lock(self.mutex.get());
         debug_assert_eq!(r, 0);
 
         UnparkHandle {
             thread_parker: self,
         }
     }
 }
 
 impl Drop for ThreadParker {
+    #[inline]
     fn drop(&mut self) {
         // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
         // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
         // Once it is used (locked/unlocked) or pthread_mutex_init() is called,
         // this behaviour no longer occurs. The same applies to condvars.
         unsafe {
             let r = libc::pthread_mutex_destroy(self.mutex.get());
             if cfg!(target_os = "dragonfly") {
@@ -158,68 +183,77 @@ impl Drop for ThreadParker {
 // until after the queue lock is released.
 pub struct UnparkHandle {
     thread_parker: *const ThreadParker,
 }
 
 impl UnparkHandle {
     // Wakes up the parked thread. This should be called after the queue lock is
     // released to avoid blocking the queue for too long.
+    #[inline]
     pub unsafe fn unpark(self) {
         (*self.thread_parker).should_park.set(false);
 
         // We notify while holding the lock here to avoid races with the target
         // thread. In particular, the thread could exit after we unlock the
         // mutex, which would make the condvar access invalid memory.
         let r = libc::pthread_cond_signal((*self.thread_parker).condvar.get());
         debug_assert_eq!(r, 0);
         let r = libc::pthread_mutex_unlock((*self.thread_parker).mutex.get());
         debug_assert_eq!(r, 0);
     }
 }
 
 // Returns the current time on the clock used by pthread_cond_t as a timespec.
 #[cfg(any(target_os = "macos", target_os = "ios"))]
-unsafe fn timespec_now() -> libc::timespec {
-    let mut now: libc::timeval = mem::uninitialized();
-    let r = libc::gettimeofday(&mut now, ptr::null_mut());
+#[inline]
+fn timespec_now() -> libc::timespec {
+    let mut now: libc::timeval = unsafe { mem::uninitialized() };
+    let r = unsafe { libc::gettimeofday(&mut now, ptr::null_mut()) };
     debug_assert_eq!(r, 0);
     libc::timespec {
         tv_sec: now.tv_sec,
-        tv_nsec: now.tv_usec as libc::c_long * 1000,
+        tv_nsec: now.tv_usec as tv_nsec_t * 1000,
     }
 }
 #[cfg(not(any(target_os = "macos", target_os = "ios")))]
-unsafe fn timespec_now() -> libc::timespec {
-    let mut now: libc::timespec = mem::uninitialized();
+#[inline]
+fn timespec_now() -> libc::timespec {
+    let mut now: libc::timespec = unsafe { mem::uninitialized() };
     let clock = if cfg!(target_os = "android") {
         // Android doesn't support pthread_condattr_setclock, so we need to
         // specify the timeout in CLOCK_REALTIME.
         libc::CLOCK_REALTIME
     } else {
         libc::CLOCK_MONOTONIC
     };
-    let r = libc::clock_gettime(clock, &mut now);
+    let r = unsafe { libc::clock_gettime(clock, &mut now) };
     debug_assert_eq!(r, 0);
     now
 }
 
 // Converts a relative timeout into an absolute timeout in the clock used by
 // pthread_cond_t.
-unsafe fn timeout_to_timespec(timeout: Duration) -> Option<libc::timespec> {
+#[inline]
+fn timeout_to_timespec(timeout: Duration) -> Option<libc::timespec> {
     // Handle overflows early on
     if timeout.as_secs() > libc::time_t::max_value() as u64 {
         return None;
     }
 
     let now = timespec_now();
-    let mut nsec = now.tv_nsec + timeout.subsec_nanos() as libc::c_long;
+    let mut nsec = now.tv_nsec + timeout.subsec_nanos() as tv_nsec_t;
     let mut sec = now.tv_sec.checked_add(timeout.as_secs() as libc::time_t);
     if nsec >= 1_000_000_000 {
         nsec -= 1_000_000_000;
         sec = sec.and_then(|sec| sec.checked_add(1));
     }
 
     sec.map(|sec| libc::timespec {
         tv_nsec: nsec,
         tv_sec: sec,
     })
 }
+
+#[inline]
+pub fn thread_yield() {
+    thread::yield_now();
+}
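
timeout_to_timespec adds a relative Duration to the current CLOCK_MONOTONIC time, carrying overflowing nanoseconds into the seconds field and returning None when the seconds would overflow so that the caller can sleep indefinitely. That arithmetic can be checked in isolation with a portable sketch; the add_timeout helper below is hypothetical and operates on plain (sec, nsec) pairs rather than libc::timespec.

use std::time::Duration;

// Add a relative Duration to a (tv_sec, tv_nsec) "now", normalising tv_nsec
// into [0, 1e9) and returning None if tv_sec would overflow.
fn add_timeout(now: (i64, i64), timeout: Duration) -> Option<(i64, i64)> {
    if timeout.as_secs() > i64::max_value() as u64 {
        return None;
    }
    let mut nsec = now.1 + i64::from(timeout.subsec_nanos());
    let mut sec = now.0.checked_add(timeout.as_secs() as i64);
    if nsec >= 1_000_000_000 {
        nsec -= 1_000_000_000;
        sec = sec.and_then(|s| s.checked_add(1));
    }
    sec.map(|s| (s, nsec))
}

fn main() {
    // 1.5s after t = 2s + 0.7s lands at 4.2s.
    assert_eq!(
        add_timeout((2, 700_000_000), Duration::new(1, 500_000_000)),
        Some((4, 200_000_000))
    );
    // A huge timeout overflows and yields None, so the caller parks indefinitely.
    assert_eq!(add_timeout((0, 0), Duration::from_secs(u64::max_value())), None);
}
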
new file mode 100644
--- /dev/null
+++ b/third_party/rust/parking_lot_core/src/thread_parker/wasm.rs
@@ -0,0 +1,108 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::{
+    arch::wasm32,
+    sync::atomic::{AtomicI32, Ordering},
+};
+use std::{convert::TryFrom, thread, time::Instant};
+
+// Helper type for putting a thread to sleep until some other thread wakes it up
+pub struct ThreadParker {
+    parked: AtomicI32,
+}
+
+const UNPARKED: i32 = 0;
+const PARKED: i32 = 1;
+
+impl ThreadParker {
+    pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
+
+    #[inline]
+    pub fn new() -> ThreadParker {
+        ThreadParker {
+            parked: AtomicI32::new(UNPARKED),
+        }
+    }
+
+    // Prepares the parker. This should be called before adding it to the queue.
+    #[inline]
+    pub fn prepare_park(&self) {
+        self.parked.store(PARKED, Ordering::Relaxed);
+    }
+
+    // Checks if the park timed out. This should be called while holding the
+    // queue lock after park_until has returned false.
+    #[inline]
+    pub fn timed_out(&self) -> bool {
+        self.parked.load(Ordering::Relaxed) == PARKED
+    }
+
+    // Parks the thread until it is unparked. This should be called after it has
+    // been added to the queue, after unlocking the queue.
+    #[inline]
+    pub fn park(&self) {
+        while self.parked.load(Ordering::Acquire) == PARKED {
+            let r = unsafe { wasm32::i32_atomic_wait(self.ptr(), PARKED, -1) };
+            // we should have either woken up (0) or got a not-equal due to a
+            // race (1). We should never time out (2)
+            debug_assert!(r == 0 || r == 1);
+        }
+    }
+
+    // Parks the thread until it is unparked or the timeout is reached. This
+    // should be called after it has been added to the queue, after unlocking
+    // the queue. Returns true if we were unparked and false if we timed out.
+    #[inline]
+    pub fn park_until(&self, timeout: Instant) -> bool {
+        while self.parked.load(Ordering::Acquire) == PARKED {
+            if let Some(left) = timeout.checked_duration_since(Instant::now()) {
+                let nanos_left = i64::try_from(left.as_nanos()).unwrap_or(i64::max_value());
+                let r = unsafe { wasm32::i32_atomic_wait(self.ptr(), PARKED, nanos_left) };
+                debug_assert!(r == 0 || r == 1 || r == 2);
+            } else {
+                return false;
+            }
+        }
+        true
+    }
+
+    // Locks the parker to prevent the target thread from exiting. This is
+    // necessary to ensure that thread-local ThreadData objects remain valid.
+    // This should be called while holding the queue lock.
+    #[inline]
+    pub fn unpark_lock(&self) -> UnparkHandle {
+        // We don't need to lock anything, just clear the state
+        self.parked.store(UNPARKED, Ordering::Release);
+        UnparkHandle(self.ptr())
+    }
+
+    #[inline]
+    fn ptr(&self) -> *mut i32 {
+        &self.parked as *const AtomicI32 as *mut i32
+    }
+}
+
+// Handle for a thread that is about to be unparked. We need to mark the thread
+// as unparked while holding the queue lock, but we delay the actual unparking
+// until after the queue lock is released.
+pub struct UnparkHandle(*mut i32);
+
+impl UnparkHandle {
+    // Wakes up the parked thread. This should be called after the queue lock is
+    // released to avoid blocking the queue for too long.
+    #[inline]
+    pub fn unpark(self) {
+        let num_notified = unsafe { wasm32::atomic_notify(self.0 as *mut i32, 1) };
+        debug_assert!(num_notified == 0 || num_notified == 1);
+    }
+}
+
+#[inline]
+pub fn thread_yield() {
+    thread::yield_now();
+}
--- a/third_party/rust/parking_lot_core/src/thread_parker/windows/keyed_event.rs
+++ b/third_party/rust/parking_lot_core/src/thread_parker/windows/keyed_event.rs
@@ -1,27 +1,35 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::time::Instant;
-use std::ptr;
-use std::mem;
-
-use winapi::shared::minwindef::{TRUE, ULONG};
-use winapi::shared::ntdef::NTSTATUS;
-use winapi::shared::ntstatus::{STATUS_SUCCESS, STATUS_TIMEOUT};
-use winapi::um::handleapi::CloseHandle;
-use winapi::um::libloaderapi::{GetModuleHandleA, GetProcAddress};
-use winapi::um::winnt::{ACCESS_MASK, GENERIC_READ, GENERIC_WRITE, LPCSTR};
-use winapi::um::winnt::{BOOLEAN, HANDLE, LARGE_INTEGER, PHANDLE, PLARGE_INTEGER, PVOID};
+use core::{mem, ptr};
+use std::{
+    sync::atomic::{AtomicUsize, Ordering},
+    time::Instant,
+};
+use winapi::{
+    shared::{
+        minwindef::{TRUE, ULONG},
+        ntdef::NTSTATUS,
+        ntstatus::{STATUS_SUCCESS, STATUS_TIMEOUT},
+    },
+    um::{
+        handleapi::CloseHandle,
+        libloaderapi::{GetModuleHandleA, GetProcAddress},
+        winnt::{
+            ACCESS_MASK, BOOLEAN, GENERIC_READ, GENERIC_WRITE, HANDLE, LARGE_INTEGER, LPCSTR,
+            PHANDLE, PLARGE_INTEGER, PVOID,
+        },
+    },
+};
 
 const STATE_UNPARKED: usize = 0;
 const STATE_PARKED: usize = 1;
 const STATE_TIMED_OUT: usize = 2;
 
 #[allow(non_snake_case)]
 pub struct KeyedEvent {
     handle: HANDLE,
@@ -35,83 +43,92 @@ pub struct KeyedEvent {
         EventHandle: HANDLE,
         Key: PVOID,
         Alertable: BOOLEAN,
         Timeout: PLARGE_INTEGER,
     ) -> NTSTATUS,
 }
 
 impl KeyedEvent {
+    #[inline]
     unsafe fn wait_for(&self, key: PVOID, timeout: PLARGE_INTEGER) -> NTSTATUS {
         (self.NtWaitForKeyedEvent)(self.handle, key, 0, timeout)
     }
 
+    #[inline]
     unsafe fn release(&self, key: PVOID) -> NTSTATUS {
         (self.NtReleaseKeyedEvent)(self.handle, key, 0, ptr::null_mut())
     }
 
     #[allow(non_snake_case)]
-    pub unsafe fn create() -> Option<KeyedEvent> {
-        let ntdll = GetModuleHandleA(b"ntdll.dll\0".as_ptr() as LPCSTR);
-        if ntdll.is_null() {
-            return None;
-        }
+    pub fn create() -> Option<KeyedEvent> {
+        unsafe {
+            let ntdll = GetModuleHandleA(b"ntdll.dll\0".as_ptr() as LPCSTR);
+            if ntdll.is_null() {
+                return None;
+            }
 
-        let NtCreateKeyedEvent = GetProcAddress(ntdll, b"NtCreateKeyedEvent\0".as_ptr() as LPCSTR);
-        if NtCreateKeyedEvent.is_null() {
-            return None;
-        }
-        let NtReleaseKeyedEvent =
-            GetProcAddress(ntdll, b"NtReleaseKeyedEvent\0".as_ptr() as LPCSTR);
-        if NtReleaseKeyedEvent.is_null() {
-            return None;
-        }
-        let NtWaitForKeyedEvent =
-            GetProcAddress(ntdll, b"NtWaitForKeyedEvent\0".as_ptr() as LPCSTR);
-        if NtWaitForKeyedEvent.is_null() {
-            return None;
-        }
+            let NtCreateKeyedEvent =
+                GetProcAddress(ntdll, b"NtCreateKeyedEvent\0".as_ptr() as LPCSTR);
+            if NtCreateKeyedEvent.is_null() {
+                return None;
+            }
+            let NtReleaseKeyedEvent =
+                GetProcAddress(ntdll, b"NtReleaseKeyedEvent\0".as_ptr() as LPCSTR);
+            if NtReleaseKeyedEvent.is_null() {
+                return None;
+            }
+            let NtWaitForKeyedEvent =
+                GetProcAddress(ntdll, b"NtWaitForKeyedEvent\0".as_ptr() as LPCSTR);
+            if NtWaitForKeyedEvent.is_null() {
+                return None;
+            }
 
-        let NtCreateKeyedEvent: extern "system" fn(
-            KeyedEventHandle: PHANDLE,
-            DesiredAccess: ACCESS_MASK,
-            ObjectAttributes: PVOID,
-            Flags: ULONG,
-        ) -> NTSTATUS = mem::transmute(NtCreateKeyedEvent);
-        let mut handle = mem::uninitialized();
-        let status = NtCreateKeyedEvent(
-            &mut handle,
-            GENERIC_READ | GENERIC_WRITE,
-            ptr::null_mut(),
-            0,
-        );
-        if status != STATUS_SUCCESS {
-            return None;
+            let NtCreateKeyedEvent: extern "system" fn(
+                KeyedEventHandle: PHANDLE,
+                DesiredAccess: ACCESS_MASK,
+                ObjectAttributes: PVOID,
+                Flags: ULONG,
+            ) -> NTSTATUS = mem::transmute(NtCreateKeyedEvent);
+            let mut handle = mem::uninitialized();
+            let status = NtCreateKeyedEvent(
+                &mut handle,
+                GENERIC_READ | GENERIC_WRITE,
+                ptr::null_mut(),
+                0,
+            );
+            if status != STATUS_SUCCESS {
+                return None;
+            }
+
+            Some(KeyedEvent {
+                handle,
+                NtReleaseKeyedEvent: mem::transmute(NtReleaseKeyedEvent),
+                NtWaitForKeyedEvent: mem::transmute(NtWaitForKeyedEvent),
+            })
         }
-
-        Some(KeyedEvent {
-            handle,
-            NtReleaseKeyedEvent: mem::transmute(NtReleaseKeyedEvent),
-            NtWaitForKeyedEvent: mem::transmute(NtWaitForKeyedEvent),
-        })
     }
 
-    pub unsafe fn prepare_park(&'static self, key: &AtomicUsize) {
+    #[inline]
+    pub fn prepare_park(&'static self, key: &AtomicUsize) {
         key.store(STATE_PARKED, Ordering::Relaxed);
     }
 
-    pub unsafe fn timed_out(&'static self, key: &AtomicUsize) -> bool {
+    #[inline]
+    pub fn timed_out(&'static self, key: &AtomicUsize) -> bool {
         key.load(Ordering::Relaxed) == STATE_TIMED_OUT
     }
 
+    #[inline]
     pub unsafe fn park(&'static self, key: &AtomicUsize) {
         let status = self.wait_for(key as *const _ as PVOID, ptr::null_mut());
         debug_assert_eq!(status, STATUS_SUCCESS);
     }
 
+    #[inline]
     pub unsafe fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool {
         let now = Instant::now();
         if timeout <= now {
             // If another thread unparked us, we need to call
             // NtWaitForKeyedEvent otherwise that thread will stay stuck at
             // NtReleaseKeyedEvent.
             if key.swap(STATE_TIMED_OUT, Ordering::Relaxed) == STATE_UNPARKED {
                 self.park(key);
@@ -147,16 +164,17 @@ impl KeyedEvent {
         // otherwise that thread will stay stuck at NtReleaseKeyedEvent.
         if key.swap(STATE_TIMED_OUT, Ordering::Relaxed) == STATE_UNPARKED {
             self.park(key);
             return true;
         }
         false
     }
 
+    #[inline]
     pub unsafe fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle {
         // If the state was STATE_PARKED then we need to wake up the thread
         if key.swap(STATE_UNPARKED, Ordering::Relaxed) == STATE_PARKED {
             UnparkHandle {
                 key: key,
                 keyed_event: self,
             }
         } else {
@@ -164,16 +182,17 @@ impl KeyedEvent {
                 key: ptr::null(),
                 keyed_event: self,
             }
         }
     }
 }
 
 impl Drop for KeyedEvent {
+    #[inline]
     fn drop(&mut self) {
         unsafe {
             let ok = CloseHandle(self.handle);
             debug_assert_eq!(ok, TRUE);
         }
     }
 }
 
@@ -183,15 +202,16 @@ impl Drop for KeyedEvent {
 pub struct UnparkHandle {
     key: *const AtomicUsize,
     keyed_event: &'static KeyedEvent,
 }
 
 impl UnparkHandle {
     // Wakes up the parked thread. This should be called after the queue lock is
     // released to avoid blocking the queue for too long.
+    #[inline]
     pub unsafe fn unpark(self) {
         if !self.key.is_null() {
             let status = self.keyed_event.release(self.key as PVOID);
             debug_assert_eq!(status, STATUS_SUCCESS);
         }
     }
 }
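
Editor's note: the comments in park_until above hinge on one race: if the parker times out at the same moment another thread commits to waking it, the parker must still consume that wake-up, or the waker would stay stuck inside NtReleaseKeyedEvent. Below is a portable sketch of that state-machine logic; the channel merely stands in for the keyed-event wait/release pair, and all names are illustrative.

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};

const STATE_UNPARKED: usize = 0;
const STATE_PARKED: usize = 1;
const STATE_TIMED_OUT: usize = 2;

struct SketchParker {
    state: AtomicUsize,
    wake_rx: Receiver<()>, // stand-in for NtWaitForKeyedEvent
}

struct SketchUnparker {
    wake_tx: Sender<()>, // stand-in for NtReleaseKeyedEvent
}

impl SketchParker {
    // Mirrors the timeout branch of park_until: declare ourselves timed out,
    // but if the unparker already moved us to UNPARKED, consume its pending
    // wake-up so the unparker cannot get stuck delivering it.
    fn time_out(&self) -> bool {
        if self.state.swap(STATE_TIMED_OUT, Ordering::Relaxed) == STATE_UNPARKED {
            self.wake_rx.recv().unwrap(); // consume the wake-up token
            return true; // we were unparked after all
        }
        false // genuine timeout
    }
}

impl SketchUnparker {
    // Only deliver a wake-up if the target is still parked.
    fn unpark(&self, state: &AtomicUsize) {
        if state.swap(STATE_UNPARKED, Ordering::Relaxed) == STATE_PARKED {
            self.wake_tx.send(()).unwrap();
        }
    }
}

fn main() {
    let (wake_tx, wake_rx) = channel();
    let parker = SketchParker { state: AtomicUsize::new(STATE_PARKED), wake_rx };
    let unparker = SketchUnparker { wake_tx };
    unparker.unpark(&parker.state); // the waker wins the race...
    assert!(parker.time_out()); // ...so the timed-out parker must absorb its token
}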
--- a/third_party/rust/parking_lot_core/src/thread_parker/windows/mod.rs
+++ b/third_party/rust/parking_lot_core/src/thread_parker/windows/mod.rs
@@ -1,118 +1,143 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
+use core::{
+    ptr,
+    sync::atomic::{AtomicPtr, AtomicUsize, Ordering},
+};
 use std::time::Instant;
 
 mod keyed_event;
 mod waitaddress;
 
 enum Backend {
     KeyedEvent(keyed_event::KeyedEvent),
     WaitAddress(waitaddress::WaitAddress),
 }
 
-impl Backend {
-    unsafe fn get() -> &'static Backend {
-        static BACKEND: AtomicUsize = ATOMIC_USIZE_INIT;
+static BACKEND: AtomicPtr<Backend> = AtomicPtr::new(ptr::null_mut());
 
+impl Backend {
+    #[inline]
+    fn get() -> &'static Backend {
         // Fast path: use the existing object
-        let backend = BACKEND.load(Ordering::Acquire);
-        if backend != 0 {
-            return &*(backend as *const Backend);
+        let backend_ptr = BACKEND.load(Ordering::Acquire);
+        if !backend_ptr.is_null() {
+            return unsafe { &*backend_ptr };
         };
 
+        Backend::create()
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn create() -> &'static Backend {
         // Try to create a new Backend
         let backend;
         if let Some(waitaddress) = waitaddress::WaitAddress::create() {
             backend = Backend::WaitAddress(waitaddress);
         } else if let Some(keyed_event) = keyed_event::KeyedEvent::create() {
             backend = Backend::KeyedEvent(keyed_event);
         } else {
             panic!(
                 "parking_lot requires either NT Keyed Events (WinXP+) or \
                  WaitOnAddress/WakeByAddress (Win8+)"
             );
         }
 
-        // Try to create a new object
-        let backend = Box::into_raw(Box::new(backend));
-        match BACKEND.compare_exchange(0, backend as usize, Ordering::Release, Ordering::Relaxed) {
-            Ok(_) => &*(backend as *const Backend),
-            Err(x) => {
-                // We lost the race, free our object and return the global one
-                Box::from_raw(backend);
-                &*(x as *const Backend)
+        // Try to set our new Backend as the global one
+        let backend_ptr = Box::into_raw(Box::new(backend));
+        match BACKEND.compare_exchange(
+            ptr::null_mut(),
+            backend_ptr,
+            Ordering::Release,
+            Ordering::Relaxed,
+        ) {
+            Ok(_) => unsafe { &*backend_ptr },
+            Err(global_backend_ptr) => {
+                unsafe {
+                    // We lost the race, free our object and return the global one
+                    Box::from_raw(backend_ptr);
+                    &*global_backend_ptr
+                }
             }
         }
     }
 }
 
 // Helper type for putting a thread to sleep until some other thread wakes it up
 pub struct ThreadParker {
     key: AtomicUsize,
     backend: &'static Backend,
 }
 
 impl ThreadParker {
+    pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
+
+    #[inline]
     pub fn new() -> ThreadParker {
         // Initialize the backend here to ensure we don't get any panics
         // later on, which could leave synchronization primitives in a broken
         // state.
         ThreadParker {
             key: AtomicUsize::new(0),
-            backend: unsafe { Backend::get() },
+            backend: Backend::get(),
         }
     }
 
     // Prepares the parker. This should be called before adding it to the queue.
-    pub unsafe fn prepare_park(&self) {
+    #[inline]
+    pub fn prepare_park(&self) {
         match *self.backend {
             Backend::KeyedEvent(ref x) => x.prepare_park(&self.key),
             Backend::WaitAddress(ref x) => x.prepare_park(&self.key),
         }
     }
 
     // Checks if the park timed out. This should be called while holding the
     // queue lock after park_until has returned false.
-    pub unsafe fn timed_out(&self) -> bool {
+    #[inline]
+    pub fn timed_out(&self) -> bool {
         match *self.backend {
             Backend::KeyedEvent(ref x) => x.timed_out(&self.key),
             Backend::WaitAddress(ref x) => x.timed_out(&self.key),
         }
     }
 
     // Parks the thread until it is unparked. This should be called after it has
     // been added to the queue, after unlocking the queue.
+    #[inline]
     pub unsafe fn park(&self) {
         match *self.backend {
             Backend::KeyedEvent(ref x) => x.park(&self.key),
             Backend::WaitAddress(ref x) => x.park(&self.key),
         }
     }
 
     // Parks the thread until it is unparked or the timeout is reached. This
     // should be called after it has been added to the queue, after unlocking
     // the queue. Returns true if we were unparked and false if we timed out.
+    #[inline]
     pub unsafe fn park_until(&self, timeout: Instant) -> bool {
         match *self.backend {
             Backend::KeyedEvent(ref x) => x.park_until(&self.key, timeout),
             Backend::WaitAddress(ref x) => x.park_until(&self.key, timeout),
         }
     }
 
     // Locks the parker to prevent the target thread from exiting. This is
     // necessary to ensure that thread-local ThreadData objects remain valid.
     // This should be called while holding the queue lock.
+    #[inline]
     pub unsafe fn unpark_lock(&self) -> UnparkHandle {
         match *self.backend {
             Backend::KeyedEvent(ref x) => UnparkHandle::KeyedEvent(x.unpark_lock(&self.key)),
             Backend::WaitAddress(ref x) => UnparkHandle::WaitAddress(x.unpark_lock(&self.key)),
         }
     }
 }
 
@@ -122,15 +147,43 @@ impl ThreadParker {
 pub enum UnparkHandle {
     KeyedEvent(keyed_event::UnparkHandle),
     WaitAddress(waitaddress::UnparkHandle),
 }
 
 impl UnparkHandle {
     // Wakes up the parked thread. This should be called after the queue lock is
     // released to avoid blocking the queue for too long.
+    #[inline]
     pub unsafe fn unpark(self) {
         match self {
             UnparkHandle::KeyedEvent(x) => x.unpark(),
             UnparkHandle::WaitAddress(x) => x.unpark(),
         }
     }
 }
+
+// Yields the rest of the current timeslice to the OS
+#[inline]
+pub fn thread_yield() {
+    // Note that this is manually defined here rather than using the definition
+    // through `winapi`. The `winapi` definition comes from the `synchapi`
+    // header which enables the "synchronization.lib" library. It turns out,
+    // however, that `Sleep` comes from `kernel32.dll` so this activation isn't
+    // necessary.
+    //
+    // This was originally identified in rust-lang/rust where on MinGW the
+    // libsynchronization.a library pulls in a dependency on a newer DLL not
+    // present in older versions of Windows. (see rust-lang/rust#49438)
+    //
+    // This is a bit of a hack for now and ideally we'd fix MinGW's own import
+    // libraries, but that'll probably take a lot longer than patching this here
+    // and avoiding the `synchapi` feature entirely.
+    extern "system" {
+        fn Sleep(a: winapi::shared::minwindef::DWORD);
+    }
+    unsafe {
+        // We don't use SwitchToThread here because it doesn't consider all
+        // threads in the system and the thread we are waiting for may not get
+        // selected.
+        Sleep(0);
+    }
+}
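
Editor's note: Backend::get/create in this file replaces the old usize-based global with an AtomicPtr and a leaked Box: any thread may build a candidate backend, but only one pointer wins the compare_exchange, and losers free their candidate and borrow the winner's. A minimal, self-contained sketch of that racy lazy-initialization pattern follows; the Config type and values are purely illustrative.

use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

struct Config {
    value: u32,
}

static GLOBAL: AtomicPtr<Config> = AtomicPtr::new(ptr::null_mut());

#[inline]
fn get_config() -> &'static Config {
    // Fast path: reuse the pointer some thread already published.
    let p = GLOBAL.load(Ordering::Acquire);
    if !p.is_null() {
        return unsafe { &*p };
    }
    create_config()
}

#[cold]
fn create_config() -> &'static Config {
    // Every racing thread builds its own candidate...
    let candidate = Box::into_raw(Box::new(Config { value: 42 }));
    // ...but only one pointer can replace the null sentinel.
    match GLOBAL.compare_exchange(
        ptr::null_mut(),
        candidate,
        Ordering::AcqRel,
        Ordering::Acquire,
    ) {
        Ok(_) => unsafe { &*candidate }, // we won; the Box is leaked on purpose
        Err(winner) => unsafe {
            // We lost the race: free our candidate and use the global one.
            drop(Box::from_raw(candidate));
            &*winner
        },
    }
}

fn main() {
    assert_eq!(get_config().value, 42);
}

The leak is deliberate: the winning object lives for the rest of the program, which is what makes handing out &'static references sound.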
--- a/third_party/rust/parking_lot_core/src/thread_parker/windows/waitaddress.rs
+++ b/third_party/rust/parking_lot_core/src/thread_parker/windows/waitaddress.rs
@@ -1,134 +1,149 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use std::sync::atomic::{AtomicUsize, Ordering};
+use core::{
+    mem,
+    sync::atomic::{AtomicUsize, Ordering},
+};
 use std::time::Instant;
-use std::mem;
-
-use winapi::shared::basetsd::SIZE_T;
-use winapi::shared::minwindef::{BOOL, DWORD, FALSE, TRUE};
-use winapi::shared::winerror::ERROR_TIMEOUT;
-use winapi::um::errhandlingapi::GetLastError;
-use winapi::um::libloaderapi::{GetModuleHandleA, GetProcAddress};
-use winapi::um::winbase::INFINITE;
-use winapi::um::winnt::{LPCSTR, PVOID};
+use winapi::{
+    shared::{
+        basetsd::SIZE_T,
+        minwindef::{BOOL, DWORD, FALSE, TRUE},
+        winerror::ERROR_TIMEOUT,
+    },
+    um::{
+        errhandlingapi::GetLastError,
+        libloaderapi::{GetModuleHandleA, GetProcAddress},
+        winbase::INFINITE,
+        winnt::{LPCSTR, PVOID},
+    },
+};
 
 #[allow(non_snake_case)]
 pub struct WaitAddress {
     WaitOnAddress: extern "system" fn(
         Address: PVOID,
         CompareAddress: PVOID,
         AddressSize: SIZE_T,
         dwMilliseconds: DWORD,
     ) -> BOOL,
     WakeByAddressSingle: extern "system" fn(Address: PVOID),
 }
 
 impl WaitAddress {
     #[allow(non_snake_case)]
-    pub unsafe fn create() -> Option<WaitAddress> {
-        // MSDN claims that that WaitOnAddress and WakeByAddressSingle are
-        // located in kernel32.dll, but they are lying...
-        let synch_dll = GetModuleHandleA(b"api-ms-win-core-synch-l1-2-0.dll\0".as_ptr() as LPCSTR);
-        if synch_dll.is_null() {
-            return None;
-        }
+    pub fn create() -> Option<WaitAddress> {
+        unsafe {
+            // MSDN claims that WaitOnAddress and WakeByAddressSingle are
+            // located in kernel32.dll, but they are lying...
+            let synch_dll =
+                GetModuleHandleA(b"api-ms-win-core-synch-l1-2-0.dll\0".as_ptr() as LPCSTR);
+            if synch_dll.is_null() {
+                return None;
+            }
 
-        let WaitOnAddress = GetProcAddress(synch_dll, b"WaitOnAddress\0".as_ptr() as LPCSTR);
-        if WaitOnAddress.is_null() {
-            return None;
+            let WaitOnAddress = GetProcAddress(synch_dll, b"WaitOnAddress\0".as_ptr() as LPCSTR);
+            if WaitOnAddress.is_null() {
+                return None;
+            }
+            let WakeByAddressSingle =
+                GetProcAddress(synch_dll, b"WakeByAddressSingle\0".as_ptr() as LPCSTR);
+            if WakeByAddressSingle.is_null() {
+                return None;
+            }
+            Some(WaitAddress {
+                WaitOnAddress: mem::transmute(WaitOnAddress),
+                WakeByAddressSingle: mem::transmute(WakeByAddressSingle),
+            })
         }
-        let WakeByAddressSingle =
-            GetProcAddress(synch_dll, b"WakeByAddressSingle\0".as_ptr() as LPCSTR);
-        if WakeByAddressSingle.is_null() {
-            return None;
-        }
-        Some(WaitAddress {
-            WaitOnAddress: mem::transmute(WaitOnAddress),
-            WakeByAddressSingle: mem::transmute(WakeByAddressSingle),
-        })
     }
 
-    pub unsafe fn prepare_park(&'static self, key: &AtomicUsize) {
+    #[inline]
+    pub fn prepare_park(&'static self, key: &AtomicUsize) {
         key.store(1, Ordering::Relaxed);
     }
 
-    pub unsafe fn timed_out(&'static self, key: &AtomicUsize) -> bool {
+    #[inline]
+    pub fn timed_out(&'static self, key: &AtomicUsize) -> bool {
         key.load(Ordering::Relaxed) != 0
     }
 
-    pub unsafe fn park(&'static self, key: &AtomicUsize) {
+    #[inline]
+    pub fn park(&'static self, key: &AtomicUsize) {
         while key.load(Ordering::Acquire) != 0 {
-            let cmp = 1usize;
-            let r = (self.WaitOnAddress)(
-                key as *const _ as PVOID,
-                &cmp as *const _ as PVOID,
-                mem::size_of::<usize>() as SIZE_T,
-                INFINITE,
-            );
+            let r = self.wait_on_address(key, INFINITE);
             debug_assert!(r == TRUE);
         }
     }
 
-    pub unsafe fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool {
+    #[inline]
+    pub fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool {
         while key.load(Ordering::Acquire) != 0 {
             let now = Instant::now();
             if timeout <= now {
                 return false;
             }
             let diff = timeout - now;
-            let timeout = diff.as_secs()
+            let timeout = diff
+                .as_secs()
                 .checked_mul(1000)
                 .and_then(|x| x.checked_add((diff.subsec_nanos() as u64 + 999999) / 1000000))
                 .map(|ms| {
                     if ms > <DWORD>::max_value() as u64 {
                         INFINITE
                     } else {
                         ms as DWORD
                     }
                 })
                 .unwrap_or(INFINITE);
-            let cmp = 1usize;
-            let r = (self.WaitOnAddress)(
-                key as *const _ as PVOID,
-                &cmp as *const _ as PVOID,
-                mem::size_of::<usize>() as SIZE_T,
-                timeout,
-            );
-            if r == FALSE {
-                debug_assert_eq!(GetLastError(), ERROR_TIMEOUT);
+            if self.wait_on_address(key, timeout) == FALSE {
+                debug_assert_eq!(unsafe { GetLastError() }, ERROR_TIMEOUT);
             }
         }
         true
     }
 
-    pub unsafe fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle {
+    #[inline]
+    pub fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle {
         // We don't need to lock anything, just clear the state
         key.store(0, Ordering::Release);
 
         UnparkHandle {
             key: key,
             waitaddress: self,
         }
     }
+
+    #[inline]
+    fn wait_on_address(&'static self, key: &AtomicUsize, timeout: DWORD) -> BOOL {
+        let cmp = 1usize;
+        (self.WaitOnAddress)(
+            key as *const _ as PVOID,
+            &cmp as *const _ as PVOID,
+            mem::size_of::<usize>() as SIZE_T,
+            timeout,
+        )
+    }
 }
 
 // Handle for a thread that is about to be unparked. We need to mark the thread
 // as unparked while holding the queue lock, but we delay the actual unparking
 // until after the queue lock is released.
 pub struct UnparkHandle {
     key: *const AtomicUsize,
     waitaddress: &'static WaitAddress,
 }
 
 impl UnparkHandle {
     // Wakes up the parked thread. This should be called after the queue lock is
     // released to avoid blocking the queue for too long.
-    pub unsafe fn unpark(self) {
+    #[inline]
+    pub fn unpark(self) {
         (self.waitaddress.WakeByAddressSingle)(self.key as PVOID);
     }
 }
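
Editor's note: the park_until loop above converts the remaining Duration into a millisecond count for WaitOnAddress, rounding sub-millisecond remainders up and collapsing to INFINITE on overflow. A standalone sketch of just that conversion follows; INFINITE_MS stands in for winapi's INFINITE and the helper name is illustrative.

use std::time::Duration;

// Stand-in for winapi's INFINITE (0xFFFFFFFF), meaning "wait forever".
const INFINITE_MS: u32 = u32::MAX;

fn remaining_ms(diff: Duration) -> u32 {
    diff.as_secs()
        .checked_mul(1000)
        // Round the sub-second part up to whole milliseconds so we never
        // wake up early through truncation.
        .and_then(|ms| ms.checked_add(u64::from(diff.subsec_nanos() + 999_999) / 1_000_000))
        .map(|ms| {
            if ms > u64::from(INFINITE_MS) {
                INFINITE_MS
            } else {
                ms as u32
            }
        })
        .unwrap_or(INFINITE_MS)
}

fn main() {
    assert_eq!(remaining_ms(Duration::from_millis(1500)), 1500);
    assert_eq!(remaining_ms(Duration::from_nanos(1)), 1); // rounded up
    assert_eq!(remaining_ms(Duration::from_secs(u64::MAX / 100)), INFINITE_MS); // overflow
}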
--- a/third_party/rust/parking_lot_core/src/word_lock.rs
+++ b/third_party/rust/parking_lot_core/src/word_lock.rs
@@ -1,24 +1,22 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-use std::sync::atomic::{fence, AtomicUsize, Ordering};
-use std::ptr;
-use std::mem;
-use std::cell::Cell;