No bug - Revendor rust dependencies
author Servo VCS Sync <servo-vcs-sync@mozilla.com>
Wed, 21 Mar 2018 19:17:05 +0000
changeset 409289 247b6bbb6a43066e4d4526eff9fa87373989c9ed
parent 409288 835fec339e7710561b138985480f9182aabad75d
child 409290 6b2d9fd01b32b3fd537f0a4de8abdc65fe8c71ff
push id 33684
push user aiakab@mozilla.com
push date Wed, 21 Mar 2018 23:45:43 +0000
treeherder mozilla-central@7ad6eef4ccd3
milestone 61.0a1
No bug - Revendor rust dependencies
Cargo.lock
third_party/rust/parking_lot/.cargo-checksum.json
third_party/rust/parking_lot/.travis.yml
third_party/rust/parking_lot/Cargo.toml
third_party/rust/parking_lot/README.md
third_party/rust/parking_lot/appveyor.yml
third_party/rust/parking_lot/src/condvar.rs
third_party/rust/parking_lot/src/deadlock.rs
third_party/rust/parking_lot/src/elision.rs
third_party/rust/parking_lot/src/lib.rs
third_party/rust/parking_lot/src/mutex.rs
third_party/rust/parking_lot/src/once.rs
third_party/rust/parking_lot/src/raw_mutex.rs
third_party/rust/parking_lot/src/raw_remutex.rs
third_party/rust/parking_lot/src/raw_rwlock.rs
third_party/rust/parking_lot/src/remutex.rs
third_party/rust/parking_lot/src/rwlock.rs
third_party/rust/parking_lot/src/stable.rs
third_party/rust/thread-id/.appveyor.yml
third_party/rust/thread-id/.cargo-checksum.json
third_party/rust/thread-id/.travis.yml
third_party/rust/thread-id/Cargo.toml
third_party/rust/thread-id/changelog.md
third_party/rust/thread-id/license-apache
third_party/rust/thread-id/license-mit
third_party/rust/thread-id/readme.md
third_party/rust/thread-id/src/lib.rs
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -689,17 +689,17 @@ dependencies = [
  "atomic_refcell 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "cssparser 0.23.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "cstr 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "malloc_size_of 0.0.1",
  "nsstring 0.1.0",
- "parking_lot 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "selectors 0.19.0",
  "servo_arc 0.1.1",
  "smallvec 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "style 0.0.1",
  "style_traits 0.0.1",
 ]
 
 [[package]]
@@ -1250,22 +1250,21 @@ name = "owning_ref"
 version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "parking_lot"
-version = "0.4.4"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot_core 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "thread-id 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "parking_lot_core"
 version = "0.2.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1645,17 +1644,17 @@ dependencies = [
  "malloc_size_of_derive 0.0.1",
  "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "nsstring 0.1.0",
  "num-integer 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
  "num-traits 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)",
  "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "ordered-float 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "parking_lot 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rayon 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "selectors 0.19.0",
  "servo_arc 0.1.1",
  "smallbitvec 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "smallvec 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "style_derive 0.0.1",
@@ -1776,25 +1775,16 @@ name = "textwrap"
 version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "term_size 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
-name = "thread-id"
-version = "3.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
 name = "thread_local"
 version = "0.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -2299,17 +2289,17 @@ dependencies = [
 "checksum num 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "a311b77ebdc5dd4cf6449d81e4135d9f0e3b153839ac90e648a8ef538f923525"
 "checksum num-integer 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "d1452e8b06e448a07f0e6ebb0bb1d92b8890eea63288c0b627331d53514d0fba"
 "checksum num-iter 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)" = "7485fcc84f85b4ecd0ea527b14189281cf27d60e583ae65ebc9c088b13dffe01"
 "checksum num-traits 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)" = "cacfcab5eb48250ee7d0c7896b51a2c5eec99c1feea5f32025635f5ae4b00070"
 "checksum num-traits 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e7de20f146db9d920c45ee8ed8f71681fd9ade71909b48c3acbd766aa504cf10"
 "checksum num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "514f0d73e64be53ff320680ca671b64fe3fb91da01e1ae2ddc99eb51d453b20d"
 "checksum ordered-float 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "da12c96037889ae0be29dd2bdd260e5a62a7df24e6466d5a15bb8131c1c200a8"
 "checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37"
-"checksum parking_lot 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "37f364e2ce5efa24c7d0b6646d5bb61145551a0112f107ffd7499f1a3e322fbd"
+"checksum parking_lot 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "9fd9d732f2de194336fb02fe11f9eed13d9e76f13f4315b4d88a14ca411750cd"
 "checksum parking_lot_core 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "6c677d78851950b3aec390e681a411f78cc250cba277d4f578758a377f727970"
 "checksum peeking_take_while 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099"
 "checksum percent-encoding 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de154f638187706bde41d9b4738748933d64e6b37bdbffc0b47a97d16a6ae356"
 "checksum phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "cb325642290f28ee14d8c6201159949a872f220c62af6e110a56ea914fbe42fc"
 "checksum phf_codegen 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "d62594c0bb54c464f633175d502038177e90309daf2e0158be42ed5f023ce88f"
 "checksum phf_generator 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "6b07ffcc532ccc85e3afc45865469bf5d9e4ef5bfcf9622e3cfe80c2d275ec03"
 "checksum phf_shared 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "07e24b0ca9643bdecd0632f2b3da6b1b89bbb0030e0b992afc1113b23a7bc2f2"
 "checksum pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "3a8b4c6b8165cd1a1cd4b9b120978131389f64bdaf456435caa41e630edba903"
@@ -2348,17 +2338,16 @@ dependencies = [
 "checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694"
 "checksum syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad"
 "checksum syn 0.12.12 (registry+https://github.com/rust-lang/crates.io-index)" = "9e1c669ed757c0ebd04337f6a5bb972d05e0c08fe2540dd3ee3dd9e4daf1604c"
 "checksum synom 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "27e31aa4b09b9f4cb12dff3c30ba503e17b1a624413d764d32dab76e3920e5bc"
 "checksum synstructure 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "010366096045d8250555904c58da03377289e7f4b2ce7a5b1027e2b532f41000"
 "checksum tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "87974a6f5c1dfb344d733055601650059a3363de2a6104819293baff662132d6"
 "checksum term_size 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2b6b55df3198cc93372e85dd2ed817f0e38ce8cc0f22eb32391bfad9c4bf209"
 "checksum textwrap 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c0b59b6b4b44d867f1370ef1bd91bfb262bf07bf0ae65c202ea2fbc16153b693"
-"checksum thread-id 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8df7875b676fddfadffd96deea3b1124e5ede707d4884248931077518cf1f773"
 "checksum thread_local 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1697c4b57aeeb7a536b647165a2825faddffb1d3bad386d507709bd51a90bb14"
 "checksum thread_profiler 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf947d192a9be60ef5131cc7a4648886ba89d712f16700ebbf80c8a69d05d48f"
 "checksum time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)" = "d5d788d3aa77bc0ef3e9621256885555368b47bd495c13dd2e7413c89f845520"
 "checksum tokio-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "febd81b3e2ef615c6c8077347b33f3f3deec3d708ecd08194c9707b7a1eccfc9"
 "checksum tokio-io 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b4ab83e7adb5677e42e405fa4ceff75659d93c4d7d7dd22f52fcec59ee9f02af"
 "checksum tokio-uds 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "65ae5d255ce739e8537221ed2942e0445f4b3b813daebac1c0050ddaaa3587f9"
 "checksum toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a7540f4ffc193e0d3c94121edb19b055670d369f77d5804db11ae053a45b6e7e"
 "checksum traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079"
--- a/third_party/rust/parking_lot/.cargo-checksum.json
+++ b/third_party/rust/parking_lot/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{".travis.yml":"8e424960f1e47843f45cae205873e9590e4317b5b2316090f9f94cf2f5d704e8","Cargo.toml":"a31940ea072ae30f6df4c28f4fcbae206929a9e7e8adf19956dd9ed75fa7e75d","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"0c248175303f7dc19ce2cb30882c950a55a49da6b8c765c5ba49feb3e6eb7553","appveyor.yml":"cc608360622923f6f693cd68b4d7c1f64daa55f6b38f0df90f270825c6c276bc","src/condvar.rs":"962a3838e95cb664b261a44f536b003a284fe7bfdcb94a80c9a07c7679cae3dd","src/elision.rs":"0fef04d2991afeabafb041e81afeec74e89095d0eca49e5516bdcd5bc90c086f","src/lib.rs":"50951210148941266ce3a7d4017c339f8ad4419a9a8db6f915023890ed27d638","src/mutex.rs":"59cd61dd8deeaacceabd05e15b7fd6d2942e3f6c3c592221898d84a2ca804a6e","src/once.rs":"eada2e82bd8dcb9ed68d4fb2d9f8c336878eeb122f0bf8dde3feb2d77adfb598","src/raw_mutex.rs":"225cbf0ef951be062866da674e5eea8245fcc43ecd8a26da7097dea03b770bf5","src/raw_remutex.rs":"6c6d2aa83abe8f45db04de0efc04c70564cd0c55b6655da8ef4afe841c0add95","src/raw_rwlock.rs":"a7aebf70b8f7a43f96136388be1a54e5ca5b565c9da623f23434c99fb4c0b147","src/remutex.rs":"7f1640fa5a6eb43b592db47d9afa63904895030d246708ec8eac413dc8093514","src/rwlock.rs":"87d648c5fcccda784da165801b888a3592b6a85ddb605c1df3ae0e881dd22417","src/stable.rs":"cc18c58404dc6017924d88fb9f4ed94e5320b8cb0a36985162b23130b8cd7480","src/util.rs":"2d07c0c010a857790ae2ed6a1215eeed8af76859e076797ea1ba8dec82169e84"},"package":"37f364e2ce5efa24c7d0b6646d5bb61145551a0112f107ffd7499f1a3e322fbd"}
\ No newline at end of file
+{"files":{".travis.yml":"04d3d7425ce24e59d25df35da9c54f3ccd429c62ed8c9cf37b5ed2757afe96f1","Cargo.toml":"9e6a70c63617696e07a9130c27a80203180c1f240eb4ebdddde4429570da0c63","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"9d1e4237f1063e54aca1f65fc00a87ad53f75fcc73d054f8dd139f62f4a0b15e","appveyor.yml":"cfa9c3ae2476c879fe4240c306d45de6c2c04025212d8217fa76690888117594","src/condvar.rs":"1a3de60460e832d7ff76a82d5dac3f387fe2255e6a8ad4a686fe37f134c088c7","src/deadlock.rs":"82de990ef5966c852f8019b511e3c60471b562e56fd7ed0ca340399968b44a2d","src/elision.rs":"89072fe0aca87d53abc0f56490ae77bcf9d77e28e291bd13e861b1924bbb079f","src/lib.rs":"02d5716f4f43c2598afa57234e53d1a4c5db4f91ede937a226ee34eabbdc4da5","src/mutex.rs":"d8f557d40c3aab3e36f81961db9eb32831580a3a6a4b2a59674cafe6621e4772","src/once.rs":"1f408083854f918e896fdba8a9ecf25ae79ee06613d8daec75b800fb78dfd3a8","src/raw_mutex.rs":"f98ddd76e1491bc239b7c24e94f3f6a94ae0f5828873e78e1245ef19621a257b","src/raw_remutex.rs":"86e1e339567c12f91e3274ca3126c4af004fd30dff88a6cd261fc67680e33798","src/raw_rwlock.rs":"d3c71098df5e8b22cdfd7f8d7c3f287951d0bac1ac9ede83a94f809576ed9d41","src/remutex.rs":"d73f4a0f22f4a5e8c6126b784c03157f34456b0c1b90570b98db9f1c6b1f4046","src/rwlock.rs":"28e6c3a3d1aea9add4950fa5c67ba79f4aeb2e72830ff4d4a66adc2a9afa12dc","src/util.rs":"2d07c0c010a857790ae2ed6a1215eeed8af76859e076797ea1ba8dec82169e84"},"package":"9fd9d732f2de194336fb02fe11f9eed13d9e76f13f4315b4d88a14ca411750cd"}
\ No newline at end of file
--- a/third_party/rust/parking_lot/.travis.yml
+++ b/third_party/rust/parking_lot/.travis.yml
@@ -1,38 +1,40 @@
 language: rust
 sudo: false
 
 rust:
-- 1.13.0
+- 1.18.0
 - stable
 - beta
 - nightly
 
 before_script:
 - |
   pip install 'travis-cargo<0.2' --user &&
   export PATH=$HOME/.local/bin:$PATH
 
 script:
 - cd core;
 - travis-cargo build;
 - cd ..;
 - travis-cargo build
 - travis-cargo test
-- travis-cargo doc -- --no-deps -p parking_lot -p parking_lot_core
+- travis-cargo test -- --features=deadlock_detection
+- travis-cargo --only nightly doc -- --all-features --no-deps -p parking_lot -p parking_lot_core
 - if [ "$TRAVIS_RUST_VERSION" != "1.8.0" ]; then
       cd benchmark;
       travis-cargo build;
       travis-cargo run -- --release --bin mutex 2 1 0 1;
       travis-cargo run -- --release --bin rwlock 1 1 1 0 1;
       cd ..;
   fi
 
 after_success:
 - travis-cargo --only nightly doc-upload
 
 env:
   global:
   - TRAVIS_CARGO_NIGHTLY_FEATURE=nightly
+  - RUST_TEST_THREADS=1
 
 notifications:
   email: false
--- a/third_party/rust/parking_lot/Cargo.toml
+++ b/third_party/rust/parking_lot/Cargo.toml
@@ -7,30 +7,29 @@
 #
 # If you believe there's an error in this file please file an
 # issue against the rust-lang/cargo repository. If you're
 # editing this file be aware that the upstream Cargo.toml
 # will likely look very different (and much more reasonable)
 
 [package]
 name = "parking_lot"
-version = "0.4.4"
+version = "0.5.4"
 authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
 description = "More compact and efficient implementations of the standard synchronization primitives."
 documentation = "https://amanieu.github.io/parking_lot/parking_lot/index.html"
 readme = "README.md"
 keywords = ["mutex", "condvar", "rwlock", "once", "thread"]
 license = "Apache-2.0/MIT"
 repository = "https://github.com/Amanieu/parking_lot"
 [dependencies.owning_ref]
 version = "0.3"
 optional = true
 
 [dependencies.parking_lot_core]
 version = "0.2"
 [dev-dependencies.rand]
-version = "0.3"
+version = "0.4"
 
 [features]
+deadlock_detection = ["parking_lot_core/deadlock_detection"]
 default = ["owning_ref"]
 nightly = ["parking_lot_core/nightly"]
-[target."cfg(not(target_os = \"emscripten\"))".dependencies.thread-id]
-version = "3.0"
--- a/third_party/rust/parking_lot/README.md
+++ b/third_party/rust/parking_lot/README.md
@@ -42,32 +42,35 @@ 4. Microcontention (a contended lock wit
    lock.
 5. The locks are adaptive and will suspend a thread after a few failed spin
    attempts. This makes the locks suitable for both long and short critical
    sections.
 6. `Condvar`, `RwLock` and `Once` work on Windows XP, unlike the standard
    library versions of those types.
 7. `RwLock` takes advantage of hardware lock elision on processors that
    support it, which can lead to huge performance wins with many readers.
-8. `MutexGuard` (and the `RwLock` equivalents) is `Send`, which means it can
-   be unlocked by a different thread than the one that locked it.
-9. `RwLock` uses a task-fair locking policy, which avoids reader and writer
+8. `RwLock` uses a task-fair locking policy, which avoids reader and writer
    starvation, whereas the standard library version makes no guarantees.
-10. `Condvar` is guaranteed not to produce spurious wakeups. A thread will
+9. `Condvar` is guaranteed not to produce spurious wakeups. A thread will
     only be woken up if it timed out or it was woken up by a notification.
-11. `Condvar::notify_all` will only wake up a single thread and requeue the
+10. `Condvar::notify_all` will only wake up a single thread and requeue the
     rest to wait on the associated `Mutex`. This avoids a thundering herd
     problem where all threads try to acquire the lock at the same time.
-12. `RwLock` supports atomically downgrading a write lock into a read lock.
-13. `Mutex` and `RwLock` allow raw unlocking without a RAII guard object.
-14. `Mutex<()>` and `RwLock<()>` allow raw locking without a RAII guard
+11. `RwLock` supports atomically downgrading a write lock into a read lock.
+12. `Mutex` and `RwLock` allow raw unlocking without a RAII guard object.
+13. `Mutex<()>` and `RwLock<()>` allow raw locking without a RAII guard
     object.
-15. `Mutex` and `RwLock` support [eventual fairness](https://trac.webkit.org/changeset/203350)
+14. `Mutex` and `RwLock` support [eventual fairness](https://trac.webkit.org/changeset/203350)
     which allows them to be fair on average without sacrificing performance.
-16. A `ReentrantMutex` type which supports recursive locking.
+15. A `ReentrantMutex` type which supports recursive locking.
+16. An *experimental* deadlock detector that works for `Mutex`,
+    `RwLock` and `ReentrantMutex`. This feature is disabled by default and
+    can be enabled via the `deadlock_detection` feature.
+17. `RwLock` supports atomically upgrading an "upgradable" read lock into a
+    write lock.
 
 ## The parking lot
 
 To keep these primitives small, all thread queuing and suspending
 functionality is offloaded to the *parking lot*. The idea behind this is
 based on the Webkit [`WTF::ParkingLot`](https://webkit.org/blog/6161/locking-in-webkit/)
 class, which essentially consists of a hash table mapping of lock addresses
 to queues of parked (sleeping) threads. The Webkit parking lot was itself
@@ -82,38 +85,44 @@ There are a few restrictions when using 
 - `Mutex` and `Once` will use 1 word of space instead of 1 byte.
 - You will have to use `lazy_static!` to statically initialize `Mutex`,
   `Condvar` and `RwLock` types instead of `const fn`.
 - `RwLock` will not be able to take advantage of hardware lock elision for
   readers, which improves performance when there are multiple readers.
 - Slightly less efficient code may be generated for `compare_exchange`
   operations. This should not affect architectures like x86 though.
 
+To enable nightly-only functionality, you need to enable the `nightly` feature
+in Cargo (see below).
+
 ## Usage
 
 Add this to your `Cargo.toml`:
 
 ```toml
 [dependencies]
-parking_lot = "0.4"
+parking_lot = "0.5"
 ```
 
 and this to your crate root:
 
 ```rust
 extern crate parking_lot;
 ```
 
 To enable nightly-only features, add this to your `Cargo.toml` instead:
 
 ```toml
 [dependencies]
-parking_lot = {version = "0.4", features = ["nightly"]}
+parking_lot = {version = "0.5", features = ["nightly"]}
 ```
 
+The experimental deadlock detector can be enabled with the
+`deadlock_detection` Cargo feature.
+
 The core parking lot API is provided by the `parking_lot_core` crate. It is
 separate from the synchronization primitives in the `parking_lot` crate so that
 changes to the core API do not cause breaking changes for users of `parking_lot`.
 
 ## License
 
 Licensed under either of
 
--- a/third_party/rust/parking_lot/appveyor.yml
+++ b/third_party/rust/parking_lot/appveyor.yml
@@ -1,27 +1,29 @@
 environment:
   TRAVIS_CARGO_NIGHTLY_FEATURE: nightly
+  RUST_TEST_THREADS: 1
   matrix:
   - TARGET: nightly-x86_64-pc-windows-msvc
   - TARGET: nightly-i686-pc-windows-msvc
   - TARGET: nightly-x86_64-pc-windows-gnu
   - TARGET: nightly-i686-pc-windows-gnu
-  - TARGET: 1.13.0-x86_64-pc-windows-msvc
-  - TARGET: 1.13.0-i686-pc-windows-msvc
-  - TARGET: 1.13.0-x86_64-pc-windows-gnu
-  - TARGET: 1.13.0-i686-pc-windows-gnu
+  - TARGET: 1.18.0-x86_64-pc-windows-msvc
+  - TARGET: 1.18.0-i686-pc-windows-msvc
+  - TARGET: 1.18.0-x86_64-pc-windows-gnu
+  - TARGET: 1.18.0-i686-pc-windows-gnu
 
 install:
   - SET PATH=C:\Python27;C:\Python27\Scripts;%PATH%;%APPDATA%\Python\Scripts
   - pip install "travis-cargo<0.2" --user
   - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-${env:TARGET}.exe" -FileName "rust-install.exe"
   - ps: .\rust-install.exe /VERYSILENT /NORESTART /DIR="C:\rust" | Out-Null
   - ps: $env:PATH="$env:PATH;C:\rust\bin"
   - rustc -vV
   - cargo -vV
 
 build_script:
   - travis-cargo build
 
 test_script:
   - travis-cargo test
+  - travis-cargo test -- --features=deadlock_detection
   - travis-cargo doc
--- a/third_party/rust/parking_lot/src/condvar.rs
+++ b/third_party/rust/parking_lot/src/condvar.rs
@@ -3,19 +3,20 @@
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
 use std::sync::atomic::{AtomicPtr, Ordering};
 use std::time::{Duration, Instant};
 use std::ptr;
-use parking_lot_core::{self, ParkResult, UnparkResult, RequeueOp, DEFAULT_PARK_TOKEN};
-use mutex::{MutexGuard, guard_lock};
-use raw_mutex::{RawMutex, TOKEN_NORMAL, TOKEN_HANDOFF};
+use parking_lot_core::{self, ParkResult, RequeueOp, UnparkResult, DEFAULT_PARK_TOKEN};
+use mutex::{guard_lock, MutexGuard};
+use raw_mutex::{RawMutex, TOKEN_HANDOFF, TOKEN_NORMAL};
+use deadlock;
 
 /// A type indicating whether a timed wait on a condition variable returned
 /// due to a time out or not.
 #[derive(Debug, PartialEq, Eq, Copy, Clone)]
 pub struct WaitTimeoutResult(bool);
 
 impl WaitTimeoutResult {
     /// Returns whether the wait was known to have timed out.
@@ -83,25 +84,29 @@ pub struct Condvar {
 }
 
 impl Condvar {
     /// Creates a new condition variable which is ready to be waited on and
     /// notified.
     #[cfg(feature = "nightly")]
     #[inline]
     pub const fn new() -> Condvar {
-        Condvar { state: AtomicPtr::new(ptr::null_mut()) }
+        Condvar {
+            state: AtomicPtr::new(ptr::null_mut()),
+        }
     }
 
     /// Creates a new condition variable which is ready to be waited on and
     /// notified.
     #[cfg(not(feature = "nightly"))]
     #[inline]
     pub fn new() -> Condvar {
-        Condvar { state: AtomicPtr::new(ptr::null_mut()) }
+        Condvar {
+            state: AtomicPtr::new(ptr::null_mut()),
+        }
     }
 
     /// Wakes up one blocked thread on this condvar.
     ///
     /// If there is a blocked thread on this condition variable, then it will
     /// be woken up from its call to `wait` or `wait_timeout`. Calls to
     /// `notify_one` are not buffered in any way.
     ///
@@ -233,20 +238,21 @@ impl Condvar {
     /// Like `wait`, the lock specified will be re-acquired when this function
     /// returns, regardless of whether the timeout elapsed or not.
     ///
     /// # Panics
     ///
     /// This function will panic if another thread is waiting on the `Condvar`
     /// with a different `Mutex` object.
     #[inline]
-    pub fn wait_until<T: ?Sized>(&self,
-                                 mutex_guard: &mut MutexGuard<T>,
-                                 timeout: Instant)
-                                 -> WaitTimeoutResult {
+    pub fn wait_until<T: ?Sized>(
+        &self,
+        mutex_guard: &mut MutexGuard<T>,
+        timeout: Instant,
+    ) -> WaitTimeoutResult {
         self.wait_until_internal(guard_lock(mutex_guard), Some(timeout))
     }
 
     // This is a non-generic function to reduce the monomorphization cost of
     // using `wait_until`.
     fn wait_until_internal(&self, mutex: &RawMutex, timeout: Option<Instant>) -> WaitTimeoutResult {
         unsafe {
             let result;
@@ -280,33 +286,37 @@ impl Condvar {
 
                     // If we were the last thread on the queue then we need to
                     // clear our state. This is normally done by the
                     // notify_{one,all} functions when not timing out.
                     if !requeued && was_last_thread {
                         self.state.store(ptr::null_mut(), Ordering::Relaxed);
                     }
                 };
-                result = parking_lot_core::park(addr,
-                                                validate,
-                                                before_sleep,
-                                                timed_out,
-                                                DEFAULT_PARK_TOKEN,
-                                                timeout);
+                result = parking_lot_core::park(
+                    addr,
+                    validate,
+                    before_sleep,
+                    timed_out,
+                    DEFAULT_PARK_TOKEN,
+                    timeout,
+                );
             }
 
             // Panic if we tried to use multiple mutexes with a Condvar. Note
             // that at this point the MutexGuard is still locked. It will be
             // unlocked by the unwinding logic.
             if bad_mutex {
                 panic!("attempted to use a condition variable with more than one mutex");
             }
 
             // ... and re-lock it once we are done sleeping
-            if result != ParkResult::Unparked(TOKEN_HANDOFF) {
+            if result == ParkResult::Unparked(TOKEN_HANDOFF) {
+                deadlock::acquire_resource(mutex as *const _ as usize);
+            } else {
                 mutex.lock();
             }
 
             WaitTimeoutResult(!(result.is_unparked() || requeued))
         }
     }
 
     /// Waits on this condition variable for a notification, timing out after a
@@ -323,20 +333,21 @@ impl Condvar {
     /// the system time.
     ///
     /// The returned `WaitTimeoutResult` value indicates if the timeout is
     /// known to have elapsed.
     ///
     /// Like `wait`, the lock specified will be re-acquired when this function
     /// returns, regardless of whether the timeout elapsed or not.
     #[inline]
-    pub fn wait_for<T: ?Sized>(&self,
-                               guard: &mut MutexGuard<T>,
-                               timeout: Duration)
-                               -> WaitTimeoutResult {
+    pub fn wait_for<T: ?Sized>(
+        &self,
+        guard: &mut MutexGuard<T>,
+        timeout: Duration,
+    ) -> WaitTimeoutResult {
         self.wait_until(guard, Instant::now() + timeout)
     }
 }
 
 impl Default for Condvar {
     #[inline]
     fn default() -> Condvar {
         Condvar::new()
@@ -437,19 +448,20 @@ mod tests {
 
         let mut g = m.lock();
         let no_timeout = c.wait_until(&mut g, Instant::now() + Duration::from_millis(1));
         assert!(no_timeout.timed_out());
         let _t = thread::spawn(move || {
             let _g = m2.lock();
             c2.notify_one();
         });
-        let timeout_res = c.wait_until(&mut g,
-                                       Instant::now() +
-                                       Duration::from_millis(u32::max_value() as u64));
+        let timeout_res = c.wait_until(
+            &mut g,
+            Instant::now() + Duration::from_millis(u32::max_value() as u64),
+        );
         assert!(!timeout_res.timed_out());
         drop(g);
     }
 
     #[test]
     #[should_panic]
     fn two_mutexes() {
         let m = Arc::new(Mutex::new(()));
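
The condvar.rs hunks above reformat `wait_until`/`wait_for` and route the re-lock path through the new deadlock tracking. As a reading aid only (not part of the patch), here is a hedged sketch of the `wait_for` signature shown in the diff, `wait_for(&self, &mut MutexGuard<T>, Duration) -> WaitTimeoutResult`; the flag/loop structure is illustrative.

```rust
// Hedged sketch (not part of the patch) of Condvar::wait_for as declared in
// the hunk above; the shared-flag setup here is illustrative only.
extern crate parking_lot;

use parking_lot::{Condvar, Mutex};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

fn main() {
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = pair.clone();

    thread::spawn(move || {
        let &(ref lock, ref cvar) = &*pair2;
        *lock.lock() = true;
        // Wake one waiter; per the README changes, notify_all would requeue
        // the remaining waiters onto the mutex rather than waking them all.
        cvar.notify_one();
    });

    let &(ref lock, ref cvar) = &*pair;
    let mut ready = lock.lock();
    while !*ready {
        // wait_for takes &mut MutexGuard and a Duration and returns a
        // WaitTimeoutResult (see the signature in the diff above).
        if cvar.wait_for(&mut ready, Duration::from_secs(1)).timed_out() {
            break;
        }
    }
}
```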
new file mode 100644
--- /dev/null
+++ b/third_party/rust/parking_lot/src/deadlock.rs
@@ -0,0 +1,218 @@
+//! [Experimental] Deadlock detection
+//!
+//! This feature is optional and can be enabled via the `deadlock_detection` feature flag.
+//!
+//! # Example
+//!
+//! ```
+//! #[cfg(feature = "deadlock_detection")]
+//! { // only for #[cfg]
+//! use std::thread;
+//! use std::time::Duration;
+//! use parking_lot::deadlock;
+//!
+//! // Create a background thread which checks for deadlocks every 10s
+//! thread::spawn(move || {
+//!     loop {
+//!         thread::sleep(Duration::from_secs(10));
+//!         let deadlocks = deadlock::check_deadlock();
+//!         if deadlocks.is_empty() {
+//!             continue;
+//!         }
+//!
+//!         println!("{} deadlocks detected", deadlocks.len());
+//!         for (i, threads) in deadlocks.iter().enumerate() {
+//!             println!("Deadlock #{}", i);
+//!             for t in threads {
+//!                 println!("Thread Id {:#?}", t.thread_id());
+//!                 println!("{:#?}", t.backtrace());
+//!             }
+//!         }
+//!     }
+//! });
+//! } // only for #[cfg]
+//! ```
+
+#[cfg(feature = "deadlock_detection")]
+pub use parking_lot_core::deadlock::check_deadlock;
+pub(crate) use parking_lot_core::deadlock::{acquire_resource, release_resource};
+
+#[cfg(test)]
+#[cfg(feature = "deadlock_detection")]
+mod tests {
+    use std::thread::{self, sleep};
+    use std::sync::{Arc, Barrier};
+    use std::time::Duration;
+    use {Mutex, ReentrantMutex, RwLock};
+
+    fn check_deadlock() -> bool {
+        use parking_lot_core::deadlock::check_deadlock;
+        !check_deadlock().is_empty()
+    }
+
+    #[test]
+    fn test_mutex_deadlock() {
+        let m1: Arc<Mutex<()>> = Default::default();
+        let m2: Arc<Mutex<()>> = Default::default();
+        let m3: Arc<Mutex<()>> = Default::default();
+        let b = Arc::new(Barrier::new(4));
+
+        let m1_ = m1.clone();
+        let m2_ = m2.clone();
+        let m3_ = m3.clone();
+        let b1 = b.clone();
+        let b2 = b.clone();
+        let b3 = b.clone();
+
+        assert!(!check_deadlock());
+
+        let _t1 = thread::spawn(move || {
+            let _g = m1.lock();
+            b1.wait();
+            let _ = m2_.lock();
+        });
+
+        let _t2 = thread::spawn(move || {
+            let _g = m2.lock();
+            b2.wait();
+            let _ = m3_.lock();
+        });
+
+        let _t3 = thread::spawn(move || {
+            let _g = m3.lock();
+            b3.wait();
+            let _ = m1_.lock();
+        });
+
+        assert!(!check_deadlock());
+
+        b.wait();
+        sleep(Duration::from_millis(50));
+        assert!(check_deadlock());
+
+        assert!(!check_deadlock());
+    }
+
+    #[test]
+    fn test_mutex_deadlock_reentrant() {
+        let m1: Arc<Mutex<()>> = Default::default();
+
+        assert!(!check_deadlock());
+
+        let _t1 = thread::spawn(move || {
+            let _g = m1.lock();
+            let _ = m1.lock();
+        });
+
+        sleep(Duration::from_millis(50));
+        assert!(check_deadlock());
+
+        assert!(!check_deadlock());
+    }
+
+    #[test]
+    fn test_remutex_deadlock() {
+        let m1: Arc<ReentrantMutex<()>> = Default::default();
+        let m2: Arc<ReentrantMutex<()>> = Default::default();
+        let m3: Arc<ReentrantMutex<()>> = Default::default();
+        let b = Arc::new(Barrier::new(4));
+
+        let m1_ = m1.clone();
+        let m2_ = m2.clone();
+        let m3_ = m3.clone();
+        let b1 = b.clone();
+        let b2 = b.clone();
+        let b3 = b.clone();
+
+        assert!(!check_deadlock());
+
+        let _t1 = thread::spawn(move || {
+            let _g = m1.lock();
+            let _g = m1.lock();
+            b1.wait();
+            let _ = m2_.lock();
+        });
+
+        let _t2 = thread::spawn(move || {
+            let _g = m2.lock();
+            let _g = m2.lock();
+            b2.wait();
+            let _ = m3_.lock();
+        });
+
+        let _t3 = thread::spawn(move || {
+            let _g = m3.lock();
+            let _g = m3.lock();
+            b3.wait();
+            let _ = m1_.lock();
+        });
+
+        assert!(!check_deadlock());
+
+        b.wait();
+        sleep(Duration::from_millis(50));
+        assert!(check_deadlock());
+
+        assert!(!check_deadlock());
+    }
+
+    #[test]
+    fn test_rwlock_deadlock() {
+        let m1: Arc<RwLock<()>> = Default::default();
+        let m2: Arc<RwLock<()>> = Default::default();
+        let m3: Arc<RwLock<()>> = Default::default();
+        let b = Arc::new(Barrier::new(4));
+
+        let m1_ = m1.clone();
+        let m2_ = m2.clone();
+        let m3_ = m3.clone();
+        let b1 = b.clone();
+        let b2 = b.clone();
+        let b3 = b.clone();
+
+        assert!(!check_deadlock());
+
+        let _t1 = thread::spawn(move || {
+            let _g = m1.read();
+            b1.wait();
+            let _g = m2_.write();
+        });
+
+        let _t2 = thread::spawn(move || {
+            let _g = m2.read();
+            b2.wait();
+            let _g = m3_.write();
+        });
+
+        let _t3 = thread::spawn(move || {
+            let _g = m3.read();
+            b3.wait();
+            let _ = m1_.write();
+        });
+
+        assert!(!check_deadlock());
+
+        b.wait();
+        sleep(Duration::from_millis(50));
+        assert!(check_deadlock());
+
+        assert!(!check_deadlock());
+    }
+
+    #[test]
+    fn test_rwlock_deadlock_reentrant() {
+        let m1: Arc<RwLock<()>> = Default::default();
+
+        assert!(!check_deadlock());
+
+        let _t1 = thread::spawn(move || {
+            let _g = m1.read();
+            let _ = m1.write();
+        });
+
+        sleep(Duration::from_millis(50));
+        assert!(check_deadlock());
+
+        assert!(!check_deadlock());
+    }
+}
--- a/third_party/rust/parking_lot/src/elision.rs
+++ b/third_party/rust/parking_lot/src/elision.rs
@@ -1,41 +1,42 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-#[cfg(feature = "nightly")]
 use std::sync::atomic::AtomicUsize;
-#[cfg(not(feature = "nightly"))]
-use stable::AtomicUsize;
 
 // Extension trait to add lock elision primitives to atomic types
 pub trait AtomicElisionExt {
     type IntType;
 
     // Perform a compare_exchange and start a transaction
-    fn elision_acquire(&self,
-                       current: Self::IntType,
-                       new: Self::IntType)
-                       -> Result<Self::IntType, Self::IntType>;
+    fn elision_acquire(
+        &self,
+        current: Self::IntType,
+        new: Self::IntType,
+    ) -> Result<Self::IntType, Self::IntType>;
     // Perform a compare_exchange and end a transaction
-    fn elision_release(&self,
-                       current: Self::IntType,
-                       new: Self::IntType)
-                       -> Result<Self::IntType, Self::IntType>;
+    fn elision_release(
+        &self,
+        current: Self::IntType,
+        new: Self::IntType,
+    ) -> Result<Self::IntType, Self::IntType>;
 }
 
 // Indicates whether the target architecture supports lock elision
 #[inline]
 pub fn have_elision() -> bool {
-    cfg!(all(feature = "nightly",
-             any(target_arch = "x86", target_arch = "x86_64")))
+    cfg!(all(
+        feature = "nightly",
+        any(target_arch = "x86", target_arch = "x86_64"),
+    ))
 }
 
 // This implementation is never actually called because it is guarded by
 // have_elision().
 #[cfg(not(all(feature = "nightly", any(target_arch = "x86", target_arch = "x86_64"))))]
 impl AtomicElisionExt for AtomicUsize {
     type IntType = usize;
 
@@ -58,56 +59,111 @@ impl AtomicElisionExt for AtomicUsize {
     fn elision_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
         unsafe {
             let prev: usize;
             asm!("xacquire; lock; cmpxchgl $2, $1"
                  : "={eax}" (prev), "+*m" (self)
                  : "r" (new), "{eax}" (current)
                  : "memory"
                  : "volatile");
-            if prev == current { Ok(prev) } else { Err(prev) }
+            if prev == current {
+                Ok(prev)
+            } else {
+                Err(prev)
+            }
         }
     }
 
     #[inline]
     fn elision_release(&self, current: usize, new: usize) -> Result<usize, usize> {
         unsafe {
             let prev: usize;
             asm!("xrelease; lock; cmpxchgl $2, $1"
                  : "={eax}" (prev), "+*m" (self)
                  : "r" (new), "{eax}" (current)
                  : "memory"
                  : "volatile");
-            if prev == current { Ok(prev) } else { Err(prev) }
+            if prev == current {
+                Ok(prev)
+            } else {
+                Err(prev)
+            }
         }
     }
 }
 
-#[cfg(all(feature = "nightly", target_arch = "x86_64"))]
+#[cfg(all(feature = "nightly", target_arch = "x86_64", target_pointer_width = "32"))]
+impl AtomicElisionExt for AtomicUsize {
+    type IntType = usize;
+
+    #[inline]
+    fn elision_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
+        unsafe {
+            let prev: usize;
+            asm!("xacquire; lock; cmpxchgl $2, $1"
+                 : "={rax}" (prev), "+*m" (self)
+                 : "r" (new), "{rax}" (current)
+                 : "memory"
+                 : "volatile");
+            if prev == current {
+                Ok(prev)
+            } else {
+                Err(prev)
+            }
+        }
+    }
+
+    #[inline]
+    fn elision_release(&self, current: usize, new: usize) -> Result<usize, usize> {
+        unsafe {
+            let prev: usize;
+            asm!("xrelease; lock; cmpxchgl $2, $1"
+                 : "={rax}" (prev), "+*m" (self)
+                 : "r" (new), "{rax}" (current)
+                 : "memory"
+                 : "volatile");
+            if prev == current {
+                Ok(prev)
+            } else {
+                Err(prev)
+            }
+        }
+    }
+}
+
+#[cfg(all(feature = "nightly", target_arch = "x86_64", target_pointer_width = "64"))]
 impl AtomicElisionExt for AtomicUsize {
     type IntType = usize;
 
     #[inline]
     fn elision_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
         unsafe {
             let prev: usize;
             asm!("xacquire; lock; cmpxchgq $2, $1"
                  : "={rax}" (prev), "+*m" (self)
                  : "r" (new), "{rax}" (current)
                  : "memory"
                  : "volatile");
-            if prev == current { Ok(prev) } else { Err(prev) }
+            if prev == current {
+                Ok(prev)
+            } else {
+                Err(prev)
+            }
         }
     }
 
     #[inline]
     fn elision_release(&self, current: usize, new: usize) -> Result<usize, usize> {
         unsafe {
             let prev: usize;
             asm!("xrelease; lock; cmpxchgq $2, $1"
                  : "={rax}" (prev), "+*m" (self)
                  : "r" (new), "{rax}" (current)
                  : "memory"
                  : "volatile");
-            if prev == current { Ok(prev) } else { Err(prev) }
+            if prev == current {
+                Ok(prev)
+            } else {
+                Err(prev)
+            }
         }
     }
 }
--- a/third_party/rust/parking_lot/src/lib.rs
+++ b/third_party/rust/parking_lot/src/lib.rs
@@ -12,40 +12,39 @@
 #![warn(missing_docs)]
 #![cfg_attr(feature = "nightly", feature(const_fn))]
 #![cfg_attr(feature = "nightly", feature(integer_atomics))]
 #![cfg_attr(feature = "nightly", feature(asm))]
 
 #[cfg(feature = "owning_ref")]
 extern crate owning_ref;
 
-#[cfg(not(target_os = "emscripten"))]
-extern crate thread_id;
-
 extern crate parking_lot_core;
 
-#[cfg(not(feature = "nightly"))]
-mod stable;
-
 mod util;
 mod elision;
 mod raw_mutex;
 mod raw_remutex;
 mod raw_rwlock;
 mod condvar;
 mod mutex;
 mod remutex;
 mod rwlock;
 mod once;
 
-pub use once::{Once, ONCE_INIT, OnceState};
+#[cfg(feature = "deadlock_detection")]
+pub mod deadlock;
+#[cfg(not(feature = "deadlock_detection"))]
+mod deadlock;
+
+pub use once::{Once, OnceState, ONCE_INIT};
 pub use mutex::{Mutex, MutexGuard};
 pub use remutex::{ReentrantMutex, ReentrantMutexGuard};
 pub use condvar::{Condvar, WaitTimeoutResult};
-pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
+pub use rwlock::{RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard};
 
 #[cfg(feature = "owning_ref")]
 use owning_ref::OwningRef;
 
 /// Typedef of an owning reference that uses a `MutexGuard` as the owner.
 #[cfg(feature = "owning_ref")]
 pub type MutexGuardRef<'a, T, U = T> = OwningRef<MutexGuard<'a, T>, U>;
 
@@ -55,8 +54,13 @@ pub type ReentrantMutexGuardRef<'a, T, U
 
 /// Typedef of an owning reference that uses a `RwLockReadGuard` as the owner.
 #[cfg(feature = "owning_ref")]
 pub type RwLockReadGuardRef<'a, T, U = T> = OwningRef<RwLockReadGuard<'a, T>, U>;
 
 /// Typedef of an owning reference that uses a `RwLockWriteGuard` as the owner.
 #[cfg(feature = "owning_ref")]
 pub type RwLockWriteGuardRef<'a, T, U = T> = OwningRef<RwLockWriteGuard<'a, T>, U>;
+
+/// Typedef of an owning reference that uses a `RwLockUpgradableReadGuard` as the owner.
+#[cfg(feature = "owning_ref")]
+pub type RwLockUpgradableReadGuardRef<'a, T, U = T> =
+    OwningRef<RwLockUpgradableReadGuard<'a, T>, U>;
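
The lib.rs hunk above starts re-exporting `RwLockUpgradableReadGuard`, matching the README's new feature 17 (atomically upgrading an "upgradable" read lock into a write lock). The rwlock.rs hunks are not shown in this section, so the method names below (`upgradable_read`, `RwLockUpgradableReadGuard::upgrade`) are assumptions about the parking_lot 0.5 API used purely to illustrate the pattern.

```rust
// Hedged sketch (not part of the patch): what the new
// RwLockUpgradableReadGuard export is for. `upgradable_read` and
// `RwLockUpgradableReadGuard::upgrade` are assumed 0.5 API names, since the
// rwlock.rs diff is not included above.
extern crate parking_lot;

use parking_lot::{RwLock, RwLockUpgradableReadGuard};

fn main() {
    let cache: RwLock<Option<String>> = RwLock::new(None);

    // An upgradable read coexists with plain readers but excludes writers
    // and other upgradable readers.
    let guard = cache.upgradable_read();
    if guard.is_none() {
        // Atomically upgrade to a write lock without releasing the lock,
        // so no writer can slip in between the check and the store.
        let mut write_guard = RwLockUpgradableReadGuard::upgrade(guard);
        *write_guard = Some("computed".to_string());
    }
}
```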
--- a/third_party/rust/parking_lot/src/mutex.rs
+++ b/third_party/rust/parking_lot/src/mutex.rs
@@ -45,17 +45,16 @@ use owning_ref::StableAddress;
 /// You can also force a fair unlock by calling `MutexGuard::unlock_fair` when
 /// unlocking a mutex instead of simply dropping the `MutexGuard`.
 ///
 /// # Differences from the standard library `Mutex`
 ///
 /// - No poisoning, the lock is released normally on panic.
 /// - Only requires 1 byte of space, whereas the standard library boxes the
 ///   `Mutex` due to platform limitations.
-/// - A `MutexGuard` can be sent to another thread and unlocked there.
 /// - Can be statically constructed (requires the `const_fn` nightly feature).
 /// - Does not require any drop glue when dropped.
 /// - Inline fast path for the uncontended case.
 /// - Efficient handling of micro-contention using adaptive spinning.
 /// - Allows raw locking & unlocking without a guard.
 /// - Supports eventual fairness so that the mutex is fair on average.
 /// - Optionally allows making the mutex fair by calling `MutexGuard::unlock_fair`.
 ///
@@ -94,30 +93,33 @@ use owning_ref::StableAddress;
 ///
 /// rx.recv().unwrap();
 /// ```
 pub struct Mutex<T: ?Sized> {
     raw: RawMutex,
     data: UnsafeCell<T>,
 }
 
-unsafe impl<T: Send> Send for Mutex<T> {}
-unsafe impl<T: Send> Sync for Mutex<T> {}
+unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
+unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
 
 /// An RAII implementation of a "scoped lock" of a mutex. When this structure is
 /// dropped (falls out of scope), the lock will be unlocked.
 ///
 /// The data protected by the mutex can be accessed through this guard via its
 /// `Deref` and `DerefMut` implementations.
 #[must_use]
 pub struct MutexGuard<'a, T: ?Sized + 'a> {
-    mutex: &'a Mutex<T>,
+    raw: &'a RawMutex,
+    data: *mut T,
     marker: PhantomData<&'a mut T>,
 }
 
+unsafe impl<'a, T: ?Sized + Sync + 'a> Sync for MutexGuard<'a, T> {}
+
 impl<T> Mutex<T> {
     /// Creates a new mutex in an unlocked state ready for use.
     #[cfg(feature = "nightly")]
     #[inline]
     pub const fn new(val: T) -> Mutex<T> {
         Mutex {
             data: UnsafeCell::new(val),
             raw: RawMutex::new(),
@@ -137,82 +139,79 @@ impl<T> Mutex<T> {
     /// Consumes this mutex, returning the underlying data.
     #[inline]
     pub fn into_inner(self) -> T {
         unsafe { self.data.into_inner() }
     }
 }
 
 impl<T: ?Sized> Mutex<T> {
+    #[inline]
+    fn guard(&self) -> MutexGuard<T> {
+        MutexGuard {
+            raw: &self.raw,
+            data: self.data.get(),
+            marker: PhantomData,
+        }
+    }
+
     /// Acquires a mutex, blocking the current thread until it is able to do so.
     ///
     /// This function will block the local thread until it is available to acquire
     /// the mutex. Upon returning, the thread is the only thread with the mutex
     /// held. An RAII guard is returned to allow scoped unlock of the lock. When
     /// the guard goes out of scope, the mutex will be unlocked.
     ///
     /// Attempts to lock a mutex in the thread which already holds the lock will
     /// result in a deadlock.
     #[inline]
     pub fn lock(&self) -> MutexGuard<T> {
         self.raw.lock();
-        MutexGuard {
-            mutex: self,
-            marker: PhantomData,
-        }
+        self.guard()
     }
 
     /// Attempts to acquire this lock.
     ///
     /// If the lock could not be acquired at this time, then `None` is returned.
     /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
     /// guard is dropped.
     ///
     /// This function does not block.
     #[inline]
     pub fn try_lock(&self) -> Option<MutexGuard<T>> {
         if self.raw.try_lock() {
-            Some(MutexGuard {
-                mutex: self,
-                marker: PhantomData,
-            })
+            Some(self.guard())
         } else {
             None
         }
     }
 
     /// Attempts to acquire this lock until a timeout is reached.
     ///
     /// If the lock could not be acquired before the timeout expired, then
     /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
     /// be unlocked when the guard is dropped.
     #[inline]
     pub fn try_lock_for(&self, timeout: Duration) -> Option<MutexGuard<T>> {
         if self.raw.try_lock_for(timeout) {
-            Some(MutexGuard {
-                mutex: self,
-                marker: PhantomData,
-            })
+            Some(self.guard())
         } else {
             None
         }
     }
 
     /// Attempts to acquire this lock until a timeout is reached.
     ///
     /// If the lock could not be acquired before the timeout expired, then
     /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
     /// be unlocked when the guard is dropped.
     #[inline]
     pub fn try_lock_until(&self, timeout: Instant) -> Option<MutexGuard<T>> {
         if self.raw.try_lock_until(timeout) {
-            Some(MutexGuard {
-                mutex: self,
-                marker: PhantomData,
-            })
+            Some(self.guard())
         } else {
             None
         }
     }
 
     /// Returns a mutable reference to the underlying data.
     ///
     /// Since this call borrows the `Mutex` mutably, no actual locking needs to
@@ -295,59 +294,82 @@ impl<'a, T: ?Sized + 'a> MutexGuard<'a, 
     /// context switch on every mutex unlock. This can result in one thread
     /// acquiring a mutex many more times than other threads.
     ///
     /// However in some cases it can be beneficial to ensure fairness by forcing
     /// the lock to pass on to a waiting thread if there is one. This is done by
     /// using this method instead of dropping the `MutexGuard` normally.
     #[inline]
     pub fn unlock_fair(self) {
-        self.mutex.raw.unlock(true);
+        self.raw.unlock(true);
         mem::forget(self);
     }
+
+    /// Make a new `MutexGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `MutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `MutexGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(orig: Self, f: F) -> MutexGuard<'a, U>
+    where
+        F: FnOnce(&mut T) -> &mut U,
+    {
+        let raw = orig.raw;
+        let data = f(unsafe { &mut *orig.data });
+        mem::forget(orig);
+        MutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
 }
 
 impl<'a, T: ?Sized + 'a> Deref for MutexGuard<'a, T> {
     type Target = T;
     #[inline]
     fn deref(&self) -> &T {
-        unsafe { &*self.mutex.data.get() }
+        unsafe { &*self.data }
     }
 }
 
 impl<'a, T: ?Sized + 'a> DerefMut for MutexGuard<'a, T> {
     #[inline]
     fn deref_mut(&mut self) -> &mut T {
-        unsafe { &mut *self.mutex.data.get() }
+        unsafe { &mut *self.data }
     }
 }
 
 impl<'a, T: ?Sized + 'a> Drop for MutexGuard<'a, T> {
     #[inline]
     fn drop(&mut self) {
-        self.mutex.raw.unlock(false);
+        self.raw.unlock(false);
     }
 }
 
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, T: ?Sized> StableAddress for MutexGuard<'a, T> {}
 
 // Helper function used by Condvar, not publicly exported
 #[inline]
-pub fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a RawMutex {
-    &guard.mutex.raw
+pub(crate) fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a RawMutex {
+    &guard.raw
 }
 
 #[cfg(test)]
 mod tests {
     use std::sync::mpsc::channel;
     use std::sync::Arc;
     use std::sync::atomic::{AtomicUsize, Ordering};
     use std::thread;
-    use {Mutex, Condvar};
+    use {Condvar, Mutex};
 
     struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
 
     #[derive(Eq, PartialEq, Debug)]
     struct NonCopy(i32);
 
     unsafe impl<T: Send> Send for Packet<T> {}
     unsafe impl<T> Sync for Packet<T> {}
@@ -471,28 +493,27 @@ mod tests {
         rx.recv().unwrap();
     }
 
     #[test]
     fn test_mutex_arc_access_in_unwind() {
         let arc = Arc::new(Mutex::new(1));
         let arc2 = arc.clone();
         let _ = thread::spawn(move || -> () {
-                struct Unwinder {
-                    i: Arc<Mutex<i32>>,
+            struct Unwinder {
+                i: Arc<Mutex<i32>>,
+            }
+            impl Drop for Unwinder {
+                fn drop(&mut self) {
+                    *self.i.lock() += 1;
                 }
-                impl Drop for Unwinder {
-                    fn drop(&mut self) {
-                        *self.i.lock() += 1;
-                    }
-                }
-                let _u = Unwinder { i: arc2 };
-                panic!();
-            })
-            .join();
+            }
+            let _u = Unwinder { i: arc2 };
+            panic!();
+        }).join();
         let lock = arc.lock();
         assert_eq!(*lock, 2);
     }
 
     #[test]
     fn test_mutex_unsized() {
         let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
         {
@@ -500,15 +521,15 @@ mod tests {
             b[0] = 4;
             b[2] = 5;
         }
         let comp: &[i32] = &[4, 2, 5];
         assert_eq!(&*mutex.lock(), comp);
     }
 
     #[test]
-    fn test_mutexguard_send() {
-        fn send<T: Send>(_: T) {}
+    fn test_mutexguard_sync() {
+        fn sync<T: Sync>(_: T) {}
 
         let mutex = Mutex::new(());
-        send(mutex.lock());
+        sync(mutex.lock());
     }
 }
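
The mutex.rs hunks above split `MutexGuard` into a raw-lock reference plus a data pointer, which is what makes the new `MutexGuard::map` associated function possible. The following is a minimal sketch, not part of the patch, that follows the `map` signature and doc comment shown in the diff; the `Config` struct is illustrative only.

```rust
// Minimal sketch (not part of the patch) of MutexGuard::map as declared in
// the hunk above: project a guard onto one field of the protected data while
// the mutex stays locked. The Config struct is illustrative only.
extern crate parking_lot;

use parking_lot::{Mutex, MutexGuard};

struct Config {
    name: String,
    retries: u32,
}

fn main() {
    let config = Mutex::new(Config {
        name: "default".to_string(),
        retries: 3,
    });

    // map is an associated function (MutexGuard::map(...)), as the diff's
    // doc comment explains, so it cannot shadow methods on Config itself.
    let mut retries = MutexGuard::map(config.lock(), |c| &mut c.retries);
    *retries += 1;
    assert_eq!(*retries, 4);
    // The mutex unlocks when the mapped guard is dropped here.
}
```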
--- a/third_party/rust/parking_lot/src/once.rs
+++ b/third_party/rust/parking_lot/src/once.rs
@@ -1,24 +1,28 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
+use std::sync::atomic::{fence, Ordering};
 #[cfg(feature = "nightly")]
-use std::sync::atomic::{AtomicU8, ATOMIC_U8_INIT, Ordering, fence};
+use std::sync::atomic::{ATOMIC_U8_INIT, AtomicU8};
 #[cfg(feature = "nightly")]
 type U8 = u8;
 #[cfg(not(feature = "nightly"))]
-use stable::{AtomicU8, ATOMIC_U8_INIT, Ordering, fence};
+use std::sync::atomic::AtomicUsize as AtomicU8;
+#[cfg(not(feature = "nightly"))]
+use std::sync::atomic::ATOMIC_USIZE_INIT as ATOMIC_U8_INIT;
 #[cfg(not(feature = "nightly"))]
 type U8 = usize;
 use std::mem;
+use std::fmt;
 use parking_lot_core::{self, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
 use util::UncheckedOptionExt;
 
 const DONE_BIT: U8 = 1;
 const POISON_BIT: U8 = 2;
 const LOCKED_BIT: U8 = 4;
 const PARKED_BIT: U8 = 8;
 
@@ -90,24 +94,24 @@ pub struct Once(AtomicU8);
 /// Initialization value for static `Once` values.
 pub const ONCE_INIT: Once = Once(ATOMIC_U8_INIT);
 
 impl Once {
     /// Creates a new `Once` value.
     #[cfg(feature = "nightly")]
     #[inline]
     pub const fn new() -> Once {
-        Once(AtomicU8::new(0))
+        Once(ATOMIC_U8_INIT)
     }
 
     /// Creates a new `Once` value.
     #[cfg(not(feature = "nightly"))]
     #[inline]
     pub fn new() -> Once {
-        Once(AtomicU8::new(0))
+        Once(ATOMIC_U8_INIT)
     }
 
     /// Returns the current state of this `Once`.
     #[inline]
     pub fn state(&self) -> OnceState {
         let state = self.0.load(Ordering::Acquire);
         if state & DONE_BIT != 0 {
             OnceState::Done
@@ -166,17 +170,18 @@ impl Once {
     /// # Panics
     ///
     /// The closure `f` will only be executed once if this is called
     /// concurrently amongst many threads. If that closure panics, however, then
     /// it will *poison* this `Once` instance, causing all future invocations of
     /// `call_once` to also panic.
     #[inline]
     pub fn call_once<F>(&self, f: F)
-        where F: FnOnce()
+    where
+        F: FnOnce(),
     {
         if self.0.load(Ordering::Acquire) == DONE_BIT {
             return;
         }
 
         let mut f = Some(f);
         self.call_once_slow(false, &mut |_| unsafe { f.take().unchecked_unwrap()() });
     }
@@ -187,25 +192,27 @@ impl Once {
     /// this function will continue to attempt to call initialization functions
     /// until one of them doesn't panic.
     ///
     /// The closure `f` is yielded a structure which can be used to query the
     /// state of this `Once` (whether initialization has previously panicked or
     /// not).
     #[inline]
     pub fn call_once_force<F>(&self, f: F)
-        where F: FnOnce(OnceState)
+    where
+        F: FnOnce(OnceState),
     {
         if self.0.load(Ordering::Acquire) == DONE_BIT {
             return;
         }
 
         let mut f = Some(f);
-        self.call_once_slow(true,
-                            &mut |state| unsafe { f.take().unchecked_unwrap()(state) });
+        self.call_once_slow(true, &mut |state| unsafe {
+            f.take().unchecked_unwrap()(state)
+        });
     }
 
     // This is a non-generic function to reduce the monomorphization cost of
     // using `call_once` (this isn't exactly a trivial or small implementation).
     //
     // Additionally, this is tagged with `#[cold]` as it should indeed be cold
     // and it helps let LLVM know that calls to this function should be off the
     // fast path. Essentially, this should help generate more straight line code
@@ -234,57 +241,62 @@ impl Once {
                 fence(Ordering::Acquire);
                 panic!("Once instance has previously been poisoned");
             }
 
             // Grab the lock if it isn't locked, even if there is a queue on it.
             // We also clear the poison bit since we are going to try running
             // the closure again.
             if state & LOCKED_BIT == 0 {
-                match self.0
-                    .compare_exchange_weak(state,
-                                           (state | LOCKED_BIT) & !POISON_BIT,
-                                           Ordering::Acquire,
-                                           Ordering::Relaxed) {
+                match self.0.compare_exchange_weak(
+                    state,
+                    (state | LOCKED_BIT) & !POISON_BIT,
+                    Ordering::Acquire,
+                    Ordering::Relaxed,
+                ) {
                     Ok(_) => break,
                     Err(x) => state = x,
                 }
                 continue;
             }
 
             // If there is no queue, try spinning a few times
             if state & PARKED_BIT == 0 && spinwait.spin() {
                 state = self.0.load(Ordering::Relaxed);
                 continue;
             }
 
             // Set the parked bit
             if state & PARKED_BIT == 0 {
-                if let Err(x) = self.0.compare_exchange_weak(state,
-                                                             state | PARKED_BIT,
-                                                             Ordering::Relaxed,
-                                                             Ordering::Relaxed) {
+                if let Err(x) = self.0.compare_exchange_weak(
+                    state,
+                    state | PARKED_BIT,
+                    Ordering::Relaxed,
+                    Ordering::Relaxed,
+                ) {
                     state = x;
                     continue;
                 }
             }
 
             // Park our thread until we are woken up by the thread that owns the
             // lock.
             unsafe {
                 let addr = self as *const _ as usize;
                 let validate = || self.0.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT;
                 let before_sleep = || {};
                 let timed_out = |_, _| unreachable!();
-                parking_lot_core::park(addr,
-                                       validate,
-                                       before_sleep,
-                                       timed_out,
-                                       DEFAULT_PARK_TOKEN,
-                                       None);
+                parking_lot_core::park(
+                    addr,
+                    validate,
+                    before_sleep,
+                    timed_out,
+                    DEFAULT_PARK_TOKEN,
+                    None,
+                );
             }
 
             // Loop back and check if the done bit was set
             spinwait.reset();
             state = self.0.load(Ordering::Relaxed);
         }
 
         struct PanicGuard<'a>(&'a Once);
@@ -326,16 +338,22 @@ impl Once {
 
 impl Default for Once {
     #[inline]
     fn default() -> Once {
         Once::new()
     }
 }
 
+impl fmt::Debug for Once {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "Once {{ state: {:?} }}", &self.state())
+    }
+}
+
 #[cfg(test)]
 mod tests {
     #[cfg(feature = "nightly")]
     use std::panic;
     use std::sync::mpsc::channel;
     use std::thread;
     use {Once, ONCE_INIT};
 
@@ -386,21 +404,25 @@ mod tests {
     }
 
     #[cfg(feature = "nightly")]
     #[test]
     fn poison_bad() {
         static O: Once = ONCE_INIT;
 
         // poison the once
-        let t = panic::catch_unwind(|| { O.call_once(|| panic!()); });
+        let t = panic::catch_unwind(|| {
+            O.call_once(|| panic!());
+        });
         assert!(t.is_err());
 
         // poisoning propagates
-        let t = panic::catch_unwind(|| { O.call_once(|| {}); });
+        let t = panic::catch_unwind(|| {
+            O.call_once(|| {});
+        });
         assert!(t.is_err());
 
         // we can subvert poisoning, however
         let mut called = false;
         O.call_once_force(|p| {
             called = true;
             assert!(p.poisoned())
         });
@@ -411,17 +433,19 @@ mod tests {
     }
 
     #[cfg(feature = "nightly")]
     #[test]
     fn wait_for_force_to_finish() {
         static O: Once = ONCE_INIT;
 
         // poison the once
-        let t = panic::catch_unwind(|| { O.call_once(|| panic!()); });
+        let t = panic::catch_unwind(|| {
+            O.call_once(|| panic!());
+        });
         assert!(t.is_err());
 
         // make sure someone's waiting inside the once via a force
         let (tx1, rx1) = channel();
         let (tx2, rx2) = channel();
         let t1 = thread::spawn(move || {
             O.call_once_force(|p| {
                 assert!(p.poisoned());
@@ -430,19 +454,20 @@ mod tests {
             });
         });
 
         rx1.recv().unwrap();
 
         // put another waiter on the once
         let t2 = thread::spawn(|| {
             let mut called = false;
-            O.call_once(|| { called = true; });
+            O.call_once(|| {
+                called = true;
+            });
             assert!(!called);
         });
 
         tx2.send(()).unwrap();
 
         assert!(t1.join().is_ok());
         assert!(t2.join().is_ok());
-
     }
 }
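
Taken together, the Once API in this file lets callers recover from a poisoned state via call_once_force, as the tests above exercise. A minimal usage sketch, assuming the vendored parking_lot crate is available as a dependency:

    // Sketch: a poisoned Once can still be initialized with call_once_force.
    use std::panic;
    use parking_lot::{Once, ONCE_INIT};

    static INIT: Once = ONCE_INIT;

    fn main() {
        // Poison the Once by panicking inside the initializer.
        let _ = panic::catch_unwind(|| INIT.call_once(|| panic!("init failed")));

        // call_once_force still runs its closure and reports the poison.
        INIT.call_once_force(|state| {
            assert!(state.poisoned());
            // ...retry the initialization here...
        });
    }
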
--- a/third_party/rust/parking_lot/src/raw_mutex.rs
+++ b/third_party/rust/parking_lot/src/raw_mutex.rs
@@ -1,25 +1,29 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
+use std::sync::atomic::Ordering;
 #[cfg(feature = "nightly")]
-use std::sync::atomic::{AtomicU8, Ordering};
+use std::sync::atomic::{ATOMIC_U8_INIT, AtomicU8};
 #[cfg(feature = "nightly")]
 type U8 = u8;
 #[cfg(not(feature = "nightly"))]
-use stable::{AtomicU8, Ordering};
+use std::sync::atomic::AtomicUsize as AtomicU8;
+#[cfg(not(feature = "nightly"))]
+use std::sync::atomic::ATOMIC_USIZE_INIT as ATOMIC_U8_INIT;
 #[cfg(not(feature = "nightly"))]
 type U8 = usize;
 use std::time::{Duration, Instant};
-use parking_lot_core::{self, ParkResult, UnparkResult, SpinWait, UnparkToken, DEFAULT_PARK_TOKEN};
+use parking_lot_core::{self, ParkResult, SpinWait, UnparkResult, UnparkToken, DEFAULT_PARK_TOKEN};
+use deadlock;
 
 // UnparkToken used to indicate that the target thread should attempt to
 // lock the mutex again as soon as it is unparked.
 pub const TOKEN_NORMAL: UnparkToken = UnparkToken(0);
 
 // UnparkToken used to indicate that the mutex is being handed off to the target
 // thread directly without unlocking it.
 pub const TOKEN_HANDOFF: UnparkToken = UnparkToken(1);
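
These two tokens drive the fair-unlock ("handoff") path: a thread woken with TOKEN_HANDOFF already owns the mutex, while TOKEN_NORMAL means it must race for the lock again, as the park-result handling later in this hunk shows. A condensed, standalone sketch of that decision (retry_acquire is an illustrative stand-in for the CAS loop):

    // Sketch: how a woken thread interprets its unpark token.
    #[derive(PartialEq, Clone, Copy)]
    struct UnparkToken(usize);

    const TOKEN_NORMAL: UnparkToken = UnparkToken(0);
    const TOKEN_HANDOFF: UnparkToken = UnparkToken(1);

    fn after_unpark(token: UnparkToken) -> bool {
        if token == TOKEN_HANDOFF {
            // The unlocking thread kept the mutex locked and handed it over:
            // we own the lock without touching the state word again.
            true
        } else {
            // Normal wake-up: go back to competing for the lock.
            retry_acquire()
        }
    }

    fn retry_acquire() -> bool {
        // Stand-in for the compare_exchange loop in lock_slow().
        false
    }

    fn main() {
        assert!(after_unpark(TOKEN_HANDOFF));
        assert!(!after_unpark(TOKEN_NORMAL));
    }
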
@@ -30,160 +34,191 @@ const PARKED_BIT: U8 = 2;
 pub struct RawMutex {
     state: AtomicU8,
 }
 
 impl RawMutex {
     #[cfg(feature = "nightly")]
     #[inline]
     pub const fn new() -> RawMutex {
-        RawMutex { state: AtomicU8::new(0) }
+        RawMutex {
+            state: ATOMIC_U8_INIT,
+        }
     }
     #[cfg(not(feature = "nightly"))]
     #[inline]
     pub fn new() -> RawMutex {
-        RawMutex { state: AtomicU8::new(0) }
+        RawMutex {
+            state: ATOMIC_U8_INIT,
+        }
     }
 
     #[inline]
     pub fn lock(&self) {
         if self.state
             .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
-            .is_ok() {
-            return;
+            .is_err()
+        {
+            self.lock_slow(None);
         }
-        self.lock_slow(None);
+        unsafe { deadlock::acquire_resource(self as *const _ as usize) };
     }
 
     #[inline]
     pub fn try_lock_until(&self, timeout: Instant) -> bool {
-        if self.state
+        let result = if self.state
             .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
-            .is_ok() {
-            return true;
+            .is_ok()
+        {
+            true
+        } else {
+            self.lock_slow(Some(timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
         }
-        self.lock_slow(Some(timeout))
+        result
     }
 
     #[inline]
     pub fn try_lock_for(&self, timeout: Duration) -> bool {
-        if self.state
+        let result = if self.state
             .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
-            .is_ok() {
-            return true;
+            .is_ok()
+        {
+            true
+        } else {
+            self.lock_slow(Some(Instant::now() + timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
         }
-        self.lock_slow(Some(Instant::now() + timeout))
+        result
     }
 
     #[inline]
     pub fn try_lock(&self) -> bool {
         let mut state = self.state.load(Ordering::Relaxed);
         loop {
             if state & LOCKED_BIT != 0 {
                 return false;
             }
-            match self.state.compare_exchange_weak(state,
-                                                   state | LOCKED_BIT,
-                                                   Ordering::Acquire,
-                                                   Ordering::Relaxed) {
-                Ok(_) => return true,
+            match self.state.compare_exchange_weak(
+                state,
+                state | LOCKED_BIT,
+                Ordering::Acquire,
+                Ordering::Relaxed,
+            ) {
+                Ok(_) => {
+                    unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+                    return true;
+                }
                 Err(x) => state = x,
             }
         }
     }
 
     #[inline]
     pub fn unlock(&self, force_fair: bool) {
+        unsafe { deadlock::release_resource(self as *const _ as usize) };
         if self.state
             .compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
-            .is_ok() {
+            .is_ok()
+        {
             return;
         }
         self.unlock_slow(force_fair);
     }
 
     // Used by Condvar when requeuing threads to us, must be called while
     // holding the queue lock.
     #[inline]
-    pub fn mark_parked_if_locked(&self) -> bool {
+    pub(crate) fn mark_parked_if_locked(&self) -> bool {
         let mut state = self.state.load(Ordering::Relaxed);
         loop {
             if state & LOCKED_BIT == 0 {
                 return false;
             }
-            match self.state.compare_exchange_weak(state,
-                                                   state | PARKED_BIT,
-                                                   Ordering::Relaxed,
-                                                   Ordering::Relaxed) {
+            match self.state.compare_exchange_weak(
+                state,
+                state | PARKED_BIT,
+                Ordering::Relaxed,
+                Ordering::Relaxed,
+            ) {
                 Ok(_) => return true,
                 Err(x) => state = x,
             }
         }
     }
 
     // Used by Condvar when requeuing threads to us, must be called while
     // holding the queue lock.
     #[inline]
-    pub fn mark_parked(&self) {
+    pub(crate) fn mark_parked(&self) {
         self.state.fetch_or(PARKED_BIT, Ordering::Relaxed);
     }
 
     #[cold]
     #[inline(never)]
     fn lock_slow(&self, timeout: Option<Instant>) -> bool {
         let mut spinwait = SpinWait::new();
         let mut state = self.state.load(Ordering::Relaxed);
         loop {
             // Grab the lock if it isn't locked, even if there is a queue on it
             if state & LOCKED_BIT == 0 {
-                match self.state
-                    .compare_exchange_weak(state,
-                                           state | LOCKED_BIT,
-                                           Ordering::Acquire,
-                                           Ordering::Relaxed) {
+                match self.state.compare_exchange_weak(
+                    state,
+                    state | LOCKED_BIT,
+                    Ordering::Acquire,
+                    Ordering::Relaxed,
+                ) {
                     Ok(_) => return true,
                     Err(x) => state = x,
                 }
                 continue;
             }
 
             // If there is no queue, try spinning a few times
             if state & PARKED_BIT == 0 && spinwait.spin() {
                 state = self.state.load(Ordering::Relaxed);
                 continue;
             }
 
             // Set the parked bit
             if state & PARKED_BIT == 0 {
-                if let Err(x) = self.state.compare_exchange_weak(state,
-                                                                 state | PARKED_BIT,
-                                                                 Ordering::Relaxed,
-                                                                 Ordering::Relaxed) {
+                if let Err(x) = self.state.compare_exchange_weak(
+                    state,
+                    state | PARKED_BIT,
+                    Ordering::Relaxed,
+                    Ordering::Relaxed,
+                ) {
                     state = x;
                     continue;
                 }
             }
 
             // Park our thread until we are woken up by an unlock
             unsafe {
                 let addr = self as *const _ as usize;
                 let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT;
                 let before_sleep = || {};
                 let timed_out = |_, was_last_thread| {
                     // Clear the parked bit if we were the last parked thread
                     if was_last_thread {
                         self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
                     }
                 };
-                match parking_lot_core::park(addr,
-                                             validate,
-                                             before_sleep,
-                                             timed_out,
-                                             DEFAULT_PARK_TOKEN,
-                                             timeout) {
+                match parking_lot_core::park(
+                    addr,
+                    validate,
+                    before_sleep,
+                    timed_out,
+                    DEFAULT_PARK_TOKEN,
+                    timeout,
+                ) {
                     // The thread that unparked us passed the lock on to us
                     // directly without unlocking it.
                     ParkResult::Unparked(TOKEN_HANDOFF) => return true,
 
                     // We were unparked normally, try acquiring the lock again
                     ParkResult::Unparked(_) => (),
 
                     // The validation function failed, try locking again
@@ -201,17 +236,18 @@ impl RawMutex {
     }
 
     #[cold]
     #[inline(never)]
     fn unlock_slow(&self, force_fair: bool) {
         // Unlock directly if there are no parked threads
         if self.state
             .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
-            .is_ok() {
+            .is_ok()
+        {
             return;
         }
 
         // Unpark one thread and leave the parked bit set if there might
         // still be parked threads on this address.
         unsafe {
             let addr = self as *const _ as usize;
             let callback = |result: UnparkResult| {
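
The recurring change in this file is that every successful acquisition reports the mutex address to a deadlock-detection hook, and every unlock reports the release before the state word is cleared. A standalone sketch of that bracketing pattern, with no-op stubs standing in for the crate's deadlock module (the stubs are illustrative, not the vendored implementation):

    // Sketch: bracketing a toy lock with acquire/release bookkeeping calls.
    use std::sync::atomic::{AtomicBool, Ordering};

    mod deadlock {
        // No-op stand-ins for the real hooks, which key resources by address.
        pub unsafe fn acquire_resource(_key: usize) {}
        pub unsafe fn release_resource(_key: usize) {}
    }

    struct ToyMutex {
        locked: AtomicBool,
    }

    impl ToyMutex {
        fn try_lock(&self) -> bool {
            if self.locked
                .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
            {
                // Report the acquisition, keyed by the lock's address.
                unsafe { deadlock::acquire_resource(self as *const _ as usize) };
                true
            } else {
                false
            }
        }

        fn unlock(&self) {
            // Report the release before the lock becomes available again.
            unsafe { deadlock::release_resource(self as *const _ as usize) };
            self.locked.store(false, Ordering::Release);
        }
    }

    fn main() {
        let m = ToyMutex { locked: AtomicBool::new(false) };
        assert!(m.try_lock());
        assert!(!m.try_lock());
        m.unlock();
        assert!(m.try_lock());
    }
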
--- a/third_party/rust/parking_lot/src/raw_remutex.rs
+++ b/third_party/rust/parking_lot/src/raw_remutex.rs
@@ -4,30 +4,21 @@
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::time::{Duration, Instant};
 use std::cell::Cell;
 use raw_mutex::RawMutex;
-#[cfg(not(target_os = "emscripten"))]
-use thread_id;
 
 // Helper function to get a thread id
-#[cfg(not(target_os = "emscripten"))]
 fn get_thread_id() -> usize {
-    thread_id::get()
-}
-#[cfg(target_os = "emscripten")]
-fn get_thread_id() -> usize {
-    // pthread_self returns 0 on enscripten, but we use that as a
-    // reserved value to indicate an empty slot. We instead fall
-    // back to using the address of a thread-local variable, which
-    // is slightly slower but guaranteed to produce a non-zero value.
+    // The address of a thread-local variable is guaranteed to be unique to the
+    // current thread, and is also guaranteed to be non-zero.
     thread_local!(static KEY: u8 = unsafe { ::std::mem::uninitialized() });
     KEY.with(|x| x as *const _ as usize)
 }
 
 pub struct RawReentrantMutex {
     owner: AtomicUsize,
     lock_count: Cell<usize>,
     mutex: RawMutex,
@@ -54,20 +45,22 @@ impl RawReentrantMutex {
             mutex: RawMutex::new(),
         }
     }
 
     #[inline]
     fn lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool {
         let id = get_thread_id();
         if self.owner.load(Ordering::Relaxed) == id {
-            self.lock_count.set(self.lock_count
-                .get()
-                .checked_add(1)
-                .expect("ReentrantMutex lock count overflow"));
+            self.lock_count.set(
+                self.lock_count
+                    .get()
+                    .checked_add(1)
+                    .expect("ReentrantMutex lock count overflow"),
+            );
         } else {
             if !try_lock() {
                 return false;
             }
             self.owner.store(id, Ordering::Relaxed);
             self.lock_count.set(1);
         }
         true
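
The reentrant mutex above distinguishes owners purely by the address of a thread-local and counts recursive acquisitions in lock_count. A standalone sketch of the thread-id part (using a zero-initialized byte instead of mem::uninitialized, an intentional simplification):

    // Sketch: the address of a thread-local is unique per live thread and non-zero.
    use std::thread;

    fn get_thread_id() -> usize {
        thread_local!(static KEY: u8 = 0);
        KEY.with(|x| x as *const u8 as usize)
    }

    fn main() {
        let main_id = get_thread_id();
        let child_id = thread::spawn(get_thread_id).join().unwrap();

        assert_ne!(main_id, 0);
        assert_ne!(child_id, 0);
        assert_ne!(main_id, child_id); // distinct live threads, distinct addresses
    }
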
--- a/third_party/rust/parking_lot/src/raw_rwlock.rs
+++ b/third_party/rust/parking_lot/src/raw_rwlock.rs
@@ -1,265 +1,502 @@
 // Copyright 2016 Amanieu d'Antras
 //
 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-#[cfg(feature = "nightly")]
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::cell::Cell;
-#[cfg(not(feature = "nightly"))]
-use stable::{AtomicUsize, Ordering};
 use std::time::{Duration, Instant};
-use parking_lot_core::{self, ParkResult, UnparkResult, SpinWait, ParkToken, FilterOp};
+use parking_lot_core::{self, FilterOp, ParkResult, ParkToken, SpinWait, UnparkResult};
 use elision::{have_elision, AtomicElisionExt};
-use util::UncheckedOptionExt;
-use raw_mutex::{TOKEN_NORMAL, TOKEN_HANDOFF};
+use raw_mutex::{TOKEN_HANDOFF, TOKEN_NORMAL};
+use deadlock;
+
+const USABLE_BITS_MASK: usize = {
+    #[cfg(feature = "nightly")]
+    {
+        const TOTAL_BITS: usize = ::std::mem::size_of::<usize>() * 8;
+        // specifies the number of usable bits, useful to test the
+        // implementation with fewer bits (the current implementation
+        // requires this to be at least 4)
+        const USABLE_BITS: usize = TOTAL_BITS;
+        (!0) >> (TOTAL_BITS - USABLE_BITS)
+    }
+    #[cfg(not(feature = "nightly"))]
+    {
+        !0
+    }
+};
+
+const PARKED_BIT: usize = 0b001;
+const UPGRADING_BIT: usize = 0b010;
+// A shared guard acquires a single guard resource
+const SHARED_GUARD: usize = 0b100;
+const GUARD_COUNT_MASK: usize = USABLE_BITS_MASK & !(SHARED_GUARD - 1);
+// An exclusive lock acquires all of the guard resource (i.e. it is exclusive)
+const EXCLUSIVE_GUARD: usize = GUARD_COUNT_MASK;
+// An upgradable lock acquires just over half of the guard resource
+// This should be (GUARD_COUNT_MASK + SHARED_GUARD) >> 1, however this might
+// overflow, so we shift before adding (which is okay since the least
+// significant bit is zero for both GUARD_COUNT_MASK and SHARED_GUARD)
+const UPGRADABLE_GUARD: usize = (GUARD_COUNT_MASK >> 1) + (SHARED_GUARD >> 1);
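
With the full usable-bit mask these constants take concrete values: the guard counter occupies everything above the two flag bits, an exclusive lock consumes the whole budget, and an upgradable lock takes just over half of it (the top bit of the word). A standalone sketch that checks this arithmetic (derived here, not taken from the patch):

    // Sketch: concrete values of the guard constants when USABLE_BITS_MASK == !0.
    fn main() {
        const USABLE_BITS_MASK: usize = !0;
        const PARKED_BIT: usize = 0b001;
        const UPGRADING_BIT: usize = 0b010;
        const SHARED_GUARD: usize = 0b100;
        const GUARD_COUNT_MASK: usize = USABLE_BITS_MASK & !(SHARED_GUARD - 1);
        const EXCLUSIVE_GUARD: usize = GUARD_COUNT_MASK;
        const UPGRADABLE_GUARD: usize = (GUARD_COUNT_MASK >> 1) + (SHARED_GUARD >> 1);

        // The two flag bits sit below the guard counter and never overlap it.
        assert_eq!(GUARD_COUNT_MASK & (PARKED_BIT | UPGRADING_BIT), 0);

        // Shifting before adding yields "just over half" of the budget:
        // exactly the top bit of the word.
        assert_eq!(UPGRADABLE_GUARD, (usize::max_value() >> 1) + 1);

        // Two upgradable guards overflow the budget, so at most one upgradable
        // holder can coexist with shared holders at a time.
        assert!(UPGRADABLE_GUARD.checked_add(UPGRADABLE_GUARD).is_none());
        assert!(UPGRADABLE_GUARD + SHARED_GUARD <= EXCLUSIVE_GUARD);
    }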
 
 // Token indicating what type of lock queued threads are trying to acquire
-const TOKEN_SHARED: ParkToken = ParkToken(0);
-const TOKEN_EXCLUSIVE: ParkToken = ParkToken(1);
+const TOKEN_SHARED: ParkToken = ParkToken(SHARED_GUARD);
+const TOKEN_EXCLUSIVE: ParkToken = ParkToken(EXCLUSIVE_GUARD);
+const TOKEN_UPGRADABLE: ParkToken = ParkToken(UPGRADABLE_GUARD);
+const TOKEN_UPGRADING: ParkToken = ParkToken((EXCLUSIVE_GUARD - UPGRADABLE_GUARD) | UPGRADING_BIT);
 
-const PARKED_BIT: usize = 1;
-const LOCKED_BIT: usize = 2;
-const SHARED_COUNT_MASK: usize = !3;
-const SHARED_COUNT_INC: usize = 4;
-const SHARED_COUNT_SHIFT: usize = 2;
+#[inline(always)]
+fn checked_add(left: usize, right: usize) -> Option<usize> {
+    if USABLE_BITS_MASK == !0 {
+        left.checked_add(right)
+    } else {
+        debug_assert!(left <= USABLE_BITS_MASK && right <= USABLE_BITS_MASK);
+        let res = left + right;
+        if res & USABLE_BITS_MASK < right {
+            None
+        } else {
+            Some(res)
+        }
+    }
+}
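
When USABLE_BITS_MASK is the full word, checked_add above is just usize::checked_add; with a reduced mask, the wrap-around test on the masked sum detects the same overflow. A standalone sketch with an 8-bit mask (chosen only for illustration):

    // Sketch: the masked overflow check, with a deliberately small mask.
    const USABLE_BITS_MASK: usize = 0xFF;

    fn checked_add(left: usize, right: usize) -> Option<usize> {
        debug_assert!(left <= USABLE_BITS_MASK && right <= USABLE_BITS_MASK);
        let res = left + right;
        // If the masked sum wrapped past the usable bits it is now smaller
        // than `right`, which signals that the guard budget is exhausted.
        if res & USABLE_BITS_MASK < right {
            None
        } else {
            Some(res)
        }
    }

    fn main() {
        assert_eq!(checked_add(0x10, 0x20), Some(0x30)); // fits comfortably
        assert_eq!(checked_add(0xDF, 0x20), Some(0xFF)); // exactly fills the mask
        assert_eq!(checked_add(0xF0, 0x20), None);       // would exceed 8 bits
    }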
 
 pub struct RawRwLock {
     state: AtomicUsize,
 }
 
 impl RawRwLock {
     #[cfg(feature = "nightly")]
     #[inline]
     pub const fn new() -> RawRwLock {
-        RawRwLock { state: AtomicUsize::new(0) }
+        RawRwLock {
+            state: AtomicUsize::new(0),
+        }
     }
     #[cfg(not(feature = "nightly"))]
     #[inline]
     pub fn new() -> RawRwLock {
-        RawRwLock { state: AtomicUsize::new(0) }
+        RawRwLock {
+            state: AtomicUsize::new(0),
+        }
     }
 
     #[inline]
     pub fn lock_exclusive(&self) {
         if self.state
-            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
-            .is_ok() {
-            return;
+            .compare_exchange_weak(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed)
+            .is_err()
+        {
+            let result = self.lock_exclusive_slow(None);
+            debug_assert!(result);
         }
-        self.lock_exclusive_slow(None);
+        unsafe { deadlock::acquire_resource(self as *const _ as usize) };
     }
 
     #[inline]
     pub fn try_lock_exclusive_until(&self, timeout: Instant) -> bool {
-        if self.state
-            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
-            .is_ok() {
-            return true;
+        let result = if self.state
+            .compare_exchange_weak(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed)
+            .is_ok()
+        {
+            true
+        } else {
+            self.lock_exclusive_slow(Some(timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
         }
-        self.lock_exclusive_slow(Some(timeout))
+        result
     }
 
     #[inline]
     pub fn try_lock_exclusive_for(&self, timeout: Duration) -> bool {
-        if self.state
-            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
-            .is_ok() {
-            return true;
+        let result = if self.state
+            .compare_exchange_weak(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed)
+            .is_ok()
+        {
+            true
+        } else {
+            self.lock_exclusive_slow(Some(Instant::now() + timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
         }
-        self.lock_exclusive_slow(Some(Instant::now() + timeout))
+        result
     }
 
     #[inline]
     pub fn try_lock_exclusive(&self) -> bool {
-        self.state
-            .compare_exchange(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
+        if self.state
+            .compare_exchange(0, EXCLUSIVE_GUARD, Ordering::Acquire, Ordering::Relaxed)
             .is_ok()
+        {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+            true
+        } else {
+            false
+        }
     }
 
     #[inline]
     pub fn unlock_exclusive(&self, force_fair: bool) {
+        unsafe { deadlock::release_resource(self as *const _ as usize) };
         if self.state
-            .compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
-            .is_ok() {
+            .compare_exchange_weak(EXCLUSIVE_GUARD, 0, Ordering::Release, Ordering::Relaxed)
+            .is_ok()
+        {
             return;
         }
         self.unlock_exclusive_slow(force_fair);
     }
 
     #[inline]
-    pub fn downgrade(&self) {
+    pub fn exclusive_to_shared(&self) {
         let state = self.state
-            .fetch_add(SHARED_COUNT_INC - LOCKED_BIT, Ordering::Release);
+            .fetch_sub(EXCLUSIVE_GUARD - SHARED_GUARD, Ordering::Release);
 
-        // Wake up parked shared threads if there are any
+        // Wake up parked shared and upgradable threads if there are any
         if state & PARKED_BIT != 0 {
-            self.downgrade_slow();
+            self.exclusive_to_shared_slow();
         }
     }
 
     #[inline(always)]
     fn try_lock_shared_fast(&self, recursive: bool) -> bool {
         let state = self.state.load(Ordering::Relaxed);
 
-        if !recursive {
-            // Even if there are no exclusive locks, we can't allow grabbing a
-            // shared lock while there are parked threads since that could lead to
-            // writer starvation.
-            if state & (LOCKED_BIT | PARKED_BIT) != 0 {
-                return false;
-            }
-        } else {
-            // Allow acquiring a lock even if a thread is parked to avoid
-            // deadlocks for recursive read locks.
-            if state & LOCKED_BIT != 0 {
-                return false;
-            }
+        // We can't allow grabbing a shared lock while there are parked threads
+        // since that could lead to writer starvation.
+        if !recursive && state & PARKED_BIT != 0 {
+            return false;
         }
 
         // Use hardware lock elision to avoid cache conflicts when multiple
         // readers try to acquire the lock. We only do this if the lock is
         // completely empty since elision handles conflicts poorly.
         if have_elision() && state == 0 {
-            self.state.elision_acquire(0, SHARED_COUNT_INC).is_ok()
-        } else if let Some(new_state) = state.checked_add(SHARED_COUNT_INC) {
+            self.state.elision_acquire(0, SHARED_GUARD).is_ok()
+        } else if let Some(new_state) = checked_add(state, SHARED_GUARD) {
             self.state
                 .compare_exchange_weak(state, new_state, Ordering::Acquire, Ordering::Relaxed)
                 .is_ok()
         } else {
             false
         }
     }
 
     #[inline]
     pub fn lock_shared(&self, recursive: bool) {
         if !self.try_lock_shared_fast(recursive) {
-            self.lock_shared_slow(recursive, None);
+            let result = self.lock_shared_slow(recursive, None);
+            debug_assert!(result);
         }
+        unsafe { deadlock::acquire_resource(self as *const _ as usize) };
     }
 
     #[inline]
     pub fn try_lock_shared_until(&self, recursive: bool, timeout: Instant) -> bool {
-        if self.try_lock_shared_fast(recursive) {
-            return true;
+        let result = if self.try_lock_shared_fast(recursive) {
+            true
+        } else {
+            self.lock_shared_slow(recursive, Some(timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
         }
-        self.lock_shared_slow(recursive, Some(timeout))
+        result
     }
 
     #[inline]
     pub fn try_lock_shared_for(&self, recursive: bool, timeout: Duration) -> bool {
-        if self.try_lock_shared_fast(recursive) {
-            return true;
+        let result = if self.try_lock_shared_fast(recursive) {
+            true
+        } else {
+            self.lock_shared_slow(recursive, Some(Instant::now() + timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
         }
-        self.lock_shared_slow(recursive, Some(Instant::now() + timeout))
+        result
     }
 
     #[inline]
     pub fn try_lock_shared(&self, recursive: bool) -> bool {
-        if self.try_lock_shared_fast(recursive) {
-            return true;
+        let result = if self.try_lock_shared_fast(recursive) {
+            true
+        } else {
+            self.try_lock_shared_slow(recursive)
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
         }
-        self.try_lock_shared_slow(recursive)
+        result
     }
 
     #[inline]
     pub fn unlock_shared(&self, force_fair: bool) {
+        unsafe { deadlock::release_resource(self as *const _ as usize) };
         let state = self.state.load(Ordering::Relaxed);
-        if state & PARKED_BIT == 0 || state & SHARED_COUNT_MASK != SHARED_COUNT_INC {
+        if state & PARKED_BIT == 0
+            || (state & UPGRADING_BIT == 0 && state & GUARD_COUNT_MASK != SHARED_GUARD)
+        {
             if have_elision() {
-                if self.state.elision_release(state, state - SHARED_COUNT_INC).is_ok() {
+                if self.state
+                    .elision_release(state, state - SHARED_GUARD)
+                    .is_ok()
+                {
                     return;
                 }
             } else {
                 if self.state
-                    .compare_exchange_weak(state,
-                                           state - SHARED_COUNT_INC,
-                                           Ordering::Release,
-                                           Ordering::Relaxed)
-                    .is_ok() {
+                    .compare_exchange_weak(
+                        state,
+                        state - SHARED_GUARD,
+                        Ordering::Release,
+                        Ordering::Relaxed,
+                    )
+                    .is_ok()
+                {
                     return;
                 }
             }
         }
         self.unlock_shared_slow(force_fair);
     }
 
+    #[inline(always)]
+    fn try_lock_upgradable_fast(&self) -> bool {
+        let state = self.state.load(Ordering::Relaxed);
+
+        // We can't allow grabbing an upgradable lock while there are parked threads
+        // since that could lead to writer starvation.
+        if state & PARKED_BIT != 0 {
+            return false;
+        }
+
+        if let Some(new_state) = checked_add(state, UPGRADABLE_GUARD) {
+            self.state
+                .compare_exchange_weak(state, new_state, Ordering::Acquire, Ordering::Relaxed)
+                .is_ok()
+        } else {
+            false
+        }
+    }
+
+    #[inline]
+    pub fn lock_upgradable(&self) {
+        if !self.try_lock_upgradable_fast() {
+            let result = self.lock_upgradable_slow(None);
+            debug_assert!(result);
+        }
+        unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+    }
+
+    #[inline]
+    pub fn try_lock_upgradable_until(&self, timeout: Instant) -> bool {
+        let result = if self.try_lock_upgradable_fast() {
+            true
+        } else {
+            self.lock_upgradable_slow(Some(timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        }
+        result
+    }
+
+    #[inline]
+    pub fn try_lock_upgradable_for(&self, timeout: Duration) -> bool {
+        let result = if self.try_lock_upgradable_fast() {
+            true
+        } else {
+            self.lock_upgradable_slow(Some(Instant::now() + timeout))
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        }
+        result
+    }
+
+    #[inline]
+    pub fn try_lock_upgradable(&self) -> bool {
+        let result = if self.try_lock_upgradable_fast() {
+            true
+        } else {
+            self.try_lock_upgradable_slow()
+        };
+        if result {
+            unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+        }
+        result
+    }
+
+    #[inline]
+    pub fn unlock_upgradable(&self, force_fair: bool) {
+        unsafe { deadlock::release_resource(self as *const _ as usize) };
+        if self.state
+            .compare_exchange_weak(UPGRADABLE_GUARD, 0, Ordering::Release, Ordering::Relaxed)
+            .is_ok()
+        {
+            return;
+        }
+        self.unlock_upgradable_slow(force_fair);
+    }
+
+    #[inline]
+    pub fn upgradable_to_shared(&self) {
+        let state = self.state
+            .fetch_sub(UPGRADABLE_GUARD - SHARED_GUARD, Ordering::Relaxed);
+
+        // Wake up parked shared and upgradable threads if there are any
+        if state & PARKED_BIT != 0 {
+            self.upgradable_to_shared_slow(state);
+        }
+    }
+
+    #[inline]
+    pub fn upgradable_to_exclusive(&self) {
+        if self.state
+            .compare_exchange_weak(
+                UPGRADABLE_GUARD,
+                EXCLUSIVE_GUARD,
+                Ordering::Relaxed,
+                Ordering::Relaxed,
+            )
+            .is_err()
+        {
+            let result = self.upgradable_to_exclusive_slow(None);
+            debug_assert!(result);
+        }
+    }
+
+    #[inline]
+    pub fn try_upgradable_to_exclusive_until(&self, timeout: Instant) -> bool {
+        if self.state
+            .compare_exchange_weak(
+                UPGRADABLE_GUARD,
+                EXCLUSIVE_GUARD,
+                Ordering::Relaxed,
+                Ordering::Relaxed,
+            )
+            .is_ok()
+        {
+            true
+        } else {
+            self.upgradable_to_exclusive_slow(Some(timeout))
+        }
+    }
+
+    #[inline]
+    pub fn try_upgradable_to_exclusive_for(&self, timeout: Duration) -> bool {
+        if self.state
+            .compare_exchange_weak(
+                UPGRADABLE_GUARD,
+                EXCLUSIVE_GUARD,
+                Ordering::Relaxed,
+                Ordering::Relaxed,
+            )
+            .is_ok()
+        {
+            true
+        } else {
+            self.upgradable_to_exclusive_slow(Some(Instant::now() + timeout))
+        }
+    }
+
+    #[inline]
+    pub fn try_upgradable_to_exclusive(&self) -> bool {
+        self.state
+            .compare_exchange(
+                UPGRADABLE_GUARD,
+                EXCLUSIVE_GUARD,
+                Ordering::Relaxed,
+                Ordering::Relaxed,
+            )
+            .is_ok()
+    }
+
     #[cold]
     #[inline(never)]
     fn lock_exclusive_slow(&self, timeout: Option<Instant>) -> bool {
         let mut spinwait = SpinWait::new();
         let mut state = self.state.load(Ordering::Relaxed);
         loop {
             // Grab the lock if it isn't locked, even if there are other
             // threads parked.
-            if state & (LOCKED_BIT | SHARED_COUNT_MASK) == 0 {
-                match self.state
-                    .compare_exchange_weak(state,
-                                           state | LOCKED_BIT,
-                                           Ordering::Acquire,
-                                           Ordering::Relaxed) {
+            if let Some(new_state) = checked_add(state, EXCLUSIVE_GUARD) {
+                match self.state.compare_exchange_weak(
+                    state,
+                    new_state,
+                    Ordering::Acquire,
+                    Ordering::Relaxed,
+                ) {
                     Ok(_) => return true,
                     Err(x) => state = x,
                 }
                 continue;
             }
 
             // If there are no parked threads and only one reader or writer, try
             // spinning a few times.
-            if state & PARKED_BIT == 0 &&
-               (state & LOCKED_BIT != 0 || state & SHARED_COUNT_MASK == SHARED_COUNT_INC) &&
-               spinwait.spin() {
+            if (state == EXCLUSIVE_GUARD || state == SHARED_GUARD || state == UPGRADABLE_GUARD)
+                && spinwait.spin()
+            {
                 state = self.state.load(Ordering::Relaxed);
                 continue;
             }
 
             // Park our thread until we are woken up by an unlock
             unsafe {
                 let addr = self as *const _ as usize;
                 let validate = || {
                     let mut state = self.state.load(Ordering::Relaxed);
                     loop {
                         // If the rwlock is free, abort the park and try to grab
                         // it immediately.
-                        if state & (LOCKED_BIT | SHARED_COUNT_MASK) == 0 {
+                        if state & GUARD_COUNT_MASK == 0 {
                             return false;
                         }
 
                         // Nothing to do if the parked bit is already set
                         if state & PARKED_BIT != 0 {
                             return true;
                         }
 
                         // Set the parked bit
-                        match self.state.compare_exchange_weak(state,
-                                                               state | PARKED_BIT,
-                                                               Ordering::Relaxed,
-                                                               Ordering::Relaxed) {
+                        match self.state.compare_exchange_weak(
+                            state,
+                            state | PARKED_BIT,
+                            Ordering::Relaxed,
+                            Ordering::Relaxed,
+                        ) {
                             Ok(_) => return true,
                             Err(x) => state = x,
                         }
                     }
                 };
                 let before_sleep = || {};
                 let timed_out = |_, was_last_thread| {
                     // Clear the parked bit if we were the last parked thread
                     if was_last_thread {
                         self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
                     }
                 };
-                match parking_lot_core::park(addr,
-                                             validate,
-                                             before_sleep,
-                                             timed_out,
-                                             TOKEN_EXCLUSIVE,
-                                             timeout) {
+                match parking_lot_core::park(
+                    addr,
+                    validate,
+                    before_sleep,
+                    timed_out,
+                    TOKEN_EXCLUSIVE,
+                    timeout,
+                ) {
                     // The thread that unparked us passed the lock on to us
                     // directly without unlocking it.
                     ParkResult::Unparked(TOKEN_HANDOFF) => return true,
 
                     // We were unparked normally, try acquiring the lock again
                     ParkResult::Unparked(_) => (),
 
                     // The validation function failed, try locking again
@@ -276,87 +513,75 @@ impl RawRwLock {
         }
     }
 
     #[cold]
     #[inline(never)]
     fn unlock_exclusive_slow(&self, force_fair: bool) {
         // Unlock directly if there are no parked threads
         if self.state
-            .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
-            .is_ok() {
+            .compare_exchange(EXCLUSIVE_GUARD, 0, Ordering::Release, Ordering::Relaxed)
+            .is_ok()
+        {
             return;
-        }
+        };
 
-        // There are threads to unpark. We can unpark a single exclusive
-        // thread or many shared threads.
-        let first_token = Cell::new(None);
+        // There are threads to unpark. We unpark threads up to the guard capacity.
+        let guard_count = Cell::new(0);
         unsafe {
             let addr = self as *const _ as usize;
-            let filter = |token| -> FilterOp {
-                if let Some(first_token) = first_token.get() {
-                    if first_token == TOKEN_EXCLUSIVE || token == TOKEN_EXCLUSIVE {
-                        FilterOp::Stop
-                    } else {
+            let filter = |ParkToken(token)| -> FilterOp {
+                match checked_add(guard_count.get(), token) {
+                    Some(new_guard_count) => {
+                        guard_count.set(new_guard_count);
                         FilterOp::Unpark
                     }
-                } else {
-                    first_token.set(Some(token));
-                    FilterOp::Unpark
+                    None => FilterOp::Stop,
                 }
             };
             let callback = |result: UnparkResult| {
                 // If we are using a fair unlock then we should keep the
                 // rwlock locked and hand it off to the unparked threads.
                 if result.unparked_threads != 0 && (force_fair || result.be_fair) {
-                    if first_token.get().unchecked_unwrap() == TOKEN_EXCLUSIVE {
-                        // If we unparked an exclusive thread, just clear the
-                        // parked bit if there are no more parked threads.
-                        if !result.have_more_threads {
-                            self.state.store(LOCKED_BIT, Ordering::Relaxed);
-                        }
+                    // We need to set the guard count accordingly.
+                    let mut new_state = guard_count.get();
+
+                    if result.have_more_threads {
+                        new_state |= PARKED_BIT;
+                    }
+
+                    self.state.store(new_state, Ordering::Release);
+                    TOKEN_HANDOFF
+                } else {
+                    // Clear the parked bit if there are no more parked threads.
+                    if result.have_more_threads {
+                        self.state.store(PARKED_BIT, Ordering::Release);
                     } else {
-                        // If we unparked shared threads then we need to set
-                        // the shared count accordingly.
-                        if result.have_more_threads {
-                            self.state.store((result.unparked_threads << SHARED_COUNT_SHIFT) |
-                                             PARKED_BIT,
-                                             Ordering::Release);
-                        } else {
-                            self.state.store(result.unparked_threads << SHARED_COUNT_SHIFT,
-                                             Ordering::Release);
-                        }
+                        self.state.store(0, Ordering::Release);
                     }
-                    return TOKEN_HANDOFF;
+                    TOKEN_NORMAL
                 }
-
-                // Clear the locked bit, and the parked bit as well if there
-                // are no more parked threads.
-                if result.have_more_threads {
-                    self.state.store(PARKED_BIT, Ordering::Release);
-                } else {
-                    self.state.store(0, Ordering::Release);
-                }
-                TOKEN_NORMAL
             };
             parking_lot_core::unpark_filter(addr, filter, callback);
         }
     }
 
     #[cold]
     #[inline(never)]
-    fn downgrade_slow(&self) {
-        // Unpark shared threads only
+    fn exclusive_to_shared_slow(&self) {
         unsafe {
             let addr = self as *const _ as usize;
-            let filter = |token| -> FilterOp {
-                if token == TOKEN_SHARED {
-                    FilterOp::Unpark
-                } else {
-                    FilterOp::Stop
+            let mut guard_count = SHARED_GUARD;
+            let filter = |ParkToken(token)| -> FilterOp {
+                match checked_add(guard_count, token) {
+                    Some(new_guard_count) => {
+                        guard_count = new_guard_count;
+                        FilterOp::Unpark
+                    }
+                    None => FilterOp::Stop,
                 }
             };
             let callback = |result: UnparkResult| {
                 // Clear the parked bit if there are no more parked threads
                 if !result.have_more_threads {
                     self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
                 }
                 TOKEN_NORMAL
@@ -372,40 +597,49 @@ impl RawRwLock {
         let mut spinwait_shared = SpinWait::new();
         let mut state = self.state.load(Ordering::Relaxed);
         let mut unparked = false;
         loop {
             // Use hardware lock elision to avoid cache conflicts when multiple
             // readers try to acquire the lock. We only do this if the lock is
             // completely empty since elision handles conflicts poorly.
             if have_elision() && state == 0 {
-                match self.state.elision_acquire(0, SHARED_COUNT_INC) {
+                match self.state.elision_acquire(0, SHARED_GUARD) {
                     Ok(_) => return true,
                     Err(x) => state = x,
                 }
             }
 
             // Grab the lock if there are no exclusive threads locked or
             // waiting. However if we were unparked then we are allowed to grab
             // the lock even if there are pending exclusive threads.
-            if state & LOCKED_BIT == 0 && (unparked || recursive || state & PARKED_BIT == 0) {
-                let new = state.checked_add(SHARED_COUNT_INC)
-                    .expect("RwLock shared count overflow");
-                if self.state
-                    .compare_exchange_weak(state, new, Ordering::Acquire, Ordering::Relaxed)
-                    .is_ok() {
-                    return true;
+            if unparked || recursive || state & PARKED_BIT == 0 {
+                if let Some(new_state) = checked_add(state, SHARED_GUARD) {
+                    if self.state
+                        .compare_exchange_weak(
+                            state,
+                            new_state,
+                            Ordering::Acquire,
+                            Ordering::Relaxed,
+                        )
+                        .is_ok()
+                    {
+                        return true;
+                    }
+
+                    // If there is high contention on the reader count then we want
+                    // to leave some time between attempts to acquire the lock to
+                    // let other threads make progress.
+                    spinwait_shared.spin_no_yield();
+                    state = self.state.load(Ordering::Relaxed);
+                    continue;
+                } else {
+                    // We were unparked spuriously, reset unparked flag.
+                    unparked = false;
                 }
-
-                // If there is high contention on the reader count then we want
-                // to leave some time between attempts to acquire the lock to
-                // let other threads make progress.
-                spinwait_shared.spin_no_yield();
-                state = self.state.load(Ordering::Relaxed);
-                continue;
             }
 
             // If there are no parked threads, try spinning a few times
             if state & PARKED_BIT == 0 && spinwait.spin() {
                 state = self.state.load(Ordering::Relaxed);
                 continue;
             }
 
@@ -416,46 +650,50 @@ impl RawRwLock {
                     let mut state = self.state.load(Ordering::Relaxed);
                     loop {
                         // Nothing to do if the parked bit is already set
                         if state & PARKED_BIT != 0 {
                             return true;
                         }
 
                         // If the parked bit is not set then it means we are at
-                        // the front of the queue. If there is no exclusive lock
-                        // then we should abort the park and try acquiring the
-                        // lock again.
-                        if state & LOCKED_BIT == 0 {
+                        // the front of the queue. If there is space for another
+                        // lock then we should abort the park and try acquiring
+                        // the lock again.
+                        if state & GUARD_COUNT_MASK != GUARD_COUNT_MASK {
                             return false;
                         }
 
                         // Set the parked bit
-                        match self.state.compare_exchange_weak(state,
-                                                               state | PARKED_BIT,
-                                                               Ordering::Relaxed,
-                                                               Ordering::Relaxed) {
+                        match self.state.compare_exchange_weak(
+                            state,
+                            state | PARKED_BIT,
+                            Ordering::Relaxed,
+                            Ordering::Relaxed,
+                        ) {
                             Ok(_) => return true,
                             Err(x) => state = x,
                         }
                     }
                 };
                 let before_sleep = || {};
                 let timed_out = |_, was_last_thread| {
                     // Clear the parked bit if we were the last parked thread
                     if was_last_thread {
                         self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
                     }
                 };
-                match parking_lot_core::park(addr,
-                                             validate,
-                                             before_sleep,
-                                             timed_out,
-                                             TOKEN_SHARED,
-                                             timeout) {
+                match parking_lot_core::park(
+                    addr,
+                    validate,
+                    before_sleep,
+                    timed_out,
+                    TOKEN_SHARED,
+                    timeout,
+                ) {
                     // The thread that unparked us passed the lock on to us
                     // directly without unlocking it.
                     ParkResult::Unparked(TOKEN_HANDOFF) => return true,
 
                     // We were unparked normally, try acquiring the lock again
                     ParkResult::Unparked(_) => (),
 
                     // The validation function failed, try locking again
@@ -471,117 +709,474 @@ impl RawRwLock {
             spinwait_shared.reset();
             state = self.state.load(Ordering::Relaxed);
             unparked = true;
         }
     }
 
     #[cold]
     #[inline(never)]
-    pub fn try_lock_shared_slow(&self, recursive: bool) -> bool {
+    fn try_lock_shared_slow(&self, recursive: bool) -> bool {
         let mut state = self.state.load(Ordering::Relaxed);
         loop {
-            let mask = if recursive {
-                LOCKED_BIT
-            } else {
-                LOCKED_BIT | PARKED_BIT
-            };
-            if state & mask != 0 {
+            if !recursive && state & PARKED_BIT != 0 {
                 return false;
             }
             if have_elision() && state == 0 {
-                match self.state.elision_acquire(0, SHARED_COUNT_INC) {
+                match self.state.elision_acquire(0, SHARED_GUARD) {
                     Ok(_) => return true,
                     Err(x) => state = x,
                 }
             } else {
-                let new = state.checked_add(SHARED_COUNT_INC)
-                    .expect("RwLock shared count overflow");
-                match self.state
-                    .compare_exchange_weak(state, new, Ordering::Acquire, Ordering::Relaxed) {
-                    Ok(_) => return true,
-                    Err(x) => state = x,
+                match checked_add(state, SHARED_GUARD) {
+                    Some(new_state) => match self.state.compare_exchange_weak(
+                        state,
+                        new_state,
+                        Ordering::Acquire,
+                        Ordering::Relaxed,
+                    ) {
+                        Ok(_) => return true,
+                        Err(x) => state = x,
+                    },
+                    None => return false,
                 }
             }
         }
     }
 
     #[cold]
     #[inline(never)]
     fn unlock_shared_slow(&self, force_fair: bool) {
         let mut state = self.state.load(Ordering::Relaxed);
         loop {
             // Just release the lock if there are no parked threads or if we are
             // not the last shared thread.
-            if state & PARKED_BIT == 0 || state & SHARED_COUNT_MASK != SHARED_COUNT_INC {
-                match self.state
-                    .compare_exchange_weak(state,
-                                           state - SHARED_COUNT_INC,
-                                           Ordering::Release,
-                                           Ordering::Relaxed) {
+            if state & PARKED_BIT == 0
+                || (state & UPGRADING_BIT == 0 && state & GUARD_COUNT_MASK != SHARED_GUARD)
+                || (state & UPGRADING_BIT != 0
+                    && state & GUARD_COUNT_MASK != UPGRADABLE_GUARD + SHARED_GUARD)
+            {
+                match self.state.compare_exchange_weak(
+                    state,
+                    state - SHARED_GUARD,
+                    Ordering::Release,
+                    Ordering::Relaxed,
+                ) {
                     Ok(_) => return,
                     Err(x) => state = x,
                 }
                 continue;
             }
 
-            // There are threads to unpark. We can unpark a single exclusive
-            // thread or many shared threads. Note that there is a potential
-            // race condition here: another thread might grab a shared lock
-            // between now and when we actually release our lock.
-            let first_token = Cell::new(None);
+            break;
+        }
+
+        // There are threads to unpark. If there is a thread waiting to be
+        // upgraded, we find that thread and let it upgrade, otherwise we
+        // unpark threads up to the guard capacity. Note that there is a
+        // potential race condition here: another thread might grab a shared
+        // lock between now and when we actually release our lock.
+        let additional_guards = Cell::new(0);
+        let has_upgraded = Cell::new(if state & UPGRADING_BIT == 0 {
+            None
+        } else {
+            Some(false)
+        });
+        unsafe {
+            let addr = self as *const _ as usize;
+            let filter = |ParkToken(token)| -> FilterOp {
+                match has_upgraded.get() {
+                    None => match checked_add(additional_guards.get(), token) {
+                        Some(x) => {
+                            additional_guards.set(x);
+                            FilterOp::Unpark
+                        }
+                        None => FilterOp::Stop,
+                    },
+                    Some(false) => if token & UPGRADING_BIT != 0 {
+                        additional_guards.set(token & !UPGRADING_BIT);
+                        has_upgraded.set(Some(true));
+                        FilterOp::Unpark
+                    } else {
+                        FilterOp::Skip
+                    },
+                    Some(true) => FilterOp::Stop,
+                }
+            };
+            let callback = |result: UnparkResult| {
+                let mut state = self.state.load(Ordering::Relaxed);
+                loop {
+                    // Release our shared lock
+                    let mut new_state = state - SHARED_GUARD;
+
+                    // Clear the parked bit if there are no more threads in
+                    // the queue.
+                    if !result.have_more_threads {
+                        new_state &= !PARKED_BIT;
+                    }
+
+                    // Clear the upgrading bit if we are upgrading a thread.
+                    if let Some(true) = has_upgraded.get() {
+                        new_state &= !UPGRADING_BIT;
+                    }
+
+                    // Consider using fair unlocking. If we are, then we should set
+                    // the state to the new value and tell the threads that we are
+                    // handing them the lock directly.
+                    let token = if result.unparked_threads != 0 && (force_fair || result.be_fair) {
+                        match checked_add(new_state, additional_guards.get()) {
+                            Some(x) => {
+                                new_state = x;
+                                TOKEN_HANDOFF
+                            }
+                            None => TOKEN_NORMAL,
+                        }
+                    } else {
+                        TOKEN_NORMAL
+                    };
+
+                    match self.state.compare_exchange_weak(
+                        state,
+                        new_state,
+                        Ordering::Release,
+                        Ordering::Relaxed,
+                    ) {
+                        Ok(_) => return token,
+                        Err(x) => state = x,
+                    }
+                }
+            };
+            parking_lot_core::unpark_filter(addr, filter, callback);
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn lock_upgradable_slow(&self, timeout: Option<Instant>) -> bool {
+        let mut spinwait = SpinWait::new();
+        let mut spinwait_shared = SpinWait::new();
+        let mut state = self.state.load(Ordering::Relaxed);
+        let mut unparked = false;
+        loop {
+            // Grab the lock if there are no exclusive or upgradable threads
+            // locked or waiting. However if we were unparked then we are
+            // allowed to grab the lock even if there are pending exclusive threads.
+            if unparked || state & PARKED_BIT == 0 {
+                if let Some(new_state) = checked_add(state, UPGRADABLE_GUARD) {
+                    if self.state
+                        .compare_exchange_weak(
+                            state,
+                            new_state,
+                            Ordering::Acquire,
+                            Ordering::Relaxed,
+                        )
+                        .is_ok()
+                    {
+                        return true;
+                    }
+
+                    // If there is high contention on the reader count then we want
+                    // to leave some time between attempts to acquire the lock to
+                    // let other threads make progress.
+                    spinwait_shared.spin_no_yield();
+                    state = self.state.load(Ordering::Relaxed);
+                    continue;
+                } else {
+                    // We were unparked spuriously, reset unparked flag.
+                    unparked = false;
+                }
+            }
+
+            // If there are no parked threads, try spinning a few times
+            if state & PARKED_BIT == 0 && spinwait.spin() {
+                state = self.state.load(Ordering::Relaxed);
+                continue;
+            }
+
+            // Park our thread until we are woken up by an unlock
             unsafe {
                 let addr = self as *const _ as usize;
-                let filter = |token| -> FilterOp {
-                    if let Some(first_token) = first_token.get() {
-                        if first_token == TOKEN_EXCLUSIVE || token == TOKEN_EXCLUSIVE {
-                            FilterOp::Stop
-                        } else {
-                            FilterOp::Unpark
-                        }
-                    } else {
-                        first_token.set(Some(token));
-                        FilterOp::Unpark
-                    }
-                };
-                let callback = |result: UnparkResult| {
+                let validate = || {
                     let mut state = self.state.load(Ordering::Relaxed);
                     loop {
-                        // Release our shared lock
-                        let mut new = state - SHARED_COUNT_INC;
-
-                        // Clear the parked bit if there are no more threads in
-                        // the queue
-                        if !result.have_more_threads {
-                            new &= !PARKED_BIT;
+                        // Nothing to do if the parked bit is already set
+                        if state & PARKED_BIT != 0 {
+                            return true;
                         }
 
-                        // If we are the last shared thread and we unparked an
-                        // exclusive thread then we can consider using fair
-                        // unlocking. If we are then we should set the exclusive
-                        // locked bit and tell the thread that we are handing it
-                        // the lock directly.
-                        let token = if result.unparked_threads != 0 &&
-                                       new & SHARED_COUNT_MASK == 0 &&
-                                       first_token.get().unchecked_unwrap() == TOKEN_EXCLUSIVE &&
-                                       (force_fair || result.be_fair) {
-                            new |= LOCKED_BIT;
-                            TOKEN_HANDOFF
-                        } else {
-                            TOKEN_NORMAL
-                        };
+                        // If the parked bit is not set then it means we are at
+                        // the front of the queue. If there is space for an
+                        // upgradable lock then we should abort the park and try
+                        // acquiring the lock again.
+                        if state & UPGRADABLE_GUARD != UPGRADABLE_GUARD {
+                            return false;
+                        }
 
-                        match self.state
-                            .compare_exchange_weak(state,
-                                                   new,
-                                                   Ordering::Release,
-                                                   Ordering::Relaxed) {
-                            Ok(_) => return token,
+                        // Set the parked bit
+                        match self.state.compare_exchange_weak(
+                            state,
+                            state | PARKED_BIT,
+                            Ordering::Relaxed,
+                            Ordering::Relaxed,
+                        ) {
+                            Ok(_) => return true,
                             Err(x) => state = x,
                         }
                     }
                 };
-                parking_lot_core::unpark_filter(addr, filter, callback);
+                let before_sleep = || {};
+                let timed_out = |_, was_last_thread| {
+                    // Clear the parked bit if we were the last parked thread
+                    if was_last_thread {
+                        self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
+                    }
+                };
+                match parking_lot_core::park(
+                    addr,
+                    validate,
+                    before_sleep,
+                    timed_out,
+                    TOKEN_UPGRADABLE,
+                    timeout,
+                ) {
+                    // The thread that unparked us passed the lock on to us
+                    // directly without unlocking it.
+                    ParkResult::Unparked(TOKEN_HANDOFF) => return true,
+
+                    // We were unparked normally, try acquiring the lock again
+                    ParkResult::Unparked(_) => (),
+
+                    // The validation function failed, try locking again
+                    ParkResult::Invalid => (),
+
+                    // Timeout expired
+                    ParkResult::TimedOut => return false,
+                }
+            }
+
+            // Loop back and try locking again
+            spinwait.reset();
+            spinwait_shared.reset();
+            state = self.state.load(Ordering::Relaxed);
+            unparked = true;
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn try_lock_upgradable_slow(&self) -> bool {
+        let mut state = self.state.load(Ordering::Relaxed);
+        loop {
+            if state & PARKED_BIT != 0 {
+                return false;
+            }
+
+            match checked_add(state, UPGRADABLE_GUARD) {
+                Some(new_state) => match self.state.compare_exchange_weak(
+                    state,
+                    new_state,
+                    Ordering::Acquire,
+                    Ordering::Relaxed,
+                ) {
+                    Ok(_) => return true,
+                    Err(x) => state = x,
+                },
+                None => return false,
+            }
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn unlock_upgradable_slow(&self, force_fair: bool) {
+        let mut state = self.state.load(Ordering::Relaxed);
+        loop {
+            // Just release the lock if there are no parked threads.
+            if state & PARKED_BIT == 0 {
+                match self.state.compare_exchange_weak(
+                    state,
+                    state - UPGRADABLE_GUARD,
+                    Ordering::Release,
+                    Ordering::Relaxed,
+                ) {
+                    Ok(_) => return,
+                    Err(x) => state = x,
+                }
+                continue;
             }
+
             break;
         }
+
+        // There are threads to unpark. We unpark threads up to the guard capacity.
+        let additional_guards = Cell::new(0);
+        unsafe {
+            let addr = self as *const _ as usize;
+            let filter = |ParkToken(token)| -> FilterOp {
+                match checked_add(additional_guards.get(), token) {
+                    Some(x) => {
+                        additional_guards.set(x);
+                        FilterOp::Unpark
+                    }
+                    None => FilterOp::Stop,
+                }
+            };
+            let callback = |result: UnparkResult| {
+                let mut state = self.state.load(Ordering::Relaxed);
+                loop {
+                    // Release our upgradable lock
+                    let mut new_state = state - UPGRADABLE_GUARD;
+
+                    // Clear the parked bit if there are no more threads in
+                    // the queue
+                    if !result.have_more_threads {
+                        new_state &= !PARKED_BIT;
+                    }
+
+                    // Consider using fair unlocking. If we are, then we should set
+                    // the state to the new value and tell the threads that we are
+                    // handing them the lock directly.
+                    let token = if result.unparked_threads != 0 && (force_fair || result.be_fair) {
+                        match checked_add(new_state, additional_guards.get()) {
+                            Some(x) => {
+                                new_state = x;
+                                TOKEN_HANDOFF
+                            }
+                            None => TOKEN_NORMAL,
+                        }
+                    } else {
+                        TOKEN_NORMAL
+                    };
+
+                    match self.state.compare_exchange_weak(
+                        state,
+                        new_state,
+                        Ordering::Release,
+                        Ordering::Relaxed,
+                    ) {
+                        Ok(_) => return token,
+                        Err(x) => state = x,
+                    }
+                }
+            };
+            parking_lot_core::unpark_filter(addr, filter, callback);
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn upgradable_to_shared_slow(&self, state: usize) {
+        unsafe {
+            let addr = self as *const _ as usize;
+            let mut guard_count = (state & GUARD_COUNT_MASK) - UPGRADABLE_GUARD;
+            let filter = |ParkToken(token)| -> FilterOp {
+                match checked_add(guard_count, token) {
+                    Some(x) => {
+                        guard_count = x;
+                        FilterOp::Unpark
+                    }
+                    None => FilterOp::Stop,
+                }
+            };
+            let callback = |result: UnparkResult| {
+                // Clear the parked bit if there are no more parked threads
+                if !result.have_more_threads {
+                    self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
+                }
+                TOKEN_NORMAL
+            };
+            parking_lot_core::unpark_filter(addr, filter, callback);
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn upgradable_to_exclusive_slow(&self, timeout: Option<Instant>) -> bool {
+        let mut spinwait = SpinWait::new();
+        let mut state = self.state.load(Ordering::Relaxed);
+        loop {
+            // Grab the lock if it isn't locked, even if there are other
+            // threads parked.
+            if let Some(new_state) = checked_add(state, EXCLUSIVE_GUARD - UPGRADABLE_GUARD) {
+                match self.state.compare_exchange_weak(
+                    state,
+                    new_state,
+                    Ordering::Acquire,
+                    Ordering::Relaxed,
+                ) {
+                    Ok(_) => return true,
+                    Err(x) => state = x,
+                }
+                continue;
+            }
+
+            // If there are no parked threads and only one other reader, try
+            // spinning a few times.
+            if state == UPGRADABLE_GUARD + SHARED_GUARD && spinwait.spin() {
+                state = self.state.load(Ordering::Relaxed);
+                continue;
+            }
+
+            // Park our thread until we are woken up by an unlock
+            unsafe {
+                let addr = self as *const _ as usize;
+                let validate = || {
+                    let mut state = self.state.load(Ordering::Relaxed);
+                    loop {
+                        // If the rwlock is free, abort the park and try to grab
+                        // it immediately.
+                        if state & GUARD_COUNT_MASK == UPGRADABLE_GUARD {
+                            return false;
+                        }
+
+                        // Set the upgrading and parked bits
+                        match self.state.compare_exchange_weak(
+                            state,
+                            state | (UPGRADING_BIT | PARKED_BIT),
+                            Ordering::Relaxed,
+                            Ordering::Relaxed,
+                        ) {
+                            Ok(_) => return true,
+                            Err(x) => state = x,
+                        }
+                    }
+                };
+                let before_sleep = || {};
+                let timed_out = |_, was_last_thread| {
+                    // Clear the upgrading bit
+                    let mut flags = UPGRADING_BIT;
+
+                    // Clear the parked bit if we were the last parked thread
+                    if was_last_thread {
+                        flags |= PARKED_BIT;
+                    }
+
+                    self.state.fetch_and(!flags, Ordering::Relaxed);
+                };
+                match parking_lot_core::park(
+                    addr,
+                    validate,
+                    before_sleep,
+                    timed_out,
+                    TOKEN_UPGRADING,
+                    timeout,
+                ) {
+                    // The thread that unparked us passed the lock on to us
+                    // directly without unlocking it.
+                    ParkResult::Unparked(TOKEN_HANDOFF) => return true,
+
+                    // We were unparked normally, try acquiring the lock again
+                    ParkResult::Unparked(_) => (),
+
+                    // The validation function failed, try locking again
+                    ParkResult::Invalid => (),
+
+                    // Timeout expired
+                    ParkResult::TimedOut => return false,
+                }
+            }
+
+            // Loop back and try locking again
+            spinwait.reset();
+            state = self.state.load(Ordering::Relaxed);
+        }
     }
 }
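+
+// Illustrative sketch (not part of the upstream crate) of the accounting
+// technique used by the unpark filters above: each parked thread's park token
+// carries the number of guard units it needs, and waiters are admitted with
+// `checked_add` until the guard counter would overflow. The constants and
+// names below are hypothetical stand-ins for the real guard constants.
+#[cfg(test)]
+mod guard_accounting_sketch {
+    #[test]
+    fn admits_waiters_until_capacity_is_exhausted() {
+        const SHARED: u8 = 1; // one guard unit, like SHARED_GUARD
+        const EXCLUSIVE: u8 = !0; // all guard units, like EXCLUSIVE_GUARD
+        // Two shared waiters fit; the exclusive waiter would overflow the
+        // counter, so the filter stops there (FilterOp::Stop in the real code).
+        let waiters = [SHARED, SHARED, EXCLUSIVE, SHARED];
+        let mut granted: u8 = 0;
+        let mut unparked = 0;
+        for &token in &waiters {
+            match granted.checked_add(token) {
+                Some(total) => {
+                    granted = total;
+                    unparked += 1;
+                }
+                None => break,
+            }
+        }
+        assert_eq!(unparked, 2);
+    }
+}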
--- a/third_party/rust/parking_lot/src/remutex.rs
+++ b/third_party/rust/parking_lot/src/remutex.rs
@@ -28,33 +28,32 @@ use owning_ref::StableAddress;
 ///
 /// See [`Mutex`](struct.Mutex.html) for more details about the underlying mutex
 /// primitive.
 pub struct ReentrantMutex<T: ?Sized> {
     raw: RawReentrantMutex,
     data: UnsafeCell<T>,
 }
 
-unsafe impl<T: Send> Send for ReentrantMutex<T> {}
-unsafe impl<T: Send> Sync for ReentrantMutex<T> {}
+unsafe impl<T: ?Sized + Send> Send for ReentrantMutex<T> {}
+unsafe impl<T: ?Sized + Send> Sync for ReentrantMutex<T> {}
 
 /// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure
 /// is dropped (falls out of scope), the lock will be unlocked.
 ///
 /// The data protected by the mutex can be accessed through this guard via its
 /// `Deref` implementation.
 #[must_use]
 pub struct ReentrantMutexGuard<'a, T: ?Sized + 'a> {
-    mutex: &'a ReentrantMutex<T>,
-
-    // The raw pointer here ensures that ReentrantMutexGuard is !Send
-    marker: PhantomData<(&'a T, *mut ())>,
+    raw: &'a RawReentrantMutex,
+    data: *const T,
+    marker: PhantomData<&'a T>,
 }
 
-unsafe impl<'a, T: ?Sized + 'a + Sync> Sync for ReentrantMutexGuard<'a, T> {}
+unsafe impl<'a, T: ?Sized + Sync + 'a> Sync for ReentrantMutexGuard<'a, T> {}
 
 impl<T> ReentrantMutex<T> {
     /// Creates a new reentrant mutex in an unlocked state ready for use.
     #[cfg(feature = "nightly")]
     #[inline]
     pub const fn new(val: T) -> ReentrantMutex<T> {
         ReentrantMutex {
             data: UnsafeCell::new(val),
@@ -75,83 +74,80 @@ impl<T> ReentrantMutex<T> {
     /// Consumes this reentrant mutex, returning the underlying data.
     #[inline]
     pub fn into_inner(self) -> T {
         unsafe { self.data.into_inner() }
     }
 }
 
 impl<T: ?Sized> ReentrantMutex<T> {
+    #[inline]
+    fn guard(&self) -> ReentrantMutexGuard<T> {
+        ReentrantMutexGuard {
+            raw: &self.raw,
+            data: self.data.get(),
+            marker: PhantomData,
+        }
+    }
+
     /// Acquires a reentrant mutex, blocking the current thread until it is able
     /// to do so.
     ///
     /// If the mutex is held by another thread then this function will block the
     /// local thread until it is available to acquire the mutex. If the mutex is
     /// already held by the current thread then this function will increment the
     /// lock reference count and return immediately. Upon returning,
     /// the thread is the only thread with the mutex held. An RAII guard is
     /// returned to allow scoped unlock of the lock. When the guard goes out of
     /// scope, the mutex will be unlocked.
     #[inline]
     pub fn lock(&self) -> ReentrantMutexGuard<T> {
         self.raw.lock();
-        ReentrantMutexGuard {
-            mutex: self,
-            marker: PhantomData,
-        }
+        self.guard()
     }
 
     /// Attempts to acquire this lock.
     ///
     /// If the lock could not be acquired at this time, then `None` is returned.
     /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
     /// guard is dropped.
     ///
     /// This function does not block.
     #[inline]
     pub fn try_lock(&self) -> Option<ReentrantMutexGuard<T>> {
         if self.raw.try_lock() {
-            Some(ReentrantMutexGuard {
-                mutex: self,
-                marker: PhantomData,
-            })
+            Some(self.guard())
         } else {
             None
         }
     }
 
     /// Attempts to acquire this lock until a timeout is reached.
     ///
     /// If the lock could not be acquired before the timeout expired, then
     /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
     /// be unlocked when the guard is dropped.
     #[inline]
     pub fn try_lock_for(&self, timeout: Duration) -> Option<ReentrantMutexGuard<T>> {
         if self.raw.try_lock_for(timeout) {
-            Some(ReentrantMutexGuard {
-                mutex: self,
-                marker: PhantomData,
-            })
+            Some(self.guard())
         } else {
             None
         }
     }
 
     /// Attempts to acquire this lock until a timeout is reached.
     ///
     /// If the lock could not be acquired before the timeout expired, then
     /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
     /// be unlocked when the guard is dropped.
     #[inline]
     pub fn try_lock_until(&self, timeout: Instant) -> Option<ReentrantMutexGuard<T>> {
         if self.raw.try_lock_until(timeout) {
-            Some(ReentrantMutexGuard {
-                mutex: self,
-                marker: PhantomData,
-            })
+            Some(self.guard())
         } else {
             None
         }
     }
 
     /// Returns a mutable reference to the underlying data.
     ///
     /// Since this call borrows the `ReentrantMutex` mutably, no actual locking needs to
@@ -234,33 +230,56 @@ impl<'a, T: ?Sized + 'a> ReentrantMutexG
     /// context switch on every mutex unlock. This can result in one thread
     /// acquiring a mutex many more times than other threads.
     ///
     /// However in some cases it can be beneficial to ensure fairness by forcing
     /// the lock to pass on to a waiting thread if there is one. This is done by
     /// using this method instead of dropping the `ReentrantMutexGuard` normally.
     #[inline]
     pub fn unlock_fair(self) {
-        self.mutex.raw.unlock(true);
+        self.raw.unlock(true);
         mem::forget(self);
     }
+
+    /// Make a new `ReentrantMutexGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `ReentrantMutexGuard` passed
+    /// in already locked the mutex.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `ReentrantMutexGuard::map(...)`. A method would interfere with
+    /// methods of the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(orig: Self, f: F) -> ReentrantMutexGuard<'a, U>
+    where
+        F: FnOnce(&T) -> &U,
+    {
+        let raw = orig.raw;
+        let data = f(unsafe { &*orig.data });
+        mem::forget(orig);
+        ReentrantMutexGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
 }
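+
+// A minimal usage sketch (not part of the upstream crate) for the new
+// `ReentrantMutexGuard::map` projection above; the tuple payload is arbitrary.
+#[cfg(test)]
+mod map_usage_sketch {
+    use super::{ReentrantMutex, ReentrantMutexGuard};
+
+    #[test]
+    fn map_projects_the_guard_onto_a_field() {
+        let mutex = ReentrantMutex::new((1u32, 2u32));
+        let guard = mutex.lock();
+        // The projected guard keeps the mutex locked while borrowing `.0`.
+        let first = ReentrantMutexGuard::map(guard, |pair| &pair.0);
+        assert_eq!(*first, 1);
+    }
+}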
 
 impl<'a, T: ?Sized + 'a> Deref for ReentrantMutexGuard<'a, T> {
     type Target = T;
     #[inline]
     fn deref(&self) -> &T {
-        unsafe { &*self.mutex.data.get() }
+        unsafe { &*self.data }
     }
 }
 
 impl<'a, T: ?Sized + 'a> Drop for ReentrantMutexGuard<'a, T> {
     #[inline]
     fn drop(&mut self) {
-        self.mutex.raw.unlock(false);
+        self.raw.unlock(false);
     }
 }
 
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, T: ?Sized> StableAddress for ReentrantMutexGuard<'a, T> {}
 
 #[cfg(test)]
 mod tests {
@@ -305,16 +324,15 @@ mod tests {
 
     #[test]
     fn trylock_works() {
         let m = Arc::new(ReentrantMutex::new(()));
         let m2 = m.clone();
         let _lock = m.try_lock();
         let _lock2 = m.try_lock();
         thread::spawn(move || {
-                let lock = m2.try_lock();
-                assert!(lock.is_none());
-            })
-            .join()
+            let lock = m2.try_lock();
+            assert!(lock.is_none());
+        }).join()
             .unwrap();
         let _lock3 = m.try_lock();
     }
 }
--- a/third_party/rust/parking_lot/src/rwlock.rs
+++ b/third_party/rust/parking_lot/src/rwlock.rs
@@ -58,17 +58,16 @@ use owning_ref::StableAddress;
 ///
 /// # Differences from the standard library `RwLock`
 ///
 /// - Supports atomically downgrading a write lock into a read lock.
 /// - Task-fair locking policy instead of an unspecified platform default.
 /// - No poisoning, the lock is released normally on panic.
 /// - Only requires 1 word of space, whereas the standard library boxes the
 ///   `RwLock` due to platform limitations.
-/// - A lock guard can be sent to another thread and unlocked there.
 /// - Can be statically constructed (requires the `const_fn` nightly feature).
 /// - Does not require any drop glue when dropped.
 /// - Inline fast path for the uncontended case.
 /// - Efficient handling of micro-contention using adaptive spinning.
 /// - Allows raw locking & unlocking without a guard.
 /// - Supports eventual fairness so that the rwlock is fair on average.
 /// - Optionally allows making the rwlock fair by calling
 ///   `RwLockReadGuard::unlock_fair` and `RwLockWriteGuard::unlock_fair`.
@@ -102,28 +101,45 @@ pub struct RwLock<T: ?Sized> {
 
 unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
 unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
 
 /// RAII structure used to release the shared read access of a lock when
 /// dropped.
 #[must_use]
 pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
-    rwlock: &'a RwLock<T>,
+    raw: &'a RawRwLock,
+    data: *const T,
     marker: PhantomData<&'a T>,
 }
 
+unsafe impl<'a, T: ?Sized + Sync + 'a> Sync for RwLockReadGuard<'a, T> {}
+
 /// RAII structure used to release the exclusive write access of a lock when
 /// dropped.
 #[must_use]
 pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
-    rwlock: &'a RwLock<T>,
+    raw: &'a RawRwLock,
+    data: *mut T,
     marker: PhantomData<&'a mut T>,
 }
 
+unsafe impl<'a, T: ?Sized + Sync + 'a> Sync for RwLockWriteGuard<'a, T> {}
+
+/// RAII structure used to release the upgradable read access of a lock when
+/// dropped.
+#[must_use]
+pub struct RwLockUpgradableReadGuard<'a, T: ?Sized + 'a> {
+    raw: &'a RawRwLock,
+    data: *mut T,
+    marker: PhantomData<&'a T>,
+}
+
+unsafe impl<'a, T: ?Sized + Sync + 'a> Sync for RwLockUpgradableReadGuard<'a, T> {}
+
 impl<T> RwLock<T> {
     /// Creates a new instance of an `RwLock<T>` which is unlocked.
     ///
     /// # Examples
     ///
     /// ```
     /// use parking_lot::RwLock;
     ///
@@ -159,87 +175,102 @@ impl<T> RwLock<T> {
     /// Consumes this `RwLock`, returning the underlying data.
     #[inline]
     pub fn into_inner(self) -> T {
         unsafe { self.data.into_inner() }
     }
 }
 
 impl<T: ?Sized> RwLock<T> {
+    #[inline]
+    fn read_guard(&self) -> RwLockReadGuard<T> {
+        RwLockReadGuard {
+            raw: &self.raw,
+            data: self.data.get(),
+            marker: PhantomData,
+        }
+    }
+
+    #[inline]
+    fn write_guard(&self) -> RwLockWriteGuard<T> {
+        RwLockWriteGuard {
+            raw: &self.raw,
+            data: self.data.get(),
+            marker: PhantomData,
+        }
+    }
+
+    #[inline]
+    fn upgradable_guard(&self) -> RwLockUpgradableReadGuard<T> {
+        RwLockUpgradableReadGuard {
+            raw: &self.raw,
+            data: self.data.get(),
+            marker: PhantomData,
+        }
+    }
+
     /// Locks this rwlock with shared read access, blocking the current thread
     /// until it can be acquired.
     ///
     /// The calling thread will be blocked until there are no more writers which
     /// hold the lock. There may be other readers currently inside the lock when
     /// this method returns.
     ///
     /// Note that attempts to recursively acquire a read lock on a `RwLock` when
     /// the current thread already holds one may result in a deadlock.
     ///
     /// Returns an RAII guard which will release this thread's shared access
     /// once it is dropped.
     #[inline]
     pub fn read(&self) -> RwLockReadGuard<T> {
         self.raw.lock_shared(false);
-        RwLockReadGuard {
-            rwlock: self,
-            marker: PhantomData,
-        }
+        self.read_guard()
     }
 
     /// Attempts to acquire this rwlock with shared read access.
     ///
     /// If the access could not be granted at this time, then `None` is returned.
     /// Otherwise, an RAII guard is returned which will release the shared access
     /// when it is dropped.
     ///
     /// This function does not block.
     #[inline]
     pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
         if self.raw.try_lock_shared(false) {
-            Some(RwLockReadGuard {
-                rwlock: self,
-                marker: PhantomData,
-            })
+            Some(self.read_guard())
         } else {
             None
         }
     }
 
     /// Attempts to acquire this rwlock with shared read access until a timeout
     /// is reached.
     ///
     /// If the access could not be granted before the timeout expires, then
     /// `None` is returned. Otherwise, an RAII guard is returned which will
     /// release the shared access when it is dropped.
     #[inline]
     pub fn try_read_for(&self, timeout: Duration) -> Option<RwLockReadGuard<T>> {
         if self.raw.try_lock_shared_for(false, timeout) {
-            Some(RwLockReadGuard {
-                rwlock: self,
-                marker: PhantomData,
-            })
+            Some(self.read_guard())
         } else {
             None
         }
     }
 
     /// Attempts to acquire this rwlock with shared read access until a timeout
     /// is reached.
     ///
     /// If the access could not be granted before the timeout expires, then
     /// `None` is returned. Otherwise, an RAII guard is returned which will
     /// release the shared access when it is dropped.
     #[inline]
     pub fn try_read_until(&self, timeout: Instant) -> Option<RwLockReadGuard<T>> {
         if self.raw.try_lock_shared_until(false, timeout) {
-            Some(RwLockReadGuard {
-                rwlock: self,
-                marker: PhantomData,
-            })
+            Some(self.read_guard())
         } else {
             None
         }
     }
 
     /// Locks this rwlock with shared read access, blocking the current thread
     /// until it can be acquired.
     ///
@@ -253,39 +284,33 @@ impl<T: ?Sized> RwLock<T> {
     /// writers to starve since readers no longer block if a writer is waiting
     /// for the lock.
     ///
     /// Returns an RAII guard which will release this thread's shared access
     /// once it is dropped.
     #[inline]
     pub fn read_recursive(&self) -> RwLockReadGuard<T> {
         self.raw.lock_shared(true);
-        RwLockReadGuard {
-            rwlock: self,
-            marker: PhantomData,
-        }
+        self.read_guard()
     }
 
     /// Attempts to acquire this rwlock with shared read access.
     ///
     /// If the access could not be granted at this time, then `None` is returned.
     /// Otherwise, an RAII guard is returned which will release the shared access
     /// when it is dropped.
     ///
     /// This method is guaranteed to succeed if another read lock is held at the
     /// time of the call. See the documentation for `read_recursive` for details.
     ///
     /// This function does not block.
     #[inline]
     pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<T>> {
         if self.raw.try_lock_shared(true) {
-            Some(RwLockReadGuard {
-                rwlock: self,
-                marker: PhantomData,
-            })
+            Some(self.read_guard())
         } else {
             None
         }
     }
 
     /// Attempts to acquire this rwlock with shared read access until a timeout
     /// is reached.
     ///
@@ -294,125 +319,173 @@ impl<T: ?Sized> RwLock<T> {
     /// release the shared access when it is dropped.
     ///
     /// This method is guaranteed to succeed without blocking if another read
     /// lock is held at the time of the call. See the documentation for
     /// `read_recursive` for details.
     #[inline]
     pub fn try_read_recursive_for(&self, timeout: Duration) -> Option<RwLockReadGuard<T>> {
         if self.raw.try_lock_shared_for(true, timeout) {
-            Some(RwLockReadGuard {
-                rwlock: self,
-                marker: PhantomData,
-            })
+            Some(self.read_guard())
         } else {
             None
         }
     }
 
     /// Attempts to acquire this rwlock with shared read access until a timeout
     /// is reached.
     ///
     /// If the access could not be granted before the timeout expires, then
     /// `None` is returned. Otherwise, an RAII guard is returned which will
     /// release the shared access when it is dropped.
     #[inline]
     pub fn try_read_recursive_until(&self, timeout: Instant) -> Option<RwLockReadGuard<T>> {
         if self.raw.try_lock_shared_until(true, timeout) {
-            Some(RwLockReadGuard {
-                rwlock: self,
-                marker: PhantomData,
-            })
+            Some(self.read_guard())
         } else {
             None
         }
     }
 
     /// Locks this rwlock with exclusive write access, blocking the current
     /// thread until it can be acquired.
     ///
     /// This function will not return while other writers or other readers
     /// currently have access to the lock.
     ///
     /// Returns an RAII guard which will drop the write access of this rwlock
     /// when dropped.
     #[inline]
     pub fn write(&self) -> RwLockWriteGuard<T> {
         self.raw.lock_exclusive();
-        RwLockWriteGuard {
-            rwlock: self,
-            marker: PhantomData,
-        }
+        self.write_guard()
     }
 
     /// Attempts to lock this rwlock with exclusive write access.
     ///
     /// If the lock could not be acquired at this time, then `None` is returned.
     /// Otherwise, an RAII guard is returned which will release the lock when
     /// it is dropped.
     ///
     /// This function does not block.
     #[inline]
     pub fn try_write(&self) -> Option<RwLockWriteGuard<T>> {
         if self.raw.try_lock_exclusive() {
-            Some(RwLockWriteGuard {
-                rwlock: self,
-                marker: PhantomData,
-            })
+            Some(self.write_guard())
         } else {
             None
         }
     }
 
     /// Attempts to acquire this rwlock with exclusive write access until a
     /// timeout is reached.
     ///
     /// If the access could not be granted before the timeout expires, then
     /// `None` is returned. Otherwise, an RAII guard is returned which will
     /// release the exclusive access when it is dropped.
     #[inline]
     pub fn try_write_for(&self, timeout: Duration) -> Option<RwLockWriteGuard<T>> {
         if self.raw.try_lock_exclusive_for(timeout) {
-            Some(RwLockWriteGuard {
-                rwlock: self,
-                marker: PhantomData,
-            })
+            Some(self.write_guard())
         } else {
             None
         }
     }
 
     /// Attempts to acquire this rwlock with exclusive write access until a
     /// timeout is reached.
     ///
     /// If the access could not be granted before the timeout expires, then
     /// `None` is returned. Otherwise, an RAII guard is returned which will
     /// release the exclusive access when it is dropped.
     #[inline]
     pub fn try_write_until(&self, timeout: Instant) -> Option<RwLockWriteGuard<T>> {
         if self.raw.try_lock_exclusive_until(timeout) {
-            Some(RwLockWriteGuard {
-                rwlock: self,
-                marker: PhantomData,
-            })
+            Some(self.write_guard())
+        } else {
+            None
+        }
+    }
+
+    /// Locks this rwlock with upgradable read access, blocking the current thread
+    /// until it can be acquired.
+    ///
+    /// The calling thread will be blocked until there are no more writers or other
+    /// upgradable readers which hold the lock. There may be other readers currently
+    /// inside the lock when this method returns.
+    ///
+    /// Returns an RAII guard which will release this thread's shared access
+    /// once it is dropped.
+    #[inline]
+    pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<T> {
+        self.raw.lock_upgradable();
+        self.upgradable_guard()
+    }
+
+    /// Attempts to acquire this rwlock with upgradable read access.
+    ///
+    /// If the access could not be granted at this time, then `None` is returned.
+    /// Otherwise, an RAII guard is returned which will release the shared access
+    /// when it is dropped.
+    ///
+    /// This function does not block.
+    #[inline]
+    pub fn try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<T>> {
+        if self.raw.try_lock_upgradable() {
+            Some(self.upgradable_guard())
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this rwlock with upgradable read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
+    #[inline]
+    pub fn try_upgradable_read_for(
+        &self,
+        timeout: Duration,
+    ) -> Option<RwLockUpgradableReadGuard<T>> {
+        if self.raw.try_lock_upgradable_for(timeout) {
+            Some(self.upgradable_guard())
+        } else {
+            None
+        }
+    }
+
+    /// Attempts to acquire this rwlock with upgradable read access until a timeout
+    /// is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// `None` is returned. Otherwise, an RAII guard is returned which will
+    /// release the shared access when it is dropped.
+    #[inline]
+    pub fn try_upgradable_read_until(
+        &self,
+        timeout: Instant,
+    ) -> Option<RwLockUpgradableReadGuard<T>> {
+        if self.raw.try_lock_upgradable_until(timeout) {
+            Some(self.upgradable_guard())
         } else {
             None
         }
     }
 
     /// Returns a mutable reference to the underlying data.
     ///
     /// Since this call borrows the `RwLock` mutably, no actual locking needs to
     /// take place---the mutable borrow statically guarantees no locks exist.
     #[inline]
     pub fn get_mut(&mut self) -> &mut T {
         unsafe { &mut *self.data.get() }
     }
 
-
     /// Releases shared read access of the rwlock.
     ///
     /// # Safety
     ///
     /// This function must only be called if the rwlock was locked using
     /// `raw_read` or `raw_try_read`, or if an `RwLockReadGuard` from this
     /// rwlock was leaked (e.g. with `mem::forget`). The rwlock must be locked
     /// with shared read access.
@@ -429,61 +502,107 @@ impl<T: ?Sized> RwLock<T> {
     /// `raw_write` or `raw_try_write`, or if an `RwLockWriteGuard` from this
     /// rwlock was leaked (e.g. with `mem::forget`). The rwlock must be locked
     /// with exclusive write access.
     #[inline]
     pub unsafe fn raw_unlock_write(&self) {
         self.raw.unlock_exclusive(false);
     }
 
+    /// Releases upgradable read access of the rwlock.
+    ///
+    /// # Safety
+    ///
+    /// This function must only be called if the rwlock was locked using
+    /// `raw_upgradable_read` or `raw_try_upgradable_read`, or if an
+    /// `RwLockUpgradableReadGuard` from this rwlock was leaked (e.g. with
+    /// `mem::forget`). The rwlock must be locked with upgradable read access.
+    #[inline]
+    pub unsafe fn raw_unlock_upgradable_read(&self) {
+        self.raw.unlock_upgradable(false);
+    }
+
     /// Releases shared read access of the rwlock using a fair unlock protocol.
     ///
     /// See `RwLockReadGuard::unlock_fair`.
     ///
     /// # Safety
     ///
     /// This function must only be called if the rwlock was locked using
     /// `raw_read` or `raw_try_read`, or if an `RwLockReadGuard` from this
     /// rwlock was leaked (e.g. with `mem::forget`). The rwlock must be locked
     /// with shared read access.
     #[inline]
     pub unsafe fn raw_unlock_read_fair(&self) {
         self.raw.unlock_shared(true);
     }
 
     /// Releases exclusive write access of the rwlock using a fair unlock
     /// protocol.
     ///
     /// See `RwLockWriteGuard::unlock_fair`.
     ///
     /// # Safety
     ///
     /// This function must only be called if the rwlock was locked using
-    /// `raw_write` or `raw_try_write`, or if an `RwLockWriteGuard` from this
-    /// rwlock was leaked (e.g. with `mem::forget`). The rwlock must be locked
-    /// with exclusive write access.
+    /// `raw_write` or `raw_try_write`, a raw upgradable read lock was upgraded
+    /// using `raw_upgrade` or `raw_try_upgrade`, or if an `RwLockWriteGuard`
+    /// from this rwlock was leaked (e.g. with `mem::forget`). The rwlock must
+    /// be locked with exclusive write access.
     #[inline]
     pub unsafe fn raw_unlock_write_fair(&self) {
         self.raw.unlock_exclusive(true);
     }
 
-    /// Atomically downgrades a write lock into a read lock without allowing any
-    /// writers to take exclusive access of the lock in the meantime.
+    /// Releases upgradable read access of the rwlock using a fair unlock
+    /// protocol.
+    ///
+    /// # Safety
+    ///
+    /// This function must only be called if the rwlock was locked using
+    /// `raw_upgradable_read` or `raw_try_upgradable_read`, or if an
+    /// `RwLockUpgradableReadGuard` from this rwlock was leaked (e.g. with
+    /// `mem::forget`). The rwlock must be locked with upgradable read access.
+    #[inline]
+    pub unsafe fn raw_unlock_upgradable_read_fair(&self) {
+        self.raw.unlock_upgradable(true);
+    }
+
+    /// Atomically downgrades a write lock into a shared read lock without
+    /// allowing any writers to take exclusive access of the lock in the meantime.
     ///
     /// See `RwLockWriteGuard::downgrade`.
     ///
     /// # Safety
     ///
     /// This function must only be called if the rwlock was locked using
     /// `raw_write` or `raw_try_write`, or if an `RwLockWriteGuard` from this
     /// rwlock was leaked (e.g. with `mem::forget`). The rwlock must be locked
     /// with exclusive write access.
     #[inline]
     pub unsafe fn raw_downgrade(&self) {
-        self.raw.downgrade();
+        self.raw.exclusive_to_shared();
+    }
+
+    /// Atomically downgrades an upgradable read lock into a shared read lock
+    /// without allowing any writers to take exclusive access of the lock in
+    /// the meantime.
+    ///
+    /// See `RwLockUpgradableReadGuard::downgrade`.
+    ///
+    /// # Safety
+    ///
+    /// This function must only be called if the rwlock was locked using
+    /// `raw_upgradable_read` or `raw_try_upgradable_read`, or if an
+    /// `RwLockUpgradableReadGuard` from this rwlock was leaked (e.g. with
+    /// `mem::forget`). The rwlock must be locked with upgradable read access.
+    #[inline]
+    pub unsafe fn raw_downgrade_upgradable_read(&self) {
+        self.raw.upgradable_to_shared();
     }
 }
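+
+// An illustrative sketch (not part of the upstream crate) of the upgradable
+// read API above: an upgradable read coexists with plain readers, while a
+// second upgradable read or a write is refused until it is released.
+#[cfg(test)]
+mod upgradable_read_usage_sketch {
+    use super::RwLock;
+
+    #[test]
+    fn upgradable_read_coexists_with_readers() {
+        let lock = RwLock::new(0u32);
+        let upgradable = lock.upgradable_read();
+        // Plain readers are still admitted alongside the upgradable reader...
+        let reader = lock.read();
+        assert_eq!(*upgradable, 0);
+        assert_eq!(*reader, 0);
+        // ...but a second upgradable read or a write is refused for now.
+        assert!(lock.try_upgradable_read().is_none());
+        assert!(lock.try_write().is_none());
+    }
+}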
 
 impl RwLock<()> {
     /// Locks this rwlock with shared read access, blocking the current thread
     /// until it can be acquired.
     ///
     /// This is similar to `read`, except that a `RwLockReadGuard` is not
@@ -540,16 +659,69 @@ impl RwLock<()> {
     ///
     /// This is similar to `try_write`, except that a `RwLockWriteGuard` is not
     /// returned. Instead you will need to call `raw_unlock_write` to release the
     /// rwlock.
     #[inline]
     pub fn raw_try_write(&self) -> bool {
         self.raw.try_lock_exclusive()
     }
+
+    /// Locks this rwlock with upgradable read access, blocking the current thread
+    /// until it can be acquired.
+    ///
+    /// This is similar to `upgradable_read`, except that a
+    /// `RwLockUpgradableReadGuard` is not returned. Instead you will need to call
+    /// `raw_unlock_upgradable_read` to release the rwlock.
+    #[inline]
+    pub fn raw_upgradable_read(&self) {
+        self.raw.lock_upgradable();
+    }
+
+    /// Attempts to acquire this rwlock with upgradable read access.
+    ///
+    /// This is similar to `try_upgradable_read`, except that a
+    /// `RwLockUpgradableReadGuard` is not returned. Instead you will need to call
+    /// `raw_unlock_upgradable_read` to release the rwlock.
+    #[inline]
+    pub fn raw_try_upgradable_read(&self) -> bool {
+        self.raw.try_lock_upgradable()
+    }
+
+    /// Upgrades this rwlock from upgradable read access to exclusive write access,
+    /// blocking the current thread until it can be acquired.
+    ///
+    /// See `RwLockUpgradableReadGuard::upgrade`.
+    ///
+    /// # Safety
+    ///
+    /// This function must only be called if the rwlock was locked using
+    /// `raw_upgradable_read` or `raw_try_upgradable_read`, or if an
+    /// `RwLockUpgradableReadGuard` from this rwlock was leaked (e.g. with
+    /// `mem::forget`). The rwlock must be locked with upgradable read access.
+    #[inline]
+    pub unsafe fn raw_upgrade(&self) {
+        self.raw.upgradable_to_exclusive();
+    }
+
+    /// Attempts to upgrade this rwlock from upgradable read access to exclusive
+    /// write access.
+    ///
+    /// See `RwLockUpgradableReadGuard::try_upgrade`.
+    ///
+    /// # Safety
+    ///
+    /// This function must only be called if the rwlock was locked using
+    /// `raw_upgradable_read` or `raw_try_upgradable_read`, or if an
+    /// `RwLockUpgradableReadGuard` from this rwlock was leaked (e.g. with
+    /// `mem::forget`). The rwlock must be locked with upgradable read access.
+    #[inline]
+    pub unsafe fn raw_try_upgrade(&self) -> bool {
+        self.raw.try_upgradable_to_exclusive()
+    }
 }
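+
+// An illustrative sketch (not part of the upstream crate) of the raw
+// upgradable API on `RwLock<()>` above; each raw lock is paired with the
+// matching raw unlock.
+#[cfg(test)]
+mod raw_upgradable_usage_sketch {
+    use super::RwLock;
+
+    #[test]
+    fn raw_upgrade_then_raw_unlock_write() {
+        let lock = RwLock::new(());
+        lock.raw_upgradable_read();
+        unsafe {
+            // After upgrading, the lock is held with exclusive write access,
+            // so it is released with `raw_unlock_write`.
+            lock.raw_upgrade();
+            lock.raw_unlock_write();
+        }
+    }
+}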
 
 impl<T: ?Sized + Default> Default for RwLock<T> {
     #[inline]
     fn default() -> RwLock<T> {
         RwLock::new(Default::default())
     }
 }
@@ -573,52 +745,102 @@ impl<'a, T: ?Sized + 'a> RwLockReadGuard
     /// forcing a context switch on every rwlock unlock. This can result in one
     /// thread acquiring a `RwLock` many more times than other threads.
     ///
     /// However in some cases it can be beneficial to ensure fairness by forcing
     /// the lock to pass on to a waiting thread if there is one. This is done by
     /// using this method instead of dropping the `RwLockReadGuard` normally.
     #[inline]
     pub fn unlock_fair(self) {
-        self.rwlock.raw.unlock_shared(true);
+        self.raw.unlock_shared(true);
         mem::forget(self);
     }
+
+    /// Make a new `RwLockReadGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `RwLockReadGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `RwLockReadGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(orig: Self, f: F) -> RwLockReadGuard<'a, U>
+    where
+        F: FnOnce(&T) -> &U,
+    {
+        let raw = orig.raw;
+        let data = f(unsafe { &*orig.data });
+        mem::forget(orig);
+        RwLockReadGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
 }
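+
+// A minimal usage sketch (not part of the upstream crate) for the new
+// `RwLockReadGuard::map` projection above; the vector payload is arbitrary.
+#[cfg(test)]
+mod read_guard_map_sketch {
+    use super::{RwLock, RwLockReadGuard};
+
+    #[test]
+    fn map_narrows_the_read_guard() {
+        let lock = RwLock::new(vec![10u32, 20, 30]);
+        let guard = lock.read();
+        // Project the guard onto a single element; the lock stays read-locked.
+        let second = RwLockReadGuard::map(guard, |v| &v[1]);
+        assert_eq!(*second, 20);
+    }
+}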
 
 impl<'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, T> {
     type Target = T;
     #[inline]
     fn deref(&self) -> &T {
-        unsafe { &*self.rwlock.data.get() }
+        unsafe { &*self.data }
     }
 }
 
 impl<'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, T> {
     #[inline]
     fn drop(&mut self) {
-        self.rwlock.raw.unlock_shared(false);
+        self.raw.unlock_shared(false);
     }
 }
 
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, T: ?Sized> StableAddress for RwLockReadGuard<'a, T> {}
 
 impl<'a, T: ?Sized + 'a> RwLockWriteGuard<'a, T> {
     /// Atomically downgrades a write lock into a read lock without allowing any
     /// writers to take exclusive access of the lock in the meantime.
     ///
     /// Note that if there are any writers currently waiting to take the lock
     /// then other readers may not be able to acquire the lock even if it was
     /// downgraded.
     pub fn downgrade(self) -> RwLockReadGuard<'a, T> {
-        self.rwlock.raw.downgrade();
-        let rwlock = self.rwlock;
+        self.raw.exclusive_to_shared();
+        let raw = self.raw;
+        // Reborrow the value to avoid moving `self.data`,
+        // which isn't allowed for types with destructors
+        let data = unsafe { &*self.data };
         mem::forget(self);
         RwLockReadGuard {
-            rwlock: rwlock,
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Make a new `RwLockWriteGuard` for a component of the locked data.
+    ///
+    /// This operation cannot fail as the `RwLockWriteGuard` passed
+    /// in already locked the data.
+    ///
+    /// This is an associated function that needs to be
+    /// used as `RwLockWriteGuard::map(...)`. A method would interfere with methods of
+    /// the same name on the contents of the locked data.
+    #[inline]
+    pub fn map<U: ?Sized, F>(orig: Self, f: F) -> RwLockWriteGuard<'a, U>
+    where
+        F: FnOnce(&mut T) -> &mut U,
+    {
+        let raw = orig.raw;
+        let data = f(unsafe { &mut *orig.data });
+        mem::forget(orig);
+        RwLockWriteGuard {
+            raw,
+            data,
             marker: PhantomData,
         }
     }
 
     /// Unlocks the `RwLock` using a fair unlock protocol.
     ///
     /// By default, `RwLock` is unfair and allows the current thread to re-lock
     /// the rwlock before another has the chance to acquire the lock, even if
@@ -627,46 +849,186 @@ impl<'a, T: ?Sized + 'a> RwLockWriteGuar
     /// forcing a context switch on every rwlock unlock. This can result in one
     /// thread acquiring a `RwLock` many more times than other threads.
     ///
     /// However in some cases it can be beneficial to ensure fairness by forcing
     /// the lock to pass on to a waiting thread if there is one. This is done by
     /// using this method instead of dropping the `RwLockWriteGuard` normally.
     #[inline]
     pub fn unlock_fair(self) {
-        self.rwlock.raw.unlock_exclusive(true);
+        self.raw.unlock_exclusive(true);
         mem::forget(self);
     }
 }
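+
+// An illustrative sketch (not part of the upstream crate) combining the new
+// write-guard `map` projection with `downgrade`; the data layout is arbitrary.
+#[cfg(test)]
+mod write_guard_usage_sketch {
+    use super::{RwLock, RwLockWriteGuard};
+
+    #[test]
+    fn write_map_and_downgrade() {
+        let lock = RwLock::new((0u32, 0u32));
+        let mut writer = lock.write();
+        writer.0 = 7;
+        // Atomically turn the write guard into a read guard; other readers
+        // can be admitted again while we keep reading.
+        let reader = writer.downgrade();
+        assert_eq!(reader.0, 7);
+        assert!(lock.try_read().is_some());
+        drop(reader);
+
+        // `map` narrows a write guard to a single field of the locked data.
+        let mut first = RwLockWriteGuard::map(lock.write(), |pair| &mut pair.0);
+        *first += 1;
+        drop(first);
+        assert_eq!(*lock.read(), (8, 0));
+    }
+}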
 
 impl<'a, T: ?Sized + 'a> Deref for RwLockWriteGuard<'a, T> {
     type Target = T;
     #[inline]
     fn deref(&self) -> &T {
-        unsafe { &*self.rwlock.data.get() }
+        unsafe { &*self.data }
     }
 }
 
 impl<'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, T> {
     #[inline]
     fn deref_mut(&mut self) -> &mut T {
-        unsafe { &mut *self.rwlock.data.get() }
+        unsafe { &mut *self.data }
     }
 }
 
 impl<'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, T> {
     #[inline]
     fn drop(&mut self) {
-        self.rwlock.raw.unlock_exclusive(false);
+        self.raw.unlock_exclusive(false);
     }
 }
 
 #[cfg(feature = "owning_ref")]
 unsafe impl<'a, T: ?Sized> StableAddress for RwLockWriteGuard<'a, T> {}
 
+impl<'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, T> {
+    /// Atomically downgrades an upgradable read lock into a shared read lock
+    /// without allowing any writers to take exclusive access of the lock in the
+    /// meantime.
+    ///
+    /// Note that if there are any writers currently waiting to take the lock
+    /// then other readers may not be able to acquire the lock even if it was
+    /// downgraded.
+    pub fn downgrade(self) -> RwLockReadGuard<'a, T> {
+        self.raw.upgradable_to_shared();
+        let raw = self.raw;
+        // Reborrow the value to avoid moving `self.data`,
+        // which isn't allowed for types with destructors
+        let data = unsafe { &*self.data };
+        mem::forget(self);
+        RwLockReadGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Atomically upgrades an upgradable read lock into an exclusive write lock,
+    /// blocking the current thread until it can be acquired.
+    pub fn upgrade(self) -> RwLockWriteGuard<'a, T> {
+        self.raw.upgradable_to_exclusive();
+        let raw = self.raw;
+        // Reborrow the value to avoid moving `self.data`,
+        // which isn't allowed for types with destructors
+        let data = unsafe { &mut *self.data };
+        mem::forget(self);
+        RwLockWriteGuard {
+            raw,
+            data,
+            marker: PhantomData,
+        }
+    }
+
+    /// Tries to atomically upgrade an upgradable read lock into an exclusive write lock.
+    ///
+    /// If the access could not be granted at this time, then the current guard is returned.
+    pub fn try_upgrade(self) -> Result<RwLockWriteGuard<'a, T>, Self> {
+        if self.raw.try_upgradable_to_exclusive() {
+            let raw = self.raw;
+            // Reborrow the value to avoid moving `self.data`,
+            // which isn't allowed for types with destructors
+            let data = unsafe { &mut *self.data };
+            mem::forget(self);
+            Ok(RwLockWriteGuard {
+                raw,
+                data,
+                marker: PhantomData,
+            })
+        } else {
+            Err(self)
+        }
+    }
+
+    /// Tries to atomically upgrade an upgradable read lock into an exclusive
+    /// write lock, until a timeout is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// the current guard is returned.
+    pub fn try_upgrade_for(self, timeout: Duration) -> Result<RwLockWriteGuard<'a, T>, Self> {
+        if self.raw.try_upgradable_to_exclusive_for(timeout) {
+            let raw = self.raw;
+            // Reborrow the value to avoid moving `self.data`,
+            // which isn't allowed for types with destructors
+            let data = unsafe { &mut *self.data };
+            mem::forget(self);
+            Ok(RwLockWriteGuard {
+                raw,
+                data,
+                marker: PhantomData,
+            })
+        } else {
+            Err(self)
+        }
+    }
+
+    /// Tries to atomically upgrade an upgradable read lock into an exclusive
+    /// write lock, until a timeout is reached.
+    ///
+    /// If the access could not be granted before the timeout expires, then
+    /// the current guard is returned.
+    #[inline]
+    pub fn try_upgrade_until(self, timeout: Instant) -> Result<RwLockWriteGuard<'a, T>, Self> {
+        if self.raw.try_upgradable_to_exclusive_until(timeout) {
+            let raw = self.raw;
+            // Reborrow the value to avoid moving `self.data`,
+            // which isn't allowed for types with destructors
+            let data = unsafe { &mut *self.data };
+            mem::forget(self);
+            Ok(RwLockWriteGuard {
+                raw,
+                data,
+                marker: PhantomData,
+            })
+        } else {
+            Err(self)
+        }
+    }
+
+    /// Unlocks the `RwLock` using a fair unlock protocol.
+    ///
+    /// By default, `RwLock` is unfair and allows the current thread to re-lock
+    /// the rwlock before another has the chance to acquire the lock, even if
+    /// that thread has been blocked on the `RwLock` for a long time. This is
+    /// the default because it allows much higher throughput as it avoids
+    /// forcing a context switch on every rwlock unlock. This can result in one
+    /// thread acquiring a `RwLock` many more times than other threads.
+    ///
+    /// However in some cases it can be beneficial to ensure fairness by forcing
+    /// the lock to pass on to a waiting thread if there is one. This is done by
+    /// using this method instead of dropping the `RwLockUpgradableReadGuard` normally.
+    #[inline]
+    pub fn unlock_fair(self) {
+        self.raw.unlock_upgradable(true);
+        mem::forget(self);
+    }
+}
+
+impl<'a, T: ?Sized + 'a> Deref for RwLockUpgradableReadGuard<'a, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        unsafe { &*self.data }
+    }
+}
+
+impl<'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, T> {
+    #[inline]
+    fn drop(&mut self) {
+        self.raw.unlock_upgradable(false);
+    }
+}
+
+#[cfg(feature = "owning_ref")]
+unsafe impl<'a, T: ?Sized> StableAddress for RwLockUpgradableReadGuard<'a, T> {}
+
 #[cfg(test)]
 mod tests {
     extern crate rand;
     use self::rand::Rng;
     use std::sync::mpsc::channel;
     use std::thread;
     use std::sync::Arc;
     use std::sync::atomic::{AtomicUsize, Ordering};
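
The new `RwLockUpgradableReadGuard` methods added above (`upgrade`, `try_upgrade`, `try_upgrade_for`, `try_upgrade_until`, `unlock_fair`) let a shared lock be converted to an exclusive one without releasing it in between. A minimal usage sketch, assuming a crate that depends on this parking_lot version; the `bump_if_small` helper and the `u32` payload are illustrative, not part of this patch:

    extern crate parking_lot;

    use parking_lot::RwLock;

    // Illustrative helper: read the value under an upgradable lock and only
    // take exclusive access when a write is actually needed.
    fn bump_if_small(lock: &RwLock<u32>) {
        // Coexists with plain readers, but only one upgradable reader at a time.
        let upgradable = lock.upgradable_read();
        let current = *upgradable;
        if current < 10 {
            // upgrade() atomically turns the shared lock into an exclusive one,
            // so no other writer can change the value between the read above
            // and the write below.
            let mut writer = upgradable.upgrade();
            *writer = current + 1;
        }
    }
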
@@ -676,17 +1038,19 @@ mod tests {
     #[derive(Eq, PartialEq, Debug)]
     struct NonCopy(i32);
 
     #[test]
     fn smoke() {
         let l = RwLock::new(());
         drop(l.read());
         drop(l.write());
+        drop(l.upgradable_read());
         drop((l.read(), l.read()));
+        drop((l.read(), l.upgradable_read()));
         drop(l.write());
     }
 
     #[test]
     fn frob() {
         const N: u32 = 10;
         const M: u32 = 1000;
 
@@ -712,63 +1076,116 @@ mod tests {
         let _ = rx.recv();
     }
 
     #[test]
     fn test_rw_arc_no_poison_wr() {
         let arc = Arc::new(RwLock::new(1));
         let arc2 = arc.clone();
         let _: Result<(), _> = thread::spawn(move || {
-                let _lock = arc2.write();
-                panic!();
-            })
-            .join();
+            let _lock = arc2.write();
+            panic!();
+        }).join();
         let lock = arc.read();
         assert_eq!(*lock, 1);
     }
 
     #[test]
     fn test_rw_arc_no_poison_ww() {
         let arc = Arc::new(RwLock::new(1));
         let arc2 = arc.clone();
         let _: Result<(), _> = thread::spawn(move || {
-                let _lock = arc2.write();
-                panic!();
-            })
-            .join();
+            let _lock = arc2.write();
+            panic!();
+        }).join();
         let lock = arc.write();
         assert_eq!(*lock, 1);
     }
 
     #[test]
     fn test_rw_arc_no_poison_rr() {
         let arc = Arc::new(RwLock::new(1));
         let arc2 = arc.clone();
         let _: Result<(), _> = thread::spawn(move || {
-                let _lock = arc2.read();
-                panic!();
-            })
-            .join();
+            let _lock = arc2.read();
+            panic!();
+        }).join();
         let lock = arc.read();
         assert_eq!(*lock, 1);
     }
+
     #[test]
     fn test_rw_arc_no_poison_rw() {
         let arc = Arc::new(RwLock::new(1));
         let arc2 = arc.clone();
         let _: Result<(), _> = thread::spawn(move || {
-                let _lock = arc2.read();
-                panic!()
-            })
-            .join();
+            let _lock = arc2.read();
+            panic!()
+        }).join();
         let lock = arc.write();
         assert_eq!(*lock, 1);
     }
 
     #[test]
+    fn test_ruw_arc() {
+        let arc = Arc::new(RwLock::new(0));
+        let arc2 = arc.clone();
+        let (tx, rx) = channel();
+
+        thread::spawn(move || {
+            for _ in 0..10 {
+                let mut lock = arc2.write();
+                let tmp = *lock;
+                *lock = -1;
+                thread::yield_now();
+                *lock = tmp + 1;
+            }
+            tx.send(()).unwrap();
+        });
+
+        let mut children = Vec::new();
+
+        // Upgradable readers try to catch the writer in the act and also
+        // try to touch the value
+        for _ in 0..5 {
+            let arc3 = arc.clone();
+            children.push(thread::spawn(move || {
+                let lock = arc3.upgradable_read();
+                let tmp = *lock;
+                assert!(tmp >= 0);
+                thread::yield_now();
+                let mut lock = lock.upgrade();
+                assert_eq!(tmp, *lock);
+                *lock = -1;
+                thread::yield_now();
+                *lock = tmp + 1;
+            }));
+        }
+
+        // Readers try to catch the writers in the act
+        for _ in 0..5 {
+            let arc4 = arc.clone();
+            children.push(thread::spawn(move || {
+                let lock = arc4.read();
+                assert!(*lock >= 0);
+            }));
+        }
+
+        // Wait for children to pass their asserts
+        for r in children {
+            assert!(r.join().is_ok());
+        }
+
+        // Wait for writer to finish
+        rx.recv().unwrap();
+        let lock = arc.read();
+        assert_eq!(*lock, 15);
+    }
+
+    #[test]
     fn test_rw_arc() {
         let arc = Arc::new(RwLock::new(0));
         let arc2 = arc.clone();
         let (tx, rx) = channel();
 
         thread::spawn(move || {
             let mut lock = arc2.write();
             for _ in 0..10 {
@@ -801,29 +1218,28 @@ mod tests {
         assert_eq!(*lock, 10);
     }
 
     #[test]
     fn test_rw_arc_access_in_unwind() {
         let arc = Arc::new(RwLock::new(1));
         let arc2 = arc.clone();
         let _ = thread::spawn(move || -> () {
-                struct Unwinder {
-                    i: Arc<RwLock<isize>>,
+            struct Unwinder {
+                i: Arc<RwLock<isize>>,
+            }
+            impl Drop for Unwinder {
+                fn drop(&mut self) {
+                    let mut lock = self.i.write();
+                    *lock += 1;
                 }
-                impl Drop for Unwinder {
-                    fn drop(&mut self) {
-                        let mut lock = self.i.write();
-                        *lock += 1;
-                    }
-                }
-                let _u = Unwinder { i: arc2 };
-                panic!();
-            })
-            .join();
+            }
+            let _u = Unwinder { i: arc2 };
+            panic!();
+        }).join();
         let lock = arc.read();
         assert_eq!(*lock, 2);
     }
 
     #[test]
     fn test_rwlock_unsized() {
         let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
         {
@@ -831,30 +1247,127 @@ mod tests {
             b[0] = 4;
             b[2] = 5;
         }
         let comp: &[i32] = &[4, 2, 5];
         assert_eq!(&*rw.read(), comp);
     }
 
     #[test]
+    fn test_rwlock_try_read() {
+        let lock = RwLock::new(0isize);
+        {
+            let read_guard = lock.read();
+
+            let read_result = lock.try_read();
+            assert!(
+                read_result.is_some(),
+                "try_read should succeed while read_guard is in scope"
+            );
+
+            drop(read_guard);
+        }
+        {
+            let upgrade_guard = lock.upgradable_read();
+
+            let read_result = lock.try_read();
+            assert!(
+                read_result.is_some(),
+                "try_read should succeed while upgrade_guard is in scope"
+            );
+
+            drop(upgrade_guard);
+        }
+        {
+            let write_guard = lock.write();
+
+            let read_result = lock.try_read();
+            assert!(
+                read_result.is_none(),
+                "try_read should fail while write_guard is in scope"
+            );
+
+            drop(write_guard);
+        }
+    }
+
+    #[test]
     fn test_rwlock_try_write() {
         let lock = RwLock::new(0isize);
-        let read_guard = lock.read();
+        {
+            let read_guard = lock.read();
+
+            let write_result = lock.try_write();
+            assert!(
+                write_result.is_none(),
+                "try_write should fail while read_guard is in scope"
+            );
+
+            drop(read_guard);
+        }
+        {
+            let upgrade_guard = lock.upgradable_read();
+
+            let write_result = lock.try_write();
+            assert!(
+                write_result.is_none(),
+                "try_write should fail while upgrade_guard is in scope"
+            );
+
+            drop(upgrade_guard);
+        }
+        {
+            let write_guard = lock.write();
+
+            let write_result = lock.try_write();
+            assert!(
+                write_result.is_none(),
+                "try_write should fail while write_guard is in scope"
+            );
+
+            drop(write_guard);
+        }
+    }
 
-        let write_result = lock.try_write();
-        match write_result {
-            None => (),
-            Some(_) => {
-                assert!(false,
-                        "try_write should not succeed while read_guard is in scope")
-            }
+    #[test]
+    fn test_rwlock_try_upgrade() {
+        let lock = RwLock::new(0isize);
+        {
+            let read_guard = lock.read();
+
+            let upgrade_result = lock.try_upgradable_read();
+            assert!(
+                upgrade_result.is_some(),
+                "try_upgradable_read should succeed while read_guard is in scope"
+            );
+
+            drop(read_guard);
         }
+        {
+            let upgrade_guard = lock.upgradable_read();
 
-        drop(read_guard);
+            let upgrade_result = lock.try_upgradable_read();
+            assert!(
+                upgrade_result.is_none(),
+                "try_upgradable_read should fail while upgrade_guard is in scope"
+            );
+
+            drop(upgrade_guard);
+        }
+        {
+            let write_guard = lock.write();
+
+            let upgrade_result = lock.try_upgradable_read();
+            assert!(
+                upgrade_result.is_none(),
+                "try_upgradable should fail while write_guard is in scope"
+            );
+
+            drop(write_guard);
+        }
     }
 
     #[test]
     fn test_into_inner() {
         let m = RwLock::new(NonCopy(10));
         assert_eq!(m.into_inner(), NonCopy(10));
     }
 
@@ -879,48 +1392,52 @@ mod tests {
     #[test]
     fn test_get_mut() {
         let mut m = RwLock::new(NonCopy(10));
         *m.get_mut() = NonCopy(20);
         assert_eq!(m.into_inner(), NonCopy(20));
     }
 
     #[test]
-    fn test_rwlockguard_send() {
-        fn send<T: Send>(_: T) {}
+    fn test_rwlockguard_sync() {
+        fn sync<T: Sync>(_: T) {}
 
         let rwlock = RwLock::new(());
-        send(rwlock.read());
-        send(rwlock.write());
+        sync(rwlock.read());
+        sync(rwlock.write());
     }
 
     #[test]
     fn test_rwlock_downgrade() {
         let x = Arc::new(RwLock::new(0));
         let mut handles = Vec::new();
         for _ in 0..8 {
             let x = x.clone();
-            handles.push(thread::spawn(move || for _ in 0..100 {
-                let mut writer = x.write();
-                *writer += 1;
-                let cur_val = *writer;
-                let reader = writer.downgrade();
-                assert_eq!(cur_val, *reader);
+            handles.push(thread::spawn(move || {
+                for _ in 0..100 {
+                    let mut writer = x.write();
+                    *writer += 1;
+                    let cur_val = *writer;
+                    let reader = writer.downgrade();
+                    assert_eq!(cur_val, *reader);
+                }
             }));
         }
         for handle in handles {
             handle.join().unwrap()
         }
         assert_eq!(*x.read(), 800);
     }
 
     #[test]
     fn test_rwlock_recursive() {
         let arc = Arc::new(RwLock::new(1));
         let arc2 = arc.clone();
         let _lock1 = arc.read();
-        thread::spawn(move || { let _lock = arc2.write(); });
+        thread::spawn(move || {
+            let _lock = arc2.write();
+        });
         thread::sleep(Duration::from_millis(100));
 
         // A normal read would block here since there is a pending writer
         let _lock2 = arc.read_recursive();
     }
 }
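
The timed upgrade variants and the fair-unlock path added in this file can be combined; a sketch under the same assumptions as above (the function name, the `Vec<u8>` payload and the 10 ms budget are made up for illustration):

    extern crate parking_lot;

    use parking_lot::RwLock;
    use std::time::Duration;

    // Illustrative helper: try to obtain exclusive access for a bounded time,
    // and hand the lock to a waiting thread if that fails.
    fn flush_soon(buffer: &RwLock<Vec<u8>>) {
        let upgradable = buffer.upgradable_read();
        match upgradable.try_upgrade_for(Duration::from_millis(10)) {
            Ok(mut writer) => writer.clear(),
            Err(upgradable) => {
                // Still contended: release with the fair protocol so a blocked
                // thread gets the lock instead of this thread re-acquiring it.
                upgradable.unlock_fair();
            }
        }
    }
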
deleted file mode 100644
--- a/third_party/rust/parking_lot/src/stable.rs
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2016 Amanieu d'Antras
-//
-// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
-// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
-// http://opensource.org/licenses/MIT>, at your option. This file may not be
-// copied, modified, or distributed except according to those terms.
-
-#![allow(dead_code)]
-
-use std::sync::atomic;
-
-// Re-export this for convenience
-pub use std::sync::atomic::{Ordering, fence};
-
-// Wrapper around AtomicUsize for non-nightly which has usable compare_exchange
-// and compare_exchange_weak methods.
-pub struct AtomicUsize(atomic::AtomicUsize);
-pub use self::AtomicUsize as AtomicU8;
-
-// Constants for static initialization
-pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize(atomic::ATOMIC_USIZE_INIT);
-pub use self::ATOMIC_USIZE_INIT as ATOMIC_U8_INIT;
-
-impl AtomicUsize {
-    #[inline]
-    pub fn new(val: usize) -> AtomicUsize {
-        AtomicUsize(atomic::AtomicUsize::new(val))
-    }
-    #[inline]
-    pub fn load(&self, order: Ordering) -> usize {
-        self.0.load(order)
-    }
-    #[inline]
-    pub fn store(&self, val: usize, order: Ordering) {
-        self.0.store(val, order);
-    }
-    #[inline]
-    pub fn swap(&self, val: usize, order: Ordering) -> usize {
-        self.0.swap(val, order)
-    }
-    #[inline]
-    pub fn fetch_add(&self, val: usize, order: Ordering) -> usize {
-        self.0.fetch_add(val, order)
-    }
-    #[inline]
-    pub fn fetch_sub(&self, val: usize, order: Ordering) -> usize {
-        self.0.fetch_sub(val, order)
-    }
-    #[inline]
-    pub fn fetch_and(&self, val: usize, order: Ordering) -> usize {
-        self.0.fetch_and(val, order)
-    }
-    #[inline]
-    pub fn fetch_or(&self, val: usize, order: Ordering) -> usize {
-        self.0.fetch_or(val, order)
-    }
-    #[inline]
-    pub fn compare_exchange(&self,
-                            old: usize,
-                            new: usize,
-                            order: Ordering,
-                            _: Ordering)
-                            -> Result<usize, usize> {
-        let res = self.0.compare_and_swap(old, new, order);
-        if res == old { Ok(res) } else { Err(res) }
-    }
-    #[inline]
-    pub fn compare_exchange_weak(&self,
-                                 old: usize,
-                                 new: usize,
-                                 order: Ordering,
-                                 _: Ordering)
-                                 -> Result<usize, usize> {
-        let res = self.0.compare_and_swap(old, new, order);
-        if res == old { Ok(res) } else { Err(res) }
-    }
-}
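
The removed stable.rs only existed to emulate `compare_exchange`/`compare_exchange_weak` on top of `compare_and_swap` for older toolchains; presumably the updated crate now relies on the standard atomics directly. For reference, the std call the shim was approximating looks like this (illustrative snippet, not code from this patch; note that std honours the separate failure ordering the shim ignored):

    use std::sync::atomic::{AtomicUsize, Ordering};

    // Claim a 0 -> 1 flag, returning true if this caller won the race.
    fn try_claim(flag: &AtomicUsize) -> bool {
        flag.compare_exchange(0, 1, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }
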
deleted file mode 100644
--- a/third_party/rust/thread-id/.appveyor.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-environment:
-  matrix:
-    - target: 1.8.0-x86_64-pc-windows-msvc
-    - target: 1.8.0-i686-pc-windows-msvc
-    - target: 1.9.0-x86_64-pc-windows-msvc
-    - target: 1.9.0-i686-pc-windows-msvc
-    - target: 1.10.0-x86_64-pc-windows-msvc
-    - target: 1.10.0-i686-pc-windows-msvc
-    - target: 1.11.0-x86_64-pc-windows-msvc
-    - target: 1.11.0-i686-pc-windows-msvc
-    - target: 1.12.0-x86_64-pc-windows-msvc
-    - target: 1.12.0-i686-pc-windows-msvc
-    - target: 1.13.0-x86_64-pc-windows-msvc
-    - target: 1.13.0-i686-pc-windows-msvc
-    - target: 1.14.0-x86_64-pc-windows-msvc
-    - target: 1.14.0-i686-pc-windows-msvc
-    - target: 1.15.1-x86_64-pc-windows-msvc
-    - target: 1.15.1-i686-pc-windows-msvc
-    - target: 1.16.0-x86_64-pc-windows-msvc
-    - target: 1.16.0-i686-pc-windows-msvc
-    - target: 1.17.0-x86_64-pc-windows-msvc
-    - target: 1.17.0-i686-pc-windows-msvc
-    - target: beta-x86_64-pc-windows-msvc
-    - target: beta-i686-pc-windows-msvc
-    - target: beta-x86_64-pc-windows-gnu
-    - target: beta-i686-pc-windows-gnu
-    - target: nightly-x86_64-pc-windows-msvc
-    - target: nightly-i686-pc-windows-msvc
-    - target: nightly-x86_64-pc-windows-gnu
-    - target: nightly-i686-pc-windows-gnu
-
-install:
-  # Download the Rust and Cargo installer.
-  - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-${env:target}.msi"
-
-  # Install Rust and Cargo and wait for installation to finish by using Write-Output.
-  - ps: msiexec /package "rust-${env:target}.msi" /quiet /norestart | Write-Output
-
-  # Pick up the new Path variable after the installer modified it.
-  - ps: $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine")
-
-  # Print versions for future reference.
-  - rustc --version
-  - cargo --version
-
-build_script:
-  - cargo build
-
-test_script:
-  - cargo test
deleted file mode 100644
--- a/third_party/rust/thread-id/.cargo-checksum.json
+++ /dev/null
@@ -1,1 +0,0 @@
-{"files":{".appveyor.yml":"86b8a7bf3ff316a2d8c58ba1fc365c5773c926ee3e4abccf889a95e5ec4f393a",".travis.yml":"731fd15b3f516c8da8241d2c5e7a56e638f0295f637d448fb009131092792264","Cargo.toml":"fbe4f546589b570dff000201397c059cc16a5807211a436c17f03aade65690ff","changelog.md":"71ecf0389739517ce6e84fc230f1b42f7532c545c0690fbcff270c83012f0d30","license-apache":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","license-mit":"69a1ca9eaf8e5276eade8931f42808d7c39c6a26011e34450c4ebb10f11c653f","readme.md":"2dc47b0ce86bb82f007639782c12ba9376be7d31635ba5cdf7cc828e27a9ba54","src/lib.rs":"214ad7a56dd4715387bbdbe91e53cd93f49fedc88f4c818b4e52378b71d98232"},"package":"8df7875b676fddfadffd96deea3b1124e5ede707d4884248931077518cf1f773"}
\ No newline at end of file
deleted file mode 100644
--- a/third_party/rust/thread-id/.travis.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-language: rust
-
-rust:
-  - 1.8.0
-  - 1.9.0
-  - 1.10.0
-  - 1.11.0
-  - 1.12.0
-  - 1.13.0
-  - 1.14.0
-  - 1.15.1
-  - 1.16.0
-  - 1.17.0
-  - beta
-  - nightly
deleted file mode 100644
--- a/third_party/rust/thread-id/Cargo.toml
+++ /dev/null
@@ -1,16 +0,0 @@
-[package]
-name = "thread-id"
-version = "3.1.0"
-authors = ["Ruud van Asseldonk <dev@veniogames.com>"]
-license = "MIT/Apache-2.0"
-readme = "readme.md"
-keywords = ["thread", "pthread", "getcurrentthreadid"]
-description = "Get a unique thread ID"
-repository = "https://github.com/ruuda/thread-id"
-documentation = "https://docs.rs/thread-id"
-
-[target.'cfg(unix)'.dependencies]
-libc = "0.2.6"
-
-[target.'cfg(windows)'.dependencies]
-kernel32-sys = "0.2.1"
deleted file mode 100644
--- a/third_party/rust/thread-id/changelog.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# v3.1.0
-
-Released 2017-05-13.
-
- * Add the MIT license as an alternative to the Apache 2.0 license. This license
-   change applies retroactively to all versions, this is only a metadata change.
-
-# v3.0.0
-
-Released 2016-10-29.
-
- * Depend on libc only on Unix-like environments, and on kernel32-sys only
-   on Windows. This requires Rust 1.8 or later, hence the major version
-   bump.
-
-# v2.0.0
-
-Released 2016-04-09.
-
- * Change ID type to `usize` to better reflect the underlying platform IDs.
-   This is a breaking change.
- * Allow functions to be inlined to avoid call overhead.
-
-Many thanks to Amanieu d'Antras for contributing to this release.
-
-# v1.0.0
-
-Released 2016-03-13.
-
-Initial release with Windows and Linux support.
deleted file mode 100644
--- a/third_party/rust/thread-id/license-apache
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
deleted file mode 100644
--- a/third_party/rust/thread-id/license-mit
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright 2017 Ruud van Asseldonk
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
deleted file mode 100644
--- a/third_party/rust/thread-id/readme.md
+++ /dev/null
@@ -1,53 +0,0 @@
-Thread-ID
-=========
-Get a unique ID for the current thread in Rust.
-
-[![Build Status][tr-img]][tr]
-[![Build Status][av-img]][av]
-[![Crates.io version][crate-img]][crate]
-[![Documentation][docs-img]][docs]
-
-For diagnostics and debugging it can often be useful to get an ID that is
-different for every thread. [Until Rust 1.14][stdlib-pr], the standard library
-did not expose a way to do that, hence this crate.
-
-Example
--------
-
-```rust
-use std::thread;
-use thread_id;
-
-thread::spawn(move || {
-    println!("spawned thread has id {}", thread_id::get());
-});
-
-println!("main thread has id {}", thread_id::get());
-```
-
-This will print two different numbers.
-
-License
--------
-Thread-ID is licensed under either the [Apache 2.0 license][apache2], or the
-[MIT license][mit], at your option. It may be used in free software as well as
-closed-source applications, both for commercial and non-commercial use under the
-conditions given in the license.
-
-Contributing
-------------
-Unless you explicitly state otherwise, any contribution intentionally submitted
-for inclusion in the work by you, as defined in the Apache 2.0 license, shall be
-dual licensed as above, without any additional terms or conditions.
-
-[tr-img]:    https://travis-ci.org/ruuda/thread-id.svg?branch=master
-[tr]:        https://travis-ci.org/ruuda/thread-id
-[av-img]:    https://ci.appveyor.com/api/projects/status/a6ccbm3x4fgi6wku?svg=true
-[av]:        https://ci.appveyor.com/project/ruuda/thread-id
-[crate-img]: https://img.shields.io/crates/v/thread-id.svg
-[crate]:     https://crates.io/crates/thread-id
-[docs-img]:  https://img.shields.io/badge/docs-online-blue.svg
-[docs]:      https://docs.rs/thread-id
-[stdlib-pr]: https://github.com/rust-lang/rust/pull/36341
-[apache2]:   https://www.apache.org/licenses/LICENSE-2.0
-[mit]:       https://opensource.org/licenses/MIT
deleted file mode 100644
--- a/third_party/rust/thread-id/src/lib.rs
+++ /dev/null
@@ -1,67 +0,0 @@
-// Thread-ID -- Get a unique thread ID
-// Copyright 2016 Ruud van Asseldonk
-//
-// Licensed under either the Apache License, Version 2.0, or the MIT license, at
-// your option. A copy of both licenses has been included in the root of the
-// repository.
-
-//! Thread-ID: get a unique ID for the current thread.
-//!
-//! For diagnostics and debugging it can often be useful to get an ID that is
-//! different for every thread. This crate provides that functionality.
-//!
-//! # Example
-//!
-//! ```
-//! use std::thread;
-//! use thread_id;
-//!
-//! thread::spawn(move || {
-//!     println!("spawned thread has id {}", thread_id::get());
-//! });
-//!
-//! println!("main thread has id {}", thread_id::get());
-//! ```
-
-#![warn(missing_docs)]
-
-#[cfg(unix)]
-extern crate libc;
-
-#[cfg(windows)]
-extern crate kernel32;
-
-/// Returns a number that is unique to the calling thread.
-///
-/// Calling this function twice from the same thread will return the same
-/// number. Calling this function from a different thread will return a
-/// different number.
-#[inline]
-pub fn get() -> usize {
-    get_internal()
-}
-
-#[cfg(unix)]
-#[inline]
-fn get_internal() -> usize {
-    unsafe { libc::pthread_self() as usize }
-}
-
-#[cfg(windows)]
-#[inline]
-fn get_internal() -> usize {
-    unsafe { kernel32::GetCurrentThreadId() as usize }
-}
-
-#[test]
-fn distinct_threads_have_distinct_ids() {
-    use std::sync::mpsc;
-    use std::thread;
-
-    let (tx, rx) = mpsc::channel();
-    thread::spawn(move || tx.send(::get()).unwrap()).join().unwrap();
-
-    let main_tid = ::get();
-    let other_tid = rx.recv().unwrap();
-    assert!(main_tid != other_tid);
-}
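
The vendored thread-id crate is removed outright, presumably because the updated parking_lot no longer needs it. Callers that only want a per-thread token can generally reach for the standard library instead; a minimal sketch (illustrative only, and note that `std::thread::ThreadId` is an opaque type rather than the raw `usize` this crate returned):

    use std::thread;

    fn main() {
        let handle = thread::spawn(|| {
            // ThreadId implements Debug, Eq and Hash, so it can be logged or
            // used as a map key.
            println!("spawned thread has id {:?}", thread::current().id());
        });
        println!("main thread has id {:?}", thread::current().id());
        handle.join().unwrap();
    }
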