Bug 1552695 - Part 2: Revendor dependencies. r=froydnj
author: Bastien Orivel <eijebong@bananium.fr>
Mon, 20 May 2019 12:22:04 +0000
changeset 474558 d487774b0d17d39c5836feb9184ab685d364489c
parent 474557 5db8c1e9f643ec26fc93e7a3fc7a90c742b11030
child 474559 2b968611e3dc6fd76b6a2f179198eea14081d074
push id: 36042
push user: dvarga@mozilla.com
push date: Tue, 21 May 2019 04:19:40 +0000
treeherder: mozilla-central@ca560ff55451 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: froydnj
bugs: 1552695
milestone: 69.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1552695 - Part 2: Revendor dependencies. r=froydnj Depends on D31737 Differential Revision: https://phabricator.services.mozilla.com/D31738
third_party/rust/cssparser-macros/.cargo-checksum.json
third_party/rust/cssparser-macros/Cargo.toml
third_party/rust/cssparser-macros/lib.rs
third_party/rust/cssparser/.cargo-checksum.json
third_party/rust/cssparser/Cargo.toml
third_party/rust/cssparser/build.rs
third_party/rust/cssparser/build/match_byte.rs
third_party/rust/cssparser/src/color.rs
third_party/rust/cssparser/src/cow_rc_str.rs
third_party/rust/cssparser/src/from_bytes.rs
third_party/rust/cssparser/src/lib.rs
third_party/rust/cssparser/src/macros.rs
third_party/rust/cssparser/src/nth.rs
third_party/rust/cssparser/src/parser.rs
third_party/rust/cssparser/src/rules_and_declarations.rs
third_party/rust/cssparser/src/serializer.rs
third_party/rust/cssparser/src/size_of_tests.rs
third_party/rust/cssparser/src/tests.rs
third_party/rust/cssparser/src/tokenizer.rs
third_party/rust/cssparser/src/unicode_range.rs
third_party/rust/cstr-macros/.cargo-checksum.json
third_party/rust/cstr-macros/Cargo.toml
third_party/rust/cstr-macros/src/lib.rs
third_party/rust/num-derive/.cargo-checksum.json
third_party/rust/num-derive/Cargo.toml
third_party/rust/num-derive/RELEASES.md
third_party/rust/num-derive/bors.toml
third_party/rust/num-derive/build.rs
third_party/rust/num-derive/ci/rustup.sh
third_party/rust/num-derive/ci/test_full.sh
third_party/rust/num-derive/src/lib.rs
third_party/rust/num-derive/tests/issue-6.rs
third_party/rust/num-derive/tests/issue-9.rs
third_party/rust/num-derive/tests/newtype.rs
third_party/rust/num-derive/tests/trivial.rs
third_party/rust/num-derive/tests/with_custom_values.rs
third_party/rust/proc-macro2-0.3.5/.cargo-checksum.json
third_party/rust/proc-macro2-0.3.5/.travis.yml
third_party/rust/proc-macro2-0.3.5/Cargo.toml
third_party/rust/proc-macro2-0.3.5/LICENSE-APACHE
third_party/rust/proc-macro2-0.3.5/LICENSE-MIT
third_party/rust/proc-macro2-0.3.5/README.md
third_party/rust/proc-macro2-0.3.5/src/lib.rs
third_party/rust/proc-macro2-0.3.5/src/stable.rs
third_party/rust/proc-macro2-0.3.5/src/strnom.rs
third_party/rust/proc-macro2-0.3.5/src/unstable.rs
third_party/rust/proc-macro2-0.3.5/tests/test.rs
third_party/rust/quote-0.5.2/.cargo-checksum.json
third_party/rust/quote-0.5.2/Cargo.toml
third_party/rust/quote-0.5.2/LICENSE-APACHE
third_party/rust/quote-0.5.2/LICENSE-MIT
third_party/rust/quote-0.5.2/README.md
third_party/rust/quote-0.5.2/src/lib.rs
third_party/rust/quote-0.5.2/src/to_tokens.rs
third_party/rust/quote-0.5.2/src/tokens.rs
third_party/rust/quote-0.5.2/tests/test.rs
third_party/rust/syn-0.13.1/.cargo-checksum.json
third_party/rust/syn-0.13.1/Cargo.toml
third_party/rust/syn-0.13.1/LICENSE-APACHE
third_party/rust/syn-0.13.1/LICENSE-MIT
third_party/rust/syn-0.13.1/README.md
third_party/rust/syn-0.13.1/src/attr.rs
third_party/rust/syn-0.13.1/src/buffer.rs
third_party/rust/syn-0.13.1/src/data.rs
third_party/rust/syn-0.13.1/src/derive.rs
third_party/rust/syn-0.13.1/src/error.rs
third_party/rust/syn-0.13.1/src/expr.rs
third_party/rust/syn-0.13.1/src/file.rs
third_party/rust/syn-0.13.1/src/gen/fold.rs
third_party/rust/syn-0.13.1/src/gen/visit.rs
third_party/rust/syn-0.13.1/src/gen/visit_mut.rs
third_party/rust/syn-0.13.1/src/gen_helper.rs
third_party/rust/syn-0.13.1/src/generics.rs
third_party/rust/syn-0.13.1/src/ident.rs
third_party/rust/syn-0.13.1/src/item.rs
third_party/rust/syn-0.13.1/src/lib.rs
third_party/rust/syn-0.13.1/src/lifetime.rs
third_party/rust/syn-0.13.1/src/lit.rs
third_party/rust/syn-0.13.1/src/mac.rs
third_party/rust/syn-0.13.1/src/macros.rs
third_party/rust/syn-0.13.1/src/op.rs
third_party/rust/syn-0.13.1/src/parse_quote.rs
third_party/rust/syn-0.13.1/src/parsers.rs
third_party/rust/syn-0.13.1/src/path.rs
third_party/rust/syn-0.13.1/src/punctuated.rs
third_party/rust/syn-0.13.1/src/spanned.rs
third_party/rust/syn-0.13.1/src/synom.rs
third_party/rust/syn-0.13.1/src/token.rs
third_party/rust/syn-0.13.1/src/tt.rs
third_party/rust/syn-0.13.1/src/ty.rs
third_party/rust/syn-0.14.6/.cargo-checksum.json
third_party/rust/syn-0.14.6/Cargo.toml
third_party/rust/syn-0.14.6/LICENSE-APACHE
third_party/rust/syn-0.14.6/LICENSE-MIT
third_party/rust/syn-0.14.6/README.md
third_party/rust/syn-0.14.6/src/attr.rs
third_party/rust/syn-0.14.6/src/buffer.rs
third_party/rust/syn-0.14.6/src/data.rs
third_party/rust/syn-0.14.6/src/derive.rs
third_party/rust/syn-0.14.6/src/error.rs
third_party/rust/syn-0.14.6/src/expr.rs
third_party/rust/syn-0.14.6/src/file.rs
third_party/rust/syn-0.14.6/src/gen/fold.rs
third_party/rust/syn-0.14.6/src/gen/visit.rs
third_party/rust/syn-0.14.6/src/gen/visit_mut.rs
third_party/rust/syn-0.14.6/src/gen_helper.rs
third_party/rust/syn-0.14.6/src/generics.rs
third_party/rust/syn-0.14.6/src/item.rs
third_party/rust/syn-0.14.6/src/lib.rs
third_party/rust/syn-0.14.6/src/lifetime.rs
third_party/rust/syn-0.14.6/src/lit.rs
third_party/rust/syn-0.14.6/src/mac.rs
third_party/rust/syn-0.14.6/src/macros.rs
third_party/rust/syn-0.14.6/src/op.rs
third_party/rust/syn-0.14.6/src/parse_quote.rs
third_party/rust/syn-0.14.6/src/parsers.rs
third_party/rust/syn-0.14.6/src/path.rs
third_party/rust/syn-0.14.6/src/punctuated.rs
third_party/rust/syn-0.14.6/src/spanned.rs
third_party/rust/syn-0.14.6/src/synom.rs
third_party/rust/syn-0.14.6/src/token.rs
third_party/rust/syn-0.14.6/src/tt.rs
third_party/rust/syn-0.14.6/src/ty.rs
third_party/rust/syn-0.14.6/src/verbatim.rs
--- a/third_party/rust/cssparser-macros/.cargo-checksum.json
+++ b/third_party/rust/cssparser-macros/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{"Cargo.toml":"a359c50f2f1777724b126573a568b3e94ad2674f645fae321ddf4b4293cef4cc","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","lib.rs":"218edca3cc07d460a65b6f937b77024a13ccca9d9d62ee715eeeb8dff8859f04"},"package":"f3a5383ae18dbfdeb569ed62019f5bddb2a95cd2d3833313c475a0d014777805"}
\ No newline at end of file
+{"files":{"Cargo.toml":"865ab5a711a4fea88adf215f7c0207f1ad2ee80c310229f6d2a5770d2ff36486","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","lib.rs":"66c99e9dd05190f31f59ccfd3f9558540f94a501e88849bcaf09191f9f3323da"},"package":"b16e382d9b983fdb9ac6a36b37fdeb84ce3ea81f749febfee3463cfa7f24275e"}
\ No newline at end of file
--- a/third_party/rust/cssparser-macros/Cargo.toml
+++ b/third_party/rust/cssparser-macros/Cargo.toml
@@ -7,33 +7,33 @@
 #
 # If you believe there's an error in this file please file an
 # issue against the rust-lang/cargo repository. If you're
 # editing this file be aware that the upstream Cargo.toml
 # will likely look very different (and much more reasonable)
 
 [package]
 name = "cssparser-macros"
-version = "0.3.3"
+version = "0.3.5"
 authors = ["Simon Sapin <simon.sapin@exyr.org>"]
 description = "Procedural macros for cssparser"
 documentation = "https://docs.rs/cssparser-macros/"
 license = "MPL-2.0"
 repository = "https://github.com/servo/rust-cssparser"
 
 [lib]
 path = "lib.rs"
 proc-macro = true
 [dependencies.phf_codegen]
 version = "0.7"
 
 [dependencies.proc-macro2]
-version = "0.3"
+version = "0.4"
 
 [dependencies.procedural-masquerade]
 version = "0.1"
 
 [dependencies.quote]
-version = "0.5"
+version = "0.6"
 
 [dependencies.syn]
-version = "0.13"
+version = "0.15.12"
 features = ["full", "extra-traits"]
--- a/third_party/rust/cssparser-macros/lib.rs
+++ b/third_party/rust/cssparser-macros/lib.rs
@@ -5,16 +5,17 @@
 #[macro_use] extern crate procedural_masquerade;
 extern crate phf_codegen;
 extern crate proc_macro;
 extern crate proc_macro2;
 #[macro_use] extern crate quote;
 extern crate syn;
 
 #[allow(unused_imports)] use std::ascii::AsciiExt;
+use quote::TokenStreamExt;
 use std::iter;
 use proc_macro2::{TokenStream, TokenTree};
 
 define_proc_macros! {
     /// Input: the arms of a `match` expression.
     ///
     /// Output: a `MAX_LENGTH` constant with the length of the longest string pattern.
     ///
--- a/third_party/rust/cssparser/.cargo-checksum.json
+++ b/third_party/rust/cssparser/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{"Cargo.toml":"150d450e43bcb9e523941408be883997ecffce7ff5f224329372edfe56334a55","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","README.md":"b9d6c5dc56ccc267db9e0e2389061dc2524daefa4baed88b36c98efc7a51c2a9","build.rs":"310d6d7b1931ff783a8aa1a4c6baee87b4c9130c858e4694ef69cc96df5e38dc","build/match_byte.rs":"6f7ec4235c9f2da403ea0be9339661ecd8e1f5e1c788cf88a41448b1080c59b8","docs/404.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","docs/index.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","src/color.rs":"43f996fbd8da54bd8ffa870f5e3610e5ba6e61543f92a129fa6c850e9b10db7e","src/cow_rc_str.rs":"541216f8ef74ee3cc5cbbc1347e5f32ed66588c401851c9a7d68b867aede1de0","src/from_bytes.rs":"331fe63af2123ae3675b61928a69461b5ac77799fff3ce9978c55cf2c558f4ff","src/lib.rs":"a474ee88ef8f73fcb7b7272d426e5eafb4ad10d104797a5a188d1676c8180972","src/macros.rs":"adb9773c157890381556ea83d7942dcc676f99eea71abbb6afeffee1e3f28960","src/nth.rs":"5c70fb542d1376cddab69922eeb4c05e4fcf8f413f27563a2af50f72a47c8f8c","src/parser.rs":"22067562160a1294fa92779b66c25cbccf259a2ef7dcf687c791fecdd020ce7f","src/rules_and_declarations.rs":"622ce07c117a511d40ce595602d4f4730659a59273388f28553d1a2b0fac92ce","src/serializer.rs":"3e2dfc60613f885cb6f99abfc854fde2a1e00de507431bd2e51178b61abfd69b","src/size_of_tests.rs":"385a0d77fbd6f86cb8013fd8d7541886980876a9da1da714bf175954c0e726cf","src/tests.rs":"9d08b3943d453664e01d58e307f79345e240f9f9ce6f8d36a842eff37155563e","src/tokenizer.rs":"adcf5811955e8df57a519e3d1e44fe3afeb5afeb1076daeb8d36fed1abcf1327","src/unicode_range.rs":"ae159d2ebe4123a6666e18dc0362f89b475240a6b7ed5fb6fe21b9e7a4139da8"},"package":"ba1ab4e1814be64bf6b6064ff532db0e34087f11b37706d6c96a21d32478761d"}
\ No newline at end of file
+{"files":{"Cargo.toml":"26e11f3e55baf0de8784dc3ec88771411ceae587c64e162fed3939b87c3cc591","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","README.md":"b9d6c5dc56ccc267db9e0e2389061dc2524daefa4baed88b36c98efc7a51c2a9","build.rs":"08e4a99d5184b2f22ab93bc0a024fec18dbd8fd38b9db638f19d4defede858ee","build/match_byte.rs":"e8537833ff1599a1bdbd0167f6295af7bd21e42024f32b982af32d58c156685c","docs/404.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","docs/index.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","src/color.rs":"8d3017ba8d644172908bd80d35e9be1081db477d2e0b0ea13971e29a466d451f","src/cow_rc_str.rs":"89b5dff5cf80eef3fcff0c11799e54a978d02d8b8963a621fbb999d35e7c03a3","src/from_bytes.rs":"b1cf15c4e975523fef46b575598737a39f3c63e5ce0b2bfd6ec627c69c6ea54a","src/lib.rs":"98b28ca7c72b8d20b3d76ae5b841be87bcadfc89e433ecc95fcf37aa15731442","src/macros.rs":"a50a0a7afa43e099dc008e54956e4c1fdfba2e9795d006b22e9eb45065fed61e","src/nth.rs":"a9d5fa0bd2c3ae7c48c851b9f5508ebdb07affdf5d0737bb8d85a7befab2ef9c","src/parser.rs":"fe2eb2be084923bf362de4b95c029beb21f172ad972a6452c400f640b43a583e","src/rules_and_declarations.rs":"712a5e893169e715bbcd18aac87a18ae728dc6bb922e79b237d34ce7a4548fcf","src/serializer.rs":"151305364cb1f20ea2dc0b3ebfbb77937e616ef4441d5dd3c9abda232f79a7af","src/size_of_tests.rs":"a628cacc876f240ac1bb9e287cdae293bffc4b86d45d9307e4fc2f822e8f3e84","src/tests.rs":"bf97071b691c0b0c932af5813e876142ce707ba57774742dbe60889b1dc54069","src/tokenizer.rs":"0450c38d140382161408a8fca5aac343f5a9405603234095dccd93f680831cb7","src/unicode_range.rs":"c4655c817db0dabb1d55669ac61a56ecf7f6a6c4353cf5b539b13bea6511c3dd"},"package":"e06795910fc2f585a75bdc9690fbcc51e83519f07b6eb981db43944643c04933"}
\ No newline at end of file
--- a/third_party/rust/cssparser/Cargo.toml
+++ b/third_party/rust/cssparser/Cargo.toml
@@ -7,17 +7,17 @@
 #
 # If you believe there's an error in this file please file an
 # issue against the rust-lang/cargo repository. If you're
 # editing this file be aware that the upstream Cargo.toml
 # will likely look very different (and much more reasonable)
 
 [package]
 name = "cssparser"
-version = "0.25.3"
+version = "0.25.5"
 authors = ["Simon Sapin <simon.sapin@exyr.org>"]
 build = "build.rs"
 exclude = ["src/css-parsing-tests/**", "src/big-data-url.css"]
 description = "Rust implementation of CSS Syntax Level 3"
 documentation = "https://docs.rs/cssparser/"
 readme = "README.md"
 keywords = ["css", "syntax", "parser"]
 license = "MPL-2.0"
--- a/third_party/rust/cssparser/build.rs
+++ b/third_party/rust/cssparser/build.rs
@@ -28,19 +28,22 @@ mod codegen {
     pub fn main() {
         let manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
 
         let input = Path::new(&manifest_dir).join("src/tokenizer.rs");
         let output = Path::new(&env::var("OUT_DIR").unwrap()).join("tokenizer.rs");
         println!("cargo:rerun-if-changed={}", input.display());
 
         // We have stack overflows on Servo's CI.
-        let handle = Builder::new().stack_size(128 * 1024 * 1024).spawn(move || {
-            match_byte::expand(&input, &output);
-        }).unwrap();
+        let handle = Builder::new()
+            .stack_size(128 * 1024 * 1024)
+            .spawn(move || {
+                match_byte::expand(&input, &output);
+            })
+            .unwrap();
 
         handle.join().unwrap();
     }
 }
 
 fn main() {
     if std::mem::size_of::<Option<bool>>() == 1 {
         // https://github.com/rust-lang/rust/pull/45225
--- a/third_party/rust/cssparser/build/match_byte.rs
+++ b/third_party/rust/cssparser/build/match_byte.rs
@@ -7,28 +7,37 @@ use std::fs::File;
 use std::io::{Read, Write};
 use std::path::Path;
 use syn;
 use syn::fold::Fold;
 use syn::parse::{Parse, ParseStream, Result};
 
 use proc_macro2::{Span, TokenStream};
 
-struct MatchByteParser {
-}
+struct MatchByteParser {}
 
 pub fn expand(from: &Path, to: &Path) {
     let mut source = String::new();
-    File::open(from).unwrap().read_to_string(&mut source).unwrap();
+    File::open(from)
+        .unwrap()
+        .read_to_string(&mut source)
+        .unwrap();
     let ast = syn::parse_file(&source).expect("Parsing rules.rs module");
     let mut m = MatchByteParser {};
     let ast = m.fold_file(ast);
 
-    let code = ast.into_token_stream().to_string().replace("{ ", "{\n").replace(" }", "\n}");
-    File::create(to).unwrap().write_all(code.as_bytes()).unwrap();
+    let code = ast
+        .into_token_stream()
+        .to_string()
+        .replace("{ ", "{\n")
+        .replace(" }", "\n}");
+    File::create(to)
+        .unwrap()
+        .write_all(code.as_bytes())
+        .unwrap();
 }
 
 struct MatchByte {
     expr: syn::Expr,
     arms: Vec<syn::Arm>,
 }
 
 impl Parse for MatchByte {
@@ -40,36 +49,34 @@ impl Parse for MatchByte {
                 expr
             },
             arms: {
                 let mut arms = Vec::new();
                 while !input.is_empty() {
                     arms.push(input.call(syn::Arm::parse)?);
                 }
                 arms
-            }
+            },
         })
     }
 }
 
 fn get_byte_from_expr_lit(expr: &Box<syn::Expr>) -> u8 {
     match **expr {
         syn::Expr::Lit(syn::ExprLit { ref lit, .. }) => {
             if let syn::Lit::Byte(ref byte) = *lit {
                 byte.value()
-            }
-            else {
+            } else {
                 panic!("Found a pattern that wasn't a byte")
             }
-        },
+        }
         _ => unreachable!(),
     }
 }
 
-
 /// Expand a TokenStream corresponding to the `match_byte` macro.
 ///
 /// ## Example
 ///
 /// ```rust
 /// match_byte! { tokenizer.next_byte_unchecked(),
 ///     b'a'..b'z' => { ... }
 ///     b'0'..b'9' => { ... }
@@ -88,50 +95,50 @@ fn expand_match_byte(body: &TokenStream)
 
     for (i, ref arm) in match_byte.arms.iter().enumerate() {
         let case_id = i + 1;
         let index = case_id as isize;
         let name = syn::Ident::new(&format!("Case{}", case_id), Span::call_site());
 
         for pat in &arm.pats {
             match pat {
-                &syn::Pat::Lit(syn::PatLit{ref expr}) => {
+                &syn::Pat::Lit(syn::PatLit { ref expr }) => {
                     let value = get_byte_from_expr_lit(expr);
                     if table[value as usize] == 0 {
                         table[value as usize] = case_id as u8;
                     }
-                },
+                }
                 &syn::Pat::Range(syn::PatRange { ref lo, ref hi, .. }) => {
                     let lo = get_byte_from_expr_lit(lo);
                     let hi = get_byte_from_expr_lit(hi);
                     for value in lo..hi {
                         if table[value as usize] == 0 {
                             table[value as usize] = case_id as u8;
                         }
                     }
                     if table[hi as usize] == 0 {
                         table[hi as usize] = case_id as u8;
                     }
-                },
+                }
                 &syn::Pat::Wild(_) => {
                     for byte in table.iter_mut() {
                         if *byte == 0 {
                             *byte = case_id as u8;
                         }
                     }
-                },
+                }
                 &syn::Pat::Ident(syn::PatIdent { ref ident, .. }) => {
                     assert_eq!(wildcard, None);
                     wildcard = Some(ident);
                     for byte in table.iter_mut() {
                         if *byte == 0 {
                             *byte = case_id as u8;
                         }
                     }
-                },
+                }
                 _ => {
                     panic!("Unexpected pattern: {:?}. Buggy code ?", pat);
                 }
             }
         }
         cases.push(quote!(#name = #index));
         let body = &arm.body;
         match_body.push(quote!(Case::#name => { #body }))
@@ -154,32 +161,35 @@ fn expand_match_byte(body: &TokenStream)
     };
 
     syn::parse2(expr.into()).unwrap()
 }
 
 impl Fold for MatchByteParser {
     fn fold_stmt(&mut self, stmt: syn::Stmt) -> syn::Stmt {
         match stmt {
-            syn::Stmt::Item(syn::Item::Macro(syn::ItemMacro{ ref mac, .. })) => {
+            syn::Stmt::Item(syn::Item::Macro(syn::ItemMacro { ref mac, .. })) => {
                 if mac.path == parse_quote!(match_byte) {
-                    return syn::fold::fold_stmt(self, syn::Stmt::Expr(expand_match_byte(&mac.tts)))
+                    return syn::fold::fold_stmt(
+                        self,
+                        syn::Stmt::Expr(expand_match_byte(&mac.tts)),
+                    );
                 }
-            },
+            }
             _ => {}
         }
 
         syn::fold::fold_stmt(self, stmt)
     }
 
     fn fold_expr(&mut self, expr: syn::Expr) -> syn::Expr {
         match expr {
-            syn::Expr::Macro(syn::ExprMacro{ ref mac, .. }) => {
+            syn::Expr::Macro(syn::ExprMacro { ref mac, .. }) => {
                 if mac.path == parse_quote!(match_byte) {
-                    return syn::fold::fold_expr(self, expand_match_byte(&mac.tts))
+                    return syn::fold::fold_expr(self, expand_match_byte(&mac.tts));
                 }
-            },
+            }
             _ => {}
         }
 
         syn::fold::fold_expr(self, expr)
     }
 }
--- a/third_party/rust/cssparser/src/color.rs
+++ b/third_party/rust/cssparser/src/color.rs
@@ -1,16 +1,16 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
+use std::f32::consts::PI;
 use std::fmt;
-use std::f32::consts::PI;
 
-use super::{Token, Parser, ToCss, ParseError, BasicParseError};
+use super::{BasicParseError, ParseError, Parser, ToCss, Token};
 
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
 
 /// A color with red, green, blue, and alpha components, in a byte each.
 #[derive(Clone, Copy, PartialEq, Debug)]
 #[repr(C)]
 pub struct RGBA {
@@ -42,17 +42,22 @@ impl RGBA {
     #[inline]
     pub fn transparent() -> Self {
         Self::new(0, 0, 0, 0)
     }
 
     /// Same thing, but with `u8` values instead of floats in the 0 to 1 range.
     #[inline]
     pub fn new(red: u8, green: u8, blue: u8, alpha: u8) -> Self {
-        RGBA { red: red, green: green, blue: blue, alpha: alpha }
+        RGBA {
+            red: red,
+            green: green,
+            blue: blue,
+            alpha: alpha,
+        }
     }
 
     /// Returns the red channel in a floating point number form, from 0 to 1.
     #[inline]
     pub fn red_f32(&self) -> f32 {
         self.red as f32 / 255.0
     }
 
@@ -73,38 +78,41 @@ impl RGBA {
     pub fn alpha_f32(&self) -> f32 {
         self.alpha as f32 / 255.0
     }
 }
 
 #[cfg(feature = "serde")]
 impl Serialize for RGBA {
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-        where S: Serializer
+    where
+        S: Serializer,
     {
         (self.red, self.green, self.blue, self.alpha).serialize(serializer)
     }
 }
 
 #[cfg(feature = "serde")]
 impl<'de> Deserialize<'de> for RGBA {
     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-        where D: Deserializer<'de>
+    where
+        D: Deserializer<'de>,
     {
         let (r, g, b, a) = Deserialize::deserialize(deserializer)?;
         Ok(RGBA::new(r, g, b, a))
     }
 }
 
 #[cfg(feature = "heapsize")]
 known_heap_size!(0, RGBA);
 
 impl ToCss for RGBA {
     fn to_css<W>(&self, dest: &mut W) -> fmt::Result
-        where W: fmt::Write,
+    where
+        W: fmt::Write,
     {
         let serialize_alpha = self.alpha != 255;
 
         dest.write_str(if serialize_alpha { "rgba(" } else { "rgb(" })?;
         self.red.to_css(dest)?;
         dest.write_str(", ")?;
         self.green.to_css(dest)?;
         dest.write_str(", ")?;
@@ -132,17 +140,20 @@ pub enum Color {
     /// Everything else gets converted to RGBA during parsing
     RGBA(RGBA),
 }
 
 #[cfg(feature = "heapsize")]
 known_heap_size!(0, Color);
 
 impl ToCss for Color {
-    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
+    fn to_css<W>(&self, dest: &mut W) -> fmt::Result
+    where
+        W: fmt::Write,
+    {
         match *self {
             Color::CurrentColor => dest.write_str("currentcolor"),
             Color::RGBA(ref rgba) => rgba.to_css(dest),
         }
     }
 }
 
 /// Either a number or a percentage.
@@ -205,28 +216,30 @@ pub trait ColorComponentParser<'i> {
     /// Returns the result in degrees.
     fn parse_angle_or_number<'t>(
         &self,
         input: &mut Parser<'i, 't>,
     ) -> Result<AngleOrNumber, ParseError<'i, Self::Error>> {
         let location = input.current_source_location();
         Ok(match *input.next()? {
             Token::Number { value, .. } => AngleOrNumber::Number { value },
-            Token::Dimension { value: v, ref unit, .. } => {
+            Token::Dimension {
+                value: v, ref unit, ..
+            } => {
                 let degrees = match_ignore_ascii_case! { &*unit,
                     "deg" => v,
                     "grad" => v * 360. / 400.,
                     "rad" => v * 360. / (2. * PI),
                     "turn" => v * 360.,
                     _ => return Err(location.new_unexpected_token_error(Token::Ident(unit.clone()))),
                 };
 
                 AngleOrNumber::Angle { degrees }
             }
-            ref t => return Err(location.new_unexpected_token_error(t.clone()))
+            ref t => return Err(location.new_unexpected_token_error(t.clone())),
         })
     }
 
     /// Parse a `<percentage>` value.
     ///
     /// Returns the result in a number from 0.0 to 1.0.
     fn parse_percentage<'t>(
         &self,
@@ -247,17 +260,17 @@ pub trait ColorComponentParser<'i> {
     fn parse_number_or_percentage<'t>(
         &self,
         input: &mut Parser<'i, 't>,
     ) -> Result<NumberOrPercentage, ParseError<'i, Self::Error>> {
         let location = input.current_source_location();
         Ok(match *input.next()? {
             Token::Number { value, .. } => NumberOrPercentage::Number { value },
             Token::Percentage { unit_value, .. } => NumberOrPercentage::Percentage { unit_value },
-            ref t => return Err(location.new_unexpected_token_error(t.clone()))
+            ref t => return Err(location.new_unexpected_token_error(t.clone())),
         })
     }
 }
 
 struct DefaultComponentParser;
 impl<'i> ColorComponentParser<'i> for DefaultComponentParser {
     type Error = ();
 }
@@ -274,93 +287,91 @@ impl Color {
         ComponentParser: ColorComponentParser<'i>,
     {
         // FIXME: remove clone() when lifetimes are non-lexical
         let location = input.current_source_location();
         let token = input.next()?.clone();
         match token {
             Token::Hash(ref value) | Token::IDHash(ref value) => {
                 Color::parse_hash(value.as_bytes())
-            },
+            }
             Token::Ident(ref value) => parse_color_keyword(&*value),
             Token::Function(ref name) => {
                 return input.parse_nested_block(|arguments| {
                     parse_color_function(component_parser, &*name, arguments)
                 })
             }
-            _ => Err(())
-        }.map_err(|()| location.new_unexpected_token_error(token))
+            _ => Err(()),
+        }
+        .map_err(|()| location.new_unexpected_token_error(token))
     }
 
     /// Parse a <color> value, per CSS Color Module Level 3.
-    pub fn parse<'i, 't>(
-        input: &mut Parser<'i, 't>,
-    ) -> Result<Color, BasicParseError<'i>> {
+    pub fn parse<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Color, BasicParseError<'i>> {
         let component_parser = DefaultComponentParser;
         Self::parse_with(&component_parser, input).map_err(ParseError::basic)
     }
 
     /// Parse a color hash, without the leading '#' character.
     #[inline]
     pub fn parse_hash(value: &[u8]) -> Result<Self, ()> {
         match value.len() {
             8 => Ok(rgba(
                 from_hex(value[0])? * 16 + from_hex(value[1])?,
                 from_hex(value[2])? * 16 + from_hex(value[3])?,
                 from_hex(value[4])? * 16 + from_hex(value[5])?,
-                from_hex(value[6])? * 16 + from_hex(value[7])?),
-            ),
+                from_hex(value[6])? * 16 + from_hex(value[7])?,
+            )),
             6 => Ok(rgb(
                 from_hex(value[0])? * 16 + from_hex(value[1])?,
                 from_hex(value[2])? * 16 + from_hex(value[3])?,
-                from_hex(value[4])? * 16 + from_hex(value[5])?),
-            ),
+                from_hex(value[4])? * 16 + from_hex(value[5])?,
+            )),
             4 => Ok(rgba(
                 from_hex(value[0])? * 17,
                 from_hex(value[1])? * 17,
                 from_hex(value[2])? * 17,
-                from_hex(value[3])? * 17),
-            ),
+                from_hex(value[3])? * 17,
+            )),
             3 => Ok(rgb(
                 from_hex(value[0])? * 17,
                 from_hex(value[1])? * 17,
-                from_hex(value[2])? * 17),
-            ),
-            _ => Err(())
+                from_hex(value[2])? * 17,
+            )),
+            _ => Err(()),
         }
     }
 }
 
 #[inline]
 fn rgb(red: u8, green: u8, blue: u8) -> Color {
     rgba(red, green, blue, 255)
 }
 
 #[inline]
 fn rgba(red: u8, green: u8, blue: u8, alpha: u8) -> Color {
     Color::RGBA(RGBA::new(red, green, blue, alpha))
 }
 
-
 /// Return the named color with the given name.
 ///
 /// Matching is case-insensitive in the ASCII range.
 /// CSS escaping (if relevant) should be resolved before calling this function.
 /// (For example, the value of an `Ident` token is fine.)
 #[inline]
 pub fn parse_color_keyword(ident: &str) -> Result<Color, ()> {
     macro_rules! rgb {
         ($red: expr, $green: expr, $blue: expr) => {
             Color::RGBA(RGBA {
                 red: $red,
                 green: $green,
                 blue: $blue,
                 alpha: 255,
             })
-        }
+        };
     }
     ascii_case_insensitive_phf_map! {
         keyword -> Color = {
             "black" => rgb!(0, 0, 0),
             "silver" => rgb!(192, 192, 192),
             "gray" => rgb!(128, 128, 128),
             "white" => rgb!(255, 255, 255),
             "maroon" => rgb!(128, 0, 0),
@@ -511,24 +522,23 @@ pub fn parse_color_keyword(ident: &str) 
 
             "transparent" => Color::RGBA(RGBA { red: 0, green: 0, blue: 0, alpha: 0 }),
             "currentcolor" => Color::CurrentColor,
         }
     }
     keyword(ident).cloned().ok_or(())
 }
 
-
 #[inline]
 fn from_hex(c: u8) -> Result<u8, ()> {
     match c {
-        b'0' ... b'9' => Ok(c - b'0'),
-        b'a' ... b'f' => Ok(c - b'a' + 10),
-        b'A' ... b'F' => Ok(c - b'A' + 10),
-        _ => Err(())
+        b'0'...b'9' => Ok(c - b'0'),
+        b'a'...b'f' => Ok(c - b'a' + 10),
+        b'A'...b'F' => Ok(c - b'A' + 10),
+        _ => Err(()),
     }
 }
 
 fn clamp_unit_f32(val: f32) -> u8 {
     // Whilst scaling by 256 and flooring would provide
     // an equal distribution of integers to percentage inputs,
     // this is not what Gecko does so we instead multiply by 255
     // and round (adding 0.5 and flooring is equivalent to rounding)
@@ -548,63 +558,62 @@ fn clamp_unit_f32(val: f32) -> u8 {
 fn clamp_floor_256_f32(val: f32) -> u8 {
     val.round().max(0.).min(255.) as u8
 }
 
 #[inline]
 fn parse_color_function<'i, 't, ComponentParser>(
     component_parser: &ComponentParser,
     name: &str,
-    arguments: &mut Parser<'i, 't>
+    arguments: &mut Parser<'i, 't>,
 ) -> Result<Color, ParseError<'i, ComponentParser::Error>>
 where
     ComponentParser: ColorComponentParser<'i>,
 {
     let (red, green, blue, uses_commas) = match_ignore_ascii_case! { name,
         "rgb" | "rgba" => parse_rgb_components_rgb(component_parser, arguments)?,
         "hsl" | "hsla" => parse_rgb_components_hsl(component_parser, arguments)?,
         _ => return Err(arguments.new_unexpected_token_error(Token::Ident(name.to_owned().into()))),
     };
 
     let alpha = if !arguments.is_exhausted() {
         if uses_commas {
             arguments.expect_comma()?;
         } else {
             arguments.expect_delim('/')?;
         };
-        clamp_unit_f32(component_parser.parse_number_or_percentage(arguments)?.unit_value())
+        clamp_unit_f32(
+            component_parser
+                .parse_number_or_percentage(arguments)?
+                .unit_value(),
+        )
     } else {
         255
     };
 
     arguments.expect_exhausted()?;
     Ok(rgba(red, green, blue, alpha))
 }
 
-
 #[inline]
 fn parse_rgb_components_rgb<'i, 't, ComponentParser>(
     component_parser: &ComponentParser,
-    arguments: &mut Parser<'i, 't>
+    arguments: &mut Parser<'i, 't>,
 ) -> Result<(u8, u8, u8, bool), ParseError<'i, ComponentParser::Error>>
 where
     ComponentParser: ColorComponentParser<'i>,
 {
     // Either integers or percentages, but all the same type.
     // https://drafts.csswg.org/css-color/#rgb-functions
     let (red, is_number) = match component_parser.parse_number_or_percentage(arguments)? {
-        NumberOrPercentage::Number { value } => {
-            (clamp_floor_256_f32(value), true)
-        }
-        NumberOrPercentage::Percentage { unit_value } => {
-            (clamp_unit_f32(unit_value), false)
-        }
+        NumberOrPercentage::Number { value } => (clamp_floor_256_f32(value), true),
+        NumberOrPercentage::Percentage { unit_value } => (clamp_unit_f32(unit_value), false),
     };
 
-    let uses_commas = arguments.try(|i| i.expect_comma()).is_ok();
+    let uses_commas = arguments.try_parse(|i| i.expect_comma()).is_ok();
 
     let green;
     let blue;
     if is_number {
         green = clamp_floor_256_f32(component_parser.parse_number(arguments)?);
         if uses_commas {
             arguments.expect_comma()?;
         }
@@ -618,55 +627,67 @@ where
     }
 
     Ok((red, green, blue, uses_commas))
 }
 
 #[inline]
 fn parse_rgb_components_hsl<'i, 't, ComponentParser>(
     component_parser: &ComponentParser,
-    arguments: &mut Parser<'i, 't>
+    arguments: &mut Parser<'i, 't>,
 ) -> Result<(u8, u8, u8, bool), ParseError<'i, ComponentParser::Error>>
 where
     ComponentParser: ColorComponentParser<'i>,
 {
     // Hue given as an angle
     // https://drafts.csswg.org/css-values/#angles
     let hue_degrees = component_parser.parse_angle_or_number(arguments)?.degrees();
 
     // Subtract an integer before rounding, to avoid some rounding errors:
     let hue_normalized_degrees = hue_degrees - 360. * (hue_degrees / 360.).floor();
     let hue = hue_normalized_degrees / 360.;
 
     // Saturation and lightness are clamped to 0% ... 100%
     // https://drafts.csswg.org/css-color/#the-hsl-notation
-    let uses_commas = arguments.try(|i| i.expect_comma()).is_ok();
+    let uses_commas = arguments.try_parse(|i| i.expect_comma()).is_ok();
 
     let saturation = component_parser.parse_percentage(arguments)?;
     let saturation = saturation.max(0.).min(1.);
 
     if uses_commas {
         arguments.expect_comma()?;
     }
 
     let lightness = component_parser.parse_percentage(arguments)?;
     let lightness = lightness.max(0.).min(1.);
 
     // https://drafts.csswg.org/css-color/#hsl-color
     // except with h pre-multiplied by 3, to avoid some rounding errors.
     fn hue_to_rgb(m1: f32, m2: f32, mut h3: f32) -> f32 {
-        if h3 < 0. { h3 += 3. }
-        if h3 > 3. { h3 -= 3. }
+        if h3 < 0. {
+            h3 += 3.
+        }
+        if h3 > 3. {
+            h3 -= 3.
+        }
 
-        if h3 * 2. < 1. { m1 + (m2 - m1) * h3 * 2. }
-        else if h3 * 2. < 3. { m2 }
-        else if h3 < 2. { m1 + (m2 - m1) * (2. - h3) * 2. }
-        else { m1 }
+        if h3 * 2. < 1. {
+            m1 + (m2 - m1) * h3 * 2.
+        } else if h3 * 2. < 3. {
+            m2
+        } else if h3 < 2. {
+            m1 + (m2 - m1) * (2. - h3) * 2.
+        } else {
+            m1
+        }
     }
-    let m2 = if lightness <= 0.5 { lightness * (saturation + 1.) }
-             else { lightness + saturation - lightness * saturation };
+    let m2 = if lightness <= 0.5 {
+        lightness * (saturation + 1.)
+    } else {
+        lightness + saturation - lightness * saturation
+    };
     let m1 = lightness * 2. - m2;
     let hue_times_3 = hue * 3.;
     let red = clamp_unit_f32(hue_to_rgb(m1, m2, hue_times_3 + 1.));
     let green = clamp_unit_f32(hue_to_rgb(m1, m2, hue_times_3));
     let blue = clamp_unit_f32(hue_to_rgb(m1, m2, hue_times_3 - 1.));
     return Ok((red, green, blue, uses_commas));
 }
--- a/third_party/rust/cssparser/src/cow_rc_str.rs
+++ b/third_party/rust/cssparser/src/cow_rc_str.rs
@@ -98,49 +98,41 @@ impl<'a> CowRcStr<'a> {
     }
 }
 
 impl<'a> Clone for CowRcStr<'a> {
     #[inline]
     fn clone(&self) -> Self {
         match self.unpack() {
             Err(ptr) => {
-                let rc = unsafe {
-                    Rc::from_raw(ptr)
-                };
+                let rc = unsafe { Rc::from_raw(ptr) };
                 let new_rc = rc.clone();
-                mem::forget(rc);  // Don’t actually take ownership of this strong reference
+                mem::forget(rc); // Don’t actually take ownership of this strong reference
                 CowRcStr::from_rc(new_rc)
             }
-            Ok(_) => {
-                CowRcStr { ..*self }
-            }
+            Ok(_) => CowRcStr { ..*self },
         }
     }
 }
 
 impl<'a> Drop for CowRcStr<'a> {
     #[inline]
     fn drop(&mut self) {
         if let Err(ptr) = self.unpack() {
-            mem::drop(unsafe {
-                Rc::from_raw(ptr)
-            })
+            mem::drop(unsafe { Rc::from_raw(ptr) })
         }
     }
 }
 
 impl<'a> Deref for CowRcStr<'a> {
     type Target = str;
 
     #[inline]
     fn deref(&self) -> &str {
-        self.unpack().unwrap_or_else(|ptr| unsafe {
-            &**ptr
-        })
+        self.unpack().unwrap_or_else(|ptr| unsafe { &**ptr })
     }
 }
 
 // Boilerplate / trivial impls below.
 
 impl<'a> AsRef<str> for CowRcStr<'a> {
     #[inline]
     fn as_ref(&self) -> &str {
--- a/third_party/rust/cssparser/src/from_bytes.rs
+++ b/third_party/rust/cssparser/src/from_bytes.rs
@@ -12,53 +12,53 @@ pub trait EncodingSupport {
 
     /// Return the UTF-8 encoding
     fn utf8() -> Self::Encoding;
 
     /// Whether the given encoding is UTF-16BE or UTF-16LE
     fn is_utf16_be_or_le(encoding: &Self::Encoding) -> bool;
 }
 
-
 /// Determine the character encoding of a CSS stylesheet.
 ///
 /// This is based on the presence of a BOM (Byte Order Mark), an `@charset` rule, and
 /// encoding meta-information.
 ///
 /// * `css_bytes`: A byte string.
 /// * `protocol_encoding`: The encoding label, if any, defined by HTTP or equivalent protocol.
 ///     (e.g. via the `charset` parameter of the `Content-Type` header.)
 /// * `environment_encoding`: An optional `Encoding` object for the [environment encoding]
 ///     (https://drafts.csswg.org/css-syntax/#environment-encoding), if any.
 ///
 /// Returns the encoding to use.
-pub fn stylesheet_encoding<E>(css: &[u8], protocol_encoding_label: Option<&[u8]>,
-                              environment_encoding: Option<E::Encoding>)
-                              -> E::Encoding
-                              where E: EncodingSupport {
+pub fn stylesheet_encoding<E>(
+    css: &[u8],
+    protocol_encoding_label: Option<&[u8]>,
+    environment_encoding: Option<E::Encoding>,
+) -> E::Encoding
+where
+    E: EncodingSupport,
+{
     // https://drafts.csswg.org/css-syntax/#the-input-byte-stream
-    match protocol_encoding_label {
-        None => (),
-        Some(label) => match E::from_label(label) {
-            None => (),
-            Some(protocol_encoding) => return protocol_encoding
-        }
-    }
+    if let Some(label) = protocol_encoding_label {
+        if let Some(protocol_encoding) = E::from_label(label) {
+            return protocol_encoding;
+        };
+    };
+
     let prefix = b"@charset \"";
     if css.starts_with(prefix) {
         let rest = &css[prefix.len()..];
-        match rest.iter().position(|&b| b == b'"') {
-            None => (),
-            Some(label_length) => if rest[label_length..].starts_with(b"\";") {
+        if let Some(label_length) = rest.iter().position(|&b| b == b'"') {
+            if rest[label_length..].starts_with(b"\";") {
                 let label = &rest[..label_length];
-                match E::from_label(label) {
-                    None => (),
-                    Some(charset_encoding) => if E::is_utf16_be_or_le(&charset_encoding) {
-                        return E::utf8()
+                if let Some(charset_encoding) = E::from_label(label) {
+                    if E::is_utf16_be_or_le(&charset_encoding) {
+                        return E::utf8();
                     } else {
-                        return charset_encoding
+                        return charset_encoding;
                     }
                 }
             }
         }
     }
     environment_encoding.unwrap_or_else(E::utf8)
 }
--- a/third_party/rust/cssparser/src/lib.rs
+++ b/third_party/rust/cssparser/src/lib.rs
@@ -1,15 +1,14 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #![crate_name = "cssparser"]
 #![crate_type = "rlib"]
-
 #![cfg_attr(feature = "bench", feature(test))]
 #![deny(missing_docs)]
 
 /*!
 
 Implementation of [CSS Syntax Module Level 3](https://drafts.csswg.org/css-syntax/) for Rust.
 
 # Input
@@ -27,101 +26,121 @@ which can be used together with rust-enc
 * When returning `Ok(_)`,
   the function must have consumed exactly the amount of input that represents the parsed value.
 * When returning `Err(())`, any amount of input may have been consumed.
 
 As a consequence, when calling another parsing function, either:
 
 * Any `Err(())` return value must be propagated.
   This happens by definition for tail calls,
-  and can otherwise be done with the `try!` macro.
+  and can otherwise be done with the `?` operator.
 * Or the call must be wrapped in a `Parser::try` call.
   `try` takes a closure that takes a `Parser` and returns a `Result`,
   calls it once,
   and returns itself that same result.
   If the result is `Err`,
   it restores the position inside the input to the one saved before calling the closure.
 
 Examples:
 
 ```{rust,ignore}
 // 'none' | <image>
 fn parse_background_image(context: &ParserContext, input: &mut Parser)
                                     -> Result<Option<Image>, ()> {
-    if input.try(|input| input.expect_ident_matching("none")).is_ok() {
+    if input.try_parse(|input| input.expect_ident_matching("none")).is_ok() {
         Ok(None)
     } else {
         Image::parse(context, input).map(Some)  // tail call
     }
 }
 ```
 
 ```{rust,ignore}
 // [ <length> | <percentage> ] [ <length> | <percentage> ]?
 fn parse_border_spacing(_context: &ParserContext, input: &mut Parser)
                           -> Result<(LengthOrPercentage, LengthOrPercentage), ()> {
-    let first = try!(LengthOrPercentage::parse);
-    let second = input.try(LengthOrPercentage::parse).unwrap_or(first);
+    let first = LengthOrPercentage::parse?;
+    let second = input.try_parse(LengthOrPercentage::parse).unwrap_or(first);
     (first, second)
 }
 ```
 
 */
 
-#![recursion_limit="200"]  // For color::parse_color_keyword
+#![recursion_limit = "200"] // For color::parse_color_keyword
 
 extern crate dtoa_short;
 extern crate itoa;
-#[macro_use] extern crate cssparser_macros;
-#[macro_use] extern crate matches;
-#[macro_use] extern crate procedural_masquerade;
-#[doc(hidden)] pub extern crate phf as _internal__phf;
-#[cfg(test)] extern crate encoding_rs;
-#[cfg(test)] extern crate difference;
-#[cfg(test)] extern crate rustc_serialize;
-#[cfg(feature = "serde")] extern crate serde;
-#[cfg(feature = "heapsize")] #[macro_use] extern crate heapsize;
+#[macro_use]
+extern crate cssparser_macros;
+#[macro_use]
+extern crate matches;
+#[macro_use]
+extern crate procedural_masquerade;
+#[cfg(test)]
+extern crate difference;
+#[cfg(test)]
+extern crate encoding_rs;
+#[doc(hidden)]
+pub extern crate phf as _internal__phf;
+#[cfg(test)]
+extern crate rustc_serialize;
+#[cfg(feature = "serde")]
+extern crate serde;
+#[cfg(feature = "heapsize")]
+#[macro_use]
+extern crate heapsize;
 extern crate smallvec;
 
 pub use cssparser_macros::*;
 
-pub use tokenizer::{Token, SourcePosition, SourceLocation};
-pub use rules_and_declarations::{parse_important};
-pub use rules_and_declarations::{DeclarationParser, DeclarationListParser, parse_one_declaration};
-pub use rules_and_declarations::{RuleListParser, parse_one_rule};
-pub use rules_and_declarations::{AtRuleType, QualifiedRuleParser, AtRuleParser};
+pub use color::{
+    parse_color_keyword, AngleOrNumber, Color, ColorComponentParser, NumberOrPercentage, RGBA,
+};
+pub use cow_rc_str::CowRcStr;
 pub use from_bytes::{stylesheet_encoding, EncodingSupport};
-pub use color::{RGBA, Color, parse_color_keyword, AngleOrNumber, NumberOrPercentage, ColorComponentParser};
 pub use nth::parse_nth;
-pub use serializer::{ToCss, CssStringWriter, serialize_identifier, serialize_name, serialize_string, TokenSerializationType};
-pub use parser::{Parser, Delimiter, Delimiters, ParserState, ParserInput};
-pub use parser::{ParseError, ParseErrorKind, BasicParseError, BasicParseErrorKind};
+pub use parser::{BasicParseError, BasicParseErrorKind, ParseError, ParseErrorKind};
+pub use parser::{Delimiter, Delimiters, Parser, ParserInput, ParserState};
+pub use rules_and_declarations::parse_important;
+pub use rules_and_declarations::{parse_one_declaration, DeclarationListParser, DeclarationParser};
+pub use rules_and_declarations::{parse_one_rule, RuleListParser};
+pub use rules_and_declarations::{AtRuleParser, AtRuleType, QualifiedRuleParser};
+pub use serializer::{
+    serialize_identifier, serialize_name, serialize_string, CssStringWriter, ToCss,
+    TokenSerializationType,
+};
+pub use tokenizer::{SourceLocation, SourcePosition, Token};
 pub use unicode_range::UnicodeRange;
-pub use cow_rc_str::CowRcStr;
 
 // For macros
-#[doc(hidden)] pub use macros::_internal__to_lowercase;
+#[doc(hidden)]
+pub use macros::_internal__to_lowercase;
 
 // For macros when used in this crate. Unsure how $crate works with procedural-masquerade.
-mod cssparser { pub use _internal__phf; }
+mod cssparser {
+    pub use _internal__phf;
+}
 
 #[macro_use]
 mod macros;
 
 mod rules_and_declarations;
 
 #[cfg(feature = "dummy_match_byte")]
 mod tokenizer;
 
 #[cfg(not(feature = "dummy_match_byte"))]
 mod tokenizer {
     include!(concat!(env!("OUT_DIR"), "/tokenizer.rs"));
 }
-mod parser;
+mod color;
+mod cow_rc_str;
 mod from_bytes;
-mod color;
 mod nth;
+mod parser;
 mod serializer;
 mod unicode_range;
-mod cow_rc_str;
 
-#[cfg(test)] mod tests;
-#[cfg(test)] mod size_of_tests;
+#[cfg(test)]
+mod size_of_tests;
+#[cfg(test)]
+mod tests;
--- a/third_party/rust/cssparser/src/macros.rs
+++ b/third_party/rust/cssparser/src/macros.rs
@@ -1,13 +1,13 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-/// See docs of the `procedural-masquerade` crate.
+// See docs of the `procedural-masquerade` crate.
 define_invoke_proc_macro!(cssparser_internal__invoke_proc_macro);
 
 /// Expands to a `match` expression with string patterns,
 /// matching case-insensitively in the ASCII range.
 ///
 /// The patterns must not contain ASCII upper case letters. (They must be already be lower-cased.)
 ///
 /// # Example
@@ -109,42 +109,38 @@ macro_rules! ascii_case_insensitive_phf_
 #[macro_export]
 #[doc(hidden)]
 macro_rules! cssparser_internal__to_lowercase {
     ($input: expr, $BUFFER_SIZE: expr => $output: ident) => {
         // mem::uninitialized() is ok because `buffer` is only used in `_internal__to_lowercase`,
         // which initializes with `copy_from_slice` the part of the buffer it uses,
         // before it uses it.
         #[allow(unsafe_code)]
-        let mut buffer: [u8; $BUFFER_SIZE] = unsafe {
-            ::std::mem::uninitialized()
-        };
+        let mut buffer: [u8; $BUFFER_SIZE] = unsafe { ::std::mem::uninitialized() };
         let input: &str = $input;
         let $output = $crate::_internal__to_lowercase(&mut buffer, input);
-    }
+    };
 }
 
 /// Implementation detail of match_ignore_ascii_case! and ascii_case_insensitive_phf_map! macros.
 ///
 /// **This function is not part of the public API. It can change or be removed between any verisons.**
 ///
 /// If `input` is larger than buffer, return `None`.
 /// Otherwise, return `input` ASCII-lowercased, using `buffer` as temporary space if necessary.
 #[doc(hidden)]
 #[allow(non_snake_case)]
 pub fn _internal__to_lowercase<'a>(buffer: &'a mut [u8], input: &'a str) -> Option<&'a str> {
     if let Some(buffer) = buffer.get_mut(..input.len()) {
         if let Some(first_uppercase) = input.bytes().position(|byte| matches!(byte, b'A'...b'Z')) {
             buffer.copy_from_slice(input.as_bytes());
-            ::std::ascii::AsciiExt::make_ascii_lowercase(&mut buffer[first_uppercase..]);
+            buffer[first_uppercase..].make_ascii_lowercase();
             // `buffer` was initialized to a copy of `input` (which is &str so well-formed UTF-8)
             // then lowercased (which preserves UTF-8 well-formedness)
-            unsafe {
-                Some(::std::str::from_utf8_unchecked(buffer))
-            }
+            unsafe { Some(::std::str::from_utf8_unchecked(buffer)) }
         } else {
             // Input is already lower-case
             Some(input)
         }
     } else {
         // Input is longer than buffer, which has the length of the longest expected string:
         // none of the expected strings would match.
         None
--- a/third_party/rust/cssparser/src/nth.rs
+++ b/third_party/rust/cssparser/src/nth.rs
@@ -1,46 +1,47 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#[allow(unused_imports)] use std::ascii::AsciiExt;
-
-use super::{Token, Parser, ParserInput, BasicParseError};
-
+use super::{BasicParseError, Parser, ParserInput, Token};
 
 /// Parse the *An+B* notation, as found in the `:nth-child()` selector.
 /// The input is typically the arguments of a function,
 /// in which case the caller needs to check if the arguments’ parser is exhausted.
 /// Return `Ok((A, B))`, or `Err(())` for a syntax error.
 pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), BasicParseError<'i>> {
     // FIXME: remove .clone() when lifetimes are non-lexical.
     match input.next()?.clone() {
-        Token::Number { int_value: Some(b), .. } => {
-            Ok((0, b))
-        }
-        Token::Dimension { int_value: Some(a), unit, .. } => {
+        Token::Number {
+            int_value: Some(b), ..
+        } => Ok((0, b)),
+        Token::Dimension {
+            int_value: Some(a),
+            unit,
+            ..
+        } => {
             match_ignore_ascii_case! {
                 &unit,
-                "n" => Ok(try!(parse_b(input, a))),
-                "n-" => Ok(try!(parse_signless_b(input, a, -1))),
+                "n" => Ok(parse_b(input, a)?),
+                "n-" => Ok(parse_signless_b(input, a, -1)?),
                 _ => match parse_n_dash_digits(&*unit) {
                     Ok(b) => Ok((a, b)),
                     Err(()) => Err(input.new_basic_unexpected_token_error(Token::Ident(unit.clone())))
                 }
             }
         }
         Token::Ident(value) => {
             match_ignore_ascii_case! { &value,
                 "even" => Ok((2, 0)),
                 "odd" => Ok((2, 1)),
-                "n" => Ok(try!(parse_b(input, 1))),
-                "-n" => Ok(try!(parse_b(input, -1))),
-                "n-" => Ok(try!(parse_signless_b(input, 1, -1))),
-                "-n-" => Ok(try!(parse_signless_b(input, -1, -1))),
+                "n" => Ok(parse_b(input, 1)?),
+                "-n" => Ok(parse_b(input, -1)?),
+                "n-" => Ok(parse_signless_b(input, 1, -1)?),
+                "-n-" => Ok(parse_signless_b(input, -1, -1)?),
                 _ => {
                     let (slice, a) = if value.starts_with("-") {
                         (&value[1..], -1)
                     } else {
                         (&*value, 1)
                     };
                     match parse_n_dash_digits(slice) {
                         Ok(b) => Ok((a, b)),
@@ -62,56 +63,70 @@ pub fn parse_nth<'i, 't>(input: &mut Par
                 }
             }
             token => Err(input.new_basic_unexpected_token_error(token)),
         },
         token => Err(input.new_basic_unexpected_token_error(token)),
     }
 }
 
-
 fn parse_b<'i, 't>(input: &mut Parser<'i, 't>, a: i32) -> Result<(i32, i32), BasicParseError<'i>> {
     let start = input.state();
     match input.next() {
         Ok(&Token::Delim('+')) => parse_signless_b(input, a, 1),
         Ok(&Token::Delim('-')) => parse_signless_b(input, a, -1),
-        Ok(&Token::Number { has_sign: true, int_value: Some(b), .. }) => Ok((a, b)),
+        Ok(&Token::Number {
+            has_sign: true,
+            int_value: Some(b),
+            ..
+        }) => Ok((a, b)),
         _ => {
             input.reset(&start);
             Ok((a, 0))
         }
     }
 }
 
-fn parse_signless_b<'i, 't>(input: &mut Parser<'i, 't>, a: i32, b_sign: i32) -> Result<(i32, i32), BasicParseError<'i>> {
+fn parse_signless_b<'i, 't>(
+    input: &mut Parser<'i, 't>,
+    a: i32,
+    b_sign: i32,
+) -> Result<(i32, i32), BasicParseError<'i>> {
     // FIXME: remove .clone() when lifetimes are non-lexical.
     match input.next()?.clone() {
-        Token::Number { has_sign: false, int_value: Some(b), .. } => Ok((a, b_sign * b)),
-        token => Err(input.new_basic_unexpected_token_error(token))
+        Token::Number {
+            has_sign: false,
+            int_value: Some(b),
+            ..
+        } => Ok((a, b_sign * b)),
+        token => Err(input.new_basic_unexpected_token_error(token)),
     }
 }
 
 fn parse_n_dash_digits(string: &str) -> Result<i32, ()> {
     let bytes = string.as_bytes();
     if bytes.len() >= 3
-    && bytes[..2].eq_ignore_ascii_case(b"n-")
-    && bytes[2..].iter().all(|&c| matches!(c, b'0'...b'9'))
+        && bytes[..2].eq_ignore_ascii_case(b"n-")
+        && bytes[2..].iter().all(|&c| matches!(c, b'0'...b'9'))
     {
-        Ok(parse_number_saturate(&string[1..]).unwrap())  // Include the minus sign
+        Ok(parse_number_saturate(&string[1..]).unwrap()) // Include the minus sign
     } else {
         Err(())
     }
 }
 
 fn parse_number_saturate(string: &str) -> Result<i32, ()> {
     let mut input = ParserInput::new(string);
     let mut parser = Parser::new(&mut input);
-    let int = if let Ok(&Token::Number {int_value: Some(int), ..})
-                = parser.next_including_whitespace_and_comments() {
+    let int = if let Ok(&Token::Number {
+        int_value: Some(int),
+        ..
+    }) = parser.next_including_whitespace_and_comments()
+    {
         int
     } else {
-        return Err(())
+        return Err(());
     };
     if !parser.is_exhausted() {
-        return Err(())
+        return Err(());
     }
     Ok(int)
 }
--- a/third_party/rust/cssparser/src/parser.rs
+++ b/third_party/rust/cssparser/src/parser.rs
@@ -1,19 +1,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 use cow_rc_str::CowRcStr;
 use smallvec::SmallVec;
+use std::ops::BitOr;
 use std::ops::Range;
-#[allow(unused_imports)] use std::ascii::AsciiExt;
-use std::ops::BitOr;
-use tokenizer::{Token, Tokenizer, SourcePosition, SourceLocation};
-
+use tokenizer::{SourceLocation, SourcePosition, Token, Tokenizer};
 
 /// A capture of the internal state of a `Parser` (including the position within the input),
 /// obtained from the `Parser::position` method.
 ///
 /// Can be used with the `Parser::reset` method to restore that state.
 /// Should only be used with the `Parser` instance it came from.
 #[derive(Debug, Clone)]
 pub struct ParserState {
@@ -109,17 +107,20 @@ pub enum ParseErrorKind<'i, T: 'i> {
     /// A fundamental parse error from a built-in parsing routine.
     Basic(BasicParseErrorKind<'i>),
     /// A parse error reported by downstream consumer code.
     Custom(T),
 }
 
 impl<'i, T> ParseErrorKind<'i, T> {
     /// Like `std::convert::Into::into`
-    pub fn into<U>(self) -> ParseErrorKind<'i, U> where T: Into<U> {
+    pub fn into<U>(self) -> ParseErrorKind<'i, U>
+    where
+        T: Into<U>,
+    {
         match self {
             ParseErrorKind::Basic(basic) => ParseErrorKind::Basic(basic),
             ParseErrorKind::Custom(custom) => ParseErrorKind::Custom(custom.into()),
         }
     }
 }
 
 /// Extensible parse errors that can be encountered by client parsing implementations.
@@ -139,17 +140,20 @@ impl<'i, T> ParseError<'i, T> {
                 kind: kind,
                 location: self.location,
             },
             ParseErrorKind::Custom(_) => panic!("Not a basic parse error"),
         }
     }
 
     /// Like `std::convert::Into::into`
-    pub fn into<U>(self) -> ParseError<'i, U> where T: Into<U> {
+    pub fn into<U>(self) -> ParseError<'i, U>
+    where
+        T: Into<U>,
+    {
         ParseError {
             kind: self.kind.into(),
             location: self.location,
         }
     }
 }
 
 /// The owned input for a parser.
@@ -194,47 +198,43 @@ impl<'i> ParserInput<'i> {
 pub struct Parser<'i: 't, 't> {
     input: &'t mut ParserInput<'i>,
     /// If `Some(_)`, .parse_nested_block() can be called.
     at_start_of: Option<BlockType>,
     /// For parsers from `parse_until` or `parse_nested_block`
     stop_before: Delimiters,
 }
 
-
 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
 pub(crate) enum BlockType {
     Parenthesis,
     SquareBracket,
     CurlyBracket,
 }
 
-
 impl BlockType {
     fn opening(token: &Token) -> Option<BlockType> {
         match *token {
-            Token::Function(_) |
-            Token::ParenthesisBlock => Some(BlockType::Parenthesis),
+            Token::Function(_) | Token::ParenthesisBlock => Some(BlockType::Parenthesis),
             Token::SquareBracketBlock => Some(BlockType::SquareBracket),
             Token::CurlyBracketBlock => Some(BlockType::CurlyBracket),
-            _ => None
+            _ => None,
         }
     }
 
     fn closing(token: &Token) -> Option<BlockType> {
         match *token {
             Token::CloseParenthesis => Some(BlockType::Parenthesis),
             Token::CloseSquareBracket => Some(BlockType::SquareBracket),
             Token::CloseCurlyBracket => Some(BlockType::CurlyBracket),
-            _ => None
+            _ => None,
         }
     }
 }
 
-
 /// A set of characters, to be used with the `Parser::parse_until*` methods.
 ///
 /// The union of two sets can be obtained with the `|` operator. Example:
 ///
 /// ```{rust,ignore}
 /// input.parse_until_before(Delimiter::CurlyBracketBlock | Delimiter::Semicolon)
 /// ```
 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
@@ -268,17 +268,19 @@ mod ClosingDelimiter {
     pub const CloseParenthesis: Delimiters = Delimiters { bits: 1 << 7 };
 }
 
 impl BitOr<Delimiters> for Delimiters {
     type Output = Delimiters;
 
     #[inline]
     fn bitor(self, other: Delimiters) -> Delimiters {
-        Delimiters { bits: self.bits | other.bits }
+        Delimiters {
+            bits: self.bits | other.bits,
+        }
     }
 }
 
 impl Delimiters {
     #[inline]
     fn contains(self, other: Delimiters) -> bool {
         (self.bits & other.bits) != 0
     }
@@ -333,26 +335,31 @@ impl<'i: 't, 't> Parser<'i, 't> {
     ///
     /// This ignores whitespace and comments.
     #[inline]
     pub fn is_exhausted(&mut self) -> bool {
         self.expect_exhausted().is_ok()
     }
 
     /// Check whether the input is exhausted. That is, if `.next()` would return a token.
-    /// Return a `Result` so that the `try!` macro can be used: `try!(input.expect_exhausted())`
+    /// Return a `Result` so that the `?` operator can be used: `input.expect_exhausted()?`
     ///
     /// This ignores whitespace and comments.
     #[inline]
     pub fn expect_exhausted(&mut self) -> Result<(), BasicParseError<'i>> {
         let start = self.state();
         let result = match self.next() {
-            Err(BasicParseError { kind: BasicParseErrorKind::EndOfInput, .. }) => Ok(()),
+            Err(BasicParseError {
+                kind: BasicParseErrorKind::EndOfInput,
+                ..
+            }) => Ok(()),
             Err(e) => unreachable!("Unexpected error encountered: {:?}", e),
-            Ok(t) => Err(start.source_location().new_basic_unexpected_token_error(t.clone())),
+            Ok(t) => Err(start
+                .source_location()
+                .new_basic_unexpected_token_error(t.clone())),
         };
         self.reset(&start);
         result
     }
 
     /// Return the current position within the input.
     ///
     /// This can be used with the `Parser::slice` and `slice_from` methods.
@@ -421,29 +428,29 @@ impl<'i: 't, 't> Parser<'i, 't> {
         self.new_error(BasicParseErrorKind::UnexpectedToken(token))
     }
 
     /// Create a new unexpected token or EOF ParseError at the current location
     #[inline]
     pub fn new_error_for_next_token<E>(&mut self) -> ParseError<'i, E> {
         let token = match self.next() {
             Ok(token) => token.clone(),
-            Err(e) => return e.into()
+            Err(e) => return e.into(),
         };
         self.new_error(BasicParseErrorKind::UnexpectedToken(token))
     }
 
     /// Return the current internal state of the parser (including position within the input).
     ///
     /// This state can later be restored with the `Parser::reset` method.
     #[inline]
     pub fn state(&self) -> ParserState {
         ParserState {
             at_start_of: self.at_start_of,
-            .. self.input.tokenizer.state()
+            ..self.input.tokenizer.state()
         }
     }
 
     /// Advance the input until the next token that’s not whitespace or a comment.
     #[inline]
     pub fn skip_whitespace(&mut self) {
         if let Some(block_type) = self.at_start_of.take() {
             consume_until_end_of_block(block_type, &mut self.input.tokenizer);
@@ -460,17 +467,17 @@ impl<'i: 't, 't> Parser<'i, 't> {
 
         self.input.tokenizer.skip_cdc_and_cdo()
     }
 
     #[inline]
     pub(crate) fn next_byte(&self) -> Option<u8> {
         let byte = self.input.tokenizer.next_byte();
         if self.stop_before.contains(Delimiters::from_byte(byte)) {
-            return None
+            return None;
         }
         byte
     }
 
     /// Restore the internal state of the parser (including position within the input)
     /// to what was previously saved by the `Parser::position` method.
     ///
     /// Should only be used with `SourcePosition` values from the same `Parser` instance.
@@ -490,23 +497,34 @@ impl<'i: 't, 't> Parser<'i, 't> {
     /// Return whether a `var()` or `env()` function has been seen by the
     /// tokenizer since either `look_for_var_or_env_functions` was called, and
     /// stop looking.
     #[inline]
     pub fn seen_var_or_env_functions(&mut self) -> bool {
         self.input.tokenizer.seen_var_or_env_functions()
     }
 
+    /// The old name of `try_parse`, which requires raw identifiers in the Rust 2018 edition.
+    #[inline]
+    pub fn try<F, T, E>(&mut self, thing: F) -> Result<T, E>
+    where
+        F: FnOnce(&mut Parser<'i, 't>) -> Result<T, E>,
+    {
+        self.try_parse(thing)
+    }
+
     /// Execute the given closure, passing it the parser.
     /// If the result (returned unchanged) is `Err`,
     /// the internal state of the parser  (including position within the input)
     /// is restored to what it was before the call.
     #[inline]
-    pub fn try<F, T, E>(&mut self, thing: F) -> Result<T, E>
-    where F: FnOnce(&mut Parser<'i, 't>) -> Result<T, E> {
+    pub fn try_parse<F, T, E>(&mut self, thing: F) -> Result<T, E>
+    where
+        F: FnOnce(&mut Parser<'i, 't>) -> Result<T, E>,
+    {
         let start = self.state();
         let result = thing(self);
         if result.is_err() {
             self.reset(&start)
         }
         result
     }
 
@@ -538,101 +556,116 @@ impl<'i: 't, 't> Parser<'i, 't> {
         self.next_including_whitespace_and_comments()
     }
 
     /// Same as `Parser::next`, but does not skip whitespace tokens.
     pub fn next_including_whitespace(&mut self) -> Result<&Token<'i>, BasicParseError<'i>> {
         loop {
             match self.next_including_whitespace_and_comments() {
                 Err(e) => return Err(e),
-                Ok(&Token::Comment(_)) => {},
-                _ => break
+                Ok(&Token::Comment(_)) => {}
+                _ => break,
             }
         }
         Ok(self.input.cached_token_ref())
     }
 
     /// Same as `Parser::next`, but does not skip whitespace or comment tokens.
     ///
     /// **Note**: This should only be used in contexts like a CSS pre-processor
     /// where comments are preserved.
     /// When parsing higher-level values, per the CSS Syntax specification,
     /// comments should always be ignored between tokens.
-    pub fn next_including_whitespace_and_comments(&mut self) -> Result<&Token<'i>, BasicParseError<'i>> {
+    pub fn next_including_whitespace_and_comments(
+        &mut self,
+    ) -> Result<&Token<'i>, BasicParseError<'i>> {
         if let Some(block_type) = self.at_start_of.take() {
             consume_until_end_of_block(block_type, &mut self.input.tokenizer);
         }
 
         let byte = self.input.tokenizer.next_byte();
         if self.stop_before.contains(Delimiters::from_byte(byte)) {
-            return Err(self.new_basic_error(BasicParseErrorKind::EndOfInput))
+            return Err(self.new_basic_error(BasicParseErrorKind::EndOfInput));
         }
 
         let token_start_position = self.input.tokenizer.position();
-        let token;
-        match self.input.cached_token {
-            Some(ref cached_token)
-            if cached_token.start_position == token_start_position => {
-                self.input.tokenizer.reset(&cached_token.end_state);
-                match cached_token.token {
-                    Token::Function(ref name) => self.input.tokenizer.see_function(name),
-                    _ => {}
-                }
-                token = &cached_token.token
+        let using_cached_token = self
+            .input
+            .cached_token
+            .as_ref()
+            .map_or(false, |cached_token| {
+                cached_token.start_position == token_start_position
+            });
+        let token = if using_cached_token {
+            let cached_token = self.input.cached_token.as_ref().unwrap();
+            self.input.tokenizer.reset(&cached_token.end_state);
+            match cached_token.token {
+                Token::Function(ref name) => self.input.tokenizer.see_function(name),
+                _ => {}
             }
-            _ => {
-                let new_token = self.input.tokenizer.next()
-                    .map_err(|()| self.new_basic_error(BasicParseErrorKind::EndOfInput))?;
-                self.input.cached_token = Some(CachedToken {
-                    token: new_token,
-                    start_position: token_start_position,
-                    end_state: self.input.tokenizer.state(),
-                });
-                token = self.input.cached_token_ref()
-            }
-        }
+            &cached_token.token
+        } else {
+            let new_token = self
+                .input
+                .tokenizer
+                .next()
+                .map_err(|()| self.new_basic_error(BasicParseErrorKind::EndOfInput))?;
+            self.input.cached_token = Some(CachedToken {
+                token: new_token,
+                start_position: token_start_position,
+                end_state: self.input.tokenizer.state(),
+            });
+            self.input.cached_token_ref()
+        };
 
         if let Some(block_type) = BlockType::opening(token) {
             self.at_start_of = Some(block_type);
         }
         Ok(token)
     }
 
     /// Have the given closure parse something, then check that the input is exhausted.
     /// The result is overridden to `Err(())` if some input remains.
     ///
     /// This can help tell e.g. `color: green;` from `color: green 4px;`
     #[inline]
     pub fn parse_entirely<F, T, E>(&mut self, parse: F) -> Result<T, ParseError<'i, E>>
-    where F: FnOnce(&mut Parser<'i, 't>) -> Result<T, ParseError<'i, E>> {
+    where
+        F: FnOnce(&mut Parser<'i, 't>) -> Result<T, ParseError<'i, E>>,
+    {
         let result = parse(self)?;
         self.expect_exhausted()?;
         Ok(result)
     }
 
     /// Parse a list of comma-separated values, all with the same syntax.
     ///
     /// The given closure is called repeatedly with a "delimited" parser
     /// (see the `Parser::parse_until_before` method)
     /// so that it can over consume the input past a comma at this block/function nesting level.
     ///
     /// Successful results are accumulated in a vector.
     ///
     /// This method returns `Err(())` the first time that a closure call does,
     /// or if a closure call leaves some input before the next comma or the end of the input.
     #[inline]
-    pub fn parse_comma_separated<F, T, E>(&mut self, mut parse_one: F) -> Result<Vec<T>, ParseError<'i, E>>
-    where F: for<'tt> FnMut(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>> {
+    pub fn parse_comma_separated<F, T, E>(
+        &mut self,
+        mut parse_one: F,
+    ) -> Result<Vec<T>, ParseError<'i, E>>
+    where
+        F: for<'tt> FnMut(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>>,
+    {
         // Vec grows from 0 to 4 by default on first push().  So allocate with
         // capacity 1, so in the somewhat common case of only one item we don't
         // way overallocate.  Note that we always push at least one item if
         // parsing succeeds.
         let mut values = Vec::with_capacity(1);
         loop {
-            self.skip_whitespace();  // Unnecessary for correctness, but may help try() in parse_one rewind less.
+            self.skip_whitespace(); // Unnecessary for correctness, but may help try() in parse_one rewind less.
             values.push(self.parse_until_before(Delimiter::Comma, &mut parse_one)?);
             match self.next() {
                 Err(_) => return Ok(values),
                 Ok(&Token::Comma) => continue,
                 Ok(_) => unreachable!(),
             }
         }
     }
@@ -644,55 +677,67 @@ impl<'i: 't, 't> Parser<'i, 't> {
     /// is not one that marks the start of a block or function:
     /// a `Function`, `ParenthesisBlock`, `CurlyBracketBlock`, or `SquareBracketBlock`.
     ///
     /// The given closure is called with a "delimited" parser
     /// that stops at the end of the block or function (at the matching closing token).
     ///
     /// The result is overridden to `Err(())` if the closure leaves some input before that point.
     #[inline]
-    pub fn parse_nested_block<F, T, E>(&mut self, parse: F) -> Result <T, ParseError<'i, E>>
-    where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>> {
+    pub fn parse_nested_block<F, T, E>(&mut self, parse: F) -> Result<T, ParseError<'i, E>>
+    where
+        F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>>,
+    {
         parse_nested_block(self, parse)
     }
 
     /// Limit parsing to until a given delimiter or the end of the input. (E.g.
     /// a semicolon for a property value.)
     ///
     /// The given closure is called with a "delimited" parser
     /// that stops before the first character at this block/function nesting level
     /// that matches the given set of delimiters, or at the end of the input.
     ///
     /// The result is overridden to `Err(())` if the closure leaves some input before that point.
     #[inline]
-    pub fn parse_until_before<F, T, E>(&mut self, delimiters: Delimiters, parse: F)
-                                    -> Result <T, ParseError<'i, E>>
-    where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>> {
+    pub fn parse_until_before<F, T, E>(
+        &mut self,
+        delimiters: Delimiters,
+        parse: F,
+    ) -> Result<T, ParseError<'i, E>>
+    where
+        F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>>,
+    {
         parse_until_before(self, delimiters, parse)
     }
 
     /// Like `parse_until_before`, but also consume the delimiter token.
     ///
     /// This can be useful when you don’t need to know which delimiter it was
     /// (e.g. if there is only one in the given set)
     /// or if it was there at all (as opposed to reaching the end of the input).
     #[inline]
-    pub fn parse_until_after<F, T, E>(&mut self, delimiters: Delimiters, parse: F)
-                                   -> Result <T, ParseError<'i, E>>
-    where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>> {
+    pub fn parse_until_after<F, T, E>(
+        &mut self,
+        delimiters: Delimiters,
+        parse: F,
+    ) -> Result<T, ParseError<'i, E>>
+    where
+        F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>>,
+    {
         parse_until_after(self, delimiters, parse)
     }
 
     /// Parse a <whitespace-token> and return its value.
     #[inline]
     pub fn expect_whitespace(&mut self) -> Result<&'i str, BasicParseError<'i>> {
         let start_location = self.current_source_location();
         match *self.next_including_whitespace()? {
             Token::WhiteSpace(value) => Ok(value),
-            ref t => Err(start_location.new_basic_unexpected_token_error(t.clone()))
+            ref t => Err(start_location.new_basic_unexpected_token_error(t.clone())),
         }
     }
 
     /// Parse a <ident-token> and return the unescaped value.
     #[inline]
     pub fn expect_ident(&mut self) -> Result<&CowRcStr<'i>, BasicParseError<'i>> {
         expect! {self,
             Token::Ident(ref value) => Ok(value),
@@ -702,17 +747,20 @@ impl<'i: 't, 't> Parser<'i, 't> {
     /// expect_ident, but clone the CowRcStr
     #[inline]
     pub fn expect_ident_cloned(&mut self) -> Result<CowRcStr<'i>, BasicParseError<'i>> {
         self.expect_ident().map(|s| s.clone())
     }
 
     /// Parse a <ident-token> whose unescaped value is an ASCII-insensitive match for the given value.
     #[inline]
-    pub fn expect_ident_matching(&mut self, expected_value: &str) -> Result<(), BasicParseError<'i>> {
+    pub fn expect_ident_matching(
+        &mut self,
+        expected_value: &str,
+    ) -> Result<(), BasicParseError<'i>> {
         expect! {self,
             Token::Ident(ref value) if value.eq_ignore_ascii_case(expected_value) => Ok(()),
         }
     }
 
     /// Parse a <string-token> and return the unescaped value.
     #[inline]
     pub fn expect_string(&mut self) -> Result<&CowRcStr<'i>, BasicParseError<'i>> {
@@ -739,31 +787,35 @@ impl<'i: 't, 't> Parser<'i, 't> {
     /// Parse a <url-token> and return the unescaped value.
     #[inline]
     pub fn expect_url(&mut self) -> Result<CowRcStr<'i>, BasicParseError<'i>> {
         // FIXME: revert early returns when lifetimes are non-lexical
         expect! {self,
             Token::UnquotedUrl(ref value) => return Ok(value.clone()),
             Token::Function(ref name) if name.eq_ignore_ascii_case("url") => {}
         }
-        self.parse_nested_block(|input| input.expect_string().map_err(Into::into).map(|s| s.clone()))
-            .map_err(ParseError::<()>::basic)
+        self.parse_nested_block(|input| {
+            input.expect_string().map_err(Into::into).map(|s| s.clone())
+        })
+        .map_err(ParseError::<()>::basic)
     }
 
     /// Parse either a <url-token> or a <string-token>, and return the unescaped value.
     #[inline]
     pub fn expect_url_or_string(&mut self) -> Result<CowRcStr<'i>, BasicParseError<'i>> {
         // FIXME: revert early returns when lifetimes are non-lexical
         expect! {self,
             Token::UnquotedUrl(ref value) => return Ok(value.clone()),
             Token::QuotedString(ref value) => return Ok(value.clone()),
             Token::Function(ref name) if name.eq_ignore_ascii_case("url") => {}
         }
-        self.parse_nested_block(|input| input.expect_string().map_err(Into::into).map(|s| s.clone()))
-            .map_err(ParseError::<()>::basic)
+        self.parse_nested_block(|input| {
+            input.expect_string().map_err(Into::into).map(|s| s.clone())
+        })
+        .map_err(ParseError::<()>::basic)
     }
 
     /// Parse a <number-token> and return the integer value.
     #[inline]
     pub fn expect_number(&mut self) -> Result<f32, BasicParseError<'i>> {
         expect! {self,
             Token::Number { value, .. } => Ok(value),
         }
@@ -857,58 +909,65 @@ impl<'i: 't, 't> Parser<'i, 't> {
             Token::Function(ref name) => Ok(name),
         }
     }
 
     /// Parse a <function> token whose name is an ASCII-insensitive match for the given value.
     ///
     /// If the result is `Ok`, you can then call the `Parser::parse_nested_block` method.
     #[inline]
-    pub fn expect_function_matching(&mut self, expected_name: &str) -> Result<(), BasicParseError<'i>> {
+    pub fn expect_function_matching(
+        &mut self,
+        expected_name: &str,
+    ) -> Result<(), BasicParseError<'i>> {
         expect! {self,
             Token::Function(ref name) if name.eq_ignore_ascii_case(expected_name) => Ok(()),
         }
     }
 
     /// Parse the input until exhaustion and check that it contains no “error” token.
     ///
     /// See `Token::is_parse_error`. This also checks nested blocks and functions recursively.
     #[inline]
     pub fn expect_no_error_token(&mut self) -> Result<(), BasicParseError<'i>> {
         // FIXME: remove break and intermediate variable when lifetimes are non-lexical
         let token;
         loop {
             match self.next_including_whitespace_and_comments() {
-                Ok(&Token::Function(_)) |
-                Ok(&Token::ParenthesisBlock) |
-                Ok(&Token::SquareBracketBlock) |
-                Ok(&Token::CurlyBracketBlock) => {}
+                Ok(&Token::Function(_))
+                | Ok(&Token::ParenthesisBlock)
+                | Ok(&Token::SquareBracketBlock)
+                | Ok(&Token::CurlyBracketBlock) => {}
                 Ok(t) => {
                     if t.is_parse_error() {
                         token = t.clone();
-                        break
+                        break;
                     }
-                    continue
+                    continue;
                 }
-                Err(_) => return Ok(())
+                Err(_) => return Ok(()),
             }
-            let result = self.parse_nested_block(|input| input.expect_no_error_token()
-                                                 .map_err(|e| Into::into(e)));
+            let result = self.parse_nested_block(|input| {
+                input.expect_no_error_token().map_err(|e| Into::into(e))
+            });
             result.map_err(ParseError::<()>::basic)?
         }
         // FIXME: maybe these should be separate variants of BasicParseError instead?
         Err(self.new_basic_unexpected_token_error(token))
     }
 }
 
-pub fn parse_until_before<'i: 't, 't, F, T, E>(parser: &mut Parser<'i, 't>,
-                                               delimiters: Delimiters,
-                                               parse: F)
-                                               -> Result <T, ParseError<'i, E>>
-    where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>> {
+pub fn parse_until_before<'i: 't, 't, F, T, E>(
+    parser: &mut Parser<'i, 't>,
+    delimiters: Delimiters,
+    parse: F,
+) -> Result<T, ParseError<'i, E>>
+where
+    F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>>,
+{
     let delimiters = parser.stop_before | delimiters;
     let result;
     // Introduce a new scope to limit duration of nested_parser’s borrow
     {
         let mut delimited_parser = Parser {
             input: parser.input,
             at_start_of: parser.at_start_of.take(),
             stop_before: delimiters,
@@ -916,55 +975,68 @@ pub fn parse_until_before<'i: 't, 't, F,
         result = delimited_parser.parse_entirely(parse);
         if let Some(block_type) = delimited_parser.at_start_of {
             consume_until_end_of_block(block_type, &mut delimited_parser.input.tokenizer);
         }
     }
     // FIXME: have a special-purpose tokenizer method for this that does less work.
     loop {
         if delimiters.contains(Delimiters::from_byte(parser.input.tokenizer.next_byte())) {
-            break
+            break;
         }
         if let Ok(token) = parser.input.tokenizer.next() {
             if let Some(block_type) = BlockType::opening(&token) {
                 consume_until_end_of_block(block_type, &mut parser.input.tokenizer);
             }
         } else {
-            break
+            break;
         }
     }
     result
 }
 
-pub fn parse_until_after<'i: 't, 't, F, T, E>(parser: &mut Parser<'i, 't>,
-                                              delimiters: Delimiters,
-                                              parse: F)
-                                              -> Result <T, ParseError<'i, E>>
-    where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>> {
+pub fn parse_until_after<'i: 't, 't, F, T, E>(
+    parser: &mut Parser<'i, 't>,
+    delimiters: Delimiters,
+    parse: F,
+) -> Result<T, ParseError<'i, E>>
+where
+    F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>>,
+{
     let result = parser.parse_until_before(delimiters, parse);
     let next_byte = parser.input.tokenizer.next_byte();
-    if next_byte.is_some() && !parser.stop_before.contains(Delimiters::from_byte(next_byte)) {
+    if next_byte.is_some()
+        && !parser
+            .stop_before
+            .contains(Delimiters::from_byte(next_byte))
+    {
         debug_assert!(delimiters.contains(Delimiters::from_byte(next_byte)));
         // We know this byte is ASCII.
         parser.input.tokenizer.advance(1);
         if next_byte == Some(b'{') {
             consume_until_end_of_block(BlockType::CurlyBracket, &mut parser.input.tokenizer);
         }
     }
     result
 }
 
-pub fn parse_nested_block<'i: 't, 't, F, T, E>(parser: &mut Parser<'i, 't>, parse: F)
-                                               -> Result <T, ParseError<'i, E>>
-    where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>> {
-    let block_type = parser.at_start_of.take().expect("\
-        A nested parser can only be created when a Function, \
-        ParenthesisBlock, SquareBracketBlock, or CurlyBracketBlock \
-        token was just consumed.\
-        ");
+pub fn parse_nested_block<'i: 't, 't, F, T, E>(
+    parser: &mut Parser<'i, 't>,
+    parse: F,
+) -> Result<T, ParseError<'i, E>>
+where
+    F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>>,
+{
+    let block_type = parser.at_start_of.take().expect(
+        "\
+         A nested parser can only be created when a Function, \
+         ParenthesisBlock, SquareBracketBlock, or CurlyBracketBlock \
+         token was just consumed.\
+         ",
+    );
     let closing_delimiter = match block_type {
         BlockType::CurlyBracket => ClosingDelimiter::CloseCurlyBracket,
         BlockType::SquareBracket => ClosingDelimiter::CloseSquareBracket,
         BlockType::Parenthesis => ClosingDelimiter::CloseParenthesis,
     };
     let result;
     // Introduce a new scope to limit duration of nested_parser’s borrow
     {
--- a/third_party/rust/cssparser/src/rules_and_declarations.rs
+++ b/third_party/rust/cssparser/src/rules_and_declarations.rs
@@ -1,23 +1,22 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 // https://drafts.csswg.org/css-syntax/#parsing
 
-use cow_rc_str::CowRcStr;
-use parser::{parse_until_before, parse_until_after, parse_nested_block, ParserState};
-#[allow(unused_imports)] use std::ascii::AsciiExt;
 use super::{BasicParseError, BasicParseErrorKind, Delimiter};
 use super::{ParseError, Parser, SourceLocation, Token};
+use cow_rc_str::CowRcStr;
+use parser::{parse_nested_block, parse_until_after, parse_until_before, ParserState};
 
 /// Parse `!important`.
 ///
-/// Typical usage is `input.try(parse_important).is_ok()`
+/// Typical usage is `input.try_parse(parse_important).is_ok()`
 /// at the end of a `DeclarationParser::parse_value` implementation.
 pub fn parse_important<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(), BasicParseError<'i>> {
     input.expect_delim('!')?;
     input.expect_ident_matching("important")
 }
 
 /// The return value for `AtRuleParser::parse_prelude`.
 /// Indicates whether the at-rule is expected to have a `{ /* ... */ }` block
@@ -56,20 +55,23 @@ pub trait DeclarationParser<'i> {
     /// This can be done with `std::ascii::Ascii::eq_ignore_ascii_case`,
     /// or with the `match_ignore_ascii_case!` macro.
     ///
     /// The given `input` is a "delimited" parser
     /// that ends wherever the declaration value should end.
     /// (In declaration lists, before the next semicolon or end of the current block.)
     ///
     /// If `!important` can be used in a given context,
-    /// `input.try(parse_important).is_ok()` should be used at the end
+    /// `input.try_parse(parse_important).is_ok()` should be used at the end
     /// of the implementation of this method and the result should be part of the return value.
-    fn parse_value<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>)
-                       -> Result<Self::Declaration, ParseError<'i, Self::Error>>;
+    fn parse_value<'t>(
+        &mut self,
+        name: CowRcStr<'i>,
+        input: &mut Parser<'i, 't>,
+    ) -> Result<Self::Declaration, ParseError<'i, Self::Error>>;
 }
 
 /// A trait to provide various parsing of at-rules.
 ///
 /// For example, there could be different implementations for top-level at-rules
 /// (`@media`, `@font-face`, …)
 /// and for page-margin rules inside `@page`.
 ///
@@ -101,19 +103,22 @@ pub trait AtRuleParser<'i> {
     ///
     /// At-rule name matching should be case-insensitive in the ASCII range.
     /// This can be done with `std::ascii::Ascii::eq_ignore_ascii_case`,
     /// or with the `match_ignore_ascii_case!` macro.
     ///
     /// The given `input` is a "delimited" parser
     /// that ends wherever the prelude should end.
     /// (Before the next semicolon, the next `{`, or the end of the current block.)
-    fn parse_prelude<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>)
-                     -> Result<AtRuleType<Self::PreludeNoBlock, Self::PreludeBlock>,
-                               ParseError<'i, Self::Error>> {
+    fn parse_prelude<'t>(
+        &mut self,
+        name: CowRcStr<'i>,
+        input: &mut Parser<'i, 't>,
+    ) -> Result<AtRuleType<Self::PreludeNoBlock, Self::PreludeBlock>, ParseError<'i, Self::Error>>
+    {
         let _ = name;
         let _ = input;
         Err(input.new_error(BasicParseErrorKind::AtRuleInvalid(name)))
     }
 
     /// End an at-rule which doesn't have block. Return the finished
     /// representation of the at-rule.
     ///
@@ -124,18 +129,20 @@ pub trait AtRuleParser<'i> {
     /// the end of the input.
     fn rule_without_block(
         &mut self,
         prelude: Self::PreludeNoBlock,
         location: SourceLocation,
     ) -> Self::AtRule {
         let _ = prelude;
         let _ = location;
-        panic!("The `AtRuleParser::rule_without_block` method must be overriden \
-                if `AtRuleParser::parse_prelude` ever returns `AtRuleType::WithoutBlock`.")
+        panic!(
+            "The `AtRuleParser::rule_without_block` method must be overriden \
+             if `AtRuleParser::parse_prelude` ever returns `AtRuleType::WithoutBlock`."
+        )
     }
 
     /// Parse the content of a `{ /* ... */ }` block for the body of the at-rule.
     ///
     /// The location passed in is source location of the start of the prelude.
     ///
     /// Return the finished representation of the at-rule
     /// as returned by `RuleListParser::next` or `DeclarationListParser::next`,
@@ -180,18 +187,20 @@ pub trait QualifiedRuleParser<'i> {
     ///
     /// Return the representation of the prelude,
     /// or `Err(())` to ignore the entire at-rule as invalid.
     ///
     /// The prelude is the part before the `{ /* ... */ }` block.
     ///
     /// The given `input` is a "delimited" parser
     /// that ends where the prelude should end (before the next `{`).
-    fn parse_prelude<'t>(&mut self, input: &mut Parser<'i, 't>)
-                         -> Result<Self::Prelude, ParseError<'i, Self::Error>> {
+    fn parse_prelude<'t>(
+        &mut self,
+        input: &mut Parser<'i, 't>,
+    ) -> Result<Self::Prelude, ParseError<'i, Self::Error>> {
         let _ = input;
         Err(input.new_error(BasicParseErrorKind::QualifiedRuleInvalid))
     }
 
     /// Parse the content of a `{ /* ... */ }` block for the body of the qualified rule.
     ///
     /// The location passed in is source location of the start of the prelude.
     ///
@@ -206,30 +215,29 @@ pub trait QualifiedRuleParser<'i> {
     ) -> Result<Self::QualifiedRule, ParseError<'i, Self::Error>> {
         let _ = prelude;
         let _ = location;
         let _ = input;
         Err(input.new_error(BasicParseErrorKind::QualifiedRuleInvalid))
     }
 }
 
-
 /// Provides an iterator for declaration list parsing.
 pub struct DeclarationListParser<'i: 't, 't: 'a, 'a, P> {
     /// The input given to `DeclarationListParser::new`
     pub input: &'a mut Parser<'i, 't>,
 
     /// The parser given to `DeclarationListParser::new`
     pub parser: P,
 }
 
-
 impl<'i: 't, 't: 'a, 'a, I, P, E: 'i> DeclarationListParser<'i, 't, 'a, P>
-where P: DeclarationParser<'i, Declaration = I, Error = E> +
-         AtRuleParser<'i, AtRule = I, Error = E> {
+where
+    P: DeclarationParser<'i, Declaration = I, Error = E> + AtRuleParser<'i, AtRule = I, Error = E>,
+{
     /// Create a new `DeclarationListParser` for the given `input` and `parser`.
     ///
     /// Note that all CSS declaration lists can on principle contain at-rules.
     /// Even if no such valid at-rule exists (yet),
     /// this affects error handling: at-rules end at `{}` blocks, not just semicolons.
     ///
     /// The given `parser` therefore needs to implement
     /// both `DeclarationParser` and `AtRuleParser` traits.
@@ -245,76 +253,85 @@ where P: DeclarationParser<'i, Declarati
             parser: parser,
         }
     }
 }
 
 /// `DeclarationListParser` is an iterator that yields `Ok(_)` for a valid declaration or at-rule
 /// or `Err(())` for an invalid one.
 impl<'i: 't, 't: 'a, 'a, I, P, E: 'i> Iterator for DeclarationListParser<'i, 't, 'a, P>
-where P: DeclarationParser<'i, Declaration = I, Error = E> +
-         AtRuleParser<'i, AtRule = I, Error = E> {
+where
+    P: DeclarationParser<'i, Declaration = I, Error = E> + AtRuleParser<'i, AtRule = I, Error = E>,
+{
     type Item = Result<I, (ParseError<'i, E>, &'i str)>;
 
     fn next(&mut self) -> Option<Self::Item> {
         loop {
             let start = self.input.state();
             // FIXME: remove intermediate variable when lifetimes are non-lexical
             let ident = match self.input.next_including_whitespace_and_comments() {
-                Ok(&Token::WhiteSpace(_)) | Ok(&Token::Comment(_)) | Ok(&Token::Semicolon) => continue,
+                Ok(&Token::WhiteSpace(_)) | Ok(&Token::Comment(_)) | Ok(&Token::Semicolon) => {
+                    continue
+                }
                 Ok(&Token::Ident(ref name)) => Ok(Ok(name.clone())),
                 Ok(&Token::AtKeyword(ref name)) => Ok(Err(name.clone())),
                 Ok(token) => Err(token.clone()),
                 Err(_) => return None,
             };
             match ident {
                 Ok(Ok(name)) => {
                     // Ident
                     let result = {
                         let parser = &mut self.parser;
                         // FIXME: https://github.com/rust-lang/rust/issues/42508
-                        parse_until_after::<'i, 't, _, _, _>(self.input, Delimiter::Semicolon, |input| {
-                            input.expect_colon()?;
-                            parser.parse_value(name, input)
-                        })
+                        parse_until_after::<'i, 't, _, _, _>(
+                            self.input,
+                            Delimiter::Semicolon,
+                            |input| {
+                                input.expect_colon()?;
+                                parser.parse_value(name, input)
+                            },
+                        )
                     };
-                    return Some(result.map_err(|e| (e, self.input.slice_from(start.position()))))
+                    return Some(result.map_err(|e| (e, self.input.slice_from(start.position()))));
                 }
                 Ok(Err(name)) => {
                     // At-keyword
-                    return Some(parse_at_rule(&start, name, self.input, &mut self.parser))
+                    return Some(parse_at_rule(&start, name, self.input, &mut self.parser));
                 }
                 Err(token) => {
                     let result = self.input.parse_until_after(Delimiter::Semicolon, |_| {
-                        Err(start.source_location().new_unexpected_token_error(token.clone()))
+                        Err(start
+                            .source_location()
+                            .new_unexpected_token_error(token.clone()))
                     });
-                    return Some(result.map_err(|e| (e, self.input.slice_from(start.position()))))
+                    return Some(result.map_err(|e| (e, self.input.slice_from(start.position()))));
                 }
             }
         }
     }
 }
 
-
 /// Provides an iterator for rule list parsing.
 pub struct RuleListParser<'i: 't, 't: 'a, 'a, P> {
     /// The input given to `RuleListParser::new`
     pub input: &'a mut Parser<'i, 't>,
 
     /// The parser given to `RuleListParser::new`
     pub parser: P,
 
     is_stylesheet: bool,
     any_rule_so_far: bool,
 }
 
-
 impl<'i: 't, 't: 'a, 'a, R, P, E: 'i> RuleListParser<'i, 't, 'a, P>
-where P: QualifiedRuleParser<'i, QualifiedRule = R, Error = E> +
-         AtRuleParser<'i, AtRule = R, Error = E> {
+where
+    P: QualifiedRuleParser<'i, QualifiedRule = R, Error = E>
+        + AtRuleParser<'i, AtRule = R, Error = E>,
+{
     /// Create a new `RuleListParser` for the given `input` at the top-level of a stylesheet
     /// and the given `parser`.
     ///
     /// The given `parser` needs to implement both `QualifiedRuleParser` and `AtRuleParser` traits.
     /// However, either of them can be an empty `impl`
     /// since the traits provide default implementations of their methods.
     ///
     /// The return type for finished qualified rules and at-rules also needs to be the same,
@@ -340,22 +357,22 @@ where P: QualifiedRuleParser<'i, Qualifi
             input: input,
             parser: parser,
             is_stylesheet: false,
             any_rule_so_far: false,
         }
     }
 }
 
-
-
 /// `RuleListParser` is an iterator that yields `Ok(_)` for a rule or `Err(())` for an invalid one.
 impl<'i: 't, 't: 'a, 'a, R, P, E: 'i> Iterator for RuleListParser<'i, 't, 'a, P>
-where P: QualifiedRuleParser<'i, QualifiedRule = R, Error = E> +
-         AtRuleParser<'i, AtRule = R, Error = E> {
+where
+    P: QualifiedRuleParser<'i, QualifiedRule = R, Error = E>
+        + AtRuleParser<'i, AtRule = R, Error = E>,
+{
     type Item = Result<R, (ParseError<'i, E>, &'i str)>;
 
     fn next(&mut self) -> Option<Self::Item> {
         loop {
             if self.is_stylesheet {
                 self.input.skip_cdc_and_cdo()
             } else {
                 self.input.skip_whitespace()
@@ -370,58 +387,70 @@ where P: QualifiedRuleParser<'i, Qualifi
                         _ => at_keyword = None,
                     }
                     // FIXME: move this back inside `match` when lifetimes are non-lexical
                     if at_keyword.is_none() {
                         self.input.reset(&start)
                     }
                 }
                 Some(_) => at_keyword = None,
-                None => return None
+                None => return None,
             }
 
             if let Some(name) = at_keyword {
                 let first_stylesheet_rule = self.is_stylesheet && !self.any_rule_so_far;
                 self.any_rule_so_far = true;
                 if first_stylesheet_rule && name.eq_ignore_ascii_case("charset") {
                     let delimiters = Delimiter::Semicolon | Delimiter::CurlyBracketBlock;
-                    let _: Result<(), ParseError<()>> = self.input.parse_until_after(delimiters, |_| Ok(()));
+                    let _: Result<(), ParseError<()>> =
+                        self.input.parse_until_after(delimiters, |_| Ok(()));
                 } else {
-                    return Some(parse_at_rule(&start, name.clone(), self.input, &mut self.parser))
+                    return Some(parse_at_rule(
+                        &start,
+                        name.clone(),
+                        self.input,
+                        &mut self.parser,
+                    ));
                 }
             } else {
                 self.any_rule_so_far = true;
                 let result = parse_qualified_rule(self.input, &mut self.parser);
-                return Some(result.map_err(|e| (e, self.input.slice_from(start.position()))))
+                return Some(result.map_err(|e| (e, self.input.slice_from(start.position()))));
             }
         }
     }
 }
 
-
 /// Parse a single declaration, such as an `( /* ... */ )` parenthesis in an `@supports` prelude.
-pub fn parse_one_declaration<'i, 't, P, E>(input: &mut Parser<'i, 't>, parser: &mut P)
-                                           -> Result<<P as DeclarationParser<'i>>::Declaration,
-                                                     (ParseError<'i, E>, &'i str)>
-                                           where P: DeclarationParser<'i, Error = E> {
+pub fn parse_one_declaration<'i, 't, P, E>(
+    input: &mut Parser<'i, 't>,
+    parser: &mut P,
+) -> Result<<P as DeclarationParser<'i>>::Declaration, (ParseError<'i, E>, &'i str)>
+where
+    P: DeclarationParser<'i, Error = E>,
+{
     let start_position = input.position();
-    input.parse_entirely(|input| {
-        let name = input.expect_ident()?.clone();
-        input.expect_colon()?;
-        parser.parse_value(name, input)
-    })
-    .map_err(|e| (e, input.slice_from(start_position)))
+    input
+        .parse_entirely(|input| {
+            let name = input.expect_ident()?.clone();
+            input.expect_colon()?;
+            parser.parse_value(name, input)
+        })
+        .map_err(|e| (e, input.slice_from(start_position)))
 }
 
-
 /// Parse a single rule, such as for CSSOM’s `CSSStyleSheet.insertRule`.
-pub fn parse_one_rule<'i, 't, R, P, E>(input: &mut Parser<'i, 't>, parser: &mut P)
-                                       -> Result<R, ParseError<'i, E>>
-where P: QualifiedRuleParser<'i, QualifiedRule = R, Error = E> +
-         AtRuleParser<'i, AtRule = R, Error = E> {
+pub fn parse_one_rule<'i, 't, R, P, E>(
+    input: &mut Parser<'i, 't>,
+    parser: &mut P,
+) -> Result<R, ParseError<'i, E>>
+where
+    P: QualifiedRuleParser<'i, QualifiedRule = R, Error = E>
+        + AtRuleParser<'i, AtRule = R, Error = E>,
+{
     input.parse_entirely(|input| {
         input.skip_whitespace();
         let start = input.state();
 
         let at_keyword;
         if input.next_byte() == Some(b'@') {
             match *input.next_including_whitespace_and_comments()? {
                 Token::AtKeyword(ref name) => at_keyword = Some(name.clone()),
@@ -445,81 +474,78 @@ where P: QualifiedRuleParser<'i, Qualifi
 
 fn parse_at_rule<'i: 't, 't, P, E>(
     start: &ParserState,
     name: CowRcStr<'i>,
     input: &mut Parser<'i, 't>,
     parser: &mut P,
 ) -> Result<<P as AtRuleParser<'i>>::AtRule, (ParseError<'i, E>, &'i str)>
 where
-    P: AtRuleParser<'i, Error = E>
+    P: AtRuleParser<'i, Error = E>,
 {
     let location = input.current_source_location();
     let delimiters = Delimiter::Semicolon | Delimiter::CurlyBracketBlock;
     // FIXME: https://github.com/rust-lang/rust/issues/42508
     let result = parse_until_before::<'i, 't, _, _, _>(input, delimiters, |input| {
         parser.parse_prelude(name, input)
     });
     match result {
-        Ok(AtRuleType::WithoutBlock(prelude)) => {
-            match input.next() {
-                Ok(&Token::Semicolon) | Err(_) => Ok(parser.rule_without_block(prelude, location)),
-                Ok(&Token::CurlyBracketBlock) => Err((
-                    input.new_unexpected_token_error(Token::CurlyBracketBlock),
-                    input.slice_from(start.position()),
-                )),
-                Ok(_) => unreachable!()
-            }
-        }
+        Ok(AtRuleType::WithoutBlock(prelude)) => match input.next() {
+            Ok(&Token::Semicolon) | Err(_) => Ok(parser.rule_without_block(prelude, location)),
+            Ok(&Token::CurlyBracketBlock) => Err((
+                input.new_unexpected_token_error(Token::CurlyBracketBlock),
+                input.slice_from(start.position()),
+            )),
+            Ok(_) => unreachable!(),
+        },
         Ok(AtRuleType::WithBlock(prelude)) => {
             match input.next() {
                 Ok(&Token::CurlyBracketBlock) => {
                     // FIXME: https://github.com/rust-lang/rust/issues/42508
-                    parse_nested_block::<'i, 't, _, _, _>(
-                        input,
-                        move |input| parser.parse_block(prelude, location, input)
-                    ).map_err(|e| (e, input.slice_from(start.position())))
+                    parse_nested_block::<'i, 't, _, _, _>(input, move |input| {
+                        parser.parse_block(prelude, location, input)
+                    })
+                    .map_err(|e| (e, input.slice_from(start.position())))
                 }
                 Ok(&Token::Semicolon) => Err((
                     input.new_unexpected_token_error(Token::Semicolon),
                     input.slice_from(start.position()),
                 )),
                 Err(e) => Err((e.into(), input.slice_from(start.position()))),
-                Ok(_) => unreachable!()
+                Ok(_) => unreachable!(),
             }
         }
         Err(error) => {
             let end_position = input.position();
             match input.next() {
-                Ok(&Token::CurlyBracketBlock) | Ok(&Token::Semicolon) | Err(_) => {},
-                _ => unreachable!()
+                Ok(&Token::CurlyBracketBlock) | Ok(&Token::Semicolon) | Err(_) => {}
+                _ => unreachable!(),
             };
             Err((error, input.slice(start.position()..end_position)))
         }
     }
 }
 
-
 fn parse_qualified_rule<'i, 't, P, E>(
     input: &mut Parser<'i, 't>,
     parser: &mut P,
 ) -> Result<<P as QualifiedRuleParser<'i>>::QualifiedRule, ParseError<'i, E>>
 where
-    P: QualifiedRuleParser<'i, Error = E>
+    P: QualifiedRuleParser<'i, Error = E>,
 {
     let location = input.current_source_location();
     // FIXME: https://github.com/rust-lang/rust/issues/42508
-    let prelude = parse_until_before::<'i, 't, _, _, _>(input, Delimiter::CurlyBracketBlock, |input| {
-        parser.parse_prelude(input)
-    });
+    let prelude =
+        parse_until_before::<'i, 't, _, _, _>(input, Delimiter::CurlyBracketBlock, |input| {
+            parser.parse_prelude(input)
+        });
     match *input.next()? {
         Token::CurlyBracketBlock => {
             // Do this here so that we consume the `{` even if the prelude is `Err`.
             let prelude = prelude?;
             // FIXME: https://github.com/rust-lang/rust/issues/42508
-            parse_nested_block::<'i, 't, _, _, _>(
-                input,
-                move |input| parser.parse_block(prelude, location, input),
-            )
+            parse_nested_block::<'i, 't, _, _, _>(input, move |input| {
+                parser.parse_block(prelude, location, input)
+            })
         }
-        _ => unreachable!()
+        _ => unreachable!(),
     }
 }
--- a/third_party/rust/cssparser/src/serializer.rs
+++ b/third_party/rust/cssparser/src/serializer.rs
@@ -1,105 +1,123 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 use dtoa_short::{self, Notation};
 use itoa;
-#[allow(unused_imports)] use std::ascii::AsciiExt;
 use std::fmt::{self, Write};
 use std::io;
 use std::str;
 
 use super::Token;
 
-
 /// Trait for things the can serialize themselves in CSS syntax.
 pub trait ToCss {
     /// Serialize `self` in CSS syntax, writing to `dest`.
-    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write;
+    fn to_css<W>(&self, dest: &mut W) -> fmt::Result
+    where
+        W: fmt::Write;
 
     /// Serialize `self` in CSS syntax and return a string.
     ///
     /// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
     #[inline]
     fn to_css_string(&self) -> String {
         let mut s = String::new();
         self.to_css(&mut s).unwrap();
         s
     }
 }
 
 #[inline]
-fn write_numeric<W>(value: f32, int_value: Option<i32>, has_sign: bool, dest: &mut W)
-                    -> fmt::Result where W: fmt::Write {
+fn write_numeric<W>(value: f32, int_value: Option<i32>, has_sign: bool, dest: &mut W) -> fmt::Result
+where
+    W: fmt::Write,
+{
     // `value.value >= 0` is true for negative 0.
     if has_sign && value.is_sign_positive() {
         dest.write_str("+")?;
     }
 
     let notation = if value == 0.0 && value.is_sign_negative() {
         // Negative zero. Work around #20596.
         dest.write_str("-0")?;
-        Notation { decimal_point: false, scientific: false }
+        Notation {
+            decimal_point: false,
+            scientific: false,
+        }
     } else {
         dtoa_short::write(dest, value)?
     };
 
     if int_value.is_none() && value.fract() == 0. {
         if !notation.decimal_point && !notation.scientific {
             dest.write_str(".0")?;
         }
     }
     Ok(())
 }
 
-
 impl<'a> ToCss for Token<'a> {
-    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
+    fn to_css<W>(&self, dest: &mut W) -> fmt::Result
+    where
+        W: fmt::Write,
+    {
         match *self {
             Token::Ident(ref value) => serialize_identifier(&**value, dest)?,
             Token::AtKeyword(ref value) => {
                 dest.write_str("@")?;
                 serialize_identifier(&**value, dest)?;
-            },
+            }
             Token::Hash(ref value) => {
                 dest.write_str("#")?;
                 serialize_name(value, dest)?;
-            },
+            }
             Token::IDHash(ref value) => {
                 dest.write_str("#")?;
                 serialize_identifier(&**value, dest)?;
             }
             Token::QuotedString(ref value) => serialize_string(&**value, dest)?,
             Token::UnquotedUrl(ref value) => {
                 dest.write_str("url(")?;
                 serialize_unquoted_url(&**value, dest)?;
                 dest.write_str(")")?;
-            },
+            }
             Token::Delim(value) => dest.write_char(value)?,
 
-            Token::Number { value, int_value, has_sign } => {
-                write_numeric(value, int_value, has_sign, dest)?
-            }
-            Token::Percentage { unit_value, int_value, has_sign } => {
+            Token::Number {
+                value,
+                int_value,
+                has_sign,
+            } => write_numeric(value, int_value, has_sign, dest)?,
+            Token::Percentage {
+                unit_value,
+                int_value,
+                has_sign,
+            } => {
                 write_numeric(unit_value * 100., int_value, has_sign, dest)?;
                 dest.write_str("%")?;
-            },
-            Token::Dimension { value, int_value, has_sign, ref unit } => {
+            }
+            Token::Dimension {
+                value,
+                int_value,
+                has_sign,
+                ref unit,
+            } => {
                 write_numeric(value, int_value, has_sign, dest)?;
                 // Disambiguate with scientific notation.
                 let unit = &**unit;
                 if unit == "e" || unit == "E" || unit.starts_with("e-") || unit.starts_with("E-") {
                     dest.write_str("\\65 ")?;
                     serialize_name(&unit[1..], dest)?;
                 } else {
                     serialize_identifier(unit, dest)?;
                 }
-            },
+            }
 
             Token::WhiteSpace(content) => dest.write_str(content)?,
             Token::Comment(content) => {
                 dest.write_str("/*")?;
                 dest.write_str(content)?;
                 dest.write_str("*/")?
             }
             Token::Colon => dest.write_str(":")?,
@@ -111,17 +129,17 @@ impl<'a> ToCss for Token<'a> {
             Token::SuffixMatch => dest.write_str("$=")?,
             Token::SubstringMatch => dest.write_str("*=")?,
             Token::CDO => dest.write_str("<!--")?,
             Token::CDC => dest.write_str("-->")?,
 
             Token::Function(ref name) => {
                 serialize_identifier(&**name, dest)?;
                 dest.write_str("(")?;
-            },
+            }
             Token::ParenthesisBlock => dest.write_str("(")?,
             Token::SquareBracketBlock => dest.write_str("[")?,
             Token::CurlyBracketBlock => dest.write_str("{")?,
 
             Token::BadUrl(ref contents) => {
                 dest.write_str("url(")?;
                 dest.write_str(contents)?;
                 dest.write_char(')')?;
@@ -129,50 +147,59 @@ impl<'a> ToCss for Token<'a> {
             Token::BadString(ref value) => {
                 // During tokenization, an unescaped newline after a quote causes
                 // the token to be a BadString instead of a QuotedString.
                 // The BadString token ends just before the newline
                 // (which is in a separate WhiteSpace token),
                 // and therefore does not have a closing quote.
                 dest.write_char('"')?;
                 CssStringWriter::new(dest).write_str(value)?;
-            },
+            }
             Token::CloseParenthesis => dest.write_str(")")?,
             Token::CloseSquareBracket => dest.write_str("]")?,
             Token::CloseCurlyBracket => dest.write_str("}")?,
         }
         Ok(())
     }
 }
 
-fn hex_escape<W>(ascii_byte: u8, dest: &mut W) -> fmt::Result where W:fmt::Write {
+fn hex_escape<W>(ascii_byte: u8, dest: &mut W) -> fmt::Result
+where
+    W: fmt::Write,
+{
     static HEX_DIGITS: &'static [u8; 16] = b"0123456789abcdef";
     let b3;
     let b4;
     let bytes = if ascii_byte > 0x0F {
         let high = (ascii_byte >> 4) as usize;
         let low = (ascii_byte & 0x0F) as usize;
         b4 = [b'\\', HEX_DIGITS[high], HEX_DIGITS[low], b' '];
         &b4[..]
     } else {
         b3 = [b'\\', HEX_DIGITS[ascii_byte as usize], b' '];
         &b3[..]
     };
     dest.write_str(unsafe { str::from_utf8_unchecked(&bytes) })
 }
 
-fn char_escape<W>(ascii_byte: u8, dest: &mut W) -> fmt::Result where W:fmt::Write {
+fn char_escape<W>(ascii_byte: u8, dest: &mut W) -> fmt::Result
+where
+    W: fmt::Write,
+{
     let bytes = [b'\\', ascii_byte];
     dest.write_str(unsafe { str::from_utf8_unchecked(&bytes) })
 }
 
 /// Write a CSS identifier, escaping characters as necessary.
-pub fn serialize_identifier<W>(mut value: &str, dest: &mut W) -> fmt::Result where W:fmt::Write {
+pub fn serialize_identifier<W>(mut value: &str, dest: &mut W) -> fmt::Result
+where
+    W: fmt::Write,
+{
     if value.is_empty() {
-        return Ok(())
+        return Ok(());
     }
 
     if value.starts_with("--") {
         dest.write_str("--")?;
         serialize_name(&value[2..], dest)
     } else if value == "-" {
         dest.write_str("\\-")
     } else {
@@ -187,17 +214,20 @@ pub fn serialize_identifier<W>(mut value
         serialize_name(value, dest)
     }
 }
 
 /// Write a CSS name, like a custom property name.
 ///
 /// You should only use this when you know what you're doing, when in doubt,
 /// consider using `serialize_identifier`.
-pub fn serialize_name<W>(value: &str, dest: &mut W) -> fmt::Result where W:fmt::Write {
+pub fn serialize_name<W>(value: &str, dest: &mut W) -> fmt::Result
+where
+    W: fmt::Write,
+{
     let mut chunk_start = 0;
     for (i, b) in value.bytes().enumerate() {
         let escaped = match b {
             b'0'...b'9' | b'A'...b'Z' | b'a'...b'z' | b'_' | b'-' => continue,
             _ if !b.is_ascii() => continue,
             b'\0' => Some("\u{FFFD}"),
             _ => None,
         };
@@ -209,74 +239,83 @@ pub fn serialize_name<W>(value: &str, de
         } else {
             char_escape(b, dest)?;
         }
         chunk_start = i + 1;
     }
     dest.write_str(&value[chunk_start..])
 }
 
-
-fn serialize_unquoted_url<W>(value: &str, dest: &mut W) -> fmt::Result where W:fmt::Write {
+fn serialize_unquoted_url<W>(value: &str, dest: &mut W) -> fmt::Result
+where
+    W: fmt::Write,
+{
     let mut chunk_start = 0;
     for (i, b) in value.bytes().enumerate() {
         let hex = match b {
-            b'\0' ... b' ' | b'\x7F' => true,
+            b'\0'...b' ' | b'\x7F' => true,
             b'(' | b')' | b'"' | b'\'' | b'\\' => false,
-            _ => continue
+            _ => continue,
         };
         dest.write_str(&value[chunk_start..i])?;
         if hex {
             hex_escape(b, dest)?;
         } else {
             char_escape(b, dest)?;
         }
         chunk_start = i + 1;
     }
     dest.write_str(&value[chunk_start..])
 }
 
-
 /// Write a double-quoted CSS string token, escaping content as necessary.
-pub fn serialize_string<W>(value: &str, dest: &mut W) -> fmt::Result where W: fmt::Write {
+pub fn serialize_string<W>(value: &str, dest: &mut W) -> fmt::Result
+where
+    W: fmt::Write,
+{
     dest.write_str("\"")?;
     CssStringWriter::new(dest).write_str(value)?;
     dest.write_str("\"")?;
     Ok(())
 }
 
-
 /// A `fmt::Write` adapter that escapes text for writing as a double-quoted CSS string.
 /// Quotes are not included.
 ///
 /// Typical usage:
 ///
 /// ```{rust,ignore}
 /// fn write_foo<W>(foo: &Foo, dest: &mut W) -> fmt::Result where W: fmt::Write {
-///     try!(dest.write_str("\""));
+///     dest.write_str("\"")?;
 ///     {
 ///         let mut string_dest = CssStringWriter::new(dest);
 ///         // Write into string_dest...
 ///     }
-///     try!(dest.write_str("\""));
+///     dest.write_str("\"")?;
 ///     Ok(())
 /// }
 /// ```
 pub struct CssStringWriter<'a, W: 'a> {
     inner: &'a mut W,
 }
 
-impl<'a, W> CssStringWriter<'a, W> where W: fmt::Write {
+impl<'a, W> CssStringWriter<'a, W>
+where
+    W: fmt::Write,
+{
     /// Wrap a text writer to create a `CssStringWriter`.
     pub fn new(inner: &'a mut W) -> CssStringWriter<'a, W> {
         CssStringWriter { inner: inner }
     }
 }
 
-impl<'a, W> fmt::Write for CssStringWriter<'a, W> where W: fmt::Write {
+impl<'a, W> fmt::Write for CssStringWriter<'a, W>
+where
+    W: fmt::Write,
+{
     fn write_str(&mut self, s: &str) -> fmt::Result {
         let mut chunk_start = 0;
         for (i, b) in s.bytes().enumerate() {
             let escaped = match b {
                 b'"' => Some("\\\""),
                 b'\\' => Some("\\\\"),
                 b'\0' => Some("\u{FFFD}"),
                 b'\x01'...b'\x1F' | b'\x7F' => None,
@@ -288,29 +327,32 @@ impl<'a, W> fmt::Write for CssStringWrit
                 None => hex_escape(b, self.inner)?,
             };
             chunk_start = i + 1;
         }
         self.inner.write_str(&s[chunk_start..])
     }
 }
 
-
 macro_rules! impl_tocss_for_int {
     ($T: ty) => {
         impl<'a> ToCss for $T {
-            fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
+            fn to_css<W>(&self, dest: &mut W) -> fmt::Result
+            where
+                W: fmt::Write,
+            {
                 struct AssumeUtf8<W: fmt::Write>(W);
 
                 impl<W: fmt::Write> io::Write for AssumeUtf8<W> {
                     #[inline]
                     fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
                         // Safety: itoa only emits ASCII, which is also well-formed UTF-8.
                         debug_assert!(buf.is_ascii());
-                        self.0.write_str(unsafe { str::from_utf8_unchecked(buf) })
+                        self.0
+                            .write_str(unsafe { str::from_utf8_unchecked(buf) })
                             .map_err(|_| io::ErrorKind::Other.into())
                     }
 
                     #[inline]
                     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
                         self.write_all(buf)?;
                         Ok(buf.len())
                     }
@@ -318,40 +360,43 @@ macro_rules! impl_tocss_for_int {
                     #[inline]
                     fn flush(&mut self) -> io::Result<()> {
                         Ok(())
                     }
                 }
 
                 match itoa::write(AssumeUtf8(dest), *self) {
                     Ok(_) => Ok(()),
-                    Err(_) => Err(fmt::Error)
+                    Err(_) => Err(fmt::Error),
                 }
             }
         }
-    }
+    };
 }
 
 impl_tocss_for_int!(i8);
 impl_tocss_for_int!(u8);
 impl_tocss_for_int!(i16);
 impl_tocss_for_int!(u16);
 impl_tocss_for_int!(i32);
 impl_tocss_for_int!(u32);
 impl_tocss_for_int!(i64);
 impl_tocss_for_int!(u64);
 
 macro_rules! impl_tocss_for_float {
     ($T: ty) => {
         impl<'a> ToCss for $T {
-            fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
+            fn to_css<W>(&self, dest: &mut W) -> fmt::Result
+            where
+                W: fmt::Write,
+            {
                 dtoa_short::write(dest, *self).map(|_| ())
             }
         }
-    }
+    };
 }
 
 impl_tocss_for_float!(f32);
 impl_tocss_for_float!(f64);
 
 /// A category of token. See the `needs_separator_when_before` method.
 #[derive(Copy, Clone, Eq, PartialEq, Debug)]
 pub struct TokenSerializationType(TokenSerializationTypeVariants);
@@ -376,32 +421,43 @@ impl TokenSerializationType {
     /// a token of category `other` with no whitespace in between,
     /// an empty comment `/**/` needs to be inserted between them
     /// so that they are not re-parsed as a single token.
     ///
     /// See https://drafts.csswg.org/css-syntax/#serialization
     pub fn needs_separator_when_before(self, other: TokenSerializationType) -> bool {
         use self::TokenSerializationTypeVariants::*;
         match self.0 {
-            Ident => matches!(other.0,
-                Ident | Function | UrlOrBadUrl | DelimMinus | Number | Percentage | Dimension |
-                CDC | OpenParen),
-            AtKeywordOrHash | Dimension => matches!(other.0,
-                Ident | Function | UrlOrBadUrl | DelimMinus | Number | Percentage | Dimension |
-                CDC),
-            DelimHash | DelimMinus | Number => matches!(other.0,
-                Ident | Function | UrlOrBadUrl | DelimMinus | Number | Percentage | Dimension),
-            DelimAt => matches!(other.0,
-                Ident | Function | UrlOrBadUrl | DelimMinus),
+            Ident => matches!(
+                other.0,
+                Ident
+                    | Function
+                    | UrlOrBadUrl
+                    | DelimMinus
+                    | Number
+                    | Percentage
+                    | Dimension
+                    | CDC
+                    | OpenParen
+            ),
+            AtKeywordOrHash | Dimension => matches!(
+                other.0,
+                Ident | Function | UrlOrBadUrl | DelimMinus | Number | Percentage | Dimension | CDC
+            ),
+            DelimHash | DelimMinus | Number => matches!(
+                other.0,
+                Ident | Function | UrlOrBadUrl | DelimMinus | Number | Percentage | Dimension
+            ),
+            DelimAt => matches!(other.0, Ident | Function | UrlOrBadUrl | DelimMinus),
             DelimDotOrPlus => matches!(other.0, Number | Percentage | Dimension),
             DelimAssorted | DelimAsterisk => matches!(other.0, DelimEquals),
             DelimBar => matches!(other.0, DelimEquals | DelimBar | DashMatch),
             DelimSlash => matches!(other.0, DelimAsterisk | SubstringMatch),
-            Nothing | WhiteSpace | Percentage | UrlOrBadUrl | Function | CDC | OpenParen |
-            DashMatch | SubstringMatch | DelimQuestion | DelimEquals | Other => false,
+            Nothing | WhiteSpace | Percentage | UrlOrBadUrl | Function | CDC | OpenParen
+            | DashMatch | SubstringMatch | DelimQuestion | DelimEquals | Other => false,
         }
     }
 }
 
 #[derive(Copy, Clone, Eq, PartialEq, Debug)]
 enum TokenSerializationTypeVariants {
     Nothing,
     WhiteSpace,
@@ -410,28 +466,28 @@ enum TokenSerializationTypeVariants {
     Dimension,
     Percentage,
     UrlOrBadUrl,
     Function,
     Ident,
     CDC,
     DashMatch,
     SubstringMatch,
-    OpenParen,         // '('
-    DelimHash,         // '#'
-    DelimAt,           // '@'
-    DelimDotOrPlus,    // '.', '+'
-    DelimMinus,        // '-'
-    DelimQuestion,     // '?'
-    DelimAssorted,     // '$', '^', '~'
-    DelimEquals,       // '='
-    DelimBar,          // '|'
-    DelimSlash,        // '/'
-    DelimAsterisk,     // '*'
-    Other,             // anything else
+    OpenParen,      // '('
+    DelimHash,      // '#'
+    DelimAt,        // '@'
+    DelimDotOrPlus, // '.', '+'
+    DelimMinus,     // '-'
+    DelimQuestion,  // '?'
+    DelimAssorted,  // '$', '^', '~'
+    DelimEquals,    // '='
+    DelimBar,       // '|'
+    DelimSlash,     // '/'
+    DelimAsterisk,  // '*'
+    Other,          // anything else
 }
 
 impl<'a> Token<'a> {
     /// Categorize a token into a type that determines when `/**/` needs to be inserted
     /// between two tokens when serialized next to each other without whitespace in between.
     ///
     /// See the `TokenSerializationType::needs_separator_when_before` method.
     pub fn serialization_type(&self) -> TokenSerializationType {
@@ -455,17 +511,26 @@ impl<'a> Token<'a> {
             Token::Dimension { .. } => Dimension,
             Token::WhiteSpace(_) => WhiteSpace,
             Token::Comment(_) => DelimSlash,
             Token::DashMatch => DashMatch,
             Token::SubstringMatch => SubstringMatch,
             Token::CDC => CDC,
             Token::Function(_) => Function,
             Token::ParenthesisBlock => OpenParen,
-            Token::SquareBracketBlock | Token::CurlyBracketBlock |
-            Token::CloseParenthesis | Token::CloseSquareBracket | Token::CloseCurlyBracket |
-            Token::QuotedString(_) | Token::BadString(_) |
-            Token::Delim(_) | Token::Colon | Token::Semicolon | Token::Comma | Token::CDO |
-            Token::IncludeMatch | Token::PrefixMatch | Token::SuffixMatch
-            => Other,
+            Token::SquareBracketBlock
+            | Token::CurlyBracketBlock
+            | Token::CloseParenthesis
+            | Token::CloseSquareBracket
+            | Token::CloseCurlyBracket
+            | Token::QuotedString(_)
+            | Token::BadString(_)
+            | Token::Delim(_)
+            | Token::Colon
+            | Token::Semicolon
+            | Token::Comma
+            | Token::CDO
+            | Token::IncludeMatch
+            | Token::PrefixMatch
+            | Token::SuffixMatch => Other,
         })
     }
 }
--- a/third_party/rust/cssparser/src/size_of_tests.rs
+++ b/third_party/rust/cssparser/src/size_of_tests.rs
@@ -11,35 +11,49 @@ macro_rules! size_of_test {
         #[test]
         fn $testname() {
             let new = ::std::mem::size_of::<$t>();
             let old = $expected_size;
             if new < old {
                 panic!(
                     "Your changes have decreased the stack size of {} from {} to {}. \
                      Good work! Please update the expected size in {}.",
-                    stringify!($t), old, new, file!()
+                    stringify!($t),
+                    old,
+                    new,
+                    file!()
                 )
             } else if new > old {
                 panic!(
                     "Your changes have increased the stack size of {} from {} to {}. \
                      Please consider choosing a design which avoids this increase. \
                      If you feel that the increase is necessary, update the size in {}.",
-                    stringify!($t), old, new, file!()
+                    stringify!($t),
+                    old,
+                    new,
+                    file!()
                 )
             }
         }
-    }
+    };
 }
 
 // Some of these assume 64-bit
 size_of_test!(token, Token, 32);
 size_of_test!(std_cow_str, Cow<'static, str>, 32);
 size_of_test!(cow_rc_str, CowRcStr, 16);
 
 size_of_test!(tokenizer, ::tokenizer::Tokenizer, 72);
-size_of_test!(parser_input, ::parser::ParserInput, if cfg!(rustc_has_pr45225) { 136 } else { 144 });
+size_of_test!(
+    parser_input,
+    ::parser::ParserInput,
+    if cfg!(rustc_has_pr45225) { 136 } else { 144 }
+);
 size_of_test!(parser, ::parser::Parser, 16);
 size_of_test!(source_position, ::SourcePosition, 8);
 size_of_test!(parser_state, ::ParserState, 24);
 
 size_of_test!(basic_parse_error, ::BasicParseError, 48);
-size_of_test!(parse_error_lower_bound, ::ParseError<()>, if cfg!(rustc_has_pr45225) { 48 } else { 56 });
+size_of_test!(
+    parse_error_lower_bound,
+    ::ParseError<()>,
+    if cfg!(rustc_has_pr45225) { 48 } else { 56 }
+);
--- a/third_party/rust/cssparser/src/tests.rs
+++ b/third_party/rust/cssparser/src/tests.rs
@@ -6,24 +6,23 @@
 extern crate test;
 
 use encoding_rs;
 use rustc_serialize::json::{self, Json, ToJson};
 
 #[cfg(feature = "bench")]
 use self::test::Bencher;
 
-use super::{Parser, Delimiter, Token, SourceLocation,
-            ParseError, ParseErrorKind, BasicParseError, BasicParseErrorKind,
-            DeclarationListParser, DeclarationParser, RuleListParser,
-            AtRuleType, AtRuleParser, QualifiedRuleParser, ParserInput,
-            parse_one_declaration, parse_one_rule, parse_important,
-            stylesheet_encoding, EncodingSupport,
-            TokenSerializationType, CowRcStr,
-            Color, RGBA, parse_nth, UnicodeRange, ToCss};
+use super::{
+    parse_important, parse_nth, parse_one_declaration, parse_one_rule, stylesheet_encoding,
+    AtRuleParser, AtRuleType, BasicParseError, BasicParseErrorKind, Color, CowRcStr,
+    DeclarationListParser, DeclarationParser, Delimiter, EncodingSupport, ParseError,
+    ParseErrorKind, Parser, ParserInput, QualifiedRuleParser, RuleListParser, SourceLocation,
+    ToCss, Token, TokenSerializationType, UnicodeRange, RGBA,
+};
 
 macro_rules! JArray {
     ($($e: expr,)*) => { JArray![ $( $e ),* ] };
     ($($e: expr),*) => { Json::Array(vec!( $( $e.to_json() ),* )) }
 }
 
 fn almost_equals(a: &Json, b: &Json) -> bool {
     match (a, b) {
@@ -32,19 +31,21 @@ fn almost_equals(a: &Json, b: &Json) -> 
         (_, &Json::I64(b)) => almost_equals(a, &Json::F64(b as f64)),
         (_, &Json::U64(b)) => almost_equals(a, &Json::F64(b as f64)),
 
         (&Json::F64(a), &Json::F64(b)) => (a - b).abs() <= a.abs() * 1e-6,
 
         (&Json::Boolean(a), &Json::Boolean(b)) => a == b,
         (&Json::String(ref a), &Json::String(ref b)) => a == b,
         (&Json::Array(ref a), &Json::Array(ref b)) => {
-            a.len() == b.len() &&
-            a.iter().zip(b.iter()).all(|(ref a, ref b)| almost_equals(*a, *b))
-        },
+            a.len() == b.len()
+                && a.iter()
+                    .zip(b.iter())
+                    .all(|(ref a, ref b)| almost_equals(*a, *b))
+        }
         (&Json::Object(_), &Json::Object(_)) => panic!("Not implemented"),
         (&Json::Null, &Json::Null) => true,
         _ => false,
     }
 }
 
 fn normalize(json: &mut Json) {
     match *json {
@@ -60,187 +61,201 @@ fn normalize(json: &mut Json) {
         }
         _ => {}
     }
 }
 
 fn assert_json_eq(results: json::Json, mut expected: json::Json, message: &str) {
     normalize(&mut expected);
     if !almost_equals(&results, &expected) {
-        println!("{}", ::difference::Changeset::new(
-            &results.pretty().to_string(),
-            &expected.pretty().to_string(),
-            "\n",
-        ));
+        println!(
+            "{}",
+            ::difference::Changeset::new(
+                &results.pretty().to_string(),
+                &expected.pretty().to_string(),
+                "\n",
+            )
+        );
         panic!("{}", message)
     }
 }
 
 fn run_raw_json_tests<F: Fn(Json, Json) -> ()>(json_data: &str, run: F) {
     let items = match Json::from_str(json_data) {
         Ok(Json::Array(items)) => items,
-        _ => panic!("Invalid JSON")
+        _ => panic!("Invalid JSON"),
     };
     assert!(items.len() % 2 == 0);
     let mut input = None;
     for item in items.into_iter() {
         match (&input, item) {
             (&None, json_obj) => input = Some(json_obj),
             (&Some(_), expected) => {
                 let input = input.take().unwrap();
                 run(input, expected)
-            },
+            }
         };
     }
 }
 
-
 fn run_json_tests<F: Fn(&mut Parser) -> Json>(json_data: &str, parse: F) {
-    run_raw_json_tests(json_data, |input, expected| {
-        match input {
-            Json::String(input) => {
-                let mut parse_input = ParserInput::new(&input);
-                let result = parse(&mut Parser::new(&mut parse_input));
-                assert_json_eq(result, expected, &input);
-            },
-            _ => panic!("Unexpected JSON")
+    run_raw_json_tests(json_data, |input, expected| match input {
+        Json::String(input) => {
+            let mut parse_input = ParserInput::new(&input);
+            let result = parse(&mut Parser::new(&mut parse_input));
+            assert_json_eq(result, expected, &input);
         }
+        _ => panic!("Unexpected JSON"),
     });
 }
 
-
 #[test]
 fn component_value_list() {
-    run_json_tests(include_str!("css-parsing-tests/component_value_list.json"), |input| {
-        Json::Array(component_values_to_json(input))
-    });
+    run_json_tests(
+        include_str!("css-parsing-tests/component_value_list.json"),
+        |input| Json::Array(component_values_to_json(input)),
+    );
 }
 
-
 #[test]
 fn one_component_value() {
-    run_json_tests(include_str!("css-parsing-tests/one_component_value.json"), |input| {
-        let result: Result<Json, ParseError<()>> = input.parse_entirely(|input| {
-            Ok(one_component_value_to_json(input.next()?.clone(), input))
-        });
-        result.unwrap_or(JArray!["error", "invalid"])
-    });
+    run_json_tests(
+        include_str!("css-parsing-tests/one_component_value.json"),
+        |input| {
+            let result: Result<Json, ParseError<()>> = input.parse_entirely(|input| {
+                Ok(one_component_value_to_json(input.next()?.clone(), input))
+            });
+            result.unwrap_or(JArray!["error", "invalid"])
+        },
+    );
 }
 
-
 #[test]
 fn declaration_list() {
-    run_json_tests(include_str!("css-parsing-tests/declaration_list.json"), |input| {
-        Json::Array(DeclarationListParser::new(input, JsonParser).map(|result| {
-            result.unwrap_or(JArray!["error", "invalid"])
-        }).collect())
-    });
+    run_json_tests(
+        include_str!("css-parsing-tests/declaration_list.json"),
+        |input| {
+            Json::Array(
+                DeclarationListParser::new(input, JsonParser)
+                    .map(|result| result.unwrap_or(JArray!["error", "invalid"]))
+                    .collect(),
+            )
+        },
+    );
 }
 
-
 #[test]
 fn one_declaration() {
-    run_json_tests(include_str!("css-parsing-tests/one_declaration.json"), |input| {
-        parse_one_declaration(input, &mut JsonParser).unwrap_or(JArray!["error", "invalid"])
-    });
+    run_json_tests(
+        include_str!("css-parsing-tests/one_declaration.json"),
+        |input| {
+            parse_one_declaration(input, &mut JsonParser).unwrap_or(JArray!["error", "invalid"])
+        },
+    );
 }
 
-
 #[test]
 fn rule_list() {
     run_json_tests(include_str!("css-parsing-tests/rule_list.json"), |input| {
-        Json::Array(RuleListParser::new_for_nested_rule(input, JsonParser).map(|result| {
-            result.unwrap_or(JArray!["error", "invalid"])
-        }).collect())
+        Json::Array(
+            RuleListParser::new_for_nested_rule(input, JsonParser)
+                .map(|result| result.unwrap_or(JArray!["error", "invalid"]))
+                .collect(),
+        )
     });
 }
 
-
 #[test]
 fn stylesheet() {
     run_json_tests(include_str!("css-parsing-tests/stylesheet.json"), |input| {
-        Json::Array(RuleListParser::new_for_stylesheet(input, JsonParser).map(|result| {
-            result.unwrap_or(JArray!["error", "invalid"])
-        }).collect())
+        Json::Array(
+            RuleListParser::new_for_stylesheet(input, JsonParser)
+                .map(|result| result.unwrap_or(JArray!["error", "invalid"]))
+                .collect(),
+        )
     });
 }
 
-
 #[test]
 fn one_rule() {
     run_json_tests(include_str!("css-parsing-tests/one_rule.json"), |input| {
         parse_one_rule(input, &mut JsonParser).unwrap_or(JArray!["error", "invalid"])
     });
 }
 
-
 #[test]
 fn stylesheet_from_bytes() {
     pub struct EncodingRs;
 
     impl EncodingSupport for EncodingRs {
         type Encoding = &'static encoding_rs::Encoding;
 
         fn utf8() -> Self::Encoding {
             encoding_rs::UTF_8
         }
 
         fn is_utf16_be_or_le(encoding: &Self::Encoding) -> bool {
-            *encoding == encoding_rs::UTF_16LE ||
-            *encoding == encoding_rs::UTF_16BE
+            *encoding == encoding_rs::UTF_16LE || *encoding == encoding_rs::UTF_16BE
         }
 
         fn from_label(ascii_label: &[u8]) -> Option<Self::Encoding> {
             encoding_rs::Encoding::for_label(ascii_label)
         }
     }
 
-
-    run_raw_json_tests(include_str!("css-parsing-tests/stylesheet_bytes.json"),
-                       |input, expected| {
-        let map = match input {
-            Json::Object(map) => map,
-            _ => panic!("Unexpected JSON")
-        };
+    run_raw_json_tests(
+        include_str!("css-parsing-tests/stylesheet_bytes.json"),
+        |input, expected| {
+            let map = match input {
+                Json::Object(map) => map,
+                _ => panic!("Unexpected JSON"),
+            };
 
-        let result = {
-            let css = get_string(&map, "css_bytes").unwrap().chars().map(|c| {
-                assert!(c as u32 <= 0xFF);
-                c as u8
-            }).collect::<Vec<u8>>();
-            let protocol_encoding_label = get_string(&map, "protocol_encoding")
-                .map(|s| s.as_bytes());
-            let environment_encoding = get_string(&map, "environment_encoding")
-                .map(|s| s.as_bytes())
-                .and_then(EncodingRs::from_label);
+            let result = {
+                let css = get_string(&map, "css_bytes")
+                    .unwrap()
+                    .chars()
+                    .map(|c| {
+                        assert!(c as u32 <= 0xFF);
+                        c as u8
+                    })
+                    .collect::<Vec<u8>>();
+                let protocol_encoding_label =
+                    get_string(&map, "protocol_encoding").map(|s| s.as_bytes());
+                let environment_encoding = get_string(&map, "environment_encoding")
+                    .map(|s| s.as_bytes())
+                    .and_then(EncodingRs::from_label);
 
-            let encoding = stylesheet_encoding::<EncodingRs>(
-                &css, protocol_encoding_label, environment_encoding);
-            let (css_unicode, used_encoding, _) = encoding.decode(&css);
-            let mut input = ParserInput::new(&css_unicode);
-            let input = &mut Parser::new(&mut input);
-            let rules = RuleListParser::new_for_stylesheet(input, JsonParser)
-                        .map(|result| result.unwrap_or(JArray!["error", "invalid"]))
-                        .collect::<Vec<_>>();
-            JArray![rules, used_encoding.name().to_lowercase()]
-        };
-        assert_json_eq(result, expected, &Json::Object(map).to_string());
-    });
+                let encoding = stylesheet_encoding::<EncodingRs>(
+                    &css,
+                    protocol_encoding_label,
+                    environment_encoding,
+                );
+                let (css_unicode, used_encoding, _) = encoding.decode(&css);
+                let mut input = ParserInput::new(&css_unicode);
+                let input = &mut Parser::new(&mut input);
+                let rules = RuleListParser::new_for_stylesheet(input, JsonParser)
+                    .map(|result| result.unwrap_or(JArray!["error", "invalid"]))
+                    .collect::<Vec<_>>();
+                JArray![rules, used_encoding.name().to_lowercase()]
+            };
+            assert_json_eq(result, expected, &Json::Object(map).to_string());
+        },
+    );
 
     fn get_string<'a>(map: &'a json::Object, key: &str) -> Option<&'a str> {
         match map.get(key) {
             Some(&Json::String(ref s)) => Some(s),
             Some(&Json::Null) => None,
             None => None,
             _ => panic!("Unexpected JSON"),
         }
     }
 }
 
-
 #[test]
 fn expect_no_error_token() {
     let mut input = ParserInput::new("foo 4px ( / { !bar }");
     assert!(Parser::new(&mut input).expect_no_error_token().is_ok());
     let mut input = ParserInput::new(")");
     assert!(Parser::new(&mut input).expect_no_error_token().is_err());
     let mut input = ParserInput::new("}");
     assert!(Parser::new(&mut input).expect_no_error_token().is_err());
@@ -251,65 +266,72 @@ fn expect_no_error_token() {
     let mut input = ParserInput::new("url('\n'");
     assert!(Parser::new(&mut input).expect_no_error_token().is_err());
     let mut input = ParserInput::new("url(a b)");
     assert!(Parser::new(&mut input).expect_no_error_token().is_err());
     let mut input = ParserInput::new("url(\u{7F}))");
     assert!(Parser::new(&mut input).expect_no_error_token().is_err());
 }
 
-
 /// https://github.com/servo/rust-cssparser/issues/71
 #[test]
 fn outer_block_end_consumed() {
     let mut input = ParserInput::new("(calc(true))");
     let mut input = Parser::new(&mut input);
     assert!(input.expect_parenthesis_block().is_ok());
-    assert!(input.parse_nested_block(|input| {
-        input.expect_function_matching("calc").map_err(Into::<ParseError<()>>::into)
-    }).is_ok());
+    assert!(input
+        .parse_nested_block(|input| input
+            .expect_function_matching("calc")
+            .map_err(Into::<ParseError<()>>::into))
+        .is_ok());
     println!("{:?}", input.position());
     assert!(input.next().is_err());
 }
 
 /// https://github.com/servo/rust-cssparser/issues/174
 #[test]
 fn bad_url_slice_out_of_bounds() {
     let mut input = ParserInput::new("url(\u{1}\\");
     let mut parser = Parser::new(&mut input);
-    let result = parser.next_including_whitespace_and_comments();  // This used to panic
+    let result = parser.next_including_whitespace_and_comments(); // This used to panic
     assert_eq!(result, Ok(&Token::BadUrl("\u{1}\\".into())));
 }
 
 /// https://bugzilla.mozilla.org/show_bug.cgi?id=1383975
 #[test]
 fn bad_url_slice_not_at_char_boundary() {
     let mut input = ParserInput::new("url(9\n۰");
     let mut parser = Parser::new(&mut input);
-    let result = parser.next_including_whitespace_and_comments();  // This used to panic
+    let result = parser.next_including_whitespace_and_comments(); // This used to panic
     assert_eq!(result, Ok(&Token::BadUrl("9\n۰".into())));
 }
 
 #[test]
 fn unquoted_url_escaping() {
-    let token = Token::UnquotedUrl("\
-        \x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\
-        \x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f \
-        !\"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]\
-        ^_`abcdefghijklmnopqrstuvwxyz{|}~\x7fé\
-    ".into());
+    let token = Token::UnquotedUrl(
+        "\
+         \x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\
+         \x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f \
+         !\"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]\
+         ^_`abcdefghijklmnopqrstuvwxyz{|}~\x7fé\
+         "
+        .into(),
+    );
     let serialized = token.to_css_string();
-    assert_eq!(serialized, "\
-        url(\
-            \\1 \\2 \\3 \\4 \\5 \\6 \\7 \\8 \\9 \\a \\b \\c \\d \\e \\f \\10 \
-            \\11 \\12 \\13 \\14 \\15 \\16 \\17 \\18 \\19 \\1a \\1b \\1c \\1d \\1e \\1f \\20 \
-            !\\\"#$%&\\'\\(\\)*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]\
-            ^_`abcdefghijklmnopqrstuvwxyz{|}~\\7f é\
-        )\
-        ");
+    assert_eq!(
+        serialized,
+        "\
+         url(\
+         \\1 \\2 \\3 \\4 \\5 \\6 \\7 \\8 \\9 \\a \\b \\c \\d \\e \\f \\10 \
+         \\11 \\12 \\13 \\14 \\15 \\16 \\17 \\18 \\19 \\1a \\1b \\1c \\1d \\1e \\1f \\20 \
+         !\\\"#$%&\\'\\(\\)*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]\
+         ^_`abcdefghijklmnopqrstuvwxyz{|}~\\7f é\
+         )\
+         "
+    );
     let mut input = ParserInput::new(&serialized);
     assert_eq!(Parser::new(&mut input).next(), Ok(&token));
 }
 
 #[test]
 fn test_expect_url() {
     fn parse<'a>(s: &mut ParserInput<'a>) -> Result<CowRcStr<'a>, BasicParseError<'a>> {
         Parser::new(s).expect_url()
@@ -327,53 +349,57 @@ fn test_expect_url() {
     let mut input = ParserInput::new("url(abc more stuff)");
     assert!(parse(&mut input).is_err());
     // The grammar at https://drafts.csswg.org/css-values/#urls plans for `<url-modifier>*`
     // at the position of "more stuff", but no such modifier is defined yet.
     let mut input = ParserInput::new("url('abc' more stuff)");
     assert!(parse(&mut input).is_err());
 }
 
-
 fn run_color_tests<F: Fn(Result<Color, ()>) -> Json>(json_data: &str, to_json: F) {
     run_json_tests(json_data, |input| {
-        let result: Result<_, ParseError<()>> = input.parse_entirely(|i| {
-            Color::parse(i).map_err(Into::into)
-        });
+        let result: Result<_, ParseError<()>> =
+            input.parse_entirely(|i| Color::parse(i).map_err(Into::into));
         to_json(result.map_err(|_| ()))
     });
 }
 
-
 #[test]
 fn color3() {
-    run_color_tests(include_str!("css-parsing-tests/color3.json"), |c| c.ok().to_json())
+    run_color_tests(include_str!("css-parsing-tests/color3.json"), |c| {
+        c.ok().to_json()
+    })
 }
 
-
 #[test]
 fn color3_hsl() {
-    run_color_tests(include_str!("css-parsing-tests/color3_hsl.json"), |c| c.ok().to_json())
+    run_color_tests(include_str!("css-parsing-tests/color3_hsl.json"), |c| {
+        c.ok().to_json()
+    })
 }
 
-
 /// color3_keywords.json is different: R, G and B are in 0..255 rather than 0..1
 #[test]
 fn color3_keywords() {
-    run_color_tests(include_str!("css-parsing-tests/color3_keywords.json"), |c| c.ok().to_json())
+    run_color_tests(
+        include_str!("css-parsing-tests/color3_keywords.json"),
+        |c| c.ok().to_json(),
+    )
 }
 
-
 #[test]
 fn nth() {
     run_json_tests(include_str!("css-parsing-tests/An+B.json"), |input| {
-        input.parse_entirely(|i| {
-            let result: Result<_, ParseError<()>> = parse_nth(i).map_err(Into::into);
-            result
-        }).ok().to_json()
+        input
+            .parse_entirely(|i| {
+                let result: Result<_, ParseError<()>> = parse_nth(i).map_err(Into::into);
+                result
+            })
+            .ok()
+            .to_json()
     });
 }
 
 #[test]
 fn unicode_range() {
     run_json_tests(include_str!("css-parsing-tests/urange.json"), |input| {
         let result: Result<_, ParseError<()>> = input.parse_comma_separated(|input| {
             let result = UnicodeRange::parse(input).ok().map(|r| (r.start, r.end));
@@ -383,66 +409,80 @@ fn unicode_range() {
                 while let Ok(_) = input.next() {}
                 Ok(None)
             }
         });
         result.unwrap().to_json()
     });
 }
 
-
 #[test]
 fn serializer_not_preserving_comments() {
     serializer(false)
 }
 
 #[test]
 fn serializer_preserving_comments() {
     serializer(true)
 }
 
 fn serializer(preserve_comments: bool) {
-    run_json_tests(include_str!("css-parsing-tests/component_value_list.json"), |input| {
-        fn write_to(mut previous_token: TokenSerializationType,
-                    input: &mut Parser,
-                    string: &mut String,
-                    preserve_comments: bool) {
-            while let Ok(token) = if preserve_comments {
-                input.next_including_whitespace_and_comments().map(|t| t.clone())
-            } else {
-                input.next_including_whitespace().map(|t| t.clone())
-            } {
-                let token_type = token.serialization_type();
-                if !preserve_comments && previous_token.needs_separator_when_before(token_type) {
-                    string.push_str("/**/")
-                }
-                previous_token = token_type;
-                token.to_css(string).unwrap();
-                let closing_token = match token {
-                    Token::Function(_) | Token::ParenthesisBlock => Some(Token::CloseParenthesis),
-                    Token::SquareBracketBlock => Some(Token::CloseSquareBracket),
-                    Token::CurlyBracketBlock => Some(Token::CloseCurlyBracket),
-                    _ => None
-                };
-                if let Some(closing_token) = closing_token {
-                    let result: Result<_, ParseError<()>> = input.parse_nested_block(|input| {
-                        write_to(previous_token, input, string, preserve_comments);
-                        Ok(())
-                    });
-                    result.unwrap();
-                    closing_token.to_css(string).unwrap();
+    run_json_tests(
+        include_str!("css-parsing-tests/component_value_list.json"),
+        |input| {
+            fn write_to(
+                mut previous_token: TokenSerializationType,
+                input: &mut Parser,
+                string: &mut String,
+                preserve_comments: bool,
+            ) {
+                while let Ok(token) = if preserve_comments {
+                    input
+                        .next_including_whitespace_and_comments()
+                        .map(|t| t.clone())
+                } else {
+                    input.next_including_whitespace().map(|t| t.clone())
+                } {
+                    let token_type = token.serialization_type();
+                    if !preserve_comments && previous_token.needs_separator_when_before(token_type)
+                    {
+                        string.push_str("/**/")
+                    }
+                    previous_token = token_type;
+                    token.to_css(string).unwrap();
+                    let closing_token = match token {
+                        Token::Function(_) | Token::ParenthesisBlock => {
+                            Some(Token::CloseParenthesis)
+                        }
+                        Token::SquareBracketBlock => Some(Token::CloseSquareBracket),
+                        Token::CurlyBracketBlock => Some(Token::CloseCurlyBracket),
+                        _ => None,
+                    };
+                    if let Some(closing_token) = closing_token {
+                        let result: Result<_, ParseError<()>> = input.parse_nested_block(|input| {
+                            write_to(previous_token, input, string, preserve_comments);
+                            Ok(())
+                        });
+                        result.unwrap();
+                        closing_token.to_css(string).unwrap();
+                    }
                 }
             }
-        }
-        let mut serialized = String::new();
-        write_to(TokenSerializationType::nothing(), input, &mut serialized, preserve_comments);
-        let mut input = ParserInput::new(&serialized);
-        let parser = &mut Parser::new(&mut input);
-        Json::Array(component_values_to_json(parser))
-    });
+            let mut serialized = String::new();
+            write_to(
+                TokenSerializationType::nothing(),
+                input,
+                &mut serialized,
+                preserve_comments,
+            );
+            let mut input = ParserInput::new(&serialized);
+            let parser = &mut Parser::new(&mut input);
+            Json::Array(component_values_to_json(parser))
+        },
+    );
 }
 
 #[test]
 fn serialize_bad_tokens() {
     let mut input = ParserInput::new("url(foo\\) b\\)ar)'ba\\'\"z\n4");
     let mut parser = Parser::new(&mut input);
 
     let token = parser.next().unwrap().clone();
@@ -492,46 +532,100 @@ fn line_numbers() {
         "*/baz\r\n",
         "\n",
         "url(\r\n",
         "  u \r\n",
         ")\"a\\\r\n",
         "b\""
     ));
     let mut input = Parser::new(&mut input);
-    assert_eq!(input.current_source_location(), SourceLocation { line: 0, column: 1 });
-    assert_eq!(input.next_including_whitespace(), Ok(&Token::Ident("fo00o".into())));
-    assert_eq!(input.current_source_location(), SourceLocation { line: 1, column: 3 });
-    assert_eq!(input.next_including_whitespace(), Ok(&Token::WhiteSpace(" ")));
-    assert_eq!(input.current_source_location(), SourceLocation { line: 1, column: 4 });
-    assert_eq!(input.next_including_whitespace(), Ok(&Token::Ident("bar".into())));
-    assert_eq!(input.current_source_location(), SourceLocation { line: 1, column: 7 });
-    assert_eq!(input.next_including_whitespace_and_comments(), Ok(&Token::Comment("\n")));
-    assert_eq!(input.current_source_location(), SourceLocation { line: 2, column: 3 });
-    assert_eq!(input.next_including_whitespace(), Ok(&Token::Ident("baz".into())));
-    assert_eq!(input.current_source_location(), SourceLocation { line: 2, column: 6 });
+    assert_eq!(
+        input.current_source_location(),
+        SourceLocation { line: 0, column: 1 }
+    );
+    assert_eq!(
+        input.next_including_whitespace(),
+        Ok(&Token::Ident("fo00o".into()))
+    );
+    assert_eq!(
+        input.current_source_location(),
+        SourceLocation { line: 1, column: 3 }
+    );
+    assert_eq!(
+        input.next_including_whitespace(),
+        Ok(&Token::WhiteSpace(" "))
+    );
+    assert_eq!(
+        input.current_source_location(),
+        SourceLocation { line: 1, column: 4 }
+    );
+    assert_eq!(
+        input.next_including_whitespace(),
+        Ok(&Token::Ident("bar".into()))
+    );
+    assert_eq!(
+        input.current_source_location(),
+        SourceLocation { line: 1, column: 7 }
+    );
+    assert_eq!(
+        input.next_including_whitespace_and_comments(),
+        Ok(&Token::Comment("\n"))
+    );
+    assert_eq!(
+        input.current_source_location(),
+        SourceLocation { line: 2, column: 3 }
+    );
+    assert_eq!(
+        input.next_including_whitespace(),
+        Ok(&Token::Ident("baz".into()))
+    );
+    assert_eq!(
+        input.current_source_location(),
+        SourceLocation { line: 2, column: 6 }
+    );
     let state = input.state();
 
-    assert_eq!(input.next_including_whitespace(), Ok(&Token::WhiteSpace("\r\n\n")));
-    assert_eq!(input.current_source_location(), SourceLocation { line: 4, column: 1 });
+    assert_eq!(
+        input.next_including_whitespace(),
+        Ok(&Token::WhiteSpace("\r\n\n"))
+    );
+    assert_eq!(
+        input.current_source_location(),
+        SourceLocation { line: 4, column: 1 }
+    );
 
-    assert_eq!(state.source_location(), SourceLocation { line: 2, column: 6 });
+    assert_eq!(
+        state.source_location(),
+        SourceLocation { line: 2, column: 6 }
+    );
 
-    assert_eq!(input.next_including_whitespace(), Ok(&Token::UnquotedUrl("u".into())));
-    assert_eq!(input.current_source_location(), SourceLocation { line: 6, column: 2 });
+    assert_eq!(
+        input.next_including_whitespace(),
+        Ok(&Token::UnquotedUrl("u".into()))
+    );
+    assert_eq!(
+        input.current_source_location(),
+        SourceLocation { line: 6, column: 2 }
+    );
 
-    assert_eq!(input.next_including_whitespace(), Ok(&Token::QuotedString("ab".into())));
-    assert_eq!(input.current_source_location(), SourceLocation { line: 7, column: 3 });
+    assert_eq!(
+        input.next_including_whitespace(),
+        Ok(&Token::QuotedString("ab".into()))
+    );
+    assert_eq!(
+        input.current_source_location(),
+        SourceLocation { line: 7, column: 3 }
+    );
     assert!(input.next_including_whitespace().is_err());
 }
 
 #[test]
 fn overflow() {
+    use std::f32;
     use std::iter::repeat;
-    use std::f32;
 
     let css = r"
          2147483646
          2147483647
          2147483648
          10000000000000
          1000000000000000000000000000000000000000
          1{309 zeros}
@@ -546,17 +640,18 @@ fn overflow() {
          3.30282347e+38
          3.40282347e+38
          3.402824e+38
 
          -3.30282347e+38
          -3.40282347e+38
          -3.402824e+38
 
-    ".replace("{309 zeros}", &repeat('0').take(309).collect::<String>());
+    "
+    .replace("{309 zeros}", &repeat('0').take(309).collect::<String>());
     let mut input = ParserInput::new(&css);
     let mut input = Parser::new(&mut input);
 
     assert_eq!(input.expect_integer(), Ok(2147483646));
     assert_eq!(input.expect_integer(), Ok(2147483647));
     assert_eq!(input.expect_integer(), Ok(2147483647)); // Clamp on overflow
     assert_eq!(input.expect_integer(), Ok(2147483647));
     assert_eq!(input.expect_integer(), Ok(2147483647));
@@ -581,36 +676,47 @@ fn overflow() {
 }
 
 #[test]
 fn line_delimited() {
     let mut input = ParserInput::new(" { foo ; bar } baz;,");
     let mut input = Parser::new(&mut input);
     assert_eq!(input.next(), Ok(&Token::CurlyBracketBlock));
     assert!({
-        let result: Result<_, ParseError<()>> = input.parse_until_after(Delimiter::Semicolon, |_| Ok(42));
+        let result: Result<_, ParseError<()>> =
+            input.parse_until_after(Delimiter::Semicolon, |_| Ok(42));
         result
-    }.is_err());
+    }
+    .is_err());
     assert_eq!(input.next(), Ok(&Token::Comma));
     assert!(input.next().is_err());
 }
 
 #[test]
 fn identifier_serialization() {
     // Null bytes
     assert_eq!(Token::Ident("\0".into()).to_css_string(), "\u{FFFD}");
     assert_eq!(Token::Ident("a\0".into()).to_css_string(), "a\u{FFFD}");
     assert_eq!(Token::Ident("\0b".into()).to_css_string(), "\u{FFFD}b");
     assert_eq!(Token::Ident("a\0b".into()).to_css_string(), "a\u{FFFD}b");
 
     // Replacement character
     assert_eq!(Token::Ident("\u{FFFD}".into()).to_css_string(), "\u{FFFD}");
-    assert_eq!(Token::Ident("a\u{FFFD}".into()).to_css_string(), "a\u{FFFD}");
-    assert_eq!(Token::Ident("\u{FFFD}b".into()).to_css_string(), "\u{FFFD}b");
-    assert_eq!(Token::Ident("a\u{FFFD}b".into()).to_css_string(), "a\u{FFFD}b");
+    assert_eq!(
+        Token::Ident("a\u{FFFD}".into()).to_css_string(),
+        "a\u{FFFD}"
+    );
+    assert_eq!(
+        Token::Ident("\u{FFFD}b".into()).to_css_string(),
+        "\u{FFFD}b"
+    );
+    assert_eq!(
+        Token::Ident("a\u{FFFD}b".into()).to_css_string(),
+        "a\u{FFFD}b"
+    );
 
     // Number prefix
     assert_eq!(Token::Ident("0a".into()).to_css_string(), "\\30 a");
     assert_eq!(Token::Ident("1a".into()).to_css_string(), "\\31 a");
     assert_eq!(Token::Ident("2a".into()).to_css_string(), "\\32 a");
     assert_eq!(Token::Ident("3a".into()).to_css_string(), "\\33 a");
     assert_eq!(Token::Ident("4a".into()).to_css_string(), "\\34 a");
     assert_eq!(Token::Ident("5a".into()).to_css_string(), "\\35 a");
@@ -642,66 +748,87 @@ fn identifier_serialization() {
     assert_eq!(Token::Ident("-7a".into()).to_css_string(), "-\\37 a");
     assert_eq!(Token::Ident("-8a".into()).to_css_string(), "-\\38 a");
     assert_eq!(Token::Ident("-9a".into()).to_css_string(), "-\\39 a");
 
     // Double dash prefix
     assert_eq!(Token::Ident("--a".into()).to_css_string(), "--a");
 
     // Various tests
-    assert_eq!(Token::Ident("\x01\x02\x1E\x1F".into()).to_css_string(), "\\1 \\2 \\1e \\1f ");
-    assert_eq!(Token::Ident("\u{0080}\x2D\x5F\u{00A9}".into()).to_css_string(), "\u{0080}\x2D\x5F\u{00A9}");
+    assert_eq!(
+        Token::Ident("\x01\x02\x1E\x1F".into()).to_css_string(),
+        "\\1 \\2 \\1e \\1f "
+    );
+    assert_eq!(
+        Token::Ident("\u{0080}\x2D\x5F\u{00A9}".into()).to_css_string(),
+        "\u{0080}\x2D\x5F\u{00A9}"
+    );
     assert_eq!(Token::Ident("\x7F\u{0080}\u{0081}\u{0082}\u{0083}\u{0084}\u{0085}\u{0086}\u{0087}\u{0088}\u{0089}\
         \u{008A}\u{008B}\u{008C}\u{008D}\u{008E}\u{008F}\u{0090}\u{0091}\u{0092}\u{0093}\u{0094}\u{0095}\u{0096}\
         \u{0097}\u{0098}\u{0099}\u{009A}\u{009B}\u{009C}\u{009D}\u{009E}\u{009F}".into()).to_css_string(),
         "\\7f \u{0080}\u{0081}\u{0082}\u{0083}\u{0084}\u{0085}\u{0086}\u{0087}\u{0088}\u{0089}\u{008A}\u{008B}\u{008C}\
         \u{008D}\u{008E}\u{008F}\u{0090}\u{0091}\u{0092}\u{0093}\u{0094}\u{0095}\u{0096}\u{0097}\u{0098}\u{0099}\
         \u{009A}\u{009B}\u{009C}\u{009D}\u{009E}\u{009F}");
-    assert_eq!(Token::Ident("\u{00A0}\u{00A1}\u{00A2}".into()).to_css_string(), "\u{00A0}\u{00A1}\u{00A2}");
-    assert_eq!(Token::Ident("a0123456789b".into()).to_css_string(), "a0123456789b");
-    assert_eq!(Token::Ident("abcdefghijklmnopqrstuvwxyz".into()).to_css_string(), "abcdefghijklmnopqrstuvwxyz");
-    assert_eq!(Token::Ident("ABCDEFGHIJKLMNOPQRSTUVWXYZ".into()).to_css_string(), "ABCDEFGHIJKLMNOPQRSTUVWXYZ");
-    assert_eq!(Token::Ident("\x20\x21\x78\x79".into()).to_css_string(), "\\ \\!xy");
+    assert_eq!(
+        Token::Ident("\u{00A0}\u{00A1}\u{00A2}".into()).to_css_string(),
+        "\u{00A0}\u{00A1}\u{00A2}"
+    );
+    assert_eq!(
+        Token::Ident("a0123456789b".into()).to_css_string(),
+        "a0123456789b"
+    );
+    assert_eq!(
+        Token::Ident("abcdefghijklmnopqrstuvwxyz".into()).to_css_string(),
+        "abcdefghijklmnopqrstuvwxyz"
+    );
+    assert_eq!(
+        Token::Ident("ABCDEFGHIJKLMNOPQRSTUVWXYZ".into()).to_css_string(),
+        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+    );
+    assert_eq!(
+        Token::Ident("\x20\x21\x78\x79".into()).to_css_string(),
+        "\\ \\!xy"
+    );
 
     // astral symbol (U+1D306 TETRAGRAM FOR CENTRE)
-    assert_eq!(Token::Ident("\u{1D306}".into()).to_css_string(), "\u{1D306}");
+    assert_eq!(
+        Token::Ident("\u{1D306}".into()).to_css_string(),
+        "\u{1D306}"
+    );
 }
 
 impl ToJson for Color {
     fn to_json(&self) -> json::Json {
         match *self {
-            Color::RGBA(ref rgba) => {
-                [rgba.red, rgba.green, rgba.blue, rgba.alpha].to_json()
-            },
+            Color::RGBA(ref rgba) => [rgba.red, rgba.green, rgba.blue, rgba.alpha].to_json(),
             Color::CurrentColor => "currentcolor".to_json(),
         }
     }
 }
 
 #[cfg(feature = "bench")]
 const BACKGROUND_IMAGE: &'static str = include_str!("big-data-url.css");
 
 #[cfg(feature = "bench")]
 #[bench]
 fn unquoted_url(b: &mut Bencher) {
     b.iter(|| {
         let mut input = ParserInput::new(BACKGROUND_IMAGE);
         let mut input = Parser::new(&mut input);
         input.look_for_var_or_env_functions();
 
-        let result = input.try(|input| input.expect_url());
+        let result = input.try_parse(|input| input.expect_url());
 
         assert!(result.is_ok());
 
         input.seen_var_or_env_functions();
         (result.is_ok(), input.seen_var_or_env_functions())
     })
 }
 
-
 #[cfg(feature = "bench")]
 #[bench]
 fn numeric(b: &mut Bencher) {
     b.iter(|| {
         for _ in 0..1000000 {
             let mut input = ParserInput::new("10px");
             let mut input = Parser::new(&mut input);
             let _ = test::black_box(input.next());
@@ -715,86 +842,83 @@ struct JsonParser;
 fn no_stack_overflow_multiple_nested_blocks() {
     let mut input: String = "{{".into();
     for _ in 0..20 {
         let dup = input.clone();
         input.push_str(&dup);
     }
     let mut input = ParserInput::new(&input);
     let mut input = Parser::new(&mut input);
-    while let Ok(..) = input.next() { }
+    while let Ok(..) = input.next() {}
 }
 
 impl<'i> DeclarationParser<'i> for JsonParser {
     type Declaration = Json;
     type Error = ();
 
-    fn parse_value<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>)
-                       -> Result<Json, ParseError<'i, ()>> {
+    fn parse_value<'t>(
+        &mut self,
+        name: CowRcStr<'i>,
+        input: &mut Parser<'i, 't>,
+    ) -> Result<Json, ParseError<'i, ()>> {
         let mut value = vec![];
         let mut important = false;
         loop {
             let start = input.state();
             if let Ok(mut token) = input.next_including_whitespace().map(|t| t.clone()) {
                 // Hack to deal with css-parsing-tests assuming that
                 // `!important` in the middle of a declaration value is OK.
                 // This can never happen per spec
                 // (even CSS Variables forbid top-level `!`)
                 if token == Token::Delim('!') {
                     input.reset(&start);
                     if parse_important(input).is_ok() {
                         if input.is_exhausted() {
                             important = true;
-                            break
+                            break;
                         }
                     }
                     input.reset(&start);
                     token = input.next_including_whitespace().unwrap().clone();
                 }
                 value.push(one_component_value_to_json(token, input));
             } else {
-                break
+                break;
             }
         }
-        Ok(JArray![
-            "declaration",
-            name,
-            value,
-            important,
-        ])
+        Ok(JArray!["declaration", name, value, important,])
     }
 }
 
 impl<'i> AtRuleParser<'i> for JsonParser {
     type PreludeNoBlock = Vec<Json>;
     type PreludeBlock = Vec<Json>;
     type AtRule = Json;
     type Error = ();
 
-    fn parse_prelude<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>)
-                         -> Result<AtRuleType<Vec<Json>, Vec<Json>>, ParseError<'i, ()>> {
+    fn parse_prelude<'t>(
+        &mut self,
+        name: CowRcStr<'i>,
+        input: &mut Parser<'i, 't>,
+    ) -> Result<AtRuleType<Vec<Json>, Vec<Json>>, ParseError<'i, ()>> {
         let prelude = vec![
             "at-rule".to_json(),
             name.to_json(),
             Json::Array(component_values_to_json(input)),
         ];
         match_ignore_ascii_case! { &*name,
             "media" | "foo-with-block" => Ok(AtRuleType::WithBlock(prelude)),
             "charset" => {
                 Err(input.new_error(BasicParseErrorKind::AtRuleInvalid(name.clone()).into()))
             }
             _ => Ok(AtRuleType::WithoutBlock(prelude)),
         }
     }
 
-    fn rule_without_block(
-        &mut self,
-        mut prelude: Vec<Json>,
-        _location: SourceLocation,
-    ) -> Json {
+    fn rule_without_block(&mut self, mut prelude: Vec<Json>, _location: SourceLocation) -> Json {
         prelude.push(Json::Null);
         Json::Array(prelude)
     }
 
     fn parse_block<'t>(
         &mut self,
         mut prelude: Vec<Json>,
         _location: SourceLocation,
@@ -805,17 +929,20 @@ impl<'i> AtRuleParser<'i> for JsonParser
     }
 }
 
 impl<'i> QualifiedRuleParser<'i> for JsonParser {
     type Prelude = Vec<Json>;
     type QualifiedRule = Json;
     type Error = ();
 
-    fn parse_prelude<'t>(&mut self, input: &mut Parser<'i, 't>) -> Result<Vec<Json>, ParseError<'i, ()>> {
+    fn parse_prelude<'t>(
+        &mut self,
+        input: &mut Parser<'i, 't>,
+    ) -> Result<Vec<Json>, ParseError<'i, ()>> {
         Ok(component_values_to_json(input))
     }
 
     fn parse_block<'t>(
         &mut self,
         prelude: Vec<Json>,
         _location: SourceLocation,
         input: &mut Parser<'i, 't>,
@@ -838,50 +965,71 @@ fn component_values_to_json(input: &mut 
 
 fn one_component_value_to_json(token: Token, input: &mut Parser) -> Json {
     fn numeric(value: f32, int_value: Option<i32>, has_sign: bool) -> Vec<json::Json> {
         vec![
             Token::Number {
                 value: value,
                 int_value: int_value,
                 has_sign: has_sign,
-            }.to_css_string().to_json(),
-            match int_value { Some(i) => i.to_json(), None => value.to_json() },
-            match int_value { Some(_) => "integer", None => "number" }.to_json()
+            }
+            .to_css_string()
+            .to_json(),
+            match int_value {
+                Some(i) => i.to_json(),
+                None => value.to_json(),
+            },
+            match int_value {
+                Some(_) => "integer",
+                None => "number",
+            }
+            .to_json(),
         ]
     }
 
     fn nested(input: &mut Parser) -> Vec<Json> {
-        let result: Result<_, ParseError<()>> = input.parse_nested_block(|input| {
-            Ok(component_values_to_json(input))
-        });
+        let result: Result<_, ParseError<()>> =
+            input.parse_nested_block(|input| Ok(component_values_to_json(input)));
         result.unwrap()
     }
 
     match token {
         Token::Ident(value) => JArray!["ident", value],
         Token::AtKeyword(value) => JArray!["at-keyword", value],
         Token::Hash(value) => JArray!["hash", value, "unrestricted"],
         Token::IDHash(value) => JArray!["hash", value, "id"],
         Token::QuotedString(value) => JArray!["string", value],
         Token::UnquotedUrl(value) => JArray!["url", value],
         Token::Delim('\\') => "\\".to_json(),
         Token::Delim(value) => value.to_string().to_json(),
 
-        Token::Number { value, int_value, has_sign } => Json::Array({
+        Token::Number {
+            value,
+            int_value,
+            has_sign,
+        } => Json::Array({
             let mut v = vec!["number".to_json()];
             v.extend(numeric(value, int_value, has_sign));
             v
         }),
-        Token::Percentage { unit_value, int_value, has_sign } => Json::Array({
+        Token::Percentage {
+            unit_value,
+            int_value,
+            has_sign,
+        } => Json::Array({
             let mut v = vec!["percentage".to_json()];
             v.extend(numeric(unit_value * 100., int_value, has_sign));
             v
         }),
-        Token::Dimension { value, int_value, has_sign, unit } => Json::Array({
+        Token::Dimension {
+            value,
+            int_value,
+            has_sign,
+            unit,
+        } => Json::Array({
             let mut v = vec!["dimension".to_json()];
             v.extend(numeric(value, int_value, has_sign));
             v.push(unit.to_json());
             v
         }),
 
         Token::WhiteSpace(_) => " ".to_json(),
         Token::Comment(_) => "/**/".to_json(),
@@ -950,22 +1098,23 @@ fn procedural_masquerade_whitespace() {
     }
 }
 
 #[test]
 fn parse_until_before_stops_at_delimiter_or_end_of_input() {
     // For all j and k, inputs[i].1[j] should parse the same as inputs[i].1[k]
     // when we use delimiters inputs[i].0.
     let inputs = vec![
-        (Delimiter::Bang | Delimiter::Semicolon,
-         // Note that the ';extra' is fine, because the ';' acts the same as
-         // the end of input.
-         vec!["token stream;extra", "token stream!", "token stream"]),
-        (Delimiter::Bang | Delimiter::Semicolon,
-         vec![";", "!", ""]),
+        (
+            Delimiter::Bang | Delimiter::Semicolon,
+            // Note that the ';extra' is fine, because the ';' acts the same as
+            // the end of input.
+            vec!["token stream;extra", "token stream!", "token stream"],
+        ),
+        (Delimiter::Bang | Delimiter::Semicolon, vec![";", "!", ""]),
     ];
     for equivalent in inputs {
         for (j, x) in equivalent.1.iter().enumerate() {
             for y in equivalent.1[j + 1..].iter() {
                 let mut ix = ParserInput::new(x);
                 let mut ix = Parser::new(&mut ix);
 
                 let mut iy = ParserInput::new(y);
@@ -973,17 +1122,17 @@ fn parse_until_before_stops_at_delimiter
 
                 let _ = ix.parse_until_before::<_, _, ()>(equivalent.0, |ix| {
                     iy.parse_until_before::<_, _, ()>(equivalent.0, |iy| {
                         loop {
                             let ox = ix.next();
                             let oy = iy.next();
                             assert_eq!(ox, oy);
                             if let Err(_) = ox {
-                                break
+                                break;
                             }
                         }
                         Ok(())
                     })
                 });
             }
         }
     }
@@ -1007,72 +1156,114 @@ fn parser_maintains_current_line() {
     assert_eq!(parser.next(), Ok(&Token::Ident("ident".into())));
     assert_eq!(parser.current_line(), "ident");
 }
 
 #[test]
 fn parser_with_line_number_offset() {
     let mut input = ParserInput::new_with_line_number_offset("ident\nident", 72);
     let mut parser = Parser::new(&mut input);
-    assert_eq!(parser.current_source_location(), SourceLocation { line: 72, column: 1 });
-    assert_eq!(parser.next_including_whitespace_and_comments(), Ok(&Token::Ident("ident".into())));
-    assert_eq!(parser.current_source_location(), SourceLocation { line: 72, column: 6 });
-    assert_eq!(parser.next_including_whitespace_and_comments(),
-               Ok(&Token::WhiteSpace("\n".into())));
-    assert_eq!(parser.current_source_location(), SourceLocation { line: 73, column: 1 });
-    assert_eq!(parser.next_including_whitespace_and_comments(), Ok(&Token::Ident("ident".into())));
-    assert_eq!(parser.current_source_location(), SourceLocation { line: 73, column: 6 });
+    assert_eq!(
+        parser.current_source_location(),
+        SourceLocation {
+            line: 72,
+            column: 1
+        }
+    );
+    assert_eq!(
+        parser.next_including_whitespace_and_comments(),
+        Ok(&Token::Ident("ident".into()))
+    );
+    assert_eq!(
+        parser.current_source_location(),
+        SourceLocation {
+            line: 72,
+            column: 6
+        }
+    );
+    assert_eq!(
+        parser.next_including_whitespace_and_comments(),
+        Ok(&Token::WhiteSpace("\n".into()))
+    );
+    assert_eq!(
+        parser.current_source_location(),
+        SourceLocation {
+            line: 73,
+            column: 1
+        }
+    );
+    assert_eq!(
+        parser.next_including_whitespace_and_comments(),
+        Ok(&Token::Ident("ident".into()))
+    );
+    assert_eq!(
+        parser.current_source_location(),
+        SourceLocation {
+            line: 73,
+            column: 6
+        }
+    );
 }
 
 #[test]
 fn cdc_regression_test() {
     let mut input = ParserInput::new("-->x");
     let mut parser = Parser::new(&mut input);
     parser.skip_cdc_and_cdo();
     assert_eq!(parser.next(), Ok(&Token::Ident("x".into())));
-    assert_eq!(parser.next(), Err(BasicParseError {
-        kind: BasicParseErrorKind::EndOfInput,
-        location: SourceLocation { line: 0, column: 5 }
-    }));
+    assert_eq!(
+        parser.next(),
+        Err(BasicParseError {
+            kind: BasicParseErrorKind::EndOfInput,
+            location: SourceLocation { line: 0, column: 5 }
+        })
+    );
 }
 
 #[test]
 fn parse_entirely_reports_first_error() {
     #[derive(PartialEq, Debug)]
-    enum E { Foo }
+    enum E {
+        Foo,
+    }
     let mut input = ParserInput::new("ident");
     let mut parser = Parser::new(&mut input);
     let result: Result<(), _> = parser.parse_entirely(|p| Err(p.new_custom_error(E::Foo)));
-    assert_eq!(result, Err(ParseError {
-        kind: ParseErrorKind::Custom(E::Foo),
-        location: SourceLocation { line: 0, column: 1 },
-    }));
+    assert_eq!(
+        result,
+        Err(ParseError {
+            kind: ParseErrorKind::Custom(E::Foo),
+            location: SourceLocation { line: 0, column: 1 },
+        })
+    );
 }
 
 #[test]
 fn parse_sourcemapping_comments() {
     let tests = vec![
         ("/*# sourceMappingURL=here*/", Some("here")),
         ("/*# sourceMappingURL=here  */", Some("here")),
         ("/*@ sourceMappingURL=here*/", Some("here")),
-        ("/*@ sourceMappingURL=there*/ /*# sourceMappingURL=here*/", Some("here")),
+        (
+            "/*@ sourceMappingURL=there*/ /*# sourceMappingURL=here*/",
+            Some("here"),
+        ),
         ("/*# sourceMappingURL=here there  */", Some("here")),
         ("/*# sourceMappingURL=  here  */", Some("")),
         ("/*# sourceMappingURL=*/", Some("")),
         ("/*# sourceMappingUR=here  */", None),
         ("/*! sourceMappingURL=here  */", None),
         ("/*# sourceMappingURL = here  */", None),
-        ("/*   # sourceMappingURL=here   */", None)
+        ("/*   # sourceMappingURL=here   */", None),
     ];
 
     for test in tests {
         let mut input = ParserInput::new(test.0);
         let mut parser = Parser::new(&mut input);
-        while let Ok(_) = parser.next_including_whitespace() {
-        }
+        while let Ok(_) = parser.next_including_whitespace() {}
         assert_eq!(parser.current_source_map_url(), test.1);
     }
 }
 
 #[test]
 fn parse_sourceurl_comments() {
     let tests = vec![
         ("/*# sourceURL=here*/", Some("here")),
@@ -1080,24 +1271,23 @@ fn parse_sourceurl_comments() {
         ("/*@ sourceURL=here*/", Some("here")),
         ("/*@ sourceURL=there*/ /*# sourceURL=here*/", Some("here")),
         ("/*# sourceURL=here there  */", Some("here")),
         ("/*# sourceURL=  here  */", Some("")),
         ("/*# sourceURL=*/", Some("")),
         ("/*# sourceMappingUR=here  */", None),
         ("/*! sourceURL=here  */", None),
         ("/*# sourceURL = here  */", None),
-        ("/*   # sourceURL=here   */", None)
+        ("/*   # sourceURL=here   */", None),
     ];
 
     for test in tests {
         let mut input = ParserInput::new(test.0);
         let mut parser = Parser::new(&mut input);
-        while let Ok(_) = parser.next_including_whitespace() {
-        }
+        while let Ok(_) = parser.next_including_whitespace() {}
         assert_eq!(parser.current_source_url(), test.1);
     }
 }
 
 #[test]
 fn roundtrip_percentage_token() {
     fn test_roundtrip(value: &str) {
         let mut input = ParserInput::new(value);
@@ -1153,18 +1343,25 @@ fn utf16_columns() {
 
     for test in tests {
         let mut input = ParserInput::new(test.0);
         let mut parser = Parser::new(&mut input);
 
         // Read all tokens.
         loop {
             match parser.next() {
-                Err(BasicParseError { kind: BasicParseErrorKind::EndOfInput, .. }) => { break; }
-                Err(_) => { assert!(false); }
+                Err(BasicParseError {
+                    kind: BasicParseErrorKind::EndOfInput,
+                    ..
+                }) => {
+                    break;
+                }
+                Err(_) => {
+                    assert!(false);
+                }
                 Ok(_) => {}
             };
         }
 
         // Check the resulting column.
         assert_eq!(parser.current_source_location().column, test.1);
     }
 }
--- a/third_party/rust/cssparser/src/tokenizer.rs
+++ b/third_party/rust/cssparser/src/tokenizer.rs
@@ -1,48 +1,45 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 // https://drafts.csswg.org/css-syntax/#tokenization
 
+use std::char;
+use std::i32;
 use std::ops::Range;
-use std::char;
-#[allow(unused_imports)] use std::ascii::AsciiExt;
-use std::i32;
 
-use parser::ParserState;
+use self::Token::*;
 use cow_rc_str::CowRcStr;
-use self::Token::*;
-
+use parser::ParserState;
 
 /// One of the pieces the CSS input is broken into.
 ///
 /// Some components use `Cow` in order to borrow from the original input string
 /// and avoid allocating/copying when possible.
 #[derive(PartialEq, Debug, Clone)]
 pub enum Token<'a> {
-
     /// A [`<ident-token>`](https://drafts.csswg.org/css-syntax/#ident-token-diagram)
     Ident(CowRcStr<'a>),
 
     /// A [`<at-keyword-token>`](https://drafts.csswg.org/css-syntax/#at-keyword-token-diagram)
     ///
     /// The value does not include the `@` marker.
     AtKeyword(CowRcStr<'a>),
 
     /// A [`<hash-token>`](https://drafts.csswg.org/css-syntax/#hash-token-diagram) with the type flag set to "unrestricted"
     ///
     /// The value does not include the `#` marker.
     Hash(CowRcStr<'a>),
 
     /// A [`<hash-token>`](https://drafts.csswg.org/css-syntax/#hash-token-diagram) with the type flag set to "id"
     ///
     /// The value does not include the `#` marker.
-    IDHash(CowRcStr<'a>),  // Hash that is a valid ID selector.
+    IDHash(CowRcStr<'a>), // Hash that is a valid ID selector.
 
     /// A [`<string-token>`](https://drafts.csswg.org/css-syntax/#string-token-diagram)
     ///
     /// The value does not include the quotes.
     QuotedString(CowRcStr<'a>),
 
     /// A [`<url-token>`](https://drafts.csswg.org/css-syntax/#url-token-diagram)
     ///
@@ -89,38 +86,38 @@ pub enum Token<'a> {
 
         /// The value as a float
         value: f32,
 
         /// If the origin source did not include a fractional part, the value as an integer.
         int_value: Option<i32>,
 
         /// The unit, e.g. "px" in `12px`
-        unit: CowRcStr<'a>
+        unit: CowRcStr<'a>,
     },
 
     /// A [`<whitespace-token>`](https://drafts.csswg.org/css-syntax/#whitespace-token-diagram)
     WhiteSpace(&'a str),
 
     /// A comment.
     ///
     /// The CSS Syntax spec does not generate tokens for comments,
     /// But we do, because we can (borrowed &str makes it cheap).
     ///
     /// The value does not include the `/*` `*/` markers.
     Comment(&'a str),
 
     /// A `:` `<colon-token>`
-    Colon,  // :
+    Colon, // :
 
     /// A `;` `<semicolon-token>`
-    Semicolon,  // ;
+    Semicolon, // ;
 
     /// A `,` `<comma-token>`
-    Comma,  // ,
+    Comma, // ,
 
     /// A `~=` [`<include-match-token>`](https://drafts.csswg.org/css-syntax/#include-match-token-diagram)
     IncludeMatch,
 
     /// A `|=` [`<dash-match-token>`](https://drafts.csswg.org/css-syntax/#dash-match-token-diagram)
     DashMatch,
 
     /// A `^=` [`<prefix-match-token>`](https://drafts.csswg.org/css-syntax/#prefix-match-token-diagram)
@@ -176,33 +173,31 @@ pub enum Token<'a> {
 
     /// A `<}-token>`
     ///
     /// When obtained from one of the `Parser::next*` methods,
     /// this token is always unmatched and indicates a parse error.
     CloseCurlyBracket,
 }
 
-
 impl<'a> Token<'a> {
     /// Return whether this token represents a parse error.
     ///
     /// `BadUrl` and `BadString` are tokenizer-level parse errors.
     ///
     /// `CloseParenthesis`, `CloseSquareBracket`, and `CloseCurlyBracket` are *unmatched*
     /// and therefore parse errors when returned by one of the `Parser::next*` methods.
     pub fn is_parse_error(&self) -> bool {
         matches!(
             *self,
             BadUrl(_) | BadString(_) | CloseParenthesis | CloseSquareBracket | CloseCurlyBracket
         )
     }
 }
 
-
 #[derive(Clone)]
 pub struct Tokenizer<'a> {
     input: &'a str,
     /// Counted in bytes, not code points. From 0.
     position: usize,
     /// The position at the start of the current line; but adjusted to
     /// ensure that computing the column will give the result in units
     /// of UTF-16 characters.
@@ -215,17 +210,16 @@ pub struct Tokenizer<'a> {
 
 #[derive(Copy, Clone, PartialEq, Eq)]
 enum SeenStatus {
     DontCare,
     LookingForThem,
     SeenAtLeastOne,
 }
 
-
 impl<'a> Tokenizer<'a> {
     #[inline]
     pub fn new(input: &str) -> Tokenizer {
         Tokenizer::with_first_line_number(input, 0)
     }
 
     #[inline]
     pub fn with_first_line_number(input: &str, first_line_number: u32) -> Tokenizer {
@@ -250,19 +244,17 @@ impl<'a> Tokenizer<'a> {
         let seen = self.var_or_env_functions == SeenStatus::SeenAtLeastOne;
         self.var_or_env_functions = SeenStatus::DontCare;
         seen
     }
 
     #[inline]
     pub fn see_function(&mut self, name: &str) {
         if self.var_or_env_functions == SeenStatus::LookingForThem {
-            if name.eq_ignore_ascii_case("var") ||
-                name.eq_ignore_ascii_case("env")
-            {
+            if name.eq_ignore_ascii_case("var") || name.eq_ignore_ascii_case("env") {
                 self.var_or_env_functions = SeenStatus::SeenAtLeastOne;
             }
         }
     }
 
     #[inline]
     pub fn next(&mut self) -> Result<Token<'a>, ()> {
         next_token(self)
@@ -335,22 +327,26 @@ impl<'a> Tokenizer<'a> {
             None
         } else {
             Some(self.input.as_bytes()[self.position])
         }
     }
 
     // If false, `tokenizer.next_char()` will not panic.
     #[inline]
-    fn is_eof(&self) -> bool { !self.has_at_least(0) }
+    fn is_eof(&self) -> bool {
+        !self.has_at_least(0)
+    }
 
     // If true, the input has at least `n` bytes left *after* the current one.
     // That is, `tokenizer.char_at(n)` will not panic.
     #[inline]
-    fn has_at_least(&self, n: usize) -> bool { self.position + n < self.input.len() }
+    fn has_at_least(&self, n: usize) -> bool {
+        self.position + n < self.input.len()
+    }
 
     // Advance over N bytes in the input.  This function can advance
     // over ASCII bytes (excluding newlines), or UTF-8 sequence
     // leaders (excluding leaders for 4-byte sequences).
     #[inline]
     pub fn advance(&mut self, n: usize) {
         if cfg!(debug_assertions) {
             // Each byte must either be an ASCII byte or a sequence
@@ -362,17 +358,19 @@ impl<'a> Tokenizer<'a> {
                 debug_assert!(b != b'\r' && b != b'\n' && b != b'\x0C');
             }
         }
         self.position += n
     }
 
     // Assumes non-EOF
     #[inline]
-    fn next_byte_unchecked(&self) -> u8 { self.byte_at(0) }
+    fn next_byte_unchecked(&self) -> u8 {
+        self.byte_at(0)
+    }
 
     #[inline]
     fn byte_at(&self, offset: usize) -> u8 {
         self.input.as_bytes()[self.position + offset]
     }
 
     // Advance over a single byte; the byte must be a UTF-8 sequence
     // leader for a 4-byte sequence.
@@ -430,28 +428,30 @@ impl<'a> Tokenizer<'a> {
             self.position += 1;
         }
         self.current_line_start_position = self.position;
         self.current_line_number += 1;
     }
 
     #[inline]
     fn has_newline_at(&self, offset: usize) -> bool {
-        self.position + offset < self.input.len() &&
-        matches!(self.byte_at(offset), b'\n' | b'\r' | b'\x0C')
+        self.position + offset < self.input.len()
+            && matches!(self.byte_at(offset), b'\n' | b'\r' | b'\x0C')
     }
 
     #[inline]
     fn consume_char(&mut self) -> char {
         let c = self.next_char();
         let len_utf8 = c.len_utf8();
         self.position += len_utf8;
         // Note that due to the special case for the 4-byte sequence
         // intro, we must use wrapping add here.
-        self.current_line_start_position = self.current_line_start_position.wrapping_add(len_utf8 - c.len_utf16());
+        self.current_line_start_position = self
+            .current_line_start_position
+            .wrapping_add(len_utf8 - c.len_utf16());
         c
     }
 
     #[inline]
     fn starts_with(&self, needle: &[u8]) -> bool {
         self.input.as_bytes()[self.position..].starts_with(needle)
     }
 
@@ -515,32 +515,30 @@ impl<'a> Tokenizer<'a> {
         }
     }
 }
 
 /// A position from the start of the input, counted in UTF-8 bytes.
 #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
 pub struct SourcePosition(pub(crate) usize);
 
-
 /// The line and column number for a given position within the input.
 #[derive(PartialEq, Eq, Debug, Clone, Copy)]
 pub struct SourceLocation {
     /// The line number, starting at 0 for the first line, unless `with_first_line_number` was used.
     pub line: u32,
 
     /// The column number within a line, starting at 1 for first the character of the line.
     /// Column numbers are counted in UTF-16 code units.
     pub column: u32,
 }
 
-
 fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
     if tokenizer.is_eof() {
-        return Err(())
+        return Err(());
     }
     let b = tokenizer.next_byte_unchecked();
     let token = match_byte! { b,
         b' ' | b'\t' => {
             consume_whitespace(tokenizer, false)
         },
         b'\n' | b'\x0C' | b'\r' => {
             consume_whitespace(tokenizer, true)
@@ -666,17 +664,16 @@ fn next_token<'a>(tokenizer: &mut Tokeni
                 tokenizer.advance(1);
                 Delim(b as char)
             }
         },
     };
     Ok(token)
 }
 
-
 fn consume_whitespace<'a>(tokenizer: &mut Tokenizer<'a>, newline: bool) -> Token<'a> {
     let start_position = tokenizer.position();
     if newline {
         tokenizer.consume_newline();
     } else {
         tokenizer.advance(1);
     }
     while !tokenizer.is_eof() {
@@ -691,45 +688,44 @@ fn consume_whitespace<'a>(tokenizer: &mu
             _ => {
                 break
             }
         }
     }
     WhiteSpace(tokenizer.slice_from(start_position))
 }
 
-
 // Check for sourceMappingURL or sourceURL comments and update the
 // tokenizer appropriately.
 fn check_for_source_map<'a>(tokenizer: &mut Tokenizer<'a>, contents: &'a str) {
     let directive = "# sourceMappingURL=";
     let directive_old = "@ sourceMappingURL=";
 
     // If there is a source map directive, extract the URL.
     if contents.starts_with(directive) || contents.starts_with(directive_old) {
         let contents = &contents[directive.len()..];
-        tokenizer.source_map_url = contents.split(|c| {
-            c == ' ' || c == '\t' || c == '\x0C' || c == '\r' || c == '\n'
-        }).next()
+        tokenizer.source_map_url = contents
+            .split(|c| c == ' ' || c == '\t' || c == '\x0C' || c == '\r' || c == '\n')
+            .next()
     }
 
     let directive = "# sourceURL=";
     let directive_old = "@ sourceURL=";
 
     // If there is a source map directive, extract the URL.
     if contents.starts_with(directive) || contents.starts_with(directive_old) {
         let contents = &contents[directive.len()..];
-        tokenizer.source_url = contents.split(|c| {
-            c == ' ' || c == '\t' || c == '\x0C' || c == '\r' || c == '\n'
-        }).next()
+        tokenizer.source_url = contents
+            .split(|c| c == ' ' || c == '\t' || c == '\x0C' || c == '\r' || c == '\n')
+            .next()
     }
 }
 
 fn consume_comment<'a>(tokenizer: &mut Tokenizer<'a>) -> &'a str {
-    tokenizer.advance(2);  // consume "/*"
+    tokenizer.advance(2); // consume "/*"
     let start_position = tokenizer.position();
     while !tokenizer.is_eof() {
         match_byte! { tokenizer.next_byte_unchecked(),
             b'*' => {
                 let end_position = tokenizer.position();
                 tokenizer.advance(1);
                 if tokenizer.next_byte() == Some(b'/') {
                     tokenizer.advance(1);
@@ -752,31 +748,32 @@ fn consume_comment<'a>(tokenizer: &mut T
     let contents = tokenizer.slice_from(start_position);
     check_for_source_map(tokenizer, contents);
     contents
 }
 
 fn consume_string<'a>(tokenizer: &mut Tokenizer<'a>, single_quote: bool) -> Token<'a> {
     match consume_quoted_string(tokenizer, single_quote) {
         Ok(value) => QuotedString(value),
-        Err(value) => BadString(value)
+        Err(value) => BadString(value),
     }
 }
 
-
 /// Return `Err(())` on syntax error (ie. unescaped newline)
-fn consume_quoted_string<'a>(tokenizer: &mut Tokenizer<'a>, single_quote: bool)
-                             -> Result<CowRcStr<'a>, CowRcStr<'a>> {
-    tokenizer.advance(1);  // Skip the initial quote
-    // start_pos is at code point boundary, after " or '
+fn consume_quoted_string<'a>(
+    tokenizer: &mut Tokenizer<'a>,
+    single_quote: bool,
+) -> Result<CowRcStr<'a>, CowRcStr<'a>> {
+    tokenizer.advance(1); // Skip the initial quote
+                          // start_pos is at code point boundary, after " or '
     let start_pos = tokenizer.position();
     let mut string_bytes;
     loop {
         if tokenizer.is_eof() {
-            return Ok(tokenizer.slice_from(start_pos).into())
+            return Ok(tokenizer.slice_from(start_pos).into());
         }
         match_byte! { tokenizer.next_byte_unchecked(),
             b'"' => {
                 if !single_quote {
                     let value = tokenizer.slice_from(start_pos);
                     tokenizer.advance(1);
                     return Ok(value.into())
                 }
@@ -864,40 +861,39 @@ fn consume_quoted_string<'a>(tokenizer: 
 
         // If this byte is part of a multi-byte code point,
         // we’ll end up copying the whole code point before this loop does something else.
         string_bytes.push(b);
     }
 
     Ok(
         // string_bytes is well-formed UTF-8, see other comments.
-        unsafe { from_utf8_release_unchecked(string_bytes) }.into()
+        unsafe { from_utf8_release_unchecked(string_bytes) }.into(),
     )
 }
 
-
 #[inline]
 fn is_ident_start(tokenizer: &mut Tokenizer) -> bool {
-    !tokenizer.is_eof() && match_byte! { tokenizer.next_byte_unchecked(),
-        b'a'...b'z' | b'A'...b'Z' | b'_' | b'\0' => { true },
-        b'-' => {
-            tokenizer.has_at_least(1) && match_byte! { tokenizer.byte_at(1),
-                b'a'...b'z' | b'A'...b'Z' | b'-' | b'_' | b'\0' => {
-                    true
+    !tokenizer.is_eof()
+        && match_byte! { tokenizer.next_byte_unchecked(),
+            b'a'...b'z' | b'A'...b'Z' | b'_' | b'\0' => { true },
+            b'-' => {
+                tokenizer.has_at_least(1) && match_byte! { tokenizer.byte_at(1),
+                    b'a'...b'z' | b'A'...b'Z' | b'-' | b'_' | b'\0' => {
+                        true
+                    }
+                    b'\\' => { !tokenizer.has_newline_at(1) }
+                    b => { !b.is_ascii() },
                 }
-                b'\\' => { !tokenizer.has_newline_at(1) }
-                b => { !b.is_ascii() },
-            }
-        },
-        b'\\' => { !tokenizer.has_newline_at(1) },
-        b => { !b.is_ascii() },
-    }
+            },
+            b'\\' => { !tokenizer.has_newline_at(1) },
+            b => { !b.is_ascii() },
+        }
 }
 
-
 fn consume_ident_like<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> {
     let value = consume_name(tokenizer);
     if !tokenizer.is_eof() && tokenizer.next_byte_unchecked() == b'(' {
         tokenizer.advance(1);
         if value.eq_ignore_ascii_case("url") {
             consume_unquoted_url(tokenizer).unwrap_or(Function(value))
         } else {
             tokenizer.see_function(&value);
@@ -909,17 +905,17 @@ fn consume_ident_like<'a>(tokenizer: &mu
 }
 
 fn consume_name<'a>(tokenizer: &mut Tokenizer<'a>) -> CowRcStr<'a> {
     // start_pos is the end of the previous token, therefore at a code point boundary
     let start_pos = tokenizer.position();
     let mut value_bytes;
     loop {
         if tokenizer.is_eof() {
-            return tokenizer.slice_from(start_pos).into()
+            return tokenizer.slice_from(start_pos).into();
         }
         match_byte! { tokenizer.next_byte_unchecked(),
             b'a'...b'z' | b'A'...b'Z' | b'0'...b'9' | b'_' | b'-' => { tokenizer.advance(1) },
             b'\\' | b'\0' => {
                 // * The tokenizer’s input is UTF-8 since it’s `&str`.
                 // * start_pos is at a code point boundary
                 // * so is the current position (which is before '\\' or '\0'
                 //
@@ -1014,64 +1010,64 @@ fn consume_numeric<'a>(tokenizer: &mut T
         tokenizer.advance(1);
     }
 
     let mut integral_part: f64 = 0.;
     while let Some(digit) = byte_to_decimal_digit(tokenizer.next_byte_unchecked()) {
         integral_part = integral_part * 10. + digit as f64;
         tokenizer.advance(1);
         if tokenizer.is_eof() {
-            break
+            break;
         }
     }
 
     let mut is_integer = true;
 
     let mut fractional_part: f64 = 0.;
-    if tokenizer.has_at_least(1) && tokenizer.next_byte_unchecked() == b'.'
-            && matches!(tokenizer.byte_at(1), b'0'...b'9') {
+    if tokenizer.has_at_least(1)
+        && tokenizer.next_byte_unchecked() == b'.'
+        && matches!(tokenizer.byte_at(1), b'0'...b'9')
+    {
         is_integer = false;
-        tokenizer.advance(1);  // Consume '.'
+        tokenizer.advance(1); // Consume '.'
         let mut factor = 0.1;
         while let Some(digit) = byte_to_decimal_digit(tokenizer.next_byte_unchecked()) {
             fractional_part += digit as f64 * factor;
             factor *= 0.1;
             tokenizer.advance(1);
             if tokenizer.is_eof() {
-                break
+                break;
             }
         }
     }
 
     let mut value = sign * (integral_part + fractional_part);
 
-    if tokenizer.has_at_least(1)
-       && matches!(tokenizer.next_byte_unchecked(), b'e' | b'E') {
-
-        if matches!(tokenizer.byte_at(1), b'0'...b'9') ||
-           (tokenizer.has_at_least(2)
-            && matches!(tokenizer.byte_at(1), b'+' | b'-')
-            && matches!(tokenizer.byte_at(2), b'0'...b'9'))
+    if tokenizer.has_at_least(1) && matches!(tokenizer.next_byte_unchecked(), b'e' | b'E') {
+        if matches!(tokenizer.byte_at(1), b'0'...b'9')
+            || (tokenizer.has_at_least(2)
+                && matches!(tokenizer.byte_at(1), b'+' | b'-')
+                && matches!(tokenizer.byte_at(2), b'0'...b'9'))
         {
             is_integer = false;
             tokenizer.advance(1);
             let (has_sign, sign) = match tokenizer.next_byte_unchecked() {
                 b'-' => (true, -1.),
                 b'+' => (true, 1.),
                 _ => (false, 1.),
             };
             if has_sign {
                 tokenizer.advance(1);
             }
             let mut exponent: f64 = 0.;
             while let Some(digit) = byte_to_decimal_digit(tokenizer.next_byte_unchecked()) {
                 exponent = exponent * 10. + digit as f64;
                 tokenizer.advance(1);
                 if tokenizer.is_eof() {
-                    break
+                    break;
                 }
             }
             value *= f64::powf(10., sign * exponent);
         }
     }
 
     let int_value = if is_integer {
         Some(if value >= i32::MAX as f64 {
@@ -1086,17 +1082,17 @@ fn consume_numeric<'a>(tokenizer: &mut T
     };
 
     if !tokenizer.is_eof() && tokenizer.next_byte_unchecked() == b'%' {
         tokenizer.advance(1);
         return Percentage {
             unit_value: (value / 100.) as f32,
             int_value: int_value,
             has_sign: has_sign,
-        }
+        };
     }
     let value = value as f32;
     if is_ident_start(tokenizer) {
         let unit = consume_name(tokenizer);
         Dimension {
             value: value,
             int_value: int_value,
             has_sign: has_sign,
@@ -1106,17 +1102,16 @@ fn consume_numeric<'a>(tokenizer: &mut T
         Number {
             value: value,
             int_value: int_value,
             has_sign: has_sign,
         }
     }
 }
 
-
 #[inline]
 unsafe fn from_utf8_release_unchecked(string_bytes: Vec<u8>) -> String {
     if cfg!(debug_assertions) {
         String::from_utf8(string_bytes).unwrap()
     } else {
         String::from_utf8_unchecked(string_bytes)
     }
 }
@@ -1129,17 +1124,17 @@ fn consume_unquoted_url<'a>(tokenizer: &
     let mut last_newline = 0;
     let mut found_printable_char = false;
     let mut iter = from_start.bytes().enumerate();
     loop {
         let (offset, b) = match iter.next() {
             Some(item) => item,
             None => {
                 tokenizer.position = tokenizer.input.len();
-                break
+                break;
             }
         };
         match_byte! { b,
             b' ' | b'\t' => {},
             b'\n' | b'\x0C' => {
                 newlines += 1;
                 last_newline = offset;
             }
@@ -1171,28 +1166,28 @@ fn consume_unquoted_url<'a>(tokenizer: &
         // No need for wrapping_add here, because there's no possible
         // way to wrap.
         tokenizer.current_line_start_position = start_position + last_newline + 1;
     }
 
     if found_printable_char {
         // This function only consumed ASCII (whitespace) bytes,
         // so the current position is a code point boundary.
-        return Ok(consume_unquoted_url_internal(tokenizer))
+        return Ok(consume_unquoted_url_internal(tokenizer));
     } else {
-        return Ok(UnquotedUrl("".into()))
+        return Ok(UnquotedUrl("".into()));
     }
 
     fn consume_unquoted_url_internal<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> {
         // This function is only called with start_pos at a code point boundary.
         let start_pos = tokenizer.position();
         let mut string_bytes: Vec<u8>;
         loop {
             if tokenizer.is_eof() {
-                return UnquotedUrl(tokenizer.slice_from(start_pos).into())
+                return UnquotedUrl(tokenizer.slice_from(start_pos).into());
             }
             match_byte! { tokenizer.next_byte_unchecked(),
                 b' ' | b'\t' | b'\n' | b'\r' | b'\x0C' => {
                     let value = tokenizer.slice_from(start_pos);
                     return consume_url_end(tokenizer, start_pos, value.into())
                 }
                 b')' => {
                     let value = tokenizer.slice_from(start_pos);
@@ -1269,24 +1264,25 @@ fn consume_unquoted_url<'a>(tokenizer: &
                     // ASCII or other leading byte.
                     tokenizer.advance(1);
                     string_bytes.push(b)
                 }
             }
         }
         UnquotedUrl(
             // string_bytes is well-formed UTF-8, see other comments.
-            unsafe { from_utf8_release_unchecked(string_bytes) }.into()
+            unsafe { from_utf8_release_unchecked(string_bytes) }.into(),
         )
     }
 
-    fn consume_url_end<'a>(tokenizer: &mut Tokenizer<'a>,
-                           start_pos: SourcePosition,
-                           string: CowRcStr<'a>)
-                           -> Token<'a> {
+    fn consume_url_end<'a>(
+        tokenizer: &mut Tokenizer<'a>,
+        start_pos: SourcePosition,
+        string: CowRcStr<'a>,
+    ) -> Token<'a> {
         while !tokenizer.is_eof() {
             match_byte! { tokenizer.next_byte_unchecked(),
                 b')' => {
                     tokenizer.advance(1);
                     break
                 }
                 b' ' | b'\t' => { tokenizer.advance(1); }
                 b'\n' | b'\x0C' | b'\r' => {
@@ -1334,34 +1330,39 @@ fn consume_hex_digits<'a>(tokenizer: &mu
     let mut digits = 0;
     while digits < 6 && !tokenizer.is_eof() {
         match byte_to_hex_digit(tokenizer.next_byte_unchecked()) {
             Some(digit) => {
                 value = value * 16 + digit;
                 digits += 1;
                 tokenizer.advance(1);
             }
-            None => break
+            None => break,
         }
     }
     (value, digits)
 }
 
-
 // Same constraints as consume_escape except it writes into `bytes` the result
 // instead of returning it.
 fn consume_escape_and_write(tokenizer: &mut Tokenizer, bytes: &mut Vec<u8>) {
-    bytes.extend(consume_escape(tokenizer).encode_utf8(&mut [0; 4]).as_bytes())
+    bytes.extend(
+        consume_escape(tokenizer)
+            .encode_utf8(&mut [0; 4])
+            .as_bytes(),
+    )
 }
 
 // Assumes that the U+005C REVERSE SOLIDUS (\) has already been consumed
 // and that the next input character has already been verified
 // to not be a newline.
 fn consume_escape(tokenizer: &mut Tokenizer) -> char {
-    if tokenizer.is_eof() { return '\u{FFFD}' }  // Escaped EOF
+    if tokenizer.is_eof() {
+        return '\u{FFFD}';
+    } // Escaped EOF
     match_byte! { tokenizer.next_byte_unchecked(),
         b'0'...b'9' | b'A'...b'F' | b'a'...b'f' => {
             let (c, _) = consume_hex_digits(tokenizer);
             if !tokenizer.is_eof() {
                 match_byte! { tokenizer.next_byte_unchecked(),
                     b' ' | b'\t' => {
                         tokenizer.advance(1)
                     }
--- a/third_party/rust/cssparser/src/unicode_range.rs
+++ b/third_party/rust/cssparser/src/unicode_range.rs
@@ -1,18 +1,18 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 //! https://drafts.csswg.org/css-syntax/#urange
 
-use {Parser, ToCss, BasicParseError};
 use std::char;
 use std::fmt;
 use tokenizer::Token;
+use {BasicParseError, Parser, ToCss};
 
 /// One contiguous range of code points.
 ///
 /// Can not be empty. Can represent a single code point when start == end.
 #[derive(PartialEq, Eq, Clone, Hash)]
 #[repr(C)]
 pub struct UnicodeRange {
     /// Inclusive start of the range. In [0, end].
@@ -39,116 +39,117 @@ impl UnicodeRange {
 
         // This deviates from the spec in case there are CSS comments
         // between tokens in the middle of one <unicode-range>,
         // but oh well…
         let concatenated_tokens = input.slice_from(after_u);
 
         let range = match parse_concatenated(concatenated_tokens.as_bytes()) {
             Ok(range) => range,
-            Err(()) => return Err(input.new_basic_unexpected_token_error(Token::Ident(concatenated_tokens.into()))),
+            Err(()) => {
+                return Err(input
+                    .new_basic_unexpected_token_error(Token::Ident(concatenated_tokens.into())))
+            }
         };
         if range.end > char::MAX as u32 || range.start > range.end {
             Err(input.new_basic_unexpected_token_error(Token::Ident(concatenated_tokens.into())))
         } else {
             Ok(range)
         }
     }
 }
 
 fn parse_tokens<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(), BasicParseError<'i>> {
     match input.next_including_whitespace()?.clone() {
         Token::Delim('+') => {
             // FIXME: remove .clone() when lifetimes are non-lexical.
             match input.next_including_whitespace()?.clone() {
                 Token::Ident(_) => {}
                 Token::Delim('?') => {}
-                t => return Err(input.new_basic_unexpected_token_error(t))
+                t => return Err(input.new_basic_unexpected_token_error(t)),
             }
             parse_question_marks(input)
         }
-        Token::Dimension { .. } => {
-            parse_question_marks(input)
-        }
+        Token::Dimension { .. } => parse_question_marks(input),
         Token::Number { .. } => {
             let after_number = input.state();
             match input.next_including_whitespace() {
                 Ok(&Token::Delim('?')) => parse_question_marks(input),
                 Ok(&Token::Dimension { .. }) => {}
                 Ok(&Token::Number { .. }) => {}
-                _ => input.reset(&after_number)
+                _ => input.reset(&after_number),
             }
         }
-        t => return Err(input.new_basic_unexpected_token_error(t))
+        t => return Err(input.new_basic_unexpected_token_error(t)),
     }
     Ok(())
 }
 
 /// Consume as many '?' as possible
 fn parse_question_marks(input: &mut Parser) {
     loop {
         let start = input.state();
         match input.next_including_whitespace() {
             Ok(&Token::Delim('?')) => {}
             _ => {
                 input.reset(&start);
-                return
+                return;
             }
         }
     }
 }
 
 fn parse_concatenated(text: &[u8]) -> Result<UnicodeRange, ()> {
     let mut text = match text.split_first() {
         Some((&b'+', text)) => text,
-        _ => return Err(())
+        _ => return Err(()),
     };
     let (first_hex_value, hex_digit_count) = consume_hex(&mut text);
     let question_marks = consume_question_marks(&mut text);
     let consumed = hex_digit_count + question_marks;
     if consumed == 0 || consumed > 6 {
-        return Err(())
+        return Err(());
     }
 
     if question_marks > 0 {
         if text.is_empty() {
             return Ok(UnicodeRange {
                 start: first_hex_value << (question_marks * 4),
                 end: ((first_hex_value + 1) << (question_marks * 4)) - 1,
-            })
+            });
         }
     } else if text.is_empty() {
         return Ok(UnicodeRange {
             start: first_hex_value,
             end: first_hex_value,
-        })
+        });
     } else {
         if let Some((&b'-', mut text)) = text.split_first() {
             let (second_hex_value, hex_digit_count) = consume_hex(&mut text);
             if hex_digit_count > 0 && hex_digit_count <= 6 && text.is_empty() {
                 return Ok(UnicodeRange {
                     start: first_hex_value,
                     end: second_hex_value,
-                })
+                });
             }
         }
     }
     Err(())
 }
 
 fn consume_hex(text: &mut &[u8]) -> (u32, usize) {
     let mut value = 0;
     let mut digits = 0;
     while let Some((&byte, rest)) = text.split_first() {
         if let Some(digit_value) = (byte as char).to_digit(16) {
             value = value * 0x10 + digit_value;
             digits += 1;
             *text = rest
         } else {
-            break
+            break;
         }
     }
     (value, digits)
 }
 
 fn consume_question_marks(text: &mut &[u8]) -> usize {
     let mut question_marks = 0;
     while let Some((&b'?', rest)) = text.split_first() {
@@ -160,16 +161,19 @@ fn consume_question_marks(text: &mut &[u
 
 impl fmt::Debug for UnicodeRange {
     fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
         self.to_css(formatter)
     }
 }
 
 impl ToCss for UnicodeRange {
-    fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
+    fn to_css<W>(&self, dest: &mut W) -> fmt::Result
+    where
+        W: fmt::Write,
+    {
         write!(dest, "U+{:X}", self.start)?;
         if self.end != self.start {
             write!(dest, "-{:X}", self.end)?;
         }
         Ok(())
     }
 }
--- a/third_party/rust/cstr-macros/.cargo-checksum.json
+++ b/third_party/rust/cstr-macros/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{"Cargo.toml":"517c9ae719b876cf67d9bd8dfec71d0b7a35ffe6e68668c35e21e2f6c3dfdc05","LICENSE":"2c6fc9268c3b765da5bf34fe4909425437f61be05674c2516c7f8cf1251c20aa","src/lib.rs":"71e7248b21b5e603e31060ecf241cf204efdfea5a0b400d084601f6c8bdfe11c"},"package":"0472c17c83d3ec1af32fb6ee2b3ad56ae0b6e69355d63d1d30602055c34324a8"}
\ No newline at end of file
+{"files":{"Cargo.toml":"ffb2762fc243053d72de4951cc9b0a10b3730bdd10dd14071d222caef198be0f","LICENSE":"2c6fc9268c3b765da5bf34fe4909425437f61be05674c2516c7f8cf1251c20aa","src/lib.rs":"b1de1d679338b008dd22e4aea5384f89d7a9424d65102b4fa536d8d9eaa56e22"},"package":"0f12dd847ec773fc98d75edba5394cb87d0f35e7ee548a4c81849ca6374b3d48"}
\ No newline at end of file
--- a/third_party/rust/cstr-macros/Cargo.toml
+++ b/third_party/rust/cstr-macros/Cargo.toml
@@ -7,25 +7,25 @@
 #
 # If you believe there's an error in this file please file an
 # issue against the rust-lang/cargo repository. If you're
 # editing this file be aware that the upstream Cargo.toml
 # will likely look very different (and much more reasonable)
 
 [package]
 name = "cstr-macros"
-version = "0.1.3"
+version = "0.1.5"
 authors = ["Xidorn Quan <me@upsuper.org>"]
 description = "Procedural macros for cstr"
 license = "MIT"
 repository = "https://github.com/upsuper/cstr"
 
 [lib]
 proc-macro = true
 [dependencies.procedural-masquerade]
 version = "0.1"
 
 [dependencies.syn]
-version = "0.13"
+version = "0.15"
 features = ["derive", "parsing"]
 default-features = false
 [dev-dependencies.quote]
-version = "0.5"
+version = "0.6"
--- a/third_party/rust/cstr-macros/src/lib.rs
+++ b/third_party/rust/cstr-macros/src/lib.rs
@@ -12,22 +12,29 @@ use std::ffi::CString;
 define_proc_macros! {
     #[allow(non_snake_case)]
     pub fn cstr_internal__build_bytes(input: &str) -> String {
         let bytes = build_bytes(input);
         format!("const BYTES: &'static [u8] = {};", bytes)
     }
 }
 
+fn input_to_string(input: &str) -> String {
+    if let Ok(s) = syn::parse_str::<syn::LitStr>(input) {
+        return s.value();
+    }
+    if let Ok(i) = syn::parse_str::<syn::Ident>(input) {
+        return i.to_string();
+    }
+    panic!("expected a string literal or an identifier, got {}", input)
+}
+
 fn build_bytes(input: &str) -> String {
-    let s = match syn::parse_str::<syn::LitStr>(input) {
-        Ok(s) => s,
-        _ => panic!("expected a string literal, got {}", input)
-    };
-    let cstr = match CString::new(s.value()) {
+    let s = input_to_string(input);
+    let cstr = match CString::new(s.as_bytes()) {
         Ok(s) => s,
         _ => panic!("literal must not contain NUL byte")
     };
     let mut bytes = Vec::new();
     bytes.extend(br#"b""#);
     bytes.extend(cstr.as_bytes().iter().flat_map(|&b| escape_default(b)));
     bytes.extend(br#"\0""#);
     String::from_utf8(bytes).unwrap()
@@ -49,16 +56,17 @@ mod tests {
     }
 
     #[test]
     fn test_build_bytes() {
         assert_eq!(build_bytes!("aaa"), result!(b"aaa\0"));
         assert_eq!(build_bytes!("\t\n\r\"\\'"), result!(b"\t\n\r\"\\\'\0"));
         assert_eq!(build_bytes!("\x01\x02 \x7f"), result!(b"\x01\x02 \x7f\0"));
         assert_eq!(build_bytes!("你好"), result!(b"\xe4\xbd\xa0\xe5\xa5\xbd\0"));
+        assert_eq!(build_bytes!(foobar), result!(b"foobar\0"));
     }
 
     #[test]
     #[should_panic]
     fn test_build_bytes_nul_inside() {
         build_bytes!("a\x00a");
     }
 }
--- a/third_party/rust/num-derive/.cargo-checksum.json
+++ b/third_party/rust/num-derive/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{"Cargo.toml":"57dcf7e06d8a81aba6d207476e9d98b67ae1c626b42455d560ff4722bce331e9","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"5c81631711af54d31e40841cd0153a95d9e505d8eba7503d114789ffb5e232c6","RELEASES.md":"ddb394449b049aace6f20cab3d6ab1de0c82f100a0c778e9790817c544590df5","bors.toml":"1c81ede536a37edd30fe4e622ff0531b25372403ac9475a5d6c50f14156565a2","ci/rustup.sh":"8f58c563be0c200447d528c3b9db296db16ce198e26e872aee210174d4daf518","ci/test_full.sh":"aa3c9fd125f4befddbd109698a0bb3e078e37c092e3fd52029aadb036bd58d3f","src/lib.rs":"9e525516c4bdca62c486fe8d0ef5960e35ec2d7b49a8f2b7e12b9e0383909191","tests/empty_enum.rs":"1b2312ec2fc9866fce7172e71e0aa2efcc3cb9d7659d0b633eb352bb1e080d53","tests/issue-6.rs":"e8eaa07255f00d420eeb5fb64ebd872ba58235d1af6b1f806a6784e6de316459","tests/issue-9.rs":"f64454c4411b73f9257974caeb4561ccf7850d097d0b0177d841c88bb05eaa02","tests/num_derive_without_num.rs":"3ce528221a2cb752859e20c5423c4b474fec714b41d8c1b62f5614b165d7262b","tests/trivial.rs":"31eac2d8c99e3846cca072015e255c72fea4f80a6f5f235f9c7fd76e122656ce","tests/with_custom_values.rs":"3d692c9c5d570ccb2b8d79ce848812bd72fe01db6f564beedcc69176900ca804"},"package":"0d2c31b75c36a993d30c7a13d70513cb93f02acafdd5b7ba250f9b0e18615de7"}
\ No newline at end of file
+{"files":{"Cargo.toml":"24357f38521e03dbc562a42d44139d615a922406fb2bb122e17df29bc9bbb586","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"5c81631711af54d31e40841cd0153a95d9e505d8eba7503d114789ffb5e232c6","RELEASES.md":"62d6fef92273d9ee0e520ba611562b744e775318f5f6ae8e042ed94a3e19b2d6","build.rs":"16de2aa57e754fc1526d0400b5d87a3f771296705fca54601aa598b6f74ded8f","src/lib.rs":"5860c6007ea74b7b58033c15beae7c9e0859205e3ca1b76af9dc3e82914e08a2","tests/empty_enum.rs":"1b2312ec2fc9866fce7172e71e0aa2efcc3cb9d7659d0b633eb352bb1e080d53","tests/issue-6.rs":"b03b7382de854f30b84fd39d11b2c09aa97c136408942841cfc2c30c31b3f1a7","tests/issue-9.rs":"1aa7353078321a964c70986ceb071569290509b70faa9825e8b584165865ea7e","tests/newtype.rs":"1b60f13afbed8f18e94fe37141543d0c8d265419e1c2447b84ce14ac82af48e8","tests/num_derive_without_num.rs":"3ce528221a2cb752859e20c5423c4b474fec714b41d8c1b62f5614b165d7262b","tests/trivial.rs":"a6b0faab04527f6835f43cd72317a00065a7a6cf4c506d04e77f898134f7a59b","tests/with_custom_values.rs":"81ed60b50726555ee840ca773335aae68ac425d5af9ebbcbb3c6d6834358c73c"},"package":"eafd0b45c5537c3ba526f79d3e75120036502bebacbb3f3220914067ce39dbf2"}
\ No newline at end of file
--- a/third_party/rust/num-derive/Cargo.toml
+++ b/third_party/rust/num-derive/Cargo.toml
@@ -1,45 +1,47 @@
 # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
 #
 # When uploading crates to the registry Cargo will automatically
 # "normalize" Cargo.toml files for maximal compatibility
 # with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g. crates.io) dependencies
+# to registry (e.g., crates.io) dependencies
 #
 # If you believe there's an error in this file please file an
 # issue against the rust-lang/cargo repository. If you're
 # editing this file be aware that the upstream Cargo.toml
 # will likely look very different (and much more reasonable)
 
 [package]
 name = "num-derive"
-version = "0.2.2"
+version = "0.2.5"
 authors = ["The Rust Project Developers"]
+build = "build.rs"
+exclude = ["/ci/*", "/.travis.yml", "/bors.toml"]
 description = "Numeric syntax extensions"
 homepage = "https://github.com/rust-num/num-derive"
 documentation = "https://docs.rs/num-derive"
 readme = "README.md"
 keywords = ["mathematics", "numerics"]
 categories = ["science"]
 license = "MIT/Apache-2.0"
 repository = "https://github.com/rust-num/num-derive"
 
 [lib]
 name = "num_derive"
 test = false
 proc-macro = true
-[dependencies.num-traits]
-version = "0.2"
-
 [dependencies.proc-macro2]
 version = "0.4.2"
 
 [dependencies.quote]
 version = "0.6"
 
 [dependencies.syn]
-version = "0.14"
+version = "0.15"
 [dev-dependencies.num]
-version = "0.1"
+version = "0.2"
+
+[dev-dependencies.num-traits]
+version = "0.2"
 
 [features]
 full-syntax = ["syn/full"]
--- a/third_party/rust/num-derive/RELEASES.md
+++ b/third_party/rust/num-derive/RELEASES.md
@@ -1,49 +1,69 @@
-# Release 0.2.2
+# Release 0.2.5 (2019-04-23)
+
+- [Improved the masking of lints in derived code][23].
+
+[23]: https://github.com/rust-num/num-derive/pull/23
+
+# Release 0.2.4 (2019-01-25)
+
+- [Adjusted dependencies to allow no-std targets][22].
+
+[22]: https://github.com/rust-num/num-derive/pull/22
+
+# Release 0.2.3 (2018-10-03)
+
+- [Added newtype deriving][17] for `FromPrimitive`, `ToPrimitive`,
+  `NumOps<Self, Self>`, `NumCast`, `Zero`, `One`, `Num`, and `Float`.
+  Thanks @asayers!
+
+[17]: https://github.com/rust-num/num-derive/pull/17
+
+# Release 0.2.2 (2018-05-22)
 
 - [Updated dependencies][14].
 
 [14]: https://github.com/rust-num/num-derive/pull/14
 
-# Release 0.2.1
+# Release 0.2.1 (2018-05-09)
 
 - [Updated dependencies][12] -- thanks @spearman!
 
 [12]: https://github.com/rust-num/num-derive/pull/12
 
-# Release 0.2.0
+# Release 0.2.0 (2018-02-21)
 
 - [Discriminant matching is now simplified][10], casting values directly by
   name, rather than trying to compute offsets from known values manually.
 - **breaking change**: [Derivations now import the traits from `num-traits`][11]
   instead of the full `num` crate.  These are still compatible, but users need
   to have an explicit `num-traits = "0.2"` dependency in their `Cargo.toml`.
 
 [10]: https://github.com/rust-num/num-derive/pull/10
 [11]: https://github.com/rust-num/num-derive/pull/11
 
 
-# Release 0.1.44
+# Release 0.1.44 (2018-01-26)
 
 - [The derived code now explicitly allows `unused_qualifications`][9], so users
   that globally deny that lint don't encounter an error.
 
 [9]: https://github.com/rust-num/num-derive/pull/9
 
 
-# Release 0.1.43
+# Release 0.1.43 (2018-01-23)
 
 - [The derived code now explicitly allows `trivial_numeric_casts`][7], so users
   that globally deny that lint don't encounter an error.
 
 [7]: https://github.com/rust-num/num-derive/pull/7
 
 
-# Release 0.1.42
+# Release 0.1.42 (2018-01-22)
 
 - [num-derive now has its own source repository][num-356] at [rust-num/num-derive][home].
 - [The derivation macros have been updated][3] to using `syn` 0.12.  Support for complex
   expressions in enum values can be enabled with the `full-syntax` feature.
 
 Thanks to @cuviper and @hcpl for their contributions!
 
 [home]: https://github.com/rust-num/num-derive
deleted file mode 100644
--- a/third_party/rust/num-derive/bors.toml
+++ /dev/null
@@ -1,3 +0,0 @@
-status = [
-  "continuous-integration/travis-ci/push",
-]
new file mode 100644
--- /dev/null
+++ b/third_party/rust/num-derive/build.rs
@@ -0,0 +1,35 @@
+use std::env;
+use std::io::Write;
+use std::process::{Command, Stdio};
+
+fn main() {
+    if probe("fn main() { 0i128; }") {
+        println!("cargo:rustc-cfg=has_i128");
+    } else if env::var_os("CARGO_FEATURE_I128").is_some() {
+        panic!("i128 support was not detected!");
+    }
+}
+
+/// Test if a code snippet can be compiled
+fn probe(code: &str) -> bool {
+    let rustc = env::var_os("RUSTC").unwrap_or_else(|| "rustc".into());
+    let out_dir = env::var_os("OUT_DIR").expect("environment variable OUT_DIR");
+
+    let mut child = Command::new(rustc)
+        .arg("--out-dir")
+        .arg(out_dir)
+        .arg("--emit=obj")
+        .arg("-")
+        .stdin(Stdio::piped())
+        .spawn()
+        .expect("rustc probe");
+
+    child
+        .stdin
+        .as_mut()
+        .expect("rustc stdin")
+        .write_all(code.as_bytes())
+        .expect("write rustc stdin");
+
+    child.wait().expect("rustc probe").success()
+}
deleted file mode 100755
--- a/third_party/rust/num-derive/ci/rustup.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-# Use rustup to locally run the same suite of tests as .travis.yml.
-# (You should first install/update 1.15.0, stable, beta, and nightly.)
-
-set -ex
-
-export TRAVIS_RUST_VERSION
-for TRAVIS_RUST_VERSION in 1.15.0 stable beta nightly; do
-    run="rustup run $TRAVIS_RUST_VERSION"
-    $run cargo build --verbose
-    $run $PWD/ci/test_full.sh
-done
deleted file mode 100755
--- a/third_party/rust/num-derive/ci/test_full.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-echo Testing num-derive on rustc ${TRAVIS_RUST_VERSION}
-
-# num-derive should build and test everywhere.
-cargo build --verbose --features="$FEATURES"
-cargo test --verbose --features="$FEATURES"
--- a/third_party/rust/num-derive/src/lib.rs
+++ b/third_party/rust/num-derive/src/lib.rs
@@ -5,16 +5,17 @@
 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
 #![crate_type = "proc-macro"]
 #![doc(html_root_url = "https://docs.rs/num-derive/0.2")]
+#![recursion_limit = "512"]
 
 //! Procedural macros to derive numeric traits in Rust.
 //!
 //! ## Usage
 //!
 //! Add this to your `Cargo.toml`:
 //!
 //! ```toml
@@ -45,17 +46,89 @@ extern crate proc_macro2;
 extern crate quote;
 extern crate syn;
 
 use proc_macro::TokenStream;
 use proc_macro2::Span;
 
 use syn::{Data, Fields, Ident};
 
-/// Derives [`num_traits::FromPrimitive`][from] for simple enums.
+// Within `exp`, you can bring things into scope with `extern crate`.
+//
+// We don't want to assume that `num_traits::` is in scope - the user may have imported it under a
+// different name, or may have imported it in a non-toplevel module (common when putting impls
+// behind a feature gate).
+//
+// Solution: let's just generate `extern crate num_traits as _num_traits` and then refer to
+// `_num_traits` in the derived code.  However, macros are not allowed to produce `extern crate`
+// statements at the toplevel.
+//
+// Solution: let's generate `mod _impl_foo` and import num_traits within that.  However, now we
+// lose access to private members of the surrounding module.  This is a problem if, for example,
+// we're deriving for a newtype, where the inner type is defined in the same module, but not
+// exported.
+//
+// Solution: use the dummy const trick.  For some reason, `extern crate` statements are allowed
+// here, but everything from the surrounding module is in scope.  This trick is taken from serde.
+fn dummy_const_trick<T: quote::ToTokens>(
+    trait_: &str,
+    name: &proc_macro2::Ident,
+    exp: T,
+) -> proc_macro2::TokenStream {
+    let dummy_const = Ident::new(
+        &format!("_IMPL_NUM_{}_FOR_{}", trait_, unraw(name)),
+        Span::call_site(),
+    );
+    quote! {
+        #[allow(non_upper_case_globals, unused_attributes, unused_qualifications)]
+        const #dummy_const: () = {
+            #[allow(unknown_lints)]
+            #[cfg_attr(feature = "cargo-clippy", allow(useless_attribute))]
+            #[allow(rust_2018_idioms)]
+            extern crate num_traits as _num_traits;
+            #exp
+        };
+    }
+}
+
+#[allow(deprecated)]
+fn unraw(ident: &proc_macro2::Ident) -> String {
+    // str::trim_start_matches was added in 1.30, trim_left_matches deprecated
+    // in 1.33. We currently support rustc back to 1.15 so we need to continue
+    // to use the deprecated one.
+    ident.to_string().trim_left_matches("r#").to_owned()
+}
+
+// If `data` is a newtype, return the type it's wrapping.
+fn newtype_inner(data: &syn::Data) -> Option<syn::Type> {
+    match *data {
+        Data::Struct(ref s) => {
+            match s.fields {
+                Fields::Unnamed(ref fs) => {
+                    if fs.unnamed.len() == 1 {
+                        Some(fs.unnamed[0].ty.clone())
+                    } else {
+                        None
+                    }
+                }
+                Fields::Named(ref fs) => {
+                    if fs.named.len() == 1 {
+                        panic!("num-derive doesn't know how to handle newtypes with named fields yet. \
+                           Please use a tuple-style newtype, or submit a PR!");
+                    }
+                    None
+                }
+                _ => None,
+            }
+        }
+        _ => None,
+    }
+}
+
+/// Derives [`num_traits::FromPrimitive`][from] for simple enums and newtypes.
 ///
 /// [from]: https://docs.rs/num-traits/0.2/num_traits/cast/trait.FromPrimitive.html
 ///
 /// # Examples
 ///
 /// Simple enums can be derived:
 ///
 /// ```rust
@@ -97,69 +170,129 @@ use syn::{Data, Fields, Ident};
 ///     b: u8,
 /// }
 /// # fn main() {}
 /// ```
 #[proc_macro_derive(FromPrimitive)]
 pub fn from_primitive(input: TokenStream) -> TokenStream {
     let ast: syn::DeriveInput = syn::parse(input).unwrap();
     let name = &ast.ident;
-    let dummy_const = Ident::new(&format!("_IMPL_NUM_FROM_PRIMITIVE_FOR_{}", name), Span::call_site());
 
-    let variants = match ast.data {
-        Data::Enum(ref data_enum) => &data_enum.variants,
-        _ => panic!("`FromPrimitive` can be applied only to the enums, {} is not an enum", name)
-    };
-
-    let from_i64_var = quote! { n };
-    let clauses: Vec<_> = variants.iter()
-        .map(|variant| {
-            let ident = &variant.ident;
-            match variant.fields {
-                Fields::Unit => (),
-                _ => {
-                    panic!("`FromPrimitive` can be applied only to unitary enums, {}::{} is either struct or tuple", name, ident)
-                },
-            }
-
+    let impl_ = if let Some(inner_ty) = newtype_inner(&ast.data) {
+        let i128_fns = if cfg!(has_i128) {
             quote! {
-                if #from_i64_var == #name::#ident as i64 {
-                    Some(#name::#ident)
+                fn from_i128(n: i128) -> Option<Self> {
+                    <#inner_ty as _num_traits::FromPrimitive>::from_i128(n).map(#name)
+                }
+                fn from_u128(n: u128) -> Option<Self> {
+                    <#inner_ty as _num_traits::FromPrimitive>::from_u128(n).map(#name)
                 }
             }
-        })
-        .collect();
-
-    let from_i64_var = if clauses.is_empty() { quote!(_) } else { from_i64_var };
+        } else {
+            quote! {}
+        };
 
-    let res = quote! {
-        #[allow(non_upper_case_globals)]
-        #[allow(unused_qualifications)]
-        const #dummy_const: () = {
-            extern crate num_traits as _num_traits;
+        quote! {
+            impl _num_traits::FromPrimitive for #name {
+                fn from_i64(n: i64) -> Option<Self> {
+                    <#inner_ty as _num_traits::FromPrimitive>::from_i64(n).map(#name)
+                }
+                fn from_u64(n: u64) -> Option<Self> {
+                    <#inner_ty as _num_traits::FromPrimitive>::from_u64(n).map(#name)
+                }
+                fn from_isize(n: isize) -> Option<Self> {
+                    <#inner_ty as _num_traits::FromPrimitive>::from_isize(n).map(#name)
+                }
+                fn from_i8(n: i8) -> Option<Self> {
+                    <#inner_ty as _num_traits::FromPrimitive>::from_i8(n).map(#name)
+                }
+                fn from_i16(n: i16) -> Option<Self> {
+                    <#inner_ty as _num_traits::FromPrimitive>::from_i16(n).map(#name)
+                }
+                fn from_i32(n: i32) -> Option<Self> {
+                    <#inner_ty as _num_traits::FromPrimitive>::from_i32(n).map(#name)
+                }
+                fn from_usize(n: usize) -> Option<Self> {
+                    <#inner_ty as _num_traits::FromPrimitive>::from_usize(n).map(#name)
+                }
+                fn from_u8(n: u8) -> Option<Self> {
+                    <#inner_ty as _num_traits::FromPrimitive>::from_u8(n).map(#name)
+                }
+                fn from_u16(n: u16) -> Option<Self> {
+                    <#inner_ty as _num_traits::FromPrimitive>::from_u16(n).map(#name)
+                }
+                fn from_u32(n: u32) -> Option<Self> {
+                    <#inner_ty as _num_traits::FromPrimitive>::from_u32(n).map(#name)
+                }
+                fn from_f32(n: f32) -> Option<Self> {
+                    <#inner_ty as _num_traits::FromPrimitive>::from_f32(n).map(#name)
+                }
+                fn from_f64(n: f64) -> Option<Self> {
+                    <#inner_ty as _num_traits::FromPrimitive>::from_f64(n).map(#name)
+                }
+                #i128_fns
+            }
+        }
+    } else {
+        let variants = match ast.data {
+            Data::Enum(ref data_enum) => &data_enum.variants,
+            _ => panic!(
+                "`FromPrimitive` can be applied only to enums and newtypes, {} is neither",
+                name
+            ),
+        };
 
+        let from_i64_var = quote! { n };
+        let clauses: Vec<_> = variants
+            .iter()
+            .map(|variant| {
+                let ident = &variant.ident;
+                match variant.fields {
+                    Fields::Unit => (),
+                    _ => panic!(
+                        "`FromPrimitive` can be applied only to unitary enums and newtypes, \
+                         {}::{} is either struct or tuple",
+                        name, ident
+                    ),
+                }
+
+                quote! {
+                    if #from_i64_var == #name::#ident as i64 {
+                        Some(#name::#ident)
+                    }
+                }
+            })
+            .collect();
+
+        let from_i64_var = if clauses.is_empty() {
+            quote!(_)
+        } else {
+            from_i64_var
+        };
+
+        quote! {
             impl _num_traits::FromPrimitive for #name {
                 #[allow(trivial_numeric_casts)]
                 fn from_i64(#from_i64_var: i64) -> Option<Self> {
                     #(#clauses else)* {
                         None
                     }
                 }
 
                 fn from_u64(n: u64) -> Option<Self> {
                     Self::from_i64(n as i64)
                 }
             }
-        };
+        }
     };
 
-    res.into()
+    dummy_const_trick("FromPrimitive", &name, impl_).into()
 }
 
-/// Derives [`num_traits::ToPrimitive`][to] for simple enums.
+/// Derives [`num_traits::ToPrimitive`][to] for simple enums and newtypes.
 ///
 /// [to]: https://docs.rs/num-traits/0.2/num_traits/cast/trait.ToPrimitive.html
 ///
 /// # Examples
 ///
 /// Simple enums can be derived:
 ///
 /// ```rust
@@ -201,66 +334,464 @@ pub fn from_primitive(input: TokenStream
 ///     b: u8,
 /// }
 /// # fn main() {}
 /// ```
 #[proc_macro_derive(ToPrimitive)]
 pub fn to_primitive(input: TokenStream) -> TokenStream {
     let ast: syn::DeriveInput = syn::parse(input).unwrap();
     let name = &ast.ident;
-    let dummy_const = Ident::new(&format!("_IMPL_NUM_TO_PRIMITIVE_FOR_{}", name), Span::call_site());
 
-    let variants = match ast.data {
-        Data::Enum(ref data_enum) => &data_enum.variants,
-        _ => panic!("`ToPrimitive` can be applied only to the enums, {} is not an enum", name)
-    };
+    let impl_ = if let Some(inner_ty) = newtype_inner(&ast.data) {
+        let i128_fns = if cfg!(has_i128) {
+            quote! {
+                fn to_i128(&self) -> Option<i128> {
+                    <#inner_ty as _num_traits::ToPrimitive>::to_i128(&self.0)
+                }
+                fn to_u128(&self) -> Option<u128> {
+                    <#inner_ty as _num_traits::ToPrimitive>::to_u128(&self.0)
+                }
+            }
+        } else {
+            quote! {}
+        };
 
-    let variants: Vec<_> = variants.iter()
-        .map(|variant| {
-            let ident = &variant.ident;
-            match variant.fields {
-                Fields::Unit => (),
-                _ => {
-                    panic!("`ToPrimitive` can be applied only to unitary enums, {}::{} is either struct or tuple", name, ident)
-                },
+        quote! {
+            impl _num_traits::ToPrimitive for #name {
+                fn to_i64(&self) -> Option<i64> {
+                    <#inner_ty as _num_traits::ToPrimitive>::to_i64(&self.0)
+                }
+                fn to_u64(&self) -> Option<u64> {
+                    <#inner_ty as _num_traits::ToPrimitive>::to_u64(&self.0)
+                }
+                fn to_isize(&self) -> Option<isize> {
+                    <#inner_ty as _num_traits::ToPrimitive>::to_isize(&self.0)
+                }
+                fn to_i8(&self) -> Option<i8> {
+                    <#inner_ty as _num_traits::ToPrimitive>::to_i8(&self.0)
+                }
+                fn to_i16(&self) -> Option<i16> {
+                    <#inner_ty as _num_traits::ToPrimitive>::to_i16(&self.0)
+                }
+                fn to_i32(&self) -> Option<i32> {
+                    <#inner_ty as _num_traits::ToPrimitive>::to_i32(&self.0)
+                }
+                fn to_usize(&self) -> Option<usize> {
+                    <#inner_ty as _num_traits::ToPrimitive>::to_usize(&self.0)
+                }
+                fn to_u8(&self) -> Option<u8> {
+                    <#inner_ty as _num_traits::ToPrimitive>::to_u8(&self.0)
+                }
+                fn to_u16(&self) -> Option<u16> {
+                    <#inner_ty as _num_traits::ToPrimitive>::to_u16(&self.0)
+                }
+                fn to_u32(&self) -> Option<u32> {
+                    <#inner_ty as _num_traits::ToPrimitive>::to_u32(&self.0)
+                }
+                fn to_f32(&self) -> Option<f32> {
+                    <#inner_ty as _num_traits::ToPrimitive>::to_f32(&self.0)
+                }
+                fn to_f64(&self) -> Option<f64> {
+                    <#inner_ty as _num_traits::ToPrimitive>::to_f64(&self.0)
+                }
+                #i128_fns
             }
-
-            // NB: We have to check each variant individually, because we'll only have `&self`
-            // for the input.  We can't move from that, and it might not be `Clone` or `Copy`.
-            // (Otherwise we could just do `*self as i64` without a `match` at all.)
-            quote!(#name::#ident => #name::#ident as i64)
-        })
-        .collect();
-
-    let match_expr = if variants.is_empty() {
-        // No variants found, so do not use Some to not to trigger `unreachable_code` lint
-        quote! {
-            match *self {}
         }
     } else {
-        quote! {
-            Some(match *self {
-                #(#variants,)*
-            })
-        }
-    };
+        let variants = match ast.data {
+            Data::Enum(ref data_enum) => &data_enum.variants,
+            _ => panic!(
+                "`ToPrimitive` can be applied only to enums and newtypes, {} is neither",
+                name
+            ),
+        };
+
+        let variants: Vec<_> = variants
+            .iter()
+            .map(|variant| {
+                let ident = &variant.ident;
+                match variant.fields {
+                    Fields::Unit => (),
+                    _ => {
+                        panic!("`ToPrimitive` can be applied only to unitary enums and newtypes, {}::{} is either struct or tuple", name, ident)
+                    },
+                }
 
-    let res = quote! {
-        #[allow(non_upper_case_globals)]
-        #[allow(unused_qualifications)]
-        const #dummy_const: () = {
-            extern crate num_traits as _num_traits;
+                // NB: We have to check each variant individually, because we'll only have `&self`
+                // for the input.  We can't move from that, and it might not be `Clone` or `Copy`.
+                // (Otherwise we could just do `*self as i64` without a `match` at all.)
+                quote!(#name::#ident => #name::#ident as i64)
+            })
+            .collect();
 
+        let match_expr = if variants.is_empty() {
+            // No variants found, so do not use Some to not to trigger `unreachable_code` lint
+            quote! {
+                match *self {}
+            }
+        } else {
+            quote! {
+                Some(match *self {
+                    #(#variants,)*
+                })
+            }
+        };
+
+        quote! {
             impl _num_traits::ToPrimitive for #name {
                 #[allow(trivial_numeric_casts)]
                 fn to_i64(&self) -> Option<i64> {
                     #match_expr
                 }
 
                 fn to_u64(&self) -> Option<u64> {
                     self.to_i64().map(|x| x as u64)
                 }
             }
-        };
+        }
     };
 
-    res.into()
+    dummy_const_trick("ToPrimitive", &name, impl_).into()
+}
+
+#[allow(renamed_and_removed_lints)]
+#[cfg_attr(feature = "cargo-clippy", allow(const_static_lifetime))]
+const NEWTYPE_ONLY: &'static str = "This trait can only be derived for newtypes";
+
+/// Derives [`num_traits::NumOps`][num_ops] for newtypes.  The inner type must already implement
+/// `NumOps`.
+///
+/// [num_ops]: https://docs.rs/num-traits/0.2/num_traits/trait.NumOps.html
+///
+/// Note that, since `NumOps` is really a trait alias for `Add + Sub + Mul + Div + Rem`, this macro
+/// generates impls for _those_ traits.  Furthermore, in all generated impls, `RHS=Self` and
+/// `Output=Self`.
+#[proc_macro_derive(NumOps)]
+pub fn num_ops(input: TokenStream) -> TokenStream {
+    let ast: syn::DeriveInput = syn::parse(input).unwrap();
+    let name = &ast.ident;
+    let inner_ty = newtype_inner(&ast.data).expect(NEWTYPE_ONLY);
+    dummy_const_trick(
+        "NumOps",
+        &name,
+        quote! {
+            impl ::std::ops::Add for #name {
+                type Output = Self;
+                fn add(self, other: Self) -> Self {
+                    #name(<#inner_ty as ::std::ops::Add>::add(self.0, other.0))
+                }
+            }
+            impl ::std::ops::Sub for #name {
+                type Output = Self;
+                fn sub(self, other: Self) -> Self {
+                    #name(<#inner_ty as ::std::ops::Sub>::sub(self.0, other.0))
+                }
+            }
+            impl ::std::ops::Mul for #name {
+                type Output = Self;
+                fn mul(self, other: Self) -> Self {
+                    #name(<#inner_ty as ::std::ops::Mul>::mul(self.0, other.0))
+                }
+            }
+            impl ::std::ops::Div for #name {
+                type Output = Self;
+                fn div(self, other: Self) -> Self {
+                    #name(<#inner_ty as ::std::ops::Div>::div(self.0, other.0))
+                }
+            }
+            impl ::std::ops::Rem for #name {
+                type Output = Self;
+                fn rem(self, other: Self) -> Self {
+                    #name(<#inner_ty as ::std::ops::Rem>::rem(self.0, other.0))
+                }
+            }
+        },
+    )
+    .into()
+}
+
+/// Derives [`num_traits::NumCast`][num_cast] for newtypes.  The inner type must already implement
+/// `NumCast`.
+///
+/// [num_cast]: https://docs.rs/num-traits/0.2/num_traits/cast/trait.NumCast.html
+#[proc_macro_derive(NumCast)]
+pub fn num_cast(input: TokenStream) -> TokenStream {
+    let ast: syn::DeriveInput = syn::parse(input).unwrap();
+    let name = &ast.ident;
+    let inner_ty = newtype_inner(&ast.data).expect(NEWTYPE_ONLY);
+    dummy_const_trick(
+        "NumCast",
+        &name,
+        quote! {
+            impl _num_traits::NumCast for #name {
+                fn from<T: _num_traits::ToPrimitive>(n: T) -> Option<Self> {
+                    <#inner_ty as _num_traits::NumCast>::from(n).map(#name)
+                }
+            }
+        },
+    )
+    .into()
+}
+
+/// Derives [`num_traits::Zero`][zero] for newtypes.  The inner type must already implement `Zero`.
+///
+/// [zero]: https://docs.rs/num-traits/0.2/num_traits/identities/trait.Zero.html
+#[proc_macro_derive(Zero)]
+pub fn zero(input: TokenStream) -> TokenStream {
+    let ast: syn::DeriveInput = syn::parse(input).unwrap();
+    let name = &ast.ident;
+    let inner_ty = newtype_inner(&ast.data).expect(NEWTYPE_ONLY);
+    dummy_const_trick(
+        "Zero",
+        &name,
+        quote! {
+            impl _num_traits::Zero for #name {
+                fn zero() -> Self {
+                    #name(<#inner_ty as _num_traits::Zero>::zero())
+                }
+                fn is_zero(&self) -> bool {
+                    <#inner_ty as _num_traits::Zero>::is_zero(&self.0)
+                }
+            }
+        },
+    )
+    .into()
+}
+
+/// Derives [`num_traits::One`][one] for newtypes.  The inner type must already implement `One`.
+///
+/// [one]: https://docs.rs/num-traits/0.2/num_traits/identities/trait.One.html
+#[proc_macro_derive(One)]
+pub fn one(input: TokenStream) -> TokenStream {
+    let ast: syn::DeriveInput = syn::parse(input).unwrap();
+    let name = &ast.ident;
+    let inner_ty = newtype_inner(&ast.data).expect(NEWTYPE_ONLY);
+    dummy_const_trick(
+        "One",
+        &name,
+        quote! {
+            impl _num_traits::One for #name {
+                fn one() -> Self {
+                    #name(<#inner_ty as _num_traits::One>::one())
+                }
+                fn is_one(&self) -> bool {
+                    <#inner_ty as _num_traits::One>::is_one(&self.0)
+                }
+            }
+        },
+    )
+    .into()
+}
+
+/// Derives [`num_traits::Num`][num] for newtypes.  The inner type must already implement `Num`.
+///
+/// [num]: https://docs.rs/num-traits/0.2/num_traits/trait.Num.html
+#[proc_macro_derive(Num)]
+pub fn num(input: TokenStream) -> TokenStream {
+    let ast: syn::DeriveInput = syn::parse(input).unwrap();
+    let name = &ast.ident;
+    let inner_ty = newtype_inner(&ast.data).expect(NEWTYPE_ONLY);
+    dummy_const_trick(
+        "Num",
+        &name,
+        quote! {
+            impl _num_traits::Num for #name {
+                type FromStrRadixErr = <#inner_ty as _num_traits::Num>::FromStrRadixErr;
+                fn from_str_radix(s: &str, radix: u32) -> Result<Self, Self::FromStrRadixErr> {
+                    <#inner_ty as _num_traits::Num>::from_str_radix(s, radix).map(#name)
+                }
+            }
+        },
+    )
+    .into()
 }
+
+/// Derives [`num_traits::Float`][float] for newtypes.  The inner type must already implement
+/// `Float`.
+///
+/// [float]: https://docs.rs/num-traits/0.2/num_traits/float/trait.Float.html
+#[proc_macro_derive(Float)]
+pub fn float(input: TokenStream) -> TokenStream {
+    let ast: syn::DeriveInput = syn::parse(input).unwrap();
+    let name = &ast.ident;
+    let inner_ty = newtype_inner(&ast.data).expect(NEWTYPE_ONLY);
+    dummy_const_trick(
+        "Float",
+        &name,
+        quote! {
+            impl _num_traits::Float for #name {
+                fn nan() -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::nan())
+                }
+                fn infinity() -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::infinity())
+                }
+                fn neg_infinity() -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::neg_infinity())
+                }
+                fn neg_zero() -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::neg_zero())
+                }
+                fn min_value() -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::min_value())
+                }
+                fn min_positive_value() -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::min_positive_value())
+                }
+                fn max_value() -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::max_value())
+                }
+                fn is_nan(self) -> bool {
+                    <#inner_ty as _num_traits::Float>::is_nan(self.0)
+                }
+                fn is_infinite(self) -> bool {
+                    <#inner_ty as _num_traits::Float>::is_infinite(self.0)
+                }
+                fn is_finite(self) -> bool {
+                    <#inner_ty as _num_traits::Float>::is_finite(self.0)
+                }
+                fn is_normal(self) -> bool {
+                    <#inner_ty as _num_traits::Float>::is_normal(self.0)
+                }
+                fn classify(self) -> ::std::num::FpCategory {
+                    <#inner_ty as _num_traits::Float>::classify(self.0)
+                }
+                fn floor(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::floor(self.0))
+                }
+                fn ceil(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::ceil(self.0))
+                }
+                fn round(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::round(self.0))
+                }
+                fn trunc(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::trunc(self.0))
+                }
+                fn fract(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::fract(self.0))
+                }
+                fn abs(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::abs(self.0))
+                }
+                fn signum(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::signum(self.0))
+                }
+                fn is_sign_positive(self) -> bool {
+                    <#inner_ty as _num_traits::Float>::is_sign_positive(self.0)
+                }
+                fn is_sign_negative(self) -> bool {
+                    <#inner_ty as _num_traits::Float>::is_sign_negative(self.0)
+                }
+                fn mul_add(self, a: Self, b: Self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::mul_add(self.0, a.0, b.0))
+                }
+                fn recip(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::recip(self.0))
+                }
+                fn powi(self, n: i32) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::powi(self.0, n))
+                }
+                fn powf(self, n: Self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::powf(self.0, n.0))
+                }
+                fn sqrt(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::sqrt(self.0))
+                }
+                fn exp(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::exp(self.0))
+                }
+                fn exp2(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::exp2(self.0))
+                }
+                fn ln(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::ln(self.0))
+                }
+                fn log(self, base: Self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::log(self.0, base.0))
+                }
+                fn log2(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::log2(self.0))
+                }
+                fn log10(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::log10(self.0))
+                }
+                fn max(self, other: Self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::max(self.0, other.0))
+                }
+                fn min(self, other: Self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::min(self.0, other.0))
+                }
+                fn abs_sub(self, other: Self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::abs_sub(self.0, other.0))
+                }
+                fn cbrt(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::cbrt(self.0))
+                }
+                fn hypot(self, other: Self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::hypot(self.0, other.0))
+                }
+                fn sin(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::sin(self.0))
+                }
+                fn cos(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::cos(self.0))
+                }
+                fn tan(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::tan(self.0))
+                }
+                fn asin(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::asin(self.0))
+                }
+                fn acos(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::acos(self.0))
+                }
+                fn atan(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::atan(self.0))
+                }
+                fn atan2(self, other: Self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::atan2(self.0, other.0))
+                }
+                fn sin_cos(self) -> (Self, Self) {
+                    let (x, y) = <#inner_ty as _num_traits::Float>::sin_cos(self.0);
+                    (#name(x), #name(y))
+                }
+                fn exp_m1(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::exp_m1(self.0))
+                }
+                fn ln_1p(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::ln_1p(self.0))
+                }
+                fn sinh(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::sinh(self.0))
+                }
+                fn cosh(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::cosh(self.0))
+                }
+                fn tanh(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::tanh(self.0))
+                }
+                fn asinh(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::asinh(self.0))
+                }
+                fn acosh(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::acosh(self.0))
+                }
+                fn atanh(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::atanh(self.0))
+                }
+                fn integer_decode(self) -> (u64, i16, i8) {
+                    <#inner_ty as _num_traits::Float>::integer_decode(self.0)
+                }
+                fn epsilon() -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::epsilon())
+                }
+                fn to_degrees(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::to_degrees(self.0))
+                }
+                fn to_radians(self) -> Self {
+                    #name(<#inner_ty as _num_traits::Float>::to_radians(self.0))
+                }
+            }
+        },
+    )
+    .into()
+}
--- a/third_party/rust/num-derive/tests/issue-6.rs
+++ b/third_party/rust/num-derive/tests/issue-6.rs
@@ -1,16 +1,16 @@
 #![deny(trivial_numeric_casts)]
 extern crate num;
 #[macro_use]
 extern crate num_derive;
 
 #[derive(FromPrimitive, ToPrimitive)]
 pub enum SomeEnum {
-    A = 1
+    A = 1,
 }
 
 #[test]
 fn test_trivial_numeric_casts() {
     use num::{FromPrimitive, ToPrimitive};
     assert!(SomeEnum::from_u64(1).is_some());
     assert!(SomeEnum::from_i64(-1).is_none());
     assert_eq!(SomeEnum::A.to_u64(), Some(1));
--- a/third_party/rust/num-derive/tests/issue-9.rs
+++ b/third_party/rust/num-derive/tests/issue-9.rs
@@ -2,17 +2,17 @@
 extern crate num;
 #[macro_use]
 extern crate num_derive;
 use num::FromPrimitive;
 use num::ToPrimitive;
 
 #[derive(FromPrimitive, ToPrimitive)]
 pub enum SomeEnum {
-    A = 1
+    A = 1,
 }
 
 #[test]
 fn test_unused_qualifications() {
     assert!(SomeEnum::from_u64(1).is_some());
     assert!(SomeEnum::from_i64(-1).is_none());
     assert!(SomeEnum::A.to_i64().is_some());
 }
new file mode 100644
--- /dev/null
+++ b/third_party/rust/num-derive/tests/newtype.rs
@@ -0,0 +1,91 @@
+extern crate num as num_renamed;
+#[macro_use]
+extern crate num_derive;
+
+use num_renamed::{Float, FromPrimitive, Num, NumCast, One, ToPrimitive, Zero};
+use std::ops::Neg;
+
+#[derive(
+    Debug,
+    Clone,
+    Copy,
+    PartialEq,
+    PartialOrd,
+    ToPrimitive,
+    FromPrimitive,
+    NumOps,
+    NumCast,
+    One,
+    Zero,
+    Num,
+    Float,
+)]
+struct MyFloat(f64);
+
+impl Neg for MyFloat {
+    type Output = MyFloat;
+    fn neg(self) -> Self {
+        MyFloat(self.0.neg())
+    }
+}
+
+#[test]
+fn test_from_primitive() {
+    assert_eq!(MyFloat::from_u32(25), Some(MyFloat(25.0)));
+}
+
+#[test]
+#[cfg(has_i128)]
+fn test_from_primitive_128() {
+    assert_eq!(
+        MyFloat::from_i128(std::i128::MIN),
+        Some(MyFloat((-2.0).powi(127)))
+    );
+}
+
+#[test]
+fn test_to_primitive() {
+    assert_eq!(MyFloat(25.0).to_u32(), Some(25));
+}
+
+#[test]
+#[cfg(has_i128)]
+fn test_to_primitive_128() {
+    let f = MyFloat::from_f32(std::f32::MAX).unwrap();
+    assert_eq!(f.to_i128(), None);
+    assert_eq!(f.to_u128(), Some(0xffff_ff00_0000_0000_0000_0000_0000_0000));
+}
+
+#[test]
+fn test_num_ops() {
+    assert_eq!(MyFloat(25.0) + MyFloat(10.0), MyFloat(35.0));
+    assert_eq!(MyFloat(25.0) - MyFloat(10.0), MyFloat(15.0));
+    assert_eq!(MyFloat(25.0) * MyFloat(2.0), MyFloat(50.0));
+    assert_eq!(MyFloat(25.0) / MyFloat(10.0), MyFloat(2.5));
+    assert_eq!(MyFloat(25.0) % MyFloat(10.0), MyFloat(5.0));
+}
+
+#[test]
+fn test_num_cast() {
+    assert_eq!(<MyFloat as NumCast>::from(25u8), Some(MyFloat(25.0)));
+}
+
+#[test]
+fn test_zero() {
+    assert_eq!(MyFloat::zero(), MyFloat(0.0));
+}
+
+#[test]
+fn test_one() {
+    assert_eq!(MyFloat::one(), MyFloat(1.0));
+}
+
+#[test]
+fn test_num() {
+    assert_eq!(MyFloat::from_str_radix("25", 10).ok(), Some(MyFloat(25.0)));
+}
+
+#[test]
+fn test_float() {
+    assert_eq!(MyFloat(4.0).log(MyFloat(2.0)), MyFloat(2.0));
+}
--- a/third_party/rust/num-derive/tests/trivial.rs
+++ b/third_party/rust/num-derive/tests/trivial.rs
@@ -16,37 +16,49 @@ extern crate num_derive;
 enum Color {
     Red,
     Blue,
     Green,
 }
 
 #[test]
 fn test_from_primitive_for_trivial_case() {
-    let v: [Option<Color>; 4] = [num_renamed::FromPrimitive::from_u64(0),
-                                 num_renamed::FromPrimitive::from_u64(1),
-                                 num_renamed::FromPrimitive::from_u64(2),
-                                 num_renamed::FromPrimitive::from_u64(3)];
+    let v: [Option<Color>; 4] = [
+        num_renamed::FromPrimitive::from_u64(0),
+        num_renamed::FromPrimitive::from_u64(1),
+        num_renamed::FromPrimitive::from_u64(2),
+        num_renamed::FromPrimitive::from_u64(3),
+    ];
 
-    assert_eq!(v,
-               [Some(Color::Red), Some(Color::Blue), Some(Color::Green), None]);
+    assert_eq!(
+        v,
+        [
+            Some(Color::Red),
+            Some(Color::Blue),
+            Some(Color::Green),
+            None
+        ]
+    );
 }
 
 #[test]
 fn test_to_primitive_for_trivial_case() {
-    let v: [Option<u64>; 3] = [num_renamed::ToPrimitive::to_u64(&Color::Red),
-                               num_renamed::ToPrimitive::to_u64(&Color::Blue),
-                               num_renamed::ToPrimitive::to_u64(&Color::Green)];
+    let v: [Option<u64>; 3] = [
+        num_renamed::ToPrimitive::to_u64(&Color::Red),
+        num_renamed::ToPrimitive::to_u64(&Color::Blue),
+        num_renamed::ToPrimitive::to_u64(&Color::Green),
+    ];
 
     assert_eq!(v, [Some(0), Some(1), Some(2)]);
 }
 
 #[test]
 fn test_reflexive_for_trivial_case() {
     let before: [u64; 3] = [0, 1, 2];
-    let after: Vec<Option<u64>> = before.iter()
+    let after: Vec<Option<u64>> = before
+        .iter()
         .map(|&x| -> Option<Color> { num_renamed::FromPrimitive::from_u64(x) })
         .map(|x| x.and_then(|x| num_renamed::ToPrimitive::to_u64(&x)))
         .collect();
-    let before = before.into_iter().cloned().map(Some).collect::<Vec<_>>();
+    let before = before.iter().cloned().map(Some).collect::<Vec<_>>();
 
     assert_eq!(before, after);
 }
--- a/third_party/rust/num-derive/tests/with_custom_values.rs
+++ b/third_party/rust/num-derive/tests/with_custom_values.rs
@@ -19,39 +19,52 @@ enum Color {
     Red,
     Blue = 5,
     Green,
     Alpha = (-3 - (-5isize)) - 10,
 }
 
 #[test]
 fn test_from_primitive_for_enum_with_custom_value() {
-    let v: [Option<Color>; 5] = [num_renamed::FromPrimitive::from_u64(0),
-                                 num_renamed::FromPrimitive::from_u64(5),
-                                 num_renamed::FromPrimitive::from_u64(6),
-                                 num_renamed::FromPrimitive::from_u64(-8isize as u64),
-                                 num_renamed::FromPrimitive::from_u64(3)];
+    let v: [Option<Color>; 5] = [
+        num_renamed::FromPrimitive::from_u64(0),
+        num_renamed::FromPrimitive::from_u64(5),
+        num_renamed::FromPrimitive::from_u64(6),
+        num_renamed::FromPrimitive::from_u64(-8isize as u64),
+        num_renamed::FromPrimitive::from_u64(3),
+    ];
 
-    assert_eq!(v,
-               [Some(Color::Red), Some(Color::Blue), Some(Color::Green), Some(Color::Alpha), None]);
+    assert_eq!(
+        v,
+        [
+            Some(Color::Red),
+            Some(Color::Blue),
+            Some(Color::Green),
+            Some(Color::Alpha),
+            None
+        ]
+    );
 }
 
 #[test]
 fn test_to_primitive_for_enum_with_custom_value() {
-    let v: [Option<u64>; 4] = [num_renamed::ToPrimitive::to_u64(&Color::Red),
-                               num_renamed::ToPrimitive::to_u64(&Color::Blue),
-                               num_renamed::ToPrimitive::to_u64(&Color::Green),
-                               num_renamed::ToPrimitive::to_u64(&Color::Alpha)];
+    let v: [Option<u64>; 4] = [
+        num_renamed::ToPrimitive::to_u64(&Color::Red),
+        num_renamed::ToPrimitive::to_u64(&Color::Blue),
+        num_renamed::ToPrimitive::to_u64(&Color::Green),
+        num_renamed::ToPrimitive::to_u64(&Color::Alpha),
+    ];
 
     assert_eq!(v, [Some(0), Some(5), Some(6), Some(-8isize as u64)]);
 }
 
 #[test]
 fn test_reflexive_for_enum_with_custom_value() {
     let before: [u64; 3] = [0, 5, 6];
-    let after: Vec<Option<u64>> = before.iter()
+    let after: Vec<Option<u64>> = before
+        .iter()
         .map(|&x| -> Option<Color> { num_renamed::FromPrimitive::from_u64(x) })
         .map(|x| x.and_then(|x| num_renamed::ToPrimitive::to_u64(&x)))
         .collect();
     let before = before.into_iter().cloned().map(Some).collect::<Vec<_>>();
 
     assert_eq!(before, after);
 }
deleted file mode 100644
--- a/third_party/rust/proc-macro2-0.3.5/.cargo-checksum.json
+++ /dev/null
@@ -1,1 +0,0 @@
-{"files":{".travis.yml":"872a0d195dcb1e84f28aa994f301c7139f70360bb42dee3954df5ee965efea15","Cargo.toml":"e71f764696d6998512da00a9ac309f2717d103707aeef81164f906c1588ede63","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"ce05336717e1e90724491a2f54487c41c752fa2d32396639439f7c6d0f1e6776","src/lib.rs":"3280d7e0b6043d8472f418aa5c8458c97aa2b5a572f9156a251b5672828468c2","src/stable.rs":"4b5a65bd5dc174dd027b9ee951844c3765450f9d45961a8d6cd7d5f85b4c25c8","src/strnom.rs":"129fe22f0b50e5a64fca82e731c959135381c910e19f3305ef35420e0aadde08","src/unstable.rs":"b43c713ac16d9de0ba0fa1b9bebe390122b4ad60ef2fc75408f721305fdcd46b","tests/test.rs":"b06713fd8bd93ab9f0156bd25152e08f68a71b35e064c53b584f7f7dbb9b60b8"},"package":"77997c53ae6edd6d187fec07ec41b207063b5ee6f33680e9fa86d405cdd313d4"}
\ No newline at end of file
deleted file mode 100644
--- a/third_party/rust/proc-macro2-0.3.5/.travis.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-language: rust
-sudo: false
-
-matrix:
-  include:
-    - rust: 1.15.0
-    - rust: stable
-    - rust: beta
-    - rust: nightly
-      before_script:
-        - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH
-      script:
-        - cargo test
-        - cargo build --features nightly
-        - cargo build --no-default-features
-        - RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo test
-        - RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo build --features nightly
-        - RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo doc --no-deps
-      after_success:
-        - travis-cargo --only nightly doc-upload
-
-script:
-  - cargo test
-  - RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo test
-env:
-  global:
-    - TRAVIS_CARGO_NIGHTLY_FEATURE=""
-    - secure: "NAsZghAVTAksrm4WP4I66VmD2wW0eRbwB+ZKHUQfvbgUaCRvVdp4WBbWXGU/f/yHgDFWZwljWR4iPMiBwAK8nZsQFRuLFdHrOOHqbkj639LLdT9A07s1zLMB1GfR1fDttzrGhm903pbT2yxSyqqpahGYM7TaGDYYmKYIk4XyVNA5F5Sk7RI+rCecKraoYDeUEFbjWWYtU2FkEXsELEKj0emX5reWkR+wja3QokFcRZ25+Zd2dRC0K8W5QcY2UokLzKncBMCTC5q70H616S3r/9qW67Si1njsJ7RzP0NlZQUNQ/VCvwr4LCr9w+AD9i1SZtXxuux77tWEWSJvBzUc82dDMUv/floJuF7HTulSxxQoRm+fbzpXj9mgaJNiUHXru6ZRTCRVRUSXpcAco94bVoy/jnjrTe3jgAIZK5w14zA8yLw1Jxof31DlbcWORxgF+6fnY2nKPRN2oiQ50+jm1AuGDZX59/wMiu1QlkjOBHtikHp+u+7mp3SkkM04DvuQ/tWODQQnOOtrA0EB3i5H1zeTSnUcmbJufUljWWOvF1QYII08MccqwfG1KWbpobvdu+cV2iVhkq/lNCEL3Ai101CnmSCnMz+9oK/XxYOrx2TnaD9ootOKgnk7XWxF19GZecQx6O2hHTouxvB/0KcRPGWmMWl0H88f3T/Obql8bG8="
-
-notifications:
-  email:
-    on_success: never
deleted file mode 100644
--- a/third_party/rust/proc-macro2-0.3.5/Cargo.toml
+++ /dev/null
@@ -1,33 +0,0 @@
-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
-#
-# When uploading crates to the registry Cargo will automatically
-# "normalize" Cargo.toml files for maximal compatibility
-# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g. crates.io) dependencies
-#
-# If you believe there's an error in this file please file an
-# issue against the rust-lang/cargo repository. If you're
-# editing this file be aware that the upstream Cargo.toml
-# will likely look very different (and much more reasonable)
-
-[package]
-name = "proc-macro2"
-version = "0.3.5"
-authors = ["Alex Crichton <alex@alexcrichton.com>"]
-description = "A stable implementation of the upcoming new `proc_macro` API. Comes with an\noption, off by default, to also reimplement itself in terms of the upstream\nunstable API.\n"
-homepage = "https://github.com/alexcrichton/proc-macro2"
-documentation = "https://docs.rs/proc-macro2"
-readme = "README.md"
-keywords = ["macros"]
-license = "MIT/Apache-2.0"
-repository = "https://github.com/alexcrichton/proc-macro2"
-
-[lib]
-doctest = false
-[dependencies.unicode-xid]
-version = "0.1"
-
-[features]
-default = ["proc-macro"]
-nightly = ["proc-macro"]
-proc-macro = []
deleted file mode 100644
--- a/third_party/rust/proc-macro2-0.3.5/LICENSE-APACHE
+++ /dev/null
@@ -1,201 +0,0 @@
-                              Apache License
-                        Version 2.0, January 2004
-                     http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-   "License" shall mean the terms and conditions for use, reproduction,
-   and distribution as defined by Sections 1 through 9 of this document.
-
-   "Licensor" shall mean the copyright owner or entity authorized by
-   the copyright owner that is granting the License.
-
-   "Legal Entity" shall mean the union of the acting entity and all
-   other entities that control, are controlled by, or are under common
-   control with that entity. For the purposes of this definition,
-   "control" means (i) the power, direct or indirect, to cause the
-   direction or management of such entity, whether by contract or
-   otherwise, or (ii) ownership of fifty percent (50%) or more of the
-   outstanding shares, or (iii) beneficial ownership of such entity.
-
-   "You" (or "Your") shall mean an individual or Legal Entity
-   exercising permissions granted by this License.
-
-   "Source" form shall mean the preferred form for making modifications,
-   including but not limited to software source code, documentation
-   source, and configuration files.
-
-   "Object" form shall mean any form resulting from mechanical
-   transformation or translation of a Source form, including but
-   not limited to compiled object code, generated documentation,
-   and conversions to other media types.
-
-   "Work" shall mean the work of authorship, whether in Source or
-   Object form, made available under the License, as indicated by a
-   copyright notice that is included in or attached to the work
-   (an example is provided in the Appendix below).
-
-   "Derivative Works" shall mean any work, whether in Source or Object
-   form, that is based on (or derived from) the Work and for which the
-   editorial revisions, annotations, elaborations, or other modifications
-   represent, as a whole, an original work of authorship. For the purposes
-   of this License, Derivative Works shall not include works that remain
-   separable from, or merely link (or bind by name) to the interfaces of,
-   the Work and Derivative Works thereof.
-
-   "Contribution" shall mean any work of authorship, including
-   the original version of the Work and any modifications or additions
-   to that Work or Derivative Works thereof, that is intentionally
-   submitted to Licensor for inclusion in the Work by the copyright owner
-   or by an individual or Legal Entity authorized to submit on behalf of
-   the copyright owner. For the purposes of this definition, "submitted"
-   means any form of electronic, verbal, or written communication sent
-   to the Licensor or its representatives, including but not limited to
-   communication on electronic mailing lists, source code control systems,
-   and issue tracking systems that are managed by, or on behalf of, the
-   Licensor for the purpose of discussing and improving the Work, but
-   excluding communication that is conspicuously marked or otherwise
-   designated in writing by the copyright owner as "Not a Contribution."
-
-   "Contributor" shall mean Licensor and any individual or Legal Entity
-   on behalf of whom a Contribution has been received by Licensor and
-   subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   copyright license to reproduce, prepare Derivative Works of,
-   publicly display, publicly perform, sublicense, and distribute the
-   Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   (except as stated in this section) patent license to make, have made,
-   use, offer to sell, sell, import, and otherwise transfer the Work,
-   where such license applies only to those patent claims licensable
-   by such Contributor that are necessarily infringed by their
-   Contribution(s) alone or by combination of their Contribution(s)
-   with the Work to which such Contribution(s) was submitted. If You
-   institute patent litigation against any entity (including a
-   cross-claim or counterclaim in a lawsuit) alleging that the Work
-   or a Contribution incorporated within the Work constitutes direct
-   or contributory patent infringement, then any patent licenses
-   granted to You under this License for that Work shall terminate
-   as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
-   Work or Derivative Works thereof in any medium, with or without
-   modifications, and in Source or Object form, provided that You
-   meet the following conditions:
-
-   (a) You must give any other recipients of the Work or
-       Derivative Works a copy of this License; and
-
-   (b) You must cause any modified files to carry prominent notices
-       stating that You changed the files; and
-
-   (c) You must retain, in the Source form of any Derivative Works
-       that You distribute, all copyright, patent, trademark, and
-       attribution notices from the Source form of the Work,
-       excluding those notices that do not pertain to any part of
-       the Derivative Works; and
-
-   (d) If the Work includes a "NOTICE" text file as part of its
-       distribution, then any Derivative Works that You distribute must
-       include a readable copy of the attribution notices contained
-       within such NOTICE file, excluding those notices that do not
-       pertain to any part of the Derivative Works, in at least one
-       of the following places: within a NOTICE text file distributed
-       as part of the Derivative Works; within the Source form or
-       documentation, if provided along with the Derivative Works; or,
-       within a display generated by the Derivative Works, if and
-       wherever such third-party notices normally appear. The contents
-       of the NOTICE file are for informational purposes only and
-       do not modify the License. You may add Your own attribution
-       notices within Derivative Works that You distribute, alongside
-       or as an addendum to the NOTICE text from the Work, provided
-       that such additional attribution notices cannot be construed
-       as modifying the License.
-
-   You may add Your own copyright statement to Your modifications and
-   may provide additional or different license terms and conditions
-   for use, reproduction, or distribution of Your modifications, or
-   for any such Derivative Works as a whole, provided Your use,
-   reproduction, and distribution of the Work otherwise complies with
-   the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
-   any Contribution intentionally submitted for inclusion in the Work
-   by You to the Licensor shall be under the terms and conditions of
-   this License, without any additional terms or conditions.
-   Notwithstanding the above, nothing herein shall supersede or modify
-   the terms of any separate license agreement you may have executed
-   with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
-   names, trademarks, service marks, or product names of the Licensor,
-   except as required for reasonable and customary use in describing the
-   origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
-   agreed to in writing, Licensor provides the Work (and each
-   Contributor provides its Contributions) on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-   implied, including, without limitation, any warranties or conditions
-   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-   PARTICULAR PURPOSE. You are solely responsible for determining the
-   appropriateness of using or redistributing the Work and assume any
-   risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
-   whether in tort (including negligence), contract, or otherwise,
-   unless required by applicable law (such as deliberate and grossly
-   negligent acts) or agreed to in writing, shall any Contributor be
-   liable to You for damages, including any direct, indirect, special,
-   incidental, or consequential damages of any character arising as a
-   result of this License or out of the use or inability to use the
-   Work (including but not limited to damages for loss of goodwill,
-   work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses), even if such Contributor
-   has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
-   the Work or Derivative Works thereof, You may choose to offer,
-   and charge a fee for, acceptance of support, warranty, indemnity,
-   or other liability obligations and/or rights consistent with this
-   License. However, in accepting such obligations, You may act only
-   on Your own behalf and on Your sole responsibility, not on behalf
-   of any other Contributor, and only if You agree to indemnify,
-   defend, and hold each Contributor harmless for any liability
-   incurred by, or claims asserted against, such Contributor by reason
-   of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work.
-
-   To apply the Apache License to your work, attach the following
-   boilerplate notice, with the fields enclosed by brackets "[]"
-   replaced with your own identifying information. (Don't include
-   the brackets!)  The text should be enclosed in the appropriate
-   comment syntax for the file format. We also recommend that a
-   file or class name and description of purpose be included on the
-   same "printed page" as the copyright notice for easier
-   identification within third-party archives.
-
-Copyright [yyyy] [name of copyright owner]
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-	http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
deleted file mode 100644
--- a/third_party/rust/proc-macro2-0.3.5/LICENSE-MIT
+++ /dev/null
@@ -1,25 +0,0 @@
-Copyright (c) 2014 Alex Crichton
-
-Permission is hereby granted, free of charge, to any
-person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the
-Software without restriction, including without
-limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software
-is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
deleted file mode 100644
--- a/third_party/rust/proc-macro2-0.3.5/README.md
+++ /dev/null
@@ -1,98 +0,0 @@
-# proc-macro2
-
-[![Build Status](https://api.travis-ci.org/alexcrichton/proc-macro2.svg?branch=master)](https://travis-ci.org/alexcrichton/proc-macro2)
-[![Latest Version](https://img.shields.io/crates/v/proc-macro2.svg)](https://crates.io/crates/proc-macro2)
-[![Rust Documentation](https://img.shields.io/badge/api-rustdoc-blue.svg)](https://docs.rs/proc-macro2)
-
-A small shim over the `proc_macro` crate in the compiler intended to multiplex
-the current stable interface (as of 2017-07-05) and the [upcoming richer
-interface][upcoming].
-
-[upcoming]: https://github.com/rust-lang/rust/pull/40939
-
-The upcoming support has features like:
-
-* Span information on tokens
-* No need to go in/out through strings
-* Structured input/output
-
-The hope is that libraries ported to `proc_macro2` will be trivial to port to
-the real `proc_macro` crate once the support on nightly is stabilize.
-
-## Usage
-
-This crate by default compiles on the stable version of the compiler. It only
-uses the stable surface area of the `proc_macro` crate upstream in the compiler
-itself. Usage is done via:
-
-```toml
-[dependencies]
-proc-macro2 = "0.3"
-```
-
-followed by
-
-```rust
-extern crate proc_macro;
-extern crate proc_macro2;
-
-#[proc_macro_derive(MyDerive)]
-pub fn my_derive(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
-    let input: proc_macro2::TokenStream = input.into();
-
-    let output: proc_macro2::TokenStream = {
-        /* transform input */
-    };
-
-    output.into()
-}
-```
-
-If you'd like you can enable the `nightly` feature in this crate. This will
-cause it to compile against the **unstable and nightly-only** features of the
-`proc_macro` crate. This in turn requires a nightly compiler. This should help
-preserve span information, however, coming in from the compiler itself.
-
-You can enable this feature via:
-
-```toml
-[dependencies]
-proc-macro2 = { version = "0.3", features = ["nightly"] }
-```
-
-
-## Unstable Features
-
-`proc-macro2` supports exporting some methods from `proc_macro` which are
-currently highly unstable, and may not be stabilized in the first pass of
-`proc_macro` stabilizations. These features are not exported by default. Minor
-versions of `proc-macro2` may make breaking changes to them at any time.
-
-To enable these features, the `procmacro2_semver_exempt` config flag must be
-passed to rustc.
-
-```
-RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo build
-```
-
-Note that this must not only be done for your crate, but for any crate that
-depends on your crate. This infectious nature is intentional, as it serves as a
-reminder that you are outside of the normal semver guarantees.
-
-
-# License
-
-This project is licensed under either of
-
- * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
-   http://www.apache.org/licenses/LICENSE-2.0)
- * MIT license ([LICENSE-MIT](LICENSE-MIT) or
-   http://opensource.org/licenses/MIT)
-
-at your option.
-
-### Contribution
-
-Unless you explicitly state otherwise, any contribution intentionally submitted
-for inclusion in Serde by you, as defined in the Apache-2.0 license, shall be
-dual licensed as above, without any additional terms or conditions.
deleted file mode 100644
--- a/third_party/rust/proc-macro2-0.3.5/src/lib.rs
+++ /dev/null
@@ -1,590 +0,0 @@
-//! A "shim crate" intended to multiplex the [`proc_macro`] API on to stable
-//! Rust.
-//!
-//! Procedural macros in Rust operate over the upstream
-//! [`proc_macro::TokenStream`][ts] type. This type currently is quite
-//! conservative and exposed no internal implementation details. Nightly
-//! compilers, however, contain a much richer interface. This richer interface
-//! allows fine-grained inspection of the token stream which avoids
-//! stringification/re-lexing and also preserves span information.
-//!
-//! The upcoming APIs added to [`proc_macro`] upstream are the foundation for
-//! productive procedural macros in the ecosystem. To help prepare the ecosystem
-//! for using them this crate serves to both compile on stable and nightly and
-//! mirrors the API-to-be. The intention is that procedural macros which switch
-//! to use this crate will be trivially able to switch to the upstream
-//! `proc_macro` crate once its API stabilizes.
-//!
-//! In the meantime this crate also has a `nightly` Cargo feature which
-//! enables it to reimplement itself with the unstable API of [`proc_macro`].
-//! This'll allow immediate usage of the beneficial upstream API, particularly
-//! around preserving span information.
-//!
-//! [`proc_macro`]: https://doc.rust-lang.org/proc_macro/
-//! [ts]: https://doc.rust-lang.org/proc_macro/struct.TokenStream.html
-
-// Proc-macro2 types in rustdoc of other crates get linked to here.
-#![doc(html_root_url = "https://docs.rs/proc-macro2/0.3.5")]
-#![cfg_attr(feature = "nightly", feature(proc_macro))]
-
-#[cfg(feature = "proc-macro")]
-extern crate proc_macro;
-
-#[cfg(not(feature = "nightly"))]
-extern crate unicode_xid;
-
-use std::fmt;
-use std::iter::FromIterator;
-use std::marker;
-use std::rc::Rc;
-use std::str::FromStr;
-
-#[macro_use]
-#[cfg(not(feature = "nightly"))]
-mod strnom;
-
-#[path = "stable.rs"]
-#[cfg(not(feature = "nightly"))]
-mod imp;
-#[path = "unstable.rs"]
-#[cfg(feature = "nightly")]
-mod imp;
-
-#[derive(Clone)]
-pub struct TokenStream {
-    inner: imp::TokenStream,
-    _marker: marker::PhantomData<Rc<()>>,
-}
-
-pub struct LexError {
-    inner: imp::LexError,
-    _marker: marker::PhantomData<Rc<()>>,
-}
-
-impl TokenStream {
-    fn _new(inner: imp::TokenStream) -> TokenStream {
-        TokenStream {
-            inner: inner,
-            _marker: marker::PhantomData,
-        }
-    }
-
-    pub fn empty() -> TokenStream {
-        TokenStream::_new(imp::TokenStream::empty())
-    }
-
-    pub fn is_empty(&self) -> bool {
-        self.inner.is_empty()
-    }
-}
-
-impl FromStr for TokenStream {
-    type Err = LexError;
-
-    fn from_str(src: &str) -> Result<TokenStream, LexError> {
-        let e = src.parse().map_err(|e| LexError {
-            inner: e,
-            _marker: marker::PhantomData,
-        })?;
-        Ok(TokenStream::_new(e))
-    }
-}
-
-#[cfg(feature = "proc-macro")]
-impl From<proc_macro::TokenStream> for TokenStream {
-    fn from(inner: proc_macro::TokenStream) -> TokenStream {
-        TokenStream::_new(inner.into())
-    }
-}
-
-#[cfg(feature = "proc-macro")]
-impl From<TokenStream> for proc_macro::TokenStream {
-    fn from(inner: TokenStream) -> proc_macro::TokenStream {
-        inner.inner.into()
-    }
-}
-
-impl FromIterator<TokenTree> for TokenStream {
-    fn from_iter<I: IntoIterator<Item = TokenTree>>(streams: I) -> Self {
-        TokenStream::_new(streams.into_iter().collect())
-    }
-}
-
-impl fmt::Display for TokenStream {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        self.inner.fmt(f)
-    }
-}
-
-impl fmt::Debug for TokenStream {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        self.inner.fmt(f)
-    }
-}
-
-impl fmt::Debug for LexError {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        self.inner.fmt(f)
-    }
-}
-
-// Returned by reference, so we can't easily wrap it.
-#[cfg(procmacro2_semver_exempt)]
-pub use imp::FileName;
-
-#[cfg(procmacro2_semver_exempt)]
-#[derive(Clone, PartialEq, Eq)]
-pub struct SourceFile(imp::SourceFile);
-
-#[cfg(procmacro2_semver_exempt)]
-impl SourceFile {
-    /// Get the path to this source file as a string.
-    pub fn path(&self) -> &FileName {
-        self.0.path()
-    }
-
-    pub fn is_real(&self) -> bool {
-        self.0.is_real()
-    }
-}
-
-#[cfg(procmacro2_semver_exempt)]
-impl AsRef<FileName> for SourceFile {
-    fn as_ref(&self) -> &FileName {
-        self.0.path()
-    }
-}
-
-#[cfg(procmacro2_semver_exempt)]
-impl fmt::Debug for SourceFile {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        self.0.fmt(f)
-    }
-}
-
-#[cfg(procmacro2_semver_exempt)]
-pub struct LineColumn {
-    pub line: usize,
-    pub column: usize,
-}
-
-#[derive(Copy, Clone)]
-pub struct Span {
-    inner: imp::Span,
-    _marker: marker::PhantomData<Rc<()>>,
-}
-
-impl Span {
-    fn _new(inner: imp::Span) -> Span {
-        Span {
-            inner: inner,
-            _marker: marker::PhantomData,
-        }
-    }
-
-    pub fn call_site() -> Span {
-        Span::_new(imp::Span::call_site())
-    }
-
-    #[cfg(procmacro2_semver_exempt)]
-    pub fn def_site() -> Span {
-        Span::_new(imp::Span::def_site())
-    }
-
-    /// Creates a new span with the same line/column information as `self` but
-    /// that resolves symbols as though it were at `other`.
-    #[cfg(procmacro2_semver_exempt)]
-    pub fn resolved_at(&self, other: Span) -> Span {
-        Span::_new(self.inner.resolved_at(other.inner))
-    }
-
-    /// Creates a new span with the same name resolution behavior as `self` but
-    /// with the line/column information of `other`.
-    #[cfg(procmacro2_semver_exempt)]
-    pub fn located_at(&self, other: Span) -> Span {
-        Span::_new(self.inner.located_at(other.inner))
-    }
-
-    /// This method is only available when the `"nightly"` feature is enabled.
-    #[cfg(all(feature = "nightly", feature = "proc-macro"))]
-    pub fn unstable(self) -> proc_macro::Span {
-        self.inner.unstable()
-    }
-
-    #[cfg(procmacro2_semver_exempt)]
-    pub fn source_file(&self) -> SourceFile {
-        SourceFile(self.inner.source_file())
-    }
-
-    #[cfg(procmacro2_semver_exempt)]
-    pub fn start(&self) -> LineColumn {
-        let imp::LineColumn { line, column } = self.inner.start();
-        LineColumn {
-            line: line,
-            column: column,
-        }
-    }
-
-    #[cfg(procmacro2_semver_exempt)]
-    pub fn end(&self) -> LineColumn {
-        let imp::LineColumn { line, column } = self.inner.end();
-        LineColumn {
-            line: line,
-            column: column,
-        }
-    }
-
-    #[cfg(procmacro2_semver_exempt)]
-    pub fn join(&self, other: Span) -> Option<Span> {
-        self.inner.join(other.inner).map(Span::_new)
-    }
-
-    #[cfg(procmacro2_semver_exempt)]
-    pub fn eq(&self, other: &Span) -> bool {
-        self.inner.eq(&other.inner)
-    }
-}
-
-impl fmt::Debug for Span {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        self.inner.fmt(f)
-    }
-}
-
-#[derive(Clone, Debug)]
-pub enum TokenTree {
-    Group(Group),
-    Term(Term),
-    Op(Op),
-    Literal(Literal),
-}
-
-impl TokenTree {
-    pub fn span(&self) -> Span {
-        match *self {
-            TokenTree::Group(ref t) => t.span(),
-            TokenTree::Term(ref t) => t.span(),
-            TokenTree::Op(ref t) => t.span(),
-            TokenTree::Literal(ref t) => t.span(),
-        }
-    }
-
-    pub fn set_span(&mut self, span: Span) {
-        match *self {
-            TokenTree::Group(ref mut t) => t.set_span(span),
-            TokenTree::Term(ref mut t) => t.set_span(span),
-            TokenTree::Op(ref mut t) => t.set_span(span),
-            TokenTree::Literal(ref mut t) => t.set_span(span),
-        }
-    }
-}
-
-impl From<Group> for TokenTree {
-    fn from(g: Group) -> TokenTree {
-        TokenTree::Group(g)
-    }
-}
-
-impl From<Term> for TokenTree {
-    fn from(g: Term) -> TokenTree {
-        TokenTree::Term(g)
-    }
-}
-
-impl From<Op> for TokenTree {
-    fn from(g: Op) -> TokenTree {
-        TokenTree::Op(g)
-    }
-}
-
-impl From<Literal> for TokenTree {
-    fn from(g: Literal) -> TokenTree {
-        TokenTree::Literal(g)
-    }
-}
-
-impl fmt::Display for TokenTree {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match *self {
-            TokenTree::Group(ref t) => t.fmt(f),
-            TokenTree::Term(ref t) => t.fmt(f),
-            TokenTree::Op(ref t) => t.fmt(f),
-            TokenTree::Literal(ref t) => t.fmt(f),
-        }
-    }
-}
-
-#[derive(Clone, Debug)]
-pub struct Group {
-    delimiter: Delimiter,
-    stream: TokenStream,
-    span: Span,
-}
-
-#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-pub enum Delimiter {
-    Parenthesis,
-    Brace,
-    Bracket,
-    None,
-}
-
-impl Group {
-    pub fn new(delimiter: Delimiter, stream: TokenStream) -> Group {
-        Group {
-            delimiter: delimiter,
-            stream: stream,
-            span: Span::call_site(),
-        }
-    }
-
-    pub fn delimiter(&self) -> Delimiter {
-        self.delimiter
-    }
-
-    pub fn stream(&self) -> TokenStream {
-        self.stream.clone()
-    }
-
-    pub fn span(&self) -> Span {
-        self.span
-    }
-
-    pub fn set_span(&mut self, span: Span) {
-        self.span = span;
-    }
-}
-
-impl fmt::Display for Group {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        self.stream.fmt(f)
-    }
-}
-
-#[derive(Copy, Clone, Debug)]
-pub struct Op {
-    op: char,
-    spacing: Spacing,
-    span: Span,
-}
-
-#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-pub enum Spacing {
-    Alone,
-    Joint,
-}
-
-impl Op {
-    pub fn new(op: char, spacing: Spacing) -> Op {
-        Op {
-            op: op,
-            spacing: spacing,
-            span: Span::call_site(),
-        }
-    }
-
-    pub fn op(&self) -> char {
-        self.op
-    }
-
-    pub fn spacing(&self) -> Spacing {
-        self.spacing
-    }
-
-    pub fn span(&self) -> Span {
-        self.span
-    }
-
-    pub fn set_span(&mut self, span: Span) {
-        self.span = span;
-    }
-}
-
-impl fmt::Display for Op {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        self.op.fmt(f)
-    }
-}
-
-#[derive(Copy, Clone)]
-pub struct Term {
-    inner: imp::Term,
-    _marker: marker::PhantomData<Rc<()>>,
-}
-
-impl Term {
-    fn _new(inner: imp::Term) -> Term {
-        Term {
-            inner: inner,
-            _marker: marker::PhantomData,
-        }
-    }
-
-    pub fn new(string: &str, span: Span) -> Term {
-        Term::_new(imp::Term::new(string, span.inner))
-    }
-
-    pub fn as_str(&self) -> &str {
-        self.inner.as_str()
-    }
-
-    pub fn span(&self) -> Span {
-        Span::_new(self.inner.span())
-    }
-
-    pub fn set_span(&mut self, span: Span) {
-        self.inner.set_span(span.inner);
-    }
-}
-
-impl fmt::Display for Term {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        self.as_str().fmt(f)
-    }
-}
-
-impl fmt::Debug for Term {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        self.inner.fmt(f)
-    }
-}
-
-#[derive(Clone)]
-pub struct Literal {
-    inner: imp::Literal,
-    _marker: marker::PhantomData<Rc<()>>,
-}
-
-macro_rules! int_literals {
-    ($($name:ident => $kind:ident,)*) => ($(
-        pub fn $name(n: $kind) -> Literal {
-            Literal::_new(imp::Literal::$name(n))
-        }
-    )*)
-}
-
-impl Literal {
-    fn _new(inner: imp::Literal) -> Literal {
-        Literal {
-            inner: inner,
-            _marker: marker::PhantomData,
-        }
-    }
-
-    int_literals! {
-        u8_suffixed => u8,
-        u16_suffixed => u16,
-        u32_suffixed => u32,
-        u64_suffixed => u64,
-        usize_suffixed => usize,
-        i8_suffixed => i8,
-        i16_suffixed => i16,
-        i32_suffixed => i32,
-        i64_suffixed => i64,
-        isize_suffixed => isize,
-
-        u8_unsuffixed => u8,
-        u16_unsuffixed => u16,
-        u32_unsuffixed => u32,
-        u64_unsuffixed => u64,
-        usize_unsuffixed => usize,
-        i8_unsuffixed => i8,
-        i16_unsuffixed => i16,
-        i32_unsuffixed => i32,
-        i64_unsuffixed => i64,
-        isize_unsuffixed => isize,
-    }
-
-    pub fn f64_unsuffixed(f: f64) -> Literal {
-        assert!(f.is_finite());
-        Literal::_new(imp::Literal::f64_unsuffixed(f))
-    }
-
-    pub fn f64_suffixed(f: f64) -> Literal {
-        assert!(f.is_finite());
-        Literal::_new(imp::Literal::f64_suffixed(f))
-    }
-
-    pub fn f32_unsuffixed(f: f32) -> Literal {
-        assert!(f.is_finite());
-        Literal::_new(imp::Literal::f32_unsuffixed(f))
-    }
-
-    pub fn f32_suffixed(f: f32) -> Literal {
-        assert!(f.is_finite());
-        Literal::_new(imp::Literal::f32_suffixed(f))
-    }
-
-    pub fn string(string: &str) -> Literal {
-        Literal::_new(imp::Literal::string(string))
-    }
-
-    pub fn character(ch: char) -> Literal {
-        Literal::_new(imp::Literal::character(ch))
-    }
-
-    pub fn byte_string(s: &[u8]) -> Literal {
-        Literal::_new(imp::Literal::byte_string(s))
-    }
-
-    pub fn span(&self) -> Span {
-        Span::_new(self.inner.span())
-    }
-
-    pub fn set_span(&mut self, span: Span) {
-        self.inner.set_span(span.inner);
-    }
-}
-
-impl fmt::Debug for Literal {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        self.inner.fmt(f)
-    }
-}
-
-impl fmt::Display for Literal {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        self.inner.fmt(f)
-    }
-}
-
-pub mod token_stream {
-    use std::fmt;
-    use std::marker;
-    use std::rc::Rc;
-
-    pub use TokenStream;
-    use TokenTree;
-    use imp;
-
-    pub struct IntoIter {
-        inner: imp::TokenTreeIter,
-        _marker: marker::PhantomData<Rc<()>>,
-    }
-
-    impl Iterator for IntoIter {
-        type Item = TokenTree;
-
-        fn next(&mut self) -> Option<TokenTree> {
-            self.inner.next()
-        }
-    }
-
-    impl fmt::Debug for IntoIter {
-        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-            self.inner.fmt(f)
-        }
-    }
-
-    impl IntoIterator for TokenStream {
-        type Item = TokenTree;
-        type IntoIter = IntoIter;
-
-        fn into_iter(self) -> IntoIter {
-            IntoIter {
-                inner: self.inner.into_iter(),
-                _marker: marker::PhantomData,
-            }
-        }
-    }
-}
deleted file mode 100644
--- a/third_party/rust/proc-macro2-0.3.5/src/stable.rs
+++ /dev/null
@@ -1,1227 +0,0 @@
-#![cfg_attr(not(procmacro2_semver_exempt), allow(dead_code))]
-
-use std::borrow::Borrow;
-use std::cell::RefCell;
-#[cfg(procmacro2_semver_exempt)]
-use std::cmp;
-use std::collections::HashMap;
-use std::fmt;
-use std::iter;
-use std::rc::Rc;
-use std::str::FromStr;
-use std::vec;
-
-use strnom::{block_comment, skip_whitespace, whitespace, word_break, Cursor, PResult};
-use unicode_xid::UnicodeXID;
-
-use {Delimiter, Group, Op, Spacing, TokenTree};
-
-#[derive(Clone, Debug)]
-pub struct TokenStream {
-    inner: Vec<TokenTree>,
-}
-
-#[derive(Debug)]
-pub struct LexError;
-
-impl TokenStream {
-    pub fn empty() -> TokenStream {
-        TokenStream { inner: Vec::new() }
-    }
-
-    pub fn is_empty(&self) -> bool {
-        self.inner.len() == 0
-    }
-}
-
-#[cfg(procmacro2_semver_exempt)]
-fn get_cursor(src: &str) -> Cursor {
-    // Create a dummy file & add it to the codemap
-    CODEMAP.with(|cm| {
-        let mut cm = cm.borrow_mut();
-        let name = format!("<parsed string {}>", cm.files.len());
-        let span = cm.add_file(&name, src);
-        Cursor {
-            rest: src,
-            off: span.lo,
-        }
-    })
-}
-
-#[cfg(not(procmacro2_semver_exempt))]
-fn get_cursor(src: &str) -> Cursor {
-    Cursor { rest: src }
-}
-
-impl FromStr for TokenStream {
-    type Err = LexError;
-
-    fn from_str(src: &str) -> Result<TokenStream, LexError> {
-        // Create a dummy file & add it to the codemap
-        let cursor = get_cursor(src);
-
-        match token_stream(cursor) {
-            Ok((input, output)) => {
-                if skip_whitespace(input).len() != 0 {
-                    Err(LexError)
-                } else {
-                    Ok(output.inner)
-                }
-            }
-            Err(LexError) => Err(LexError),
-        }
-    }
-}
-
-impl fmt::Display for TokenStream {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        let mut joint = false;
-        for (i, tt) in self.inner.iter().enumerate() {
-            if i != 0 && !joint {
-                write!(f, " ")?;
-            }
-            joint = false;
-            match *tt {
-                TokenTree::Group(ref tt) => {
-                    let (start, end) = match tt.delimiter() {
-                        Delimiter::Parenthesis => ("(", ")"),
-                        Delimiter::Brace => ("{", "}"),
-                        Delimiter::Bracket => ("[", "]"),
-                        Delimiter::None => ("", ""),
-                    };
-                    if tt.stream().inner.inner.len() == 0 {
-                        write!(f, "{} {}", start, end)?
-                    } else {
-                        write!(f, "{} {} {}", start, tt.stream(), end)?
-                    }
-                }
-                TokenTree::Term(ref tt) => write!(f, "{}", tt.as_str())?,
-                TokenTree::Op(ref tt) => {
-                    write!(f, "{}", tt.op())?;
-                    match tt.spacing() {
-                        Spacing::Alone => {}
-                        Spacing::Joint => joint = true,
-                    }
-                }
-                TokenTree::Literal(ref tt) => write!(f, "{}", tt)?,
-            }
-        }
-
-        Ok(())
-    }
-}
-
-#[cfg(feature = "proc-macro")]
-impl From<::proc_macro::TokenStream> for TokenStream {
-    fn from(inner: ::proc_macro::TokenStream) -> TokenStream {
-        inner
-            .to_string()
-            .parse()
-            .expect("compiler token stream parse failed")
-    }
-}
-
-#[cfg(feature = "proc-macro")]
-impl From<TokenStream> for ::proc_macro::TokenStream {
-    fn from(inner: TokenStream) -> ::proc_macro::TokenStream {
-        inner
-            .to_string()
-            .parse()
-            .expect("failed to parse to compiler tokens")
-    }
-}
-
-impl From<TokenTree> for TokenStream {
-    fn from(tree: TokenTree) -> TokenStream {
-        TokenStream { inner: vec![tree] }
-    }
-}
-
-impl iter::FromIterator<TokenTree> for TokenStream {
-    fn from_iter<I: IntoIterator<Item = TokenTree>>(streams: I) -> Self {
-        let mut v = Vec::new();
-
-        for token in streams.into_iter() {
-            v.push(token);
-        }
-
-        TokenStream { inner: v }
-    }
-}
-
-pub type TokenTreeIter = vec::IntoIter<TokenTree>;
-
-impl IntoIterator for TokenStream {
-    type Item = TokenTree;
-    type IntoIter = TokenTreeIter;
-
-    fn into_iter(self) -> TokenTreeIter {
-        self.inner.into_iter()
-    }
-}
-
-#[cfg(procmacro2_semver_exempt)]
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub struct FileName(String);
-
-#[cfg(procmacro2_semver_exempt)]
-impl fmt::Display for FileName {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        self.0.fmt(f)
-    }
-}
-
-#[cfg(procmacro2_semver_exempt)]
-#[derive(Clone, PartialEq, Eq)]
-pub struct SourceFile {
-    name: FileName,
-}
-
-#[cfg(procmacro2_semver_exempt)]
-impl SourceFile {
-    /// Get the path to this source file as a string.
-    pub fn path(&self) -> &FileName {
-        &self.name
-    }
-
-    pub fn is_real(&self) -> bool {
-        // XXX(nika): Support real files in the future?
-        false
-    }
-}
-
-#[cfg(procmacro2_semver_exempt)]
-impl AsRef<FileName> for SourceFile {
-    fn as_ref(&self) -> &FileName {
-        self.path()
-    }
-}
-
-#[cfg(procmacro2_semver_exempt)]
-impl fmt::Debug for SourceFile {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_struct("SourceFile")
-            .field("path", &self.path())
-            .field("is_real", &self.is_real())
-            .finish()
-    }
-}
-
-#[cfg(procmacro2_semver_exempt)]
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub struct LineColumn {
-    pub line: usize,
-    pub column: usize,
-}
-
-#[cfg(procmacro2_semver_exempt)]
-thread_local! {
-    static CODEMAP: RefCell<Codemap> = RefCell::new(Codemap {
-        // NOTE: We start with a single dummy file which all call_site() and
-        // def_site() spans reference.
-        files: vec![FileInfo {
-            name: "<unspecified>".to_owned(),
-            span: Span { lo: 0, hi: 0 },
-            lines: vec![0],
-        }],
-    });
-}
-
-#[cfg(procmacro2_semver_exempt)]
-struct FileInfo {
-    name: String,
-    span: Span,
-    lines: Vec<usize>,
-}
-
-#[cfg(procmacro2_semver_exempt)]
-impl FileInfo {
-    fn offset_line_column(&self, offset: usize) -> LineColumn {
-        assert!(self.span_within(Span {
-            lo: offset as u32,
-            hi: offset as u32
-        }));
-        let offset = offset - self.span.lo as usize;
-        match self.lines.binary_search(&offset) {
-            Ok(found) => LineColumn {
-                line: found + 1,
-                column: 0,
-            },
-            Err(idx) => LineColumn {
-                line: idx,
-                column: offset - self.lines[idx - 1],
-            },
-        }
-    }
-
-    fn span_within(&self, span: Span) -> bool {
-        span.lo >= self.span.lo && span.hi <= self.span.hi
-    }
-}
-
-/// Computesthe offsets of each line in the given source string.
-#[cfg(procmacro2_semver_exempt)]
-fn lines_offsets(s: &str) -> Vec<usize> {
-    let mut lines = vec![0];
-    let mut prev = 0;
-    while let Some(len) = s[prev..].find('\n') {
-        prev += len + 1;
-        lines.push(prev);
-    }
-    lines
-}
-
-#[cfg(procmacro2_semver_exempt)]
-struct Codemap {
-    files: Vec<FileInfo>,
-}
-
-#[cfg(procmacro2_semver_exempt)]
-impl Codemap {
-    fn next_start_pos(&self) -> u32 {
-        // Add 1 so there's always space between files.
-        //
-        // We'll always have at least 1 file, as we initialize our files list
-        // with a dummy file.
-        self.files.last().unwrap().span.hi + 1
-    }
-
-    fn add_file(&mut self, name: &str, src: &str) -> Span {
-        let lines = lines_offsets(src);
-        let lo = self.next_start_pos();
-        // XXX(nika): Shouild we bother doing a checked cast or checked add here?
-        let span = Span {
-            lo: lo,
-            hi: lo + (src.len() as u32),
-        };
-
-        self.files.push(FileInfo {
-            name: name.to_owned(),
-            span: span,
-            lines: lines,
-        });
-
-        span
-    }
-
-    fn fileinfo(&self, span: Span) -> &FileInfo {
-        for file in &self.files {
-            if file.span_within(span) {
-                return file;
-            }
-        }
-        panic!("Invalid span with no related FileInfo!");
-    }
-}
-
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub struct Span {
-    #[cfg(procmacro2_semver_exempt)]
-    lo: u32,
-    #[cfg(procmacro2_semver_exempt)]
-    hi: u32,
-}
-
-impl Span {
-    #[cfg(not(procmacro2_semver_exempt))]
-    pub fn call_site() -> Span {
-        Span {}
-    }
-
-    #[cfg(procmacro2_semver_exempt)]
-    pub fn call_site() -> Span {
-        Span { lo: 0, hi: 0 }
-    }
-
-    pub fn def_site() -> Span {
-        Span::call_site()
-    }
-
-    pub fn resolved_at(&self, _other: Span) -> Span {
-        // Stable spans consist only of line/column information, so
-        // `resolved_at` and `located_at` only select which span the
-        // caller wants line/column information from.
-        *self
-    }
-
-    pub fn located_at(&self, other: Span) -> Span {
-        other
-    }
-
-    #[cfg(procmacro2_semver_exempt)]
-    pub fn source_file(&self) -> SourceFile {
-        CODEMAP.with(|cm| {
-            let cm = cm.borrow();
-            let fi = cm.fileinfo(*self);
-            SourceFile {
-                name: FileName(fi.name.clone()),
-            }
-        })
-    }
-
-    #[cfg(procmacro2_semver_exempt)]
-    pub fn start(&self) -> LineColumn {
-        CODEMAP.with(|cm| {
-            let cm = cm.borrow();
-            let fi = cm.fileinfo(*self);
-            fi.offset_line_column(self.lo as usize)
-        })
-    }
-
-    #[cfg(procmacro2_semver_exempt)]
-    pub fn end(&self) -> LineColumn {
-        CODEMAP.with(|cm| {
-            let cm = cm.borrow();
-            let fi = cm.fileinfo(*self);
-            fi.offset_line_column(self.hi as usize)
-        })
-    }
-
-    #[cfg(procmacro2_semver_exempt)]
-    pub fn join(&self, other: Span) -> Option<Span> {
-        CODEMAP.with(|cm| {
-            let cm = cm.borrow();
-            // If `other` is not within the same FileInfo as us, return None.
-            if !cm.fileinfo(*self).span_within(other) {
-                return None;
-            }
-            Some(Span {
-                lo: cmp::min(self.lo, other.lo),
-                hi: cmp::max(self.hi, other.hi),
-            })
-        })
-    }
-}
-
-#[derive(Copy, Clone)]
-pub struct Term {
-    intern: usize,
-    span: Span,
-}
-
-thread_local!(static SYMBOLS: RefCell<Interner> = RefCell::new(Interner::new()));
-
-impl Term {
-    pub fn new(string: &str, span: Span) -> Term {
-        Term {
-            intern: SYMBOLS.with(|s| s.borrow_mut().intern(string)),
-            span: span,
-        }
-    }
-
-    pub fn as_str(&self) -> &str {
-        SYMBOLS.with(|interner| {
-            let interner = interner.borrow();
-            let s = interner.get(self.intern);
-            unsafe { &*(s as *const str) }
-        })
-    }
-
-    pub fn span(&self) -> Span {
-        self.span
-    }
-
-    pub fn set_span(&mut self, span: Span) {
-        self.span = span;
-    }
-}
-
-impl fmt::Debug for Term {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_tuple("Term").field(&self.as_str()).finish()
-    }
-}
-
-struct Interner {
-    string_to_index: HashMap<MyRc, usize>,
-    index_to_string: Vec<Rc<String>>,
-}
-
-#[derive(Hash, Eq, PartialEq)]
-struct MyRc(Rc<String>);
-
-impl Borrow<str> for MyRc {
-    fn borrow(&self) -> &str {
-        &self.0
-    }
-}
-
-impl Interner {
-    fn new() -> Interner {
-        Interner {
-            string_to_index: HashMap::new(),
-            index_to_string: Vec::new(),
-        }
-    }
-
-    fn intern(&mut self, s: &str) -> usize {
-        if let Some(&idx) = self.string_to_index.get(s) {
-            return idx;
-        }
-        let s = Rc::new(s.to_string());
-        self.index_to_string.push(s.clone());
-        self.string_to_index
-            .insert(MyRc(s), self.index_to_string.len() - 1);
-        self.index_to_string.len() - 1
-    }
-
-    fn get(&self, idx: usize) -> &str {
-        &self.index_to_string[idx]
-    }
-}
-
-#[derive(Clone, Debug)]
-pub struct Literal {
-    text: String,
-    span: Span,
-}
-
-macro_rules! suffixed_numbers {
-    ($($name:ident => $kind:ident,)*) => ($(
-        pub fn $name(n: $kind) -> Literal {
-            Literal::_new(format!(concat!("{}", stringify!($kind)), n))
-        }
-    )*)
-}
-
-macro_rules! unsuffixed_numbers {
-    ($($name:ident => $kind:ident,)*) => ($(
-        pub fn $name(n: $kind) -> Literal {
-            Literal::_new(n.to_string())
-        }
-    )*)
-}
-
-impl Literal {
-    fn _new(text: String) -> Literal {
-        Literal {
-            text: text,
-            span: Span::call_site(),
-        }
-    }
-
-    suffixed_numbers! {
-        u8_suffixed => u8,
-        u16_suffixed => u16,
-        u32_suffixed => u32,
-        u64_suffixed => u64,
-        usize_suffixed => usize,
-        i8_suffixed => i8,
-        i16_suffixed => i16,
-        i32_suffixed => i32,
-        i64_suffixed => i64,
-        isize_suffixed => isize,
-
-        f32_suffixed => f32,
-        f64_suffixed => f64,
-    }
-
-    unsuffixed_numbers! {
-        u8_unsuffixed => u8,
-        u16_unsuffixed => u16,
-        u32_unsuffixed => u32,
-        u64_unsuffixed => u64,
-        usize_unsuffixed => usize,
-        i8_unsuffixed => i8,
-        i16_unsuffixed => i16,
-        i32_unsuffixed => i32,
-        i64_unsuffixed => i64,
-        isize_unsuffixed => isize,
-    }
-
-    pub fn f32_unsuffixed(f: f32) -> Literal {
-        let mut s = f.to_string();
-        if !s.contains(".") {
-            s.push_str(".0");
-        }
-        Literal::_new(s)
-    }
-
-    pub fn f64_unsuffixed(f: f64) -> Literal {
-        let mut s = f.to_string();
-        if !s.contains(".") {
-            s.push_str(".0");
-        }
-        Literal::_new(s)
-    }
-
-    pub fn string(t: &str) -> Literal {
-        let mut s = t.chars()
-            .flat_map(|c| c.escape_default())
-            .collect::<String>();
-        s.push('"');
-        s.insert(0, '"');
-        Literal::_new(s)
-    }
-
-    pub fn character(t: char) -> Literal {
-        Literal::_new(format!("'{}'", t.escape_default().collect::<String>()))
-    }
-
-    pub fn byte_string(bytes: &[u8]) -> Literal {
-        let mut escaped = "b\"".to_string();
-        for b in bytes {
-            match *b {
-                b'\0' => escaped.push_str(r"\0"),
-                b'\t' => escaped.push_str(r"\t"),
-                b'\n' => escaped.push_str(r"\n"),
-                b'\r' => escaped.push_str(r"\r"),
-                b'"' => escaped.push_str("\\\""),
-                b'\\' => escaped.push_str("\\\\"),
-                b'\x20'...b'\x7E' => escaped.push(*b as char),
-                _ => escaped.push_str(&format!("\\x{:02X}", b)),
-            }
-        }
-        escaped.push('"');
-        Literal::_new(escaped)
-    }
-
-    pub fn span(&self) -> Span {
-        self.span
-    }
-
-    pub fn set_span(&mut self, span: Span) {
-        self.span = span;
-    }
-}
-
-impl fmt::Display for Literal {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        self.text.fmt(f)
-    }
-}
-
-fn token_stream(mut input: Cursor) -> PResult<::TokenStream> {
-    let mut trees = Vec::new();
-    loop {
-        let input_no_ws = skip_whitespace(input);
-        if input_no_ws.rest.len() == 0 {
-            break
-        }
-        if let Ok((a, tokens)) = doc_comment(input_no_ws) {
-            input = a;
-            trees.extend(tokens);
-            continue
-        }
-
-        let (a, tt) = match token_tree(input_no_ws) {
-            Ok(p) => p,
-            Err(_) => break,
-        };
-        trees.push(tt);
-        input = a;
-    }
-    Ok((input, ::TokenStream::_new(TokenStream { inner: trees })))
-}
-
-#[cfg(not(procmacro2_semver_exempt))]
-fn spanned<'a, T>(
-    input: Cursor<'a>,
-    f: fn(Cursor<'a>) -> PResult<'a, T>,
-) -> PResult<'a, (T, ::Span)> {
-    let (a, b) = f(skip_whitespace(input))?;
-    Ok((a, ((b, ::Span::_new(Span { })))))
-}
-
-#[cfg(procmacro2_semver_exempt)]
-fn spanned<'a, T>(
-    input: Cursor<'a>,
-    f: fn(Cursor<'a>) -> PResult<'a, T>,
-) -> PResult<'a, (T, ::Span)> {
-    let input = skip_whitespace(input);
-    let lo = input.off;
-    let (a, b) = f(input)?;
-    let hi = a.off;
-    let span = ::Span::_new(Span { lo: lo, hi: hi });
-    Ok((a, (b, span)))
-}
-
-fn token_tree(input: Cursor) -> PResult<TokenTree> {
-    let (rest, (mut tt, span)) = spanned(input, token_kind)?;
-    tt.set_span(span);
-    Ok((rest, tt))
-}
-
-named!(token_kind -> TokenTree, alt!(
-    map!(group, TokenTree::Group)
-    |
-    map!(literal, TokenTree::Literal) // must be before symbol
-    |
-    symbol
-    |
-    map!(op, TokenTree::Op)
-));
-
-named!(group -> Group, alt!(
-    delimited!(
-        punct!("("),
-        token_stream,
-        punct!(")")
-    ) => { |ts| Group::new(Delimiter::Parenthesis, ts) }
-    |
-    delimited!(
-        punct!("["),
-        token_stream,
-        punct!("]")
-    ) => { |ts| Group::new(Delimiter::Bracket, ts) }
-    |
-    delimited!(
-        punct!("{"),
-        token_stream,
-        punct!("}")
-    ) => { |ts| Group::new(Delimiter::Brace, ts) }
-));
-
-fn symbol(mut input: Cursor) -> PResult<TokenTree> {
-    input = skip_whitespace(input);
-
-    let mut chars = input.char_indices();
-
-    let lifetime = input.starts_with("'");
-    if lifetime {
-        chars.next();
-    }
-
-    let raw = !lifetime && input.starts_with("r#");
-    if raw {
-        chars.next();
-        chars.next();
-    }
-
-    match chars.next() {
-        Some((_, ch)) if UnicodeXID::is_xid_start(ch) || ch == '_' => {}
-        _ => return Err(LexError),
-    }
-
-    let mut end = input.len();
-    for (i, ch) in chars {
-        if !UnicodeXID::is_xid_continue(ch) {
-            end = i;
-            break;
-        }
-    }
-
-    let a = &input.rest[..end];
-    if a == "r#_" || lifetime && a != "'static" && KEYWORDS.contains(&&a[1..]) {
-        Err(LexError)
-    } else if a == "_" {
-        Ok((input.advance(end), Op::new('_', Spacing::Alone).into()))
-    } else {
-        Ok((
-            input.advance(end),
-            ::Term::new(a, ::Span::call_site()).into(),
-        ))
-    }
-}
-
-// From https://github.com/rust-lang/rust/blob/master/src/libsyntax_pos/symbol.rs
-static KEYWORDS: &'static [&'static str] = &[
-    "abstract", "alignof", "as", "become", "box", "break", "const", "continue", "crate", "do",
-    "else", "enum", "extern", "false", "final", "fn", "for", "if", "impl", "in", "let", "loop",
-    "macro", "match", "mod", "move", "mut", "offsetof", "override", "priv", "proc", "pub", "pure",
-    "ref", "return", "self", "Self", "sizeof", "static", "struct", "super", "trait", "true",
-    "type", "typeof", "unsafe", "unsized", "use", "virtual", "where", "while", "yield",
-];
-
-fn literal(input: Cursor) -> PResult<::Literal> {
-    let input_no_ws = skip_whitespace(input);
-
-    match literal_nocapture(input_no_ws) {
-        Ok((a, ())) => {
-            let start = input.len() - input_no_ws.len();
-            let len = input_no_ws.len() - a.len();
-            let end = start + len;
-            Ok((
-                a,
-                ::Literal::_new(Literal::_new(input.rest[start..end].to_string())),
-            ))
-        }
-        Err(LexError) => Err(LexError),
-    }
-}
-
-named!(literal_nocapture -> (), alt!(
-    string
-    |
-    byte_string
-    |
-    byte
-    |
-    character
-    |
-    float
-    |
-    int
-));
-
-named!(string -> (), alt!(
-    quoted_string
-    |
-    preceded!(
-        punct!("r"),
-        raw_string
-    ) => { |_| () }
-));
-
-named!(quoted_string -> (), delimited!(
-    punct!("\""),
-    cooked_string,
-    tag!("\"")
-));
-
-fn cooked_string(input: Cursor) -> PResult<()> {
-    let mut chars = input.char_indices().peekable();
-    while let Some((byte_offset, ch)) = chars.next() {
-        match ch {
-            '"' => {
-                return Ok((input.advance(byte_offset), ()));
-            }
-            '\r' => {
-                if let Some((_, '\n')) = chars.next() {
-                    // ...
-                } else {
-                    break;
-                }
-            }
-            '\\' => match chars.next() {
-                Some((_, 'x')) => {
-                    if !backslash_x_char(&mut chars) {
-                        break;
-                    }
-                }
-                Some((_, 'n')) | Some((_, 'r')) | Some((_, 't')) | Some((_, '\\'))
-                | Some((_, '\'')) | Some((_, '"')) | Some((_, '0')) => {}
-                Some((_, 'u')) => {
-                    if !backslash_u(&mut chars) {
-                        break;
-                    }
-                }
-                Some((_, '\n')) | Some((_, '\r')) => {
-                    while let Some(&(_, ch)) = chars.peek() {
-                        if ch.is_whitespace() {
-                            chars.next();
-                        } else {
-                            break;
-                        }
-                    }
-                }
-                _ => break,
-            },
-            _ch => {}
-        }
-    }
-    Err(LexError)
-}
-
-named!(byte_string -> (), alt!(
-    delimited!(
-        punct!("b\""),
-        cooked_byte_string,
-        tag!("\"")
-    ) => { |_| () }
-    |
-    preceded!(
-        punct!("br"),
-        raw_string
-    ) => { |_| () }
-));
-
-fn cooked_byte_string(mut input: Cursor) -> PResult<()> {
-    let mut bytes = input.bytes().enumerate();
-    'outer: while let Some((offset, b)) = bytes.next() {
-        match b {
-            b'"' => {
-                return Ok((input.advance(offset), ()));
-            }
-            b'\r' => {
-                if let Some((_, b'\n')) = bytes.next() {
-                    // ...
-                } else {
-                    break;
-                }
-            }
-            b'\\' => match bytes.next() {
-                Some((_, b'x')) => {
-                    if !backslash_x_byte(&mut bytes) {
-                        break;
-                    }
-                }
-                Some((_, b'n')) | Some((_, b'r')) | Some((_, b't')) | Some((_, b'\\'))
-                | Some((_, b'0')) | Some((_, b'\'')) | Some((_, b'"')) => {}
-                Some((newline, b'\n')) | Some((newline, b'\r')) => {
-                    let rest = input.advance(newline + 1);
-                    for (offset, ch) in rest.char_indices() {
-                        if !ch.is_whitespace() {
-                            input = rest.advance(offset);
-                            bytes = input.bytes().enumerate();
-                            continue 'outer;
-                        }
-                    }
-                    break;
-                }
-                _ => break,
-            },
-            b if b < 0x80 => {}
-            _ => break,
-        }
-    }
-    Err(LexError)
-}
-
-fn raw_string(input: Cursor) -> PResult<()> {
-    let mut chars = input.char_indices();
-    let mut n = 0;
-    while let Some((byte_offset, ch)) = chars.next() {
-        match ch {
-            '"' => {
-                n = byte_offset;
-                break;
-            }
-            '#' => {}
-            _ => return Err(LexError),
-        }
-    }
-    for (byte_offset, ch) in chars {
-        match ch {
-            '"' if input.advance(byte_offset + 1).starts_with(&input.rest[..n]) => {
-                let rest = input.advance(byte_offset + 1 + n);
-                return Ok((rest, ()));
-            }
-            '\r' => {}
-            _ => {}
-        }
-    }
-    Err(LexError)
-}
-
-named!(byte -> (), do_parse!(
-    punct!("b") >>
-    tag!("'") >>
-    cooked_byte >>
-    tag!("'") >>
-    (())
-));
-
-fn cooked_byte(input: Cursor) -> PResult<()> {
-    let mut bytes = input.bytes().enumerate();
-    let ok = match bytes.next().map(|(_, b)| b) {
-        Some(b'\\') => match bytes.next().map(|(_, b)| b) {
-            Some(b'x') => backslash_x_byte(&mut bytes),
-            Some(b'n') | Some(b'r') | Some(b't') | Some(b'\\') | Some(b'0') | Some(b'\'')
-            | Some(b'"') => true,
-            _ => false,
-        },
-        b => b.is_some(),
-    };
-    if ok {
-        match bytes.next() {
-            Some((offset, _)) => {
-                if input.chars().as_str().is_char_boundary(offset) {
-                    Ok((input.advance(offset), ()))
-                } else {
-                    Err(LexError)
-                }
-            }
-            None => Ok((input.advance(input.len()), ())),
-        }
-    } else {
-        Err(LexError)
-    }
-}
-
-named!(character -> (), do_parse!(
-    punct!("'") >>
-    cooked_char >>
-    tag!("'") >>
-    (())
-));
-
-fn cooked_char(input: Cursor) -> PResult<()> {
-    let mut chars = input.char_indices();
-    let ok = match chars.next().map(|(_, ch)| ch) {
-        Some('\\') => match chars.next().map(|(_, ch)| ch) {
-            Some('x') => backslash_x_char(&mut chars),
-            Some('u') => backslash_u(&mut chars),
-            Some('n') | Some('r') | Some('t') | Some('\\') | Some('0') | Some('\'') | Some('"') => {
-                true
-            }
-            _ => false,
-        },
-        ch => ch.is_some(),
-    };
-    if ok {
-        match chars.next() {
-            Some((idx, _)) => Ok((input.advance(idx), ())),
-            None => Ok((input.advance(input.len()), ())),
-        }
-    } else {
-        Err(LexError)
-    }
-}
-
-macro_rules! next_ch {
-    ($chars:ident @ $pat:pat $(| $rest:pat)*) => {
-        match $chars.next() {
-            Some((_, ch)) => match ch {
-                $pat $(| $rest)*  => ch,
-                _ => return false,
-            },
-            None => return false
-        }
-    };
-}
-
-fn backslash_x_char<I>(chars: &mut I) -> bool
-where
-    I: Iterator<Item = (usize, char)>,
-{
-    next_ch!(chars @ '0'...'7');
-    next_ch!(chars @ '0'...'9' | 'a'...'f' | 'A'...'F');
-    true
-}
-
-fn backslash_x_byte<I>(chars: &mut I) -> bool
-where
-    I: Iterator<Item = (usize, u8)>,
-{
-    next_ch!(chars @ b'0'...b'9' | b'a'...b'f' | b'A'...b'F');
-    next_ch!(chars @ b'0'...b'9' | b'a'...b'f' | b'A'...b'F');
-    true
-}
-
-fn backslash_u<I>(chars: &mut I) -> bool
-where
-    I: Iterator<Item = (usize, char)>,
-{
-    next_ch!(chars @ '{');
-    next_ch!(chars @ '0'...'9' | 'a'...'f' | 'A'...'F');
-    loop {
-        let c = next_ch!(chars @ '0'...'9' | 'a'...'f' | 'A'...'F' | '_' | '}');
-        if c == '}' {
-            return true;
-        }
-    }
-}
-
-fn float(input: Cursor) -> PResult<()> {
-    let (rest, ()) = float_digits(input)?;
-    for suffix in &["f32", "f64"] {
-        if rest.starts_with(suffix) {
-            return word_break(rest.advance(suffix.len()));
-        }
-    }
-    word_break(rest)
-}
-
-fn float_digits(input: Cursor) -> PResult<()> {
-    let mut chars = input.chars().peekable();
-    match chars.next() {
-        Some(ch) if ch >= '0' && ch <= '9' => {}
-        _ => return Err(LexError),
-    }
-
-    let mut len = 1;
-    let mut has_dot = false;
-    let mut has_exp = false;
-    while let Some(&ch) = chars.peek() {
-        match ch {
-            '0'...'9' | '_' => {
-                chars.next();
-                len += 1;
-            }
-            '.' => {
-                if has_dot {
-                    break;
-                }
-                chars.next();
-                if chars
-                    .peek()
-                    .map(|&ch| ch == '.' || UnicodeXID::is_xid_start(ch))
-                    .unwrap_or(false)
-                {
-                    return Err(LexError);
-                }
-                len += 1;
-                has_dot = true;
-            }
-            'e' | 'E' => {
-                chars.next();
-                len += 1;
-                has_exp = true;
-                break;
-            }
-            _ => break,
-        }
-    }
-
-    let rest = input.advance(len);
-    if !(has_dot || has_exp || rest.starts_with("f32") || rest.starts_with("f64")) {
-        return Err(LexError);
-    }
-
-    if has_exp {
-        let mut has_exp_value = false;
-        while let Some(&ch) = chars.peek() {
-            match ch {
-                '+' | '-' => {
-                    if has_exp_value {
-                        break;
-                    }
-                    chars.next();
-                    len += 1;
-                }
-                '0'...'9' => {
-                    chars.next();
-                    len += 1;
-                    has_exp_value = true;
-                }
-                '_' => {
-                    chars.next();
-                    len += 1;
-                }
-                _ => break,
-            }
-        }
-        if !has_exp_value {
-            return Err(LexError);
-        }
-    }
-
-    Ok((input.advance(len), ()))
-}
-
-fn int(input: Cursor) -> PResult<()> {
-    let (rest, ()) = digits(input)?;
-    for suffix in &[
-        "isize", "i8", "i16", "i32", "i64", "i128", "usize", "u8", "u16", "u32", "u64", "u128"
-    ] {
-        if rest.starts_with(suffix) {
-            return word_break(rest.advance(suffix.len()));
-        }
-    }
-    word_break(rest)
-}
-
-fn digits(mut input: Cursor) -> PResult<()> {
-    let base = if input.starts_with("0x") {
-        input = input.advance(2);
-        16
-    } else if input.starts_with("0o") {
-        input = input.advance(2);
-        8
-    } else if input.starts_with("0b") {
-        input = input.advance(2);
-        2
-    } else {
-        10
-    };
-
-    let mut len = 0;
-    let mut empty = true;
-    for b in input.bytes() {
-        let digit = match b {
-            b'0'...b'9' => (b - b'0') as u64,
-            b'a'...b'f' => 10 + (b - b'a') as u64,
-            b'A'...b'F' => 10 + (b - b'A') as u64,
-            b'_' => {
-                if empty && base == 10 {
-                    return Err(LexError);
-                }
-                len += 1;
-                continue;
-            }
-            _ => break,
-        };
-        if digit >= base {
-            return Err(LexError);
-        }
-        len += 1;
-        empty = false;
-    }
-    if empty {
-        Err(LexError)
-    } else {
-        Ok((input.advance(len), ()))
-    }
-}
-
-fn op(input: Cursor) -> PResult<Op> {
-    let input = skip_whitespace(input);
-    match op_char(input) {
-        Ok((rest, ch)) => {
-            let kind = match op_char(rest) {
-                Ok(_) => Spacing::Joint,
-                Err(LexError) => Spacing::Alone,
-            };
-            Ok((rest, Op::new(ch, kind)))
-        }
-        Err(LexError) => Err(LexError),
-    }
-}
-
-fn op_char(input: Cursor) -> PResult<char> {
-    let mut chars = input.chars();
-    let first = match chars.next() {
-        Some(ch) => ch,
-        None => {
-            return Err(LexError);
-        }
-    };
-    let recognized = "~!@#$%^&*-=+|;:,<.>/?";
-    if recognized.contains(first) {
-        Ok((input.advance(first.len_utf8()), first))
-    } else {
-        Err(LexError)
-    }
-}
-
-fn doc_comment(input: Cursor) -> PResult<Vec<TokenTree>> {
-    let mut trees = Vec::new();
-    let (rest, ((comment, inner), span)) = spanned(input, doc_comment_contents)?;
-    trees.push(TokenTree::Op(Op::new('#', Spacing::Alone)));
-    if inner {
-        trees.push(Op::new('!', Spacing::Alone).into());
-    }
-    let mut stream = vec![
-        TokenTree::Term(::Term::new("doc", span)),
-        TokenTree::Op(Op::new('=', Spacing::Alone)),
-        TokenTree::Literal(::Literal::string(comment)),
-    ];
-    for tt in stream.iter_mut() {
-        tt.set_span(span);
-    }
-    trees.push(Group::new(Delimiter::Bracket, stream.into_iter().collect()).into());
-    for tt in trees.iter_mut() {
-        tt.set_span(span);
-    }
-    Ok((rest, trees))
-}
-
-named!(doc_comment_contents -> (&str, bool), alt!(
-    do_parse!(
-        punct!("//!") >>
-        s: take_until_newline_or_eof!() >>
-        ((s, true))
-    )
-    |
-    do_parse!(
-        option!(whitespace) >>
-        peek!(tag!("/*!")) >>
-        s: block_comment >>
-        ((s, true))
-    )
-    |
-    do_parse!(
-        punct!("///") >>
-        not!(tag!("/")) >>
-        s: take_until_newline_or_eof!() >>
-        ((s, false))
-    )
-    |
-    do_parse!(
-        option!(whitespace) >>
-        peek!(tuple!(tag!("/**"), not!(tag!("*")))) >>
-        s: block_comment >>
-        ((s, false))
-    )
-));
deleted file mode 100644
--- a/third_party/rust/proc-macro2-0.3.5/src/strnom.rs
+++ /dev/null
@@ -1,391 +0,0 @@
-//! Adapted from [`nom`](https://github.com/Geal/nom).
-
-use std::str::{Bytes, CharIndices, Chars};
-
-use unicode_xid::UnicodeXID;
-
-use imp::LexError;
-
-#[derive(Copy, Clone, Eq, PartialEq)]
-pub struct Cursor<'a> {
-    pub rest: &'a str,
-    #[cfg(procmacro2_semver_exempt)]
-    pub off: u32,
-}
-
-impl<'a> Cursor<'a> {
-    #[cfg(not(procmacro2_semver_exempt))]
-    pub fn advance(&self, amt: usize) -> Cursor<'a> {
-        Cursor {
-            rest: &self.rest[amt..],
-        }
-    }
-    #[cfg(procmacro2_semver_exempt)]
-    pub fn advance(&self, amt: usize) -> Cursor<'a> {
-        Cursor {
-            rest: &self.rest[amt..],
-            off: self.off + (amt as u32),
-        }
-    }
-
-    pub fn find(&self, p: char) -> Option<usize> {
-        self.rest.find(p)
-    }
-
-    pub fn starts_with(&self, s: &str) -> bool {
-        self.rest.starts_with(s)
-    }
-
-    pub fn is_empty(&self) -> bool {
-        self.rest.is_empty()
-    }
-
-    pub fn len(&self) -> usize {
-        self.rest.len()
-    }
-
-    pub fn as_bytes(&self) -> &'a [u8] {
-        self.rest.as_bytes()
-    }
-
-    pub fn bytes(&self) -> Bytes<'a> {
-        self.rest.bytes()
-    }
-
-    pub fn chars(&self) -> Chars<'a> {
-        self.rest.chars()
-    }
-
-    pub fn char_indices(&self) -> CharIndices<'a> {
-        self.rest.char_indices()
-    }
-}
-
-pub type PResult<'a, O> = Result<(Cursor<'a>, O), LexError>;
-
-pub fn whitespace(input: Cursor) -> PResult<()> {
-    if input.is_empty() {
-        return Err(LexError);
-    }
-
-    let bytes = input.as_bytes();
-    let mut i = 0;
-    while i < bytes.len() {
-        let s = input.advance(i);
-        if bytes[i] == b'/' {
-            if s.starts_with("//") && (!s.starts_with("///") || s.starts_with("////"))
-                && !s.starts_with("//!")
-            {
-                if let Some(len) = s.find('\n') {
-                    i += len + 1;
-                    continue;
-                }
-                break;
-            } else if s.starts_with("/**/") {
-                i += 4;
-                continue;
-            } else if s.starts_with("/*") && (!s.starts_with("/**") || s.starts_with("/***"))
-                && !s.starts_with("/*!")
-            {
-                let (_, com) = block_comment(s)?;
-                i += com.len();
-                continue;
-            }
-        }
-        match bytes[i] {
-            b' ' | 0x09...0x0d => {
-                i += 1;
-                continue;
-            }
-            b if b <= 0x7f => {}
-            _ => {
-                let ch = s.chars().next().unwrap();
-                if is_whitespace(ch) {
-                    i += ch.len_utf8();
-                    continue;
-                }
-            }
-        }
-        return if i > 0 { Ok((s, ())) } else { Err(LexError) };
-    }
-    Ok((input.advance(input.len()), ()))
-}
-
-pub fn block_comment(input: Cursor) -> PResult<&str> {
-    if !input.starts_with("/*") {
-        return Err(LexError);
-    }
-
-    let mut depth = 0;
-    let bytes = input.as_bytes();
-    let mut i = 0;
-    let upper = bytes.len() - 1;
-    while i < upper {
-        if bytes[i] == b'/' && bytes[i + 1] == b'*' {
-            depth += 1;
-            i += 1; // eat '*'
-        } else if bytes[i] == b'*' && bytes[i + 1] == b'/' {
-            depth -= 1;
-            if depth == 0 {
-                return Ok((input.advance(i + 2), &input.rest[..i + 2]));
-            }
-            i += 1; // eat '/'
-        }
-        i += 1;
-    }
-    Err(LexError)
-}
-
-pub fn skip_whitespace(input: Cursor) -> Cursor {
-    match whitespace(input) {
-        Ok((rest, _)) => rest,
-        Err(LexError) => input,
-    }
-}
-
-fn is_whitespace(ch: char) -> bool {
-    // Rust treats left-to-right mark and right-to-left mark as whitespace
-    ch.is_whitespace() || ch == '\u{200e}' || ch == '\u{200f}'
-}
-
-pub fn word_break(input: Cursor) -> PResult<()> {
-    match input.chars().next() {
-        Some(ch) if UnicodeXID::is_xid_continue(ch) => Err(LexError),
-        Some(_) | None => Ok((input, ())),
-    }
-}
-
-macro_rules! named {
-    ($name:ident -> $o:ty, $submac:ident!( $($args:tt)* )) => {
-        fn $name<'a>(i: Cursor<'a>) -> $crate::strnom::PResult<'a, $o> {
-            $submac!(i, $($args)*)
-        }
-    };
-}
-
-macro_rules! alt {
-    ($i:expr, $e:ident | $($rest:tt)*) => {
-        alt!($i, call!($e) | $($rest)*)
-    };
-
-    ($i:expr, $subrule:ident!( $($args:tt)*) | $($rest:tt)*) => {
-        match $subrule!($i, $($args)*) {
-            res @ Ok(_) => res,
-            _ => alt!($i, $($rest)*)
-        }
-    };
-
-    ($i:expr, $subrule:ident!( $($args:tt)* ) => { $gen:expr } | $($rest:tt)+) => {
-        match $subrule!($i, $($args)*) {
-            Ok((i, o)) => Ok((i, $gen(o))),
-            Err(LexError) => alt!($i, $($rest)*)
-        }
-    };
-
-    ($i:expr, $e:ident => { $gen:expr } | $($rest:tt)*) => {
-        alt!($i, call!($e) => { $gen } | $($rest)*)
-    };
-
-    ($i:expr, $e:ident => { $gen:expr }) => {
-        alt!($i, call!($e) => { $gen })
-    };
-
-    ($i:expr, $subrule:ident!( $($args:tt)* ) => { $gen:expr }) => {
-        match $subrule!($i, $($args)*) {
-            Ok((i, o)) => Ok((i, $gen(o))),
-            Err(LexError) => Err(LexError),
-        }
-    };
-
-    ($i:expr, $e:ident) => {
-        alt!($i, call!($e))
-    };
-
-    ($i:expr, $subrule:ident!( $($args:tt)*)) => {
-        $subrule!($i, $($args)*)
-    };
-}
-
-macro_rules! do_parse {
-    ($i:expr, ( $($rest:expr),* )) => {
-        Ok(($i, ( $($rest),* )))
-    };
-
-    ($i:expr, $e:ident >> $($rest:tt)*) => {
-        do_parse!($i, call!($e) >> $($rest)*)
-    };
-
-    ($i:expr, $submac:ident!( $($args:tt)* ) >> $($rest:tt)*) => {
-        match $submac!($i, $($args)*) {
-            Err(LexError) => Err(LexError),
-            Ok((i, _)) => do_parse!(i, $($rest)*),
-        }
-    };
-
-    ($i:expr, $field:ident : $e:ident >> $($rest:tt)*) => {
-        do_parse!($i, $field: call!($e) >> $($rest)*)
-    };
-
-    ($i:expr, $field:ident : $submac:ident!( $($args:tt)* ) >> $($rest:tt)*) => {
-        match $submac!($i, $($args)*) {
-            Err(LexError) => Err(LexError),
-            Ok((i, o)) => {
-                let $field = o;
-                do_parse!(i, $($rest)*)
-            },
-        }
-    };
-}
-
-macro_rules! peek {
-    ($i:expr, $submac:ident!( $($args:tt)* )) => {
-        match $submac!($i, $($args)*) {
-            Ok((_, o)) => Ok(($i, o)),
-            Err(LexError) => Err(LexError),
-        }
-    };
-}
-
-macro_rules! call {
-    ($i:expr, $fun:expr $(, $args:expr)*) => {
-        $fun($i $(, $args)*)
-    };
-}
-
-macro_rules! option {
-    ($i:expr, $f:expr) => {
-        match $f($i) {
-            Ok((i, o)) => Ok((i, Some(o))),
-            Err(LexError) => Ok(($i, None)),
-        }
-    };
-}
-
-macro_rules! take_until_newline_or_eof {
-    ($i:expr,) => {{
-        if $i.len() == 0 {
-            Ok(($i, ""))
-        } else {
-            match $i.find('\n') {
-                Some(i) => Ok(($i.advance(i), &$i.rest[..i])),
-                None => Ok(($i.advance($i.len()), &$i.rest[..$i.len()])),
-            }
-        }
-    }};
-}
-
-macro_rules! tuple {
-    ($i:expr, $($rest:tt)*) => {
-        tuple_parser!($i, (), $($rest)*)
-    };
-}
-
-/// Do not use directly. Use `tuple!`.
-macro_rules! tuple_parser {
-    ($i:expr, ($($parsed:tt),*), $e:ident, $($rest:tt)*) => {
-        tuple_parser!($i, ($($parsed),*), call!($e), $($rest)*)
-    };
-
-    ($i:expr, (), $submac:ident!( $($args:tt)* ), $($rest:tt)*) => {
-        match $submac!($i, $($args)*) {
-            Err(LexError) => Err(LexError),
-            Ok((i, o)) => tuple_parser!(i, (o), $($rest)*),
-        }
-    };
-
-    ($i:expr, ($($parsed:tt)*), $submac:ident!( $($args:tt)* ), $($rest:tt)*) => {
-        match $submac!($i, $($args)*) {
-            Err(LexError) => Err(LexError),
-            Ok((i, o)) => tuple_parser!(i, ($($parsed)* , o), $($rest)*),
-        }
-    };
-
-    ($i:expr, ($($parsed:tt),*), $e:ident) => {
-        tuple_parser!($i, ($($parsed),*), call!($e))
-    };
-
-    ($i:expr, (), $submac:ident!( $($args:tt)* )) => {
-        $submac!($i, $($args)*)
-    };
-
-    ($i:expr, ($($parsed:expr),*), $submac:ident!( $($args:tt)* )) => {
-        match $submac!($i, $($args)*) {
-            Err(LexError) => Err(LexError),
-            Ok((i, o)) => Ok((i, ($($parsed),*, o)))
-        }
-    };
-
-    ($i:expr, ($($parsed:expr),*)) => {
-        Ok(($i, ($($parsed),*)))
-    };
-}
-
-macro_rules! not {
-    ($i:expr, $submac:ident!( $($args:tt)* )) => {
-        match $submac!($i, $($args)*) {
-            Ok((_, _)) => Err(LexError),
-            Err(LexError) => Ok(($i, ())),
-        }
-    };
-}
-
-macro_rules! tag {
-    ($i:expr, $tag:expr) => {
-        if $i.starts_with($tag) {
-            Ok(($i.advance($tag.len()), &$i.rest[..$tag.len()]))
-        } else {
-            Err(LexError)
-        }
-    };
-}
-
-macro_rules! punct {
-    ($i:expr, $punct:expr) => {
-        $crate::strnom::punct($i, $punct)
-    };
-}
-
-/// Do not use directly. Use `punct!`.
-pub fn punct<'a>(input: Cursor<'a>, token: &'static str) -> PResult<'a, &'a str> {
-    let input = skip_whitespace(input);
-    if input.starts_with(token) {
-        Ok((input.advance(token.len()), token))
-    } else {
-        Err(LexError)
-    }
-}
-
-macro_rules! preceded {
-    ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => {
-        match tuple!($i, $submac!($($args)*), $submac2!($($args2)*)) {
-            Ok((remaining, (_, o))) => Ok((remaining, o)),
-            Err(LexError) => Err(LexError),
-        }
-    };
-
-    ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => {
-        preceded!($i, $submac!($($args)*), call!($g))
-    };
-}
-
-macro_rules! delimited {
-    ($i:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)+) => {
-        match tuple_parser!($i, (), $submac!($($args)*), $($rest)*) {
-            Err(LexError) => Err(LexError),
-            Ok((i1, (_, o, _))) => Ok((i1, o))
-        }
-    };
-}
-
-macro_rules! map {
-    ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => {
-        match $submac!($i, $($args)*) {
-            Err(LexError) => Err(LexError),
-            Ok((i, o)) => Ok((i, call!(o, $g)))
-        }
-    };
-
-    ($i:expr, $f:expr, $g:expr) => {
-        map!($i, call!($f), $g)
-    };
-}
deleted file mode 100644
--- a/third_party/rust/proc-macro2-0.3.5/src/unstable.rs
+++ /dev/null
@@ -1,399 +0,0 @@
-#![cfg_attr(not(procmacro2_semver_exempt), allow(dead_code))]
-
-use std::fmt;
-use std::iter;
-use std::str::FromStr;
-
-use proc_macro;
-
-use {Delimiter, Group, Op, Spacing, TokenTree};
-
-#[derive(Clone)]
-pub struct TokenStream(proc_macro::TokenStream);
-
-pub struct LexError(proc_macro::LexError);
-
-impl TokenStream {
-    pub fn empty() -> TokenStream {
-        TokenStream(proc_macro::TokenStream::empty())
-    }
-
-    pub fn is_empty(&self) -> bool {
-        self.0.is_empty()
-    }
-}
-
-impl FromStr for TokenStream {
-    type Err = LexError;
-
-    fn from_str(src: &str) -> Result<TokenStream, LexError> {
-        Ok(TokenStream(src.parse().map_err(LexError)?))
-    }
-}
-
-impl fmt::Display for TokenStream {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        self.0.fmt(f)
-    }
-}
-
-impl From<proc_macro::TokenStream> for TokenStream {
-    fn from(inner: proc_macro::TokenStream) -> TokenStream {
-        TokenStream(inner)
-    }
-}
-
-impl From<TokenStream> for proc_macro::TokenStream {
-    fn from(inner: TokenStream) -> proc_macro::TokenStream {
-        inner.0
-    }
-}
-
-impl From<TokenTree> for TokenStream {
-    fn from(token: TokenTree) -> TokenStream {
-        let tt: proc_macro::TokenTree = match token {
-            TokenTree::Group(tt) => {
-                let delim = match tt.delimiter() {
-                    Delimiter::Parenthesis => proc_macro::Delimiter::Parenthesis,
-                    Delimiter::Bracket => proc_macro::Delimiter::Bracket,
-                    Delimiter::Brace => proc_macro::Delimiter::Brace,
-                    Delimiter::None => proc_macro::Delimiter::None,
-                };
-                let span = tt.span();
-                let mut group = proc_macro::Group::new(delim, tt.stream.inner.0);
-                group.set_span(span.inner.0);
-                group.into()
-            }
-            TokenTree::Op(tt) => {
-                let spacing = match tt.spacing() {
-                    Spacing::Joint => proc_macro::Spacing::Joint,
-                    Spacing::Alone => proc_macro::Spacing::Alone,
-                };
-                let mut op = proc_macro::Op::new(tt.op(), spacing);
-                op.set_span(tt.span().inner.0);
-                op.into()
-            }
-            TokenTree::Term(tt) => tt.inner.term.into(),
-            TokenTree::Literal(tt) => tt.inner.lit.into(),
-        };
-        TokenStream(tt.into())
-    }
-}
-
-impl iter::FromIterator<TokenTree> for TokenStream {
-    fn from_iter<I: IntoIterator<Item = TokenTree>>(streams: I) -> Self {
-        let streams = streams.into_iter().map(TokenStream::from)
-            .flat_map(|t| t.0);
-        TokenStream(streams.collect::<proc_macro::TokenStream>())
-    }
-}
-
-impl fmt::Debug for TokenStream {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        self.0.fmt(f)
-    }
-}
-
-impl fmt::Debug for LexError {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        self.0.fmt(f)
-    }
-}
-
-pub struct TokenTreeIter(proc_macro::token_stream::IntoIter);
-
-impl IntoIterator for TokenStream {
-    type Item = TokenTree;
-    type IntoIter = TokenTreeIter;
-
-    fn into_iter(self) -> TokenTreeIter {
-        TokenTreeIter(self.0.into_iter())
-    }