No bug - Revendor rust dependencies
author Servo VCS Sync <servo-vcs-sync@mozilla.com>
Sat, 02 Sep 2017 00:17:56 +0000
changeset 427840 8cb43bf606f804d25a8c146ef77d94511a47de11
parent 427839 18d453c2736cc29e3e52acdd3c262292db07b4d3
child 427841 101965d9cfc3cee585a44a0b3ea3b2e2653630c9
push id 7761
push user jlund@mozilla.com
push date Fri, 15 Sep 2017 00:19:52 +0000
treeherder mozilla-beta@c38455951db4
milestone 57.0a1
No bug - Revendor rust dependencies
third_party/rust/cssparser/.cargo-checksum.json
third_party/rust/cssparser/Cargo.toml
third_party/rust/cssparser/src/color.rs
third_party/rust/cssparser/src/lib.rs
third_party/rust/cssparser/src/parser.rs
third_party/rust/cssparser/src/rules_and_declarations.rs
third_party/rust/cssparser/src/serializer.rs
third_party/rust/cssparser/src/tests.rs
third_party/rust/cssparser/src/tokenizer.rs
toolkit/library/gtest/rust/Cargo.lock
toolkit/library/rust/Cargo.lock
--- a/third_party/rust/cssparser/.cargo-checksum.json
+++ b/third_party/rust/cssparser/.cargo-checksum.json
@@ -1,1 +1,1 @@
-{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".travis.yml":"f1fb4b65964c81bc1240544267ea334f554ca38ae7a74d57066f4d47d2b5d568","Cargo.toml":"a52213c38e6ff8fcbf4c2f632c6d78521a9a8b9cfcfdfa34339544649d486076","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","README.md":"c5781e673335f37ed3d7acb119f8ed33efdf6eb75a7094b7da2abe0c3230adb8","build.rs":"950bcc47a196f07f99f59637c28cc65e02a885130011f90a2b2608248b4724a2","build/match_byte.rs":"89e8b941af74df2c204abf808672d3ff278bdec75abc918c41a843260b924677","docs/.nojekyll":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","docs/404.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","docs/index.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","src/color.rs":"b847b80097015cb7d0f4be67c0d8b2f6b82006be865917ff14a96b484760d460","src/cow_rc_str.rs":"541216f8ef74ee3cc5cbbc1347e5f32ed66588c401851c9a7d68b867aede1de0","src/from_bytes.rs":"331fe63af2123ae3675b61928a69461b5ac77799fff3ce9978c55cf2c558f4ff","src/lib.rs":"77c0852be9ba7682f4e325a09ebac03ce25aafec30142eb10937b77651a29d67","src/macros.rs":"adb9773c157890381556ea83d7942dcc676f99eea71abbb6afeffee1e3f28960","src/nth.rs":"246fa83a3ab97a7bb617c97a976af77136652ce77ba8ccca22e144b213b61310","src/parser.rs":"3a315b7600e80b577c5d04f215038c55ae1c9e5a2c70c6587850cd7fc1be6ae4","src/rules_and_declarations.rs":"44e47663aaa8a5ff167393b91337e377e5a4fcbef64b227028780b6d22879f69","src/serializer.rs":"843c9d01de00523851a4c40f791c64e3b00325426cb38f897e4a2ddb4cfa6de8","src/size_of_tests.rs":"a28664d44797519119d659eaf7e84e1789ef97e9e2c2d36630eb9f226c0cc0a6","src/tests.rs":"c07f5d8464217b1650f7ee8911b90ef67947876305be215d1e666a20a793dbfb","src/tokenizer.rs":"63640e6a2d875e8afda9dea6034b8c57db9b5877c3c491a97fee1c6ec223b75d","src/unicode_range.rs":"fbbd0f4b393944699730a6b0f945b2b2376fcea61fce2ea37190fb287793021a"},"package":"dc476dc0960774aa1cabfd0044de7d4585a8f2f8a3ef72e6d9d1e16c1e2492b1"}
\ No newline at end of file
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".travis.yml":"f1fb4b65964c81bc1240544267ea334f554ca38ae7a74d57066f4d47d2b5d568","Cargo.toml":"b15b69a36fd6c23052045990fcfe68e8cad505d4d92d568a52eac041982699ee","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","README.md":"c5781e673335f37ed3d7acb119f8ed33efdf6eb75a7094b7da2abe0c3230adb8","build.rs":"950bcc47a196f07f99f59637c28cc65e02a885130011f90a2b2608248b4724a2","build/match_byte.rs":"89e8b941af74df2c204abf808672d3ff278bdec75abc918c41a843260b924677","docs/.nojekyll":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","docs/404.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","docs/index.html":"025861f76f8d1f6d67c20ab624c6e418f4f824385e2dd8ad8732c4ea563c6a2e","src/color.rs":"422a2e934b06a2cca7beef7afeab42bdca81a73eb27afcbdb3d2a98db892590b","src/cow_rc_str.rs":"541216f8ef74ee3cc5cbbc1347e5f32ed66588c401851c9a7d68b867aede1de0","src/from_bytes.rs":"331fe63af2123ae3675b61928a69461b5ac77799fff3ce9978c55cf2c558f4ff","src/lib.rs":"37aec41c81021cd4cc8f34491ee75de2e8340feada2d0096b107597fc4ac485d","src/macros.rs":"adb9773c157890381556ea83d7942dcc676f99eea71abbb6afeffee1e3f28960","src/nth.rs":"246fa83a3ab97a7bb617c97a976af77136652ce77ba8ccca22e144b213b61310","src/parser.rs":"9f147bc14e25fd4789e390ad0c9d2270188002a3a4785c150f39ef278b291259","src/rules_and_declarations.rs":"962f59aab8030b0d1202859ff841ed6254ce4bd4159eee5e915ccdf4b802f4d5","src/serializer.rs":"9e0c821b1ee6d35ca0632f7f01209f7174eef053b69a5c25b7145e1e8e667efe","src/size_of_tests.rs":"a28664d44797519119d659eaf7e84e1789ef97e9e2c2d36630eb9f226c0cc0a6","src/tests.rs":"ff00ab136330a5798d2b28375069f03f6019cdb1c8b38b407d24120e106a9f1e","src/tokenizer.rs":"fb2e3036f9a20969a1feaf2da293de224efb092f7abb4c01e7d5aaf981c29826","src/unicode_range.rs":"fbbd0f4b393944699730a6b0f945b2b2376fcea61fce2ea37190fb287793021a"},"package":"2334576d63647dd96a6238cc3fb1d51b2aae3eb98872de157ae35c0b2e358fd2"}
\ No newline at end of file
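The `.cargo-checksum.json` manifest maps each vendored file to its SHA-256 digest (the recurring `e3b0c442…` entry is the well-known digest of an empty file). As a minimal sketch of how such a manifest can be checked, assuming the `sha2`, `hex`, and `serde_json` crates (none of which are part of this change):

```rust
use std::fs;

use serde_json::Value;      // assumed dependency, not part of this change
use sha2::{Digest, Sha256}; // assumed dependency, not part of this change

/// Return the paths under `crate_dir` whose contents no longer match the
/// SHA-256 digests recorded in .cargo-checksum.json.
fn stale_files(crate_dir: &str) -> std::io::Result<Vec<String>> {
    let manifest = fs::read_to_string(format!("{}/.cargo-checksum.json", crate_dir))?;
    let json: Value = serde_json::from_str(&manifest).expect("well-formed manifest");
    let mut stale = Vec::new();
    for (path, expected) in json["files"].as_object().expect("`files` map") {
        let bytes = fs::read(format!("{}/{}", crate_dir, path))?;
        let digest = hex::encode(Sha256::digest(&bytes));
        if expected.as_str() != Some(digest.as_str()) {
            stale.push(path.clone());
        }
    }
    Ok(stale)
}
```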
--- a/third_party/rust/cssparser/Cargo.toml
+++ b/third_party/rust/cssparser/Cargo.toml
@@ -7,59 +7,62 @@
 #
 # If you believe there's an error in this file please file an
 # issue against the rust-lang/cargo repository. If you're
 # editing this file be aware that the upstream Cargo.toml
 # will likely look very different (and much more reasonable)
 
 [package]
 name = "cssparser"
-version = "0.19.5"
+version = "0.20.0"
 authors = ["Simon Sapin <simon.sapin@exyr.org>"]
 build = "build.rs"
 exclude = ["src/css-parsing-tests/**", "src/big-data-url.css"]
 description = "Rust implementation of CSS Syntax Level 3"
 documentation = "https://docs.rs/cssparser/"
 readme = "README.md"
 keywords = ["css", "syntax", "parser"]
 license = "MPL-2.0"
 repository = "https://github.com/servo/rust-cssparser"
-[dependencies.cssparser-macros]
-version = "0.3"
-
-[dependencies.procedural-masquerade]
-version = "0.1"
-
-[dependencies.matches]
-version = "0.1"
-
 [dependencies.dtoa-short]
 version = "0.3"
 
 [dependencies.smallvec]
 version = "0.4.3"
 
 [dependencies.phf]
 version = "0.7"
 
 [dependencies.serde]
 version = "1.0"
 optional = true
 
 [dependencies.heapsize]
 version = ">= 0.3, < 0.5"
 optional = true
-[dev-dependencies.difference]
-version = "1.0"
+
+[dependencies.procedural-masquerade]
+version = "0.1"
+
+[dependencies.itoa]
+version = "0.3"
+
+[dependencies.matches]
+version = "0.1"
+
+[dependencies.cssparser-macros]
+version = "0.3"
+[dev-dependencies.rustc-serialize]
+version = "0.3"
 
 [dev-dependencies.encoding_rs]
-version = "0.5"
+version = "0.7"
 
-[dev-dependencies.rustc-serialize]
-version = "0.3"
+[dev-dependencies.difference]
+version = "1.0"
 [build-dependencies.syn]
 version = "0.11"
 
 [build-dependencies.quote]
 version = "0.3"
 
 [features]
 dummy_match_byte = []
--- a/third_party/rust/cssparser/src/color.rs
+++ b/third_party/rust/cssparser/src/color.rs
@@ -95,28 +95,36 @@ impl<'de> Deserialize<'de> for RGBA {
 
 #[cfg(feature = "heapsize")]
 known_heap_size!(0, RGBA);
 
 impl ToCss for RGBA {
     fn to_css<W>(&self, dest: &mut W) -> fmt::Result
         where W: fmt::Write,
     {
-        // Try first with two decimal places, then with three.
-        let mut rounded_alpha = (self.alpha_f32() * 100.).round() / 100.;
-        if clamp_unit_f32(rounded_alpha) != self.alpha {
-            rounded_alpha = (self.alpha_f32() * 1000.).round() / 1000.;
-        }
+        let serialize_alpha = self.alpha != 255;
 
-        if self.alpha == 255 {
-            write!(dest, "rgb({}, {}, {})", self.red, self.green, self.blue)
-        } else {
-            write!(dest, "rgba({}, {}, {}, {})",
-                   self.red, self.green, self.blue, rounded_alpha)
+        dest.write_str(if serialize_alpha { "rgba(" } else { "rgb(" })?;
+        self.red.to_css(dest)?;
+        dest.write_str(", ")?;
+        self.green.to_css(dest)?;
+        dest.write_str(", ")?;
+        self.blue.to_css(dest)?;
+        if serialize_alpha {
+            dest.write_str(", ")?;
+
+            // Try first with two decimal places, then with three.
+            let mut rounded_alpha = (self.alpha_f32() * 100.).round() / 100.;
+            if clamp_unit_f32(rounded_alpha) != self.alpha {
+                rounded_alpha = (self.alpha_f32() * 1000.).round() / 1000.;
+            }
+
+            rounded_alpha.to_css(dest)?;
         }
+        dest.write_char(')')
     }
 }
 
 /// A <color> value.
 #[derive(Clone, Copy, PartialEq, Debug)]
 pub enum Color {
     /// The 'currentcolor' keyword
     CurrentColor,
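The rewritten `RGBA::to_css` streams each channel through `ToCss` (picking up the faster integer path added in serializer.rs below) and only computes a rounded alpha when one will actually be serialized. A standalone sketch of the rounding rule, assuming the alpha channel is stored as a `u8` with 255 meaning fully opaque:

```rust
/// Sketch of cssparser's clamp_unit_f32: map a unit-interval float
/// back to the stored byte value.
fn clamp_unit_f32(v: f32) -> u8 {
    (v * 255.).round().max(0.).min(255.) as u8
}

/// Round alpha to two decimal places, falling back to three when two
/// would not round-trip to the same stored byte.
fn serialized_alpha(alpha: u8) -> f32 {
    let alpha_f32 = alpha as f32 / 255.;
    let rounded = (alpha_f32 * 100.).round() / 100.;
    if clamp_unit_f32(rounded) != alpha {
        (alpha_f32 * 1000.).round() / 1000.
    } else {
        rounded
    }
}

fn main() {
    assert_eq!(serialized_alpha(128), 0.5);  // two decimals round-trip
    assert_eq!(serialized_alpha(1), 0.004);  // needs three decimals
}
```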
--- a/third_party/rust/cssparser/src/lib.rs
+++ b/third_party/rust/cssparser/src/lib.rs
@@ -64,16 +64,17 @@ fn parse_border_spacing(_context: &Parse
 }
 ```
 
 */
 
 #![recursion_limit="200"]  // For color::parse_color_keyword
 
 extern crate dtoa_short;
+extern crate itoa;
 #[macro_use] extern crate cssparser_macros;
 #[macro_use] extern crate matches;
 #[macro_use] extern crate procedural_masquerade;
 #[doc(hidden)] pub extern crate phf as _internal__phf;
 #[cfg(test)] extern crate encoding_rs;
 #[cfg(test)] extern crate difference;
 #[cfg(test)] extern crate rustc_serialize;
 #[cfg(feature = "serde")] extern crate serde;
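The only change here is the new `extern crate itoa;`, which the serializer below uses to format integers without going through `core::fmt`. A minimal sketch of the `itoa` 0.3 entry point the serializer relies on:

```rust
// itoa::write accepts any io::Write plus an integer and emits its
// decimal representation; a Vec<u8> works as a simple sink.
extern crate itoa;

fn main() {
    let mut buf = Vec::new();
    itoa::write(&mut buf, -42i16).unwrap();
    assert_eq!(&buf[..], &b"-42"[..]);
}
```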
--- a/third_party/rust/cssparser/src/parser.rs
+++ b/third_party/rust/cssparser/src/parser.rs
@@ -52,16 +52,17 @@ pub enum BasicParseError<'a> {
     AtRuleInvalid(CowRcStr<'a>),
     /// The body of an '@' rule was invalid.
     AtRuleBodyInvalid,
     /// A qualified rule was encountered that was invalid.
     QualifiedRuleInvalid,
 }
 
 impl<'a, T> From<BasicParseError<'a>> for ParseError<'a, T> {
+    #[inline]
     fn from(this: BasicParseError<'a>) -> ParseError<'a, T> {
         ParseError::Basic(this)
     }
 }
 
 /// Extensible parse errors that can be encountered by client parsing implementations.
 #[derive(Clone, Debug, PartialEq)]
 pub enum ParseError<'a, T: 'a> {
@@ -195,26 +196,29 @@ mod ClosingDelimiter {
     pub const CloseCurlyBracket: Delimiters = Delimiters { bits: 1 << 5 };
     pub const CloseSquareBracket: Delimiters = Delimiters { bits: 1 << 6 };
     pub const CloseParenthesis: Delimiters = Delimiters { bits: 1 << 7 };
 }
 
 impl BitOr<Delimiters> for Delimiters {
     type Output = Delimiters;
 
+    #[inline]
     fn bitor(self, other: Delimiters) -> Delimiters {
         Delimiters { bits: self.bits | other.bits }
     }
 }
 
 impl Delimiters {
+    #[inline]
     fn contains(self, other: Delimiters) -> bool {
         (self.bits & other.bits) != 0
     }
 
+    #[inline]
     fn from_byte(byte: Option<u8>) -> Delimiters {
         match byte {
             Some(b';') => Delimiter::Semicolon,
             Some(b'!') => Delimiter::Bang,
             Some(b',') => Delimiter::Comma,
             Some(b'{') => Delimiter::CurlyBracketBlock,
             Some(b'}') => ClosingDelimiter::CloseCurlyBracket,
             Some(b']') => ClosingDelimiter::CloseSquareBracket,
@@ -344,30 +348,16 @@ impl<'i: 't, 't> Parser<'i, 't> {
 
     /// Return whether a `var()` function has been seen by the tokenizer since
     /// `look_for_var_functions` was called, and stop looking.
     #[inline]
     pub fn seen_var_functions(&mut self) -> bool {
         self.input.tokenizer.seen_var_functions()
     }
 
-    /// Start looking for viewport percentage lengths. (See the `seen_viewport_percentages`
-    /// method.)
-    #[inline]
-    pub fn look_for_viewport_percentages(&mut self) {
-        self.input.tokenizer.look_for_viewport_percentages()
-    }
-
-    /// Return whether a `vh`, `vw`, `vmin`, or `vmax` dimension has been seen by the tokenizer
-    /// since `look_for_viewport_percentages` was called, and stop looking.
-    #[inline]
-    pub fn seen_viewport_percentages(&mut self) -> bool {
-        self.input.tokenizer.seen_viewport_percentages()
-    }
-
     /// Execute the given closure, passing it the parser.
     /// If the result (returned unchanged) is `Err`,
     /// the internal state of the parser (including position within the input)
     /// is restored to what it was before the call.
     #[inline]
     pub fn try<F, T, E>(&mut self, thing: F) -> Result<T, E>
     where F: FnOnce(&mut Parser<'i, 't>) -> Result<T, E> {
         let start = self.state();
@@ -436,17 +426,16 @@ impl<'i: 't, 't> Parser<'i, 't> {
 
         let token_start_position = self.input.tokenizer.position();
         let token;
         match self.input.cached_token {
             Some(ref cached_token)
             if cached_token.start_position == token_start_position => {
                 self.input.tokenizer.reset(&cached_token.end_state);
                 match cached_token.token {
-                    Token::Dimension { ref unit, .. } => self.input.tokenizer.see_dimension(unit),
                     Token::Function(ref name) => self.input.tokenizer.see_function(name),
                     _ => {}
                 }
                 token = &cached_token.token
             }
             _ => {
                 let new_token = self.input.tokenizer.next().map_err(|()| BasicParseError::EndOfInput)?;
                 self.input.cached_token = Some(CachedToken {
@@ -817,16 +806,17 @@ pub fn parse_until_after<'i: 't, 't, F, 
                                               delimiters: Delimiters,
                                               parse: F)
                                               -> Result <T, ParseError<'i, E>>
     where F: for<'tt> FnOnce(&mut Parser<'i, 'tt>) -> Result<T, ParseError<'i, E>> {
     let result = parser.parse_until_before(delimiters, parse);
     let next_byte = (parser.input.tokenizer).next_byte();
     if next_byte.is_some() && !parser.stop_before.contains(Delimiters::from_byte(next_byte)) {
         debug_assert!(delimiters.contains(Delimiters::from_byte(next_byte)));
+        // We know this byte is ASCII.
         (parser.input.tokenizer).advance(1);
         if next_byte == Some(b'{') {
             consume_until_end_of_block(BlockType::CurlyBracket, &mut parser.input.tokenizer);
         }
     }
     result
 }
 
@@ -855,16 +845,18 @@ pub fn parse_nested_block<'i: 't, 't, F,
         if let Some(block_type) = nested_parser.at_start_of {
             consume_until_end_of_block(block_type, &mut nested_parser.input.tokenizer);
         }
     }
     consume_until_end_of_block(block_type, &mut parser.input.tokenizer);
     result
 }
 
+#[inline(never)]
+#[cold]
 fn consume_until_end_of_block(block_type: BlockType, tokenizer: &mut Tokenizer) {
     let mut stack = SmallVec::<[BlockType; 16]>::new();
     stack.push(block_type);
 
     // FIXME: have a special-purpose tokenizer method for this that does less work.
     while let Ok(ref token) = tokenizer.next() {
         if let Some(b) = BlockType::closing(token) {
             if *stack.last().unwrap() == b {
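Beyond dropping the viewport-percentage tracking, these hunks are inlining tweaks: small delimiter helpers gain `#[inline]`, while the block-recovery loop is marked `#[inline(never)]` and `#[cold]` so it stays out of the hot parsing path. A minimal sketch of that attribute pattern (the function names here are illustrative, not from the patch):

```rust
#[inline]
fn parse_fast(input: &str) -> Result<u32, ()> {
    input.parse::<u32>().map_err(|_| ())
}

// Rare recovery path: kept out of line so the common path stays small
// and the branch is hinted as unlikely.
#[cold]
#[inline(never)]
fn recover_slow(input: &str) -> u32 {
    input.chars().filter(|c| c.is_ascii_digit()).count() as u32
}

fn parse_or_recover(input: &str) -> u32 {
    parse_fast(input).unwrap_or_else(|()| recover_slow(input))
}

fn main() {
    assert_eq!(parse_or_recover("42"), 42);
    assert_eq!(parse_or_recover("4x2"), 2);
}
```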
--- a/third_party/rust/cssparser/src/rules_and_declarations.rs
+++ b/third_party/rust/cssparser/src/rules_and_declarations.rs
@@ -18,34 +18,27 @@ pub fn parse_important<'i, 't>(input: &m
     input.expect_delim('!')?;
     input.expect_ident_matching("important")
 }
 
 
 /// The return value for `AtRuleParser::parse_prelude`.
 /// Indicates whether the at-rule is expected to have a `{ /* ... */ }` block
 /// or end with a `;` semicolon.
-pub enum AtRuleType<P, R> {
+pub enum AtRuleType<P, PB> {
     /// The at-rule is expected to end with a `;` semicolon. Example: `@import`.
     ///
-    /// The value is the finished representation of an at-rule
-    /// as returned by `RuleListParser::next` or `DeclarationListParser::next`.
-    WithoutBlock(R),
+    /// The value is the representation of all data of the rule, which is
+    /// then handled in `rule_without_block`.
+    WithoutBlock(P),
 
     /// The at-rule is expected to have a `{ /* ... */ }` block. Example: `@media`
     ///
     /// The value is the representation of the "prelude" part of the rule.
-    WithBlock(P),
-
-    /// The at-rule may either have a block or end with a semicolon.
-    ///
-    /// This is mostly for testing. As of this writing no real CSS at-rule behaves like this.
-    ///
-    /// The value is the representation of the "prelude" part of the rule.
-    OptionalBlock(P),
+    WithBlock(PB),
 }
 
 /// A trait to provide various parsing of declaration values.
 ///
 /// For example, there could be different implementations for property declarations in style rules
 /// and for descriptors in `@font-face` rules.
 pub trait DeclarationParser<'i> {
     /// The finished representation of a declaration.
@@ -80,18 +73,21 @@ pub trait DeclarationParser<'i> {
 /// For example, there could be different implementations for top-level at-rules
 /// (`@media`, `@font-face`, …)
 /// and for page-margin rules inside `@page`.
 ///
 /// Default implementations that reject all at-rules are provided,
 /// so that `impl AtRuleParser<(), ()> for ... {}` can be used
     /// for using `DeclarationListParser` to parse a declarations list with only qualified rules.
 pub trait AtRuleParser<'i> {
-    /// The intermediate representation of an at-rule prelude.
-    type Prelude;
+    /// The intermediate representation of the prelude of an at-rule without a block.
+    type PreludeNoBlock;
+
+    /// The intermediate representation of the prelude of an at-rule with a block.
+    type PreludeBlock;
 
     /// The finished representation of an at-rule.
     type AtRule;
 
     /// The error type that is included in the ParseError value that can be returned.
     type Error: 'i;
 
     /// Parse the prelude of an at-rule with the given `name`.
@@ -107,46 +103,49 @@ pub trait AtRuleParser<'i> {
     /// At-rule name matching should be case-insensitive in the ASCII range.
     /// This can be done with `std::ascii::Ascii::eq_ignore_ascii_case`,
     /// or with the `match_ignore_ascii_case!` macro.
     ///
     /// The given `input` is a "delimited" parser
     /// that ends wherever the prelude should end.
     /// (Before the next semicolon, the next `{`, or the end of the current block.)
     fn parse_prelude<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>)
-                     -> Result<AtRuleType<Self::Prelude, Self::AtRule>, ParseError<'i, Self::Error>> {
+                     -> Result<AtRuleType<Self::PreludeNoBlock, Self::PreludeBlock>,
+                               ParseError<'i, Self::Error>> {
         let _ = name;
         let _ = input;
         Err(ParseError::Basic(BasicParseError::AtRuleInvalid(name)))
     }
 
+    /// End an at-rule which doesn't have a block. Return the finished
+    /// representation of the at-rule.
+    ///
+    /// This is only called when `parse_prelude` returned `WithoutBlock`, and
+    /// either the `;` semicolon indeed follows the prelude, or the parser is at
+    /// the end of the input.
+    fn rule_without_block(&mut self, prelude: Self::PreludeNoBlock) -> Self::AtRule {
+        let _ = prelude;
+        panic!("The `AtRuleParser::rule_without_block` method must be overridden \
+                if `AtRuleParser::parse_prelude` ever returns `AtRuleType::WithoutBlock`.")
+    }
+
     /// Parse the content of a `{ /* ... */ }` block for the body of the at-rule.
     ///
     /// Return the finished representation of the at-rule
     /// as returned by `RuleListParser::next` or `DeclarationListParser::next`,
     /// or `Err(())` to ignore the entire at-rule as invalid.
     ///
-    /// This is only called when `parse_prelude` returned `WithBlock` or `OptionalBlock`,
-    /// and a block was indeed found following the prelude.
-    fn parse_block<'t>(&mut self, prelude: Self::Prelude, input: &mut Parser<'i, 't>)
+    /// This is only called when `parse_prelude` returned `WithBlock`, and a block
+    /// was indeed found following the prelude.
+    fn parse_block<'t>(&mut self, prelude: Self::PreludeBlock, input: &mut Parser<'i, 't>)
                        -> Result<Self::AtRule, ParseError<'i, Self::Error>> {
         let _ = prelude;
         let _ = input;
         Err(ParseError::Basic(BasicParseError::AtRuleBodyInvalid))
     }
-
-    /// An `OptionalBlock` prelude was followed by `;`.
-    ///
-    /// Convert the prelude into the finished representation of the at-rule
-    /// as returned by `RuleListParser::next` or `DeclarationListParser::next`.
-    fn rule_without_block(&mut self, prelude: Self::Prelude) -> Self::AtRule {
-        let _ = prelude;
-        panic!("The `AtRuleParser::rule_without_block` method must be overriden \
-                if `AtRuleParser::parse_prelude` ever returns `AtRuleType::OptionalBlock`.")
-    }
 }
 
 /// A trait to provide various parsing of qualified rules.
 ///
 /// For example, there could be different implementations
 /// for top-level qualified rules (i.e. style rules with Selectors as prelude)
 /// and for qualified rules inside `@keyframes` (keyframe rules with keyframe selectors as prelude).
 ///
@@ -455,19 +454,19 @@ fn parse_at_rule<'i: 't, 't, P, E>(start
                                    -> Result<<P as AtRuleParser<'i>>::AtRule, PreciseParseError<'i, E>>
                                    where P: AtRuleParser<'i, Error = E> {
     let delimiters = Delimiter::Semicolon | Delimiter::CurlyBracketBlock;
     // FIXME: https://github.com/rust-lang/rust/issues/42508
     let result = parse_until_before::<'i, 't, _, _, _>(input, delimiters, |input| {
         parser.parse_prelude(name, input)
     });
     match result {
-        Ok(AtRuleType::WithoutBlock(rule)) => {
+        Ok(AtRuleType::WithoutBlock(prelude)) => {
             match input.next() {
-                Ok(&Token::Semicolon) | Err(_) => Ok(rule),
+                Ok(&Token::Semicolon) | Err(_) => Ok(parser.rule_without_block(prelude)),
                 Ok(&Token::CurlyBracketBlock) => Err(PreciseParseError {
                     error: ParseError::Basic(BasicParseError::UnexpectedToken(Token::CurlyBracketBlock)),
                     slice: input.slice_from(start.position()),
                     location: start.source_location(),
                 }),
                 Ok(_) => unreachable!()
             }
         }
@@ -490,31 +489,16 @@ fn parse_at_rule<'i: 't, 't, P, E>(start
                 Err(e) => Err(PreciseParseError {
                     error: ParseError::Basic(e),
                     slice: input.slice_from(start.position()),
                     location: start.source_location(),
                 }),
                 Ok(_) => unreachable!()
             }
         }
-        Ok(AtRuleType::OptionalBlock(prelude)) => {
-            match input.next() {
-                Ok(&Token::Semicolon) | Err(_) => Ok(parser.rule_without_block(prelude)),
-                Ok(&Token::CurlyBracketBlock) => {
-                    // FIXME: https://github.com/rust-lang/rust/issues/42508
-                    parse_nested_block::<'i, 't, _, _, _>(input, move |input| parser.parse_block(prelude, input))
-                        .map_err(|e| PreciseParseError {
-                            error: e,
-                            slice: input.slice_from(start.position()),
-                            location: start.source_location(),
-                        })
-                }
-                _ => unreachable!()
-            }
-        }
         Err(error) => {
             let end_position = input.position();
             match input.next() {
                 Ok(&Token::CurlyBracketBlock) | Ok(&Token::Semicolon) | Err(_) => {},
                 _ => unreachable!()
             };
             Err(PreciseParseError {
                 error: error,
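Taken together, `parse_at_rule` now funnels `WithoutBlock` preludes through `rule_without_block` instead of expecting a finished rule from `parse_prelude`, and the `OptionalBlock` variant is gone. A minimal sketch of a client under the new trait shape; the prelude and rule types (`ImportPrelude`, `MediaQueries`, `MyRule`) are hypothetical, not part of the patch:

```rust
use cssparser::{AtRuleParser, AtRuleType, BasicParseError, CowRcStr, ParseError, Parser};

// Hypothetical client types, for illustration only.
struct ImportPrelude;
struct MediaQueries;
enum MyRule { Import(ImportPrelude), Media(MediaQueries) }

struct MyAtRuleParser;

impl<'i> AtRuleParser<'i> for MyAtRuleParser {
    // The single `Prelude` associated type is now split in two:
    type PreludeNoBlock = ImportPrelude; // for `@import …;`
    type PreludeBlock = MediaQueries;    // for `@media … { … }`
    type AtRule = MyRule;
    type Error = ();

    fn parse_prelude<'t>(&mut self, name: CowRcStr<'i>, _input: &mut Parser<'i, 't>)
        -> Result<AtRuleType<ImportPrelude, MediaQueries>, ParseError<'i, ()>>
    {
        match &*name.to_ascii_lowercase() {
            "import" => Ok(AtRuleType::WithoutBlock(ImportPrelude)),
            "media" => Ok(AtRuleType::WithBlock(MediaQueries)),
            _ => Err(BasicParseError::AtRuleInvalid(name).into()),
        }
    }

    // Mandatory once WithoutBlock is ever returned: build the finished
    // rule when the trailing `;` (or EOF) is reached.
    fn rule_without_block(&mut self, prelude: ImportPrelude) -> MyRule {
        MyRule::Import(prelude)
    }

    fn parse_block<'t>(&mut self, prelude: MediaQueries, _input: &mut Parser<'i, 't>)
        -> Result<MyRule, ParseError<'i, ()>>
    {
        Ok(MyRule::Media(prelude))
    }
}
```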
--- a/third_party/rust/cssparser/src/serializer.rs
+++ b/third_party/rust/cssparser/src/serializer.rs
@@ -1,15 +1,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 use dtoa_short::{self, Notation};
+use itoa;
 use std::ascii::AsciiExt;
 use std::fmt::{self, Write};
+use std::io;
 use std::str;
 
 use super::Token;
 
 
 /// Trait for things that can serialize themselves in CSS syntax.
 pub trait ToCss {
     /// Serialize `self` in CSS syntax, writing to `dest`.
@@ -19,33 +21,16 @@ pub trait ToCss {
     ///
     /// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
     #[inline]
     fn to_css_string(&self) -> String {
         let mut s = String::new();
         self.to_css(&mut s).unwrap();
         s
     }
-
-    /// Serialize `self` in CSS syntax and return a result compatible with `std::fmt::Show`.
-    ///
-    /// Typical usage is, for a `Foo` that implements `ToCss`:
-    ///
-    /// ```{rust,ignore}
-    /// use std::fmt;
-    /// impl fmt::Show for Foo {
-    ///     #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.fmt_to_css(f) }
-    /// }
-    /// ```
-    ///
-    /// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
-    #[inline]
-    fn fmt_to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
-        self.to_css(dest).map_err(|_| fmt::Error)
-    }
 }
 
 #[inline]
 fn write_numeric<W>(value: f32, int_value: Option<i32>, has_sign: bool, dest: &mut W)
                     -> fmt::Result where W: fmt::Write {
     // `value.value >= 0` is true for negative 0.
     if has_sign && value.is_sign_positive() {
         dest.write_str("+")?;
@@ -85,17 +70,17 @@ impl<'a> ToCss for Token<'a> {
                 serialize_identifier(&**value, dest)?;
             }
             Token::QuotedString(ref value) => serialize_string(&**value, dest)?,
             Token::UnquotedUrl(ref value) => {
                 dest.write_str("url(")?;
                 serialize_unquoted_url(&**value, dest)?;
                 dest.write_str(")")?;
             },
-            Token::Delim(value) => write!(dest, "{}", value)?,
+            Token::Delim(value) => dest.write_char(value)?,
 
             Token::Number { value, int_value, has_sign } => {
                 write_numeric(value, int_value, has_sign, dest)?
             }
             Token::Percentage { unit_value, int_value, has_sign } => {
                 write_numeric(unit_value * 100., int_value, has_sign, dest)?;
                 dest.write_str("%")?;
             },
@@ -107,17 +92,21 @@ impl<'a> ToCss for Token<'a> {
                     dest.write_str("\\65 ")?;
                     serialize_name(&unit[1..], dest)?;
                 } else {
                     serialize_identifier(unit, dest)?;
                 }
             },
 
             Token::WhiteSpace(content) => dest.write_str(content)?,
-            Token::Comment(content) => write!(dest, "/*{}*/", content)?,
+            Token::Comment(content) => {
+                dest.write_str("/*")?;
+                dest.write_str(content)?;
+                dest.write_str("*/")?
+            }
             Token::Colon => dest.write_str(":")?,
             Token::Semicolon => dest.write_str(";")?,
             Token::Comma => dest.write_str(",")?,
             Token::IncludeMatch => dest.write_str("~=")?,
             Token::DashMatch => dest.write_str("|=")?,
             Token::PrefixMatch => dest.write_str("^=")?,
             Token::SuffixMatch => dest.write_str("$=")?,
             Token::SubstringMatch => dest.write_str("*=")?,
@@ -138,16 +127,42 @@ impl<'a> ToCss for Token<'a> {
             Token::CloseParenthesis => dest.write_str(")")?,
             Token::CloseSquareBracket => dest.write_str("]")?,
             Token::CloseCurlyBracket => dest.write_str("}")?,
         }
         Ok(())
     }
 }
 
+fn to_hex_byte(value: u8) -> u8 {
+    match value {
+        0...9 => value + b'0',
+        _ => value - 10 + b'a',
+    }
+}
+
+fn hex_escape<W>(ascii_byte: u8, dest: &mut W) -> fmt::Result where W:fmt::Write {
+    let high = ascii_byte >> 4;
+    let b3;
+    let b4;
+    let bytes = if high > 0 {
+        let low = ascii_byte & 0x0F;
+        b4 = [b'\\', to_hex_byte(high), to_hex_byte(low), b' '];
+        &b4[..]
+    } else {
+        b3 = [b'\\', to_hex_byte(ascii_byte), b' '];
+        &b3[..]
+    };
+    dest.write_str(unsafe { str::from_utf8_unchecked(&bytes) })
+}
+
+fn char_escape<W>(ascii_byte: u8, dest: &mut W) -> fmt::Result where W:fmt::Write {
+    let bytes = [b'\\', ascii_byte];
+    dest.write_str(unsafe { str::from_utf8_unchecked(&bytes) })
+}
 
 /// Write a CSS identifier, escaping characters as necessary.
 pub fn serialize_identifier<W>(mut value: &str, dest: &mut W) -> fmt::Result where W:fmt::Write {
     if value.is_empty() {
         return Ok(())
     }
 
     if value.starts_with("--") {
@@ -156,17 +171,17 @@ pub fn serialize_identifier<W>(mut value
     } else if value == "-" {
         dest.write_str("\\-")
     } else {
         if value.as_bytes()[0] == b'-' {
             dest.write_str("-")?;
             value = &value[1..];
         }
         if let digit @ b'0'...b'9' = value.as_bytes()[0] {
-            write!(dest, "\\3{} ", digit as char)?;
+            hex_escape(digit, dest)?;
             value = &value[1..];
         }
         serialize_name(value, dest)
     }
 }
 
 
 fn serialize_name<W>(value: &str, dest: &mut W) -> fmt::Result where W:fmt::Write {
@@ -177,19 +192,19 @@ fn serialize_name<W>(value: &str, dest: 
             _ if !b.is_ascii() => continue,
             b'\0' => Some("\u{FFFD}"),
             _ => None,
         };
         dest.write_str(&value[chunk_start..i])?;
         if let Some(escaped) = escaped {
             dest.write_str(escaped)?;
         } else if (b >= b'\x01' && b <= b'\x1F') || b == b'\x7F' {
-            write!(dest, "\\{:x} ", b)?;
+            hex_escape(b, dest)?;
         } else {
-            write!(dest, "\\{}", b as char)?;
+            char_escape(b, dest)?;
         }
         chunk_start = i + 1;
     }
     dest.write_str(&value[chunk_start..])
 }
 
 
 fn serialize_unquoted_url<W>(value: &str, dest: &mut W) -> fmt::Result where W:fmt::Write {
@@ -197,19 +212,19 @@ fn serialize_unquoted_url<W>(value: &str
     for (i, b) in value.bytes().enumerate() {
         let hex = match b {
             b'\0' ... b' ' | b'\x7F' => true,
             b'(' | b')' | b'"' | b'\'' | b'\\' => false,
             _ => continue
         };
         dest.write_str(&value[chunk_start..i])?;
         if hex {
-            write!(dest, "\\{:X} ", b)?;
+            hex_escape(b, dest)?;
         } else {
-            write!(dest, "\\{}", b as char)?;
+            char_escape(b, dest)?;
         }
         chunk_start = i + 1;
     }
     dest.write_str(&value[chunk_start..])
 }
 
 
 /// Write a double-quoted CSS string token, escaping content as necessary.
@@ -257,30 +272,56 @@ impl<'a, W> fmt::Write for CssStringWrit
                 b'\\' => Some("\\\\"),
                 b'\0' => Some("\u{FFFD}"),
                 b'\x01'...b'\x1F' | b'\x7F' => None,
                 _ => continue,
             };
             self.inner.write_str(&s[chunk_start..i])?;
             match escaped {
                 Some(x) => self.inner.write_str(x)?,
-                None => write!(self.inner, "\\{:x} ", b)?,
+                None => hex_escape(b, self.inner)?,
             };
             chunk_start = i + 1;
         }
         self.inner.write_str(&s[chunk_start..])
     }
 }
 
 
 macro_rules! impl_tocss_for_int {
     ($T: ty) => {
         impl<'a> ToCss for $T {
             fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
-                write!(dest, "{}", *self)
+                struct AssumeUtf8<W: fmt::Write>(W);
+
+                impl<W: fmt::Write> io::Write for AssumeUtf8<W> {
+                    #[inline]
+                    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+                        // Safety: itoa only emits ASCII, which is also well-formed UTF-8.
+                        debug_assert!(buf.is_ascii());
+                        self.0.write_str(unsafe { str::from_utf8_unchecked(buf) })
+                            .map_err(|_| io::ErrorKind::Other.into())
+                    }
+
+                    #[inline]
+                    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+                        self.write_all(buf)?;
+                        Ok(buf.len())
+                    }
+
+                    #[inline]
+                    fn flush(&mut self) -> io::Result<()> {
+                        Ok(())
+                    }
+                }
+
+                match itoa::write(AssumeUtf8(dest), *self) {
+                    Ok(_) => Ok(()),
+                    Err(_) => Err(fmt::Error)
+                }
             }
         }
     }
 }
 
 impl_tocss_for_int!(i8);
 impl_tocss_for_int!(u8);
 impl_tocss_for_int!(i16);
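The new `hex_escape` and `char_escape` helpers assemble each escape in a small stack buffer and emit it with a single `write_str`, and the `itoa`-backed integer `ToCss` bridges `fmt::Write` to `io::Write` the same way, so none of these hot paths pay for `write!`'s formatting machinery. A standalone sketch of the escape helper, using a checked `str::from_utf8` where the patch can justify `from_utf8_unchecked`:

```rust
use std::fmt::{self, Write};
use std::str;

fn to_hex_byte(value: u8) -> u8 {
    match value {
        0..=9 => value + b'0',
        _ => value - 10 + b'a',
    }
}

/// Emit a CSS hex escape (`\XY ` or `\Y `) for an ASCII byte.
fn hex_escape<W: Write>(ascii_byte: u8, dest: &mut W) -> fmt::Result {
    let (high, low) = (ascii_byte >> 4, ascii_byte & 0x0F);
    let long = [b'\\', to_hex_byte(high), to_hex_byte(low), b' '];
    let short = [b'\\', to_hex_byte(low), b' '];
    let bytes: &[u8] = if high > 0 { &long } else { &short };
    // The buffer is pure ASCII, so this cannot fail.
    dest.write_str(str::from_utf8(bytes).unwrap())
}

fn main() {
    let mut s = String::new();
    hex_escape(0x7F, &mut s).unwrap();
    hex_escape(0x0A, &mut s).unwrap();
    assert_eq!(s, "\\7f \\a ");
}
```

Note that the two fixed-size arrays mirror the patch's `b3`/`b4` locals: the escape is lowercase and always ends in a space, which is what the updated `unquoted_url_escaping` test expectations above reflect.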
--- a/third_party/rust/cssparser/src/tests.rs
+++ b/third_party/rust/cssparser/src/tests.rs
@@ -295,20 +295,20 @@ fn unquoted_url_escaping() {
         \x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\
         \x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f \
         !\"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]\
         ^_`abcdefghijklmnopqrstuvwxyz{|}~\x7fé\
     ".into());
     let serialized = token.to_css_string();
     assert_eq!(serialized, "\
         url(\
-            \\1 \\2 \\3 \\4 \\5 \\6 \\7 \\8 \\9 \\A \\B \\C \\D \\E \\F \\10 \
-            \\11 \\12 \\13 \\14 \\15 \\16 \\17 \\18 \\19 \\1A \\1B \\1C \\1D \\1E \\1F \\20 \
+            \\1 \\2 \\3 \\4 \\5 \\6 \\7 \\8 \\9 \\a \\b \\c \\d \\e \\f \\10 \
+            \\11 \\12 \\13 \\14 \\15 \\16 \\17 \\18 \\19 \\1a \\1b \\1c \\1d \\1e \\1f \\20 \
             !\\\"#$%&\\'\\(\\)*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]\
-            ^_`abcdefghijklmnopqrstuvwxyz{|}~\\7F é\
+            ^_`abcdefghijklmnopqrstuvwxyz{|}~\\7f é\
         )\
         ");
     let mut input = ParserInput::new(&serialized);
     assert_eq!(Parser::new(&mut input).next(), Ok(&token));
 }
 
 #[test]
 fn test_expect_url() {
@@ -740,39 +740,45 @@ impl<'i> DeclarationParser<'i> for JsonP
             name,
             value,
             important,
         ])
     }
 }
 
 impl<'i> AtRuleParser<'i> for JsonParser {
-    type Prelude = Vec<Json>;
+    type PreludeNoBlock = Vec<Json>;
+    type PreludeBlock = Vec<Json>;
     type AtRule = Json;
     type Error = ();
 
     fn parse_prelude<'t>(&mut self, name: CowRcStr<'i>, input: &mut Parser<'i, 't>)
-                         -> Result<AtRuleType<Vec<Json>, Json>, ParseError<'i, ()>> {
-        Ok(AtRuleType::OptionalBlock(vec![
+                         -> Result<AtRuleType<Vec<Json>, Vec<Json>>, ParseError<'i, ()>> {
+        let prelude = vec![
             "at-rule".to_json(),
             name.to_json(),
             Json::Array(component_values_to_json(input)),
-        ]))
+        ];
+        match_ignore_ascii_case! { &*name,
+            "media" | "foo-with-block" => Ok(AtRuleType::WithBlock(prelude)),
+            "charset" => Err(BasicParseError::AtRuleInvalid(name.clone()).into()),
+            _ => Ok(AtRuleType::WithoutBlock(prelude)),
+        }
+    }
+
+    fn rule_without_block(&mut self, mut prelude: Vec<Json>) -> Json {
+        prelude.push(Json::Null);
+        Json::Array(prelude)
     }
 
     fn parse_block<'t>(&mut self, mut prelude: Vec<Json>, input: &mut Parser<'i, 't>)
                        -> Result<Json, ParseError<'i, ()>> {
         prelude.push(Json::Array(component_values_to_json(input)));
         Ok(Json::Array(prelude))
     }
-
-    fn rule_without_block(&mut self, mut prelude: Vec<Json>) -> Json {
-        prelude.push(Json::Null);
-        Json::Array(prelude)
-    }
 }
 
 impl<'i> QualifiedRuleParser<'i> for JsonParser {
     type Prelude = Vec<Json>;
     type QualifiedRule = Json;
     type Error = ();
 
     fn parse_prelude<'t>(&mut self, input: &mut Parser<'i, 't>) -> Result<Vec<Json>, ParseError<'i, ()>> {
@@ -1044,8 +1050,58 @@ fn roundtrip_percentage_token() {
                 test_roundtrip(&format!("{}.{}%", i, j));
             }
             for k in 1..10 {
                 test_roundtrip(&format!("{}.{}{}%", i, j, k));
             }
         }
     }
 }
+
+#[test]
+fn utf16_columns() {
+    // This particular test serves two purposes.  First, it checks
+    // that the column number computations are correct.  Second, it
+    // checks that tokenizer code paths correctly differentiate
+    // between the different UTF-8 encoding bytes.  In particular
+    // different leader bytes and continuation bytes are treated
+    // differently, so we make sure to include all lengths in the
+    // tests, using the string "QΡ✈🆒".  Also, remember that because
+    // the column is in units of UTF-16, the 4-byte sequence results
+    // in two columns.
+    let tests = vec![
+        ("", 0),
+        ("ascii", 5),
+        ("/*QΡ✈🆒*/", 9),
+        ("'QΡ✈🆒*'", 8),
+        ("\"\\\"'QΡ✈🆒*'", 11),
+        ("\\Q\\Ρ\\✈\\🆒", 9),
+        ("QΡ✈🆒", 5),
+        ("QΡ✈🆒\\Q\\Ρ\\✈\\🆒", 14),
+        ("newline\r\nQΡ✈🆒", 5),
+        ("url(QΡ✈🆒\\Q\\Ρ\\✈\\🆒)", 19),
+        ("url(QΡ✈🆒)", 10),
+        ("url(\r\nQΡ✈🆒\\Q\\Ρ\\✈\\🆒)", 15),
+        ("url(\r\nQΡ✈🆒\\Q\\Ρ\\✈\\🆒", 14),
+        ("url(\r\nQΡ✈🆒\\Q\\Ρ\\✈\\🆒 x", 16),
+        ("QΡ✈🆒()", 7),
+        // Test that under/over-flow of current_line_start_position is
+        // handled properly; see the special case in consume_4byte_intro.
+        ("🆒", 2),
+    ];
+
+    for test in tests {
+        let mut input = ParserInput::new(test.0);
+        let mut parser = Parser::new(&mut input);
+
+        // Read all tokens.
+        loop {
+            match parser.next() {
+                Err(BasicParseError::EndOfInput) => { break; }
+                Err(_) => { assert!(false); }
+                Ok(_) => {}
+            };
+        }
+
+        // Check the resulting column.
+        assert_eq!(parser.current_source_location().column, test.1);
+    }
+}
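The new `utf16_columns` test exercises the tokenizer rework below: `current_line_start_position` is biased by `len_utf8 - len_utf16` for every character consumed, so that `position - current_line_start_position` yields a column in UTF-16 code units. Byte-wise, each continuation byte adds one to the bias while a 4-byte leader subtracts one, which is why the patch uses wrapping arithmetic. A standalone sketch of the per-character accounting:

```rust
/// Column (in UTF-16 code units) of the position just past `s`,
/// using the same bias trick as the tokenizer below.
fn utf16_column(s: &str) -> usize {
    let mut position = 0usize;   // byte offset, as in the tokenizer
    let mut line_start = 0usize; // biased current_line_start_position
    for c in s.chars() {
        position += c.len_utf8();
        // 2- and 3-byte chars are 1 UTF-16 unit, 4-byte chars are 2,
        // so bias line_start by the difference.
        line_start = line_start.wrapping_add(c.len_utf8() - c.len_utf16());
    }
    position.wrapping_sub(line_start)
}

fn main() {
    // Matches the expectations in utf16_columns above:
    // "QΡ✈🆒" is 1 + 1 + 1 + 2 == 5 UTF-16 code units wide.
    assert_eq!(utf16_column("QΡ✈🆒"), 5);
    assert_eq!(utf16_column("🆒"), 2);
}
```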
--- a/third_party/rust/cssparser/src/tokenizer.rs
+++ b/third_party/rust/cssparser/src/tokenizer.rs
@@ -39,19 +39,20 @@ pub enum Token<'a> {
     /// The value does not include the `#` marker.
     IDHash(CowRcStr<'a>),  // Hash that is a valid ID selector.
 
     /// A [`<string-token>`](https://drafts.csswg.org/css-syntax/#string-token-diagram)
     ///
     /// The value does not include the quotes.
     QuotedString(CowRcStr<'a>),
 
-    /// A [`<url-token>`](https://drafts.csswg.org/css-syntax/#url-token-diagram) or `url( <string-token> )` function
+    /// A [`<url-token>`](https://drafts.csswg.org/css-syntax/#url-token-diagram)
     ///
-    /// The value does not include the `url(` `)` markers or the quotes.
+    /// The value does not include the `url(` `)` markers.  Note that `url( <string-token> )` is represented by a
+    /// `Function` token.
     UnquotedUrl(CowRcStr<'a>),
 
     /// A `<delim-token>`
     Delim(char),
 
     /// A [`<number-token>`](https://drafts.csswg.org/css-syntax/#number-token-diagram)
     Number {
         /// Whether the number had a `+` or `-` sign.
@@ -200,20 +201,22 @@ impl<'a> Token<'a> {
 }
 
 
 #[derive(Clone)]
 pub struct Tokenizer<'a> {
     input: &'a str,
     /// Counted in bytes, not code points. From 0.
     position: usize,
+    /// The position at the start of the current line, but adjusted to
+    /// ensure that computing the column will give the result in units
+    /// of UTF-16 characters.
     current_line_start_position: usize,
     current_line_number: u32,
     var_functions: SeenStatus,
-    viewport_percentages: SeenStatus,
     source_map_url: Option<&'a str>,
 }
 
 #[derive(Copy, Clone, PartialEq, Eq)]
 enum SeenStatus {
     DontCare,
     LookingForThem,
     SeenAtLeastOne,
@@ -229,17 +232,16 @@ impl<'a> Tokenizer<'a> {
     #[inline]
     pub fn with_first_line_number(input: &str, first_line_number: u32) -> Tokenizer {
         Tokenizer {
             input: input,
             position: 0,
             current_line_start_position: 0,
             current_line_number: first_line_number,
             var_functions: SeenStatus::DontCare,
-            viewport_percentages: SeenStatus::DontCare,
             source_map_url: None,
         }
     }
 
     #[inline]
     pub fn look_for_var_functions(&mut self) {
         self.var_functions = SeenStatus::LookingForThem;
     }
@@ -256,40 +258,16 @@ impl<'a> Tokenizer<'a> {
         if self.var_functions == SeenStatus::LookingForThem {
             if name.eq_ignore_ascii_case("var") {
                 self.var_functions = SeenStatus::SeenAtLeastOne;
             }
         }
     }
 
     #[inline]
-    pub fn look_for_viewport_percentages(&mut self) {
-        self.viewport_percentages = SeenStatus::LookingForThem;
-    }
-
-    #[inline]
-    pub fn seen_viewport_percentages(&mut self) -> bool {
-        let seen = self.viewport_percentages == SeenStatus::SeenAtLeastOne;
-        self.viewport_percentages = SeenStatus::DontCare;
-        seen
-    }
-
-    #[inline]
-    pub fn see_dimension(&mut self, unit: &str) {
-        if self.viewport_percentages == SeenStatus::LookingForThem {
-            if unit.eq_ignore_ascii_case("vh") ||
-               unit.eq_ignore_ascii_case("vw") ||
-               unit.eq_ignore_ascii_case("vmin") ||
-               unit.eq_ignore_ascii_case("vmax") {
-                   self.viewport_percentages = SeenStatus::SeenAtLeastOne;
-            }
-        }
-    }
-
-    #[inline]
     pub fn next(&mut self) -> Result<Token<'a>, ()> {
         next_token(self)
     }
 
     #[inline]
     pub fn position(&self) -> SourcePosition {
         SourcePosition(self.position)
     }
@@ -358,78 +336,132 @@ impl<'a> Tokenizer<'a> {
     #[inline]
     fn is_eof(&self) -> bool { !self.has_at_least(0) }
 
     // If true, the input has at least `n` bytes left *after* the current one.
     // That is, `tokenizer.char_at(n)` will not panic.
     #[inline]
     fn has_at_least(&self, n: usize) -> bool { self.position + n < self.input.len() }
 
+    // Advance over N bytes in the input.  This function can advance
+    // over ASCII bytes (excluding newlines), or UTF-8 sequence
+    // leaders (excluding leaders for 4-byte sequences).
     #[inline]
-    pub fn advance(&mut self, n: usize) { self.position += n }
+    pub fn advance(&mut self, n: usize) {
+        if cfg!(debug_assertions) {
+            // Each byte must either be an ASCII byte or a sequence
+            // leader, but not a 4-byte leader; also newlines are
+            // rejected.
+            for i in 0..n {
+                let b = self.byte_at(i);
+                debug_assert!(b.is_ascii() || (b & 0xF0 != 0xF0 && b & 0xC0 != 0x80));
+                debug_assert!(b != b'\r' && b != b'\n' && b != b'\x0C');
+            }
+        }
+        self.position += n
+    }
 
     // Assumes non-EOF
     #[inline]
     fn next_byte_unchecked(&self) -> u8 { self.byte_at(0) }
 
     #[inline]
     fn byte_at(&self, offset: usize) -> u8 {
         self.input.as_bytes()[self.position + offset]
     }
 
+    // Advance over a single byte; the byte must be a UTF-8 sequence
+    // leader for a 4-byte sequence.
     #[inline]
-    fn consume_byte(&mut self) -> u8 {
+    fn consume_4byte_intro(&mut self) {
+        debug_assert!(self.next_byte_unchecked() & 0xF0 == 0xF0);
+        // This takes two UTF-16 characters to represent, so we
+        // actually have an undercount.
+        self.current_line_start_position = self.current_line_start_position.wrapping_sub(1);
+        self.position += 1;
+    }
+
+    // Advance over a single byte; the byte must be a UTF-8
+    // continuation byte.
+    #[inline]
+    fn consume_continuation_byte(&mut self) {
+        debug_assert!(self.next_byte_unchecked() & 0xC0 == 0x80);
+        // Continuation bytes contribute to column overcount.  Note
+        // that due to the special case for the 4-byte sequence intro,
+        // we must use wrapping add here.
+        self.current_line_start_position = self.current_line_start_position.wrapping_add(1);
         self.position += 1;
-        self.input.as_bytes()[self.position - 1]
+    }
+
+    // Advance over any kind of byte, excluding newlines.
+    #[inline(never)]
+    fn consume_known_byte(&mut self, byte: u8) {
+        debug_assert!(byte != b'\r' && byte != b'\n' && byte != b'\x0C');
+        self.position += 1;
+        // Continuation bytes contribute to column overcount.
+        if byte & 0xF0 == 0xF0 {
+            // This takes two UTF-16 characters to represent, so we
+            // actually have an undercount.
+            self.current_line_start_position = self.current_line_start_position.wrapping_sub(1);
+        } else if byte & 0xC0 == 0x80 {
+            // Note that due to the special case for the 4-byte
+            // sequence intro, we must use wrapping add here.
+            self.current_line_start_position = self.current_line_start_position.wrapping_add(1);
+        }
     }
 
     #[inline]
     fn next_char(&self) -> char {
         self.input[self.position..].chars().next().unwrap()
     }
 
-    fn seen_newline(&mut self, is_cr: bool) {
-        if is_cr && self.next_byte() == Some(/* LF */ b'\n') {
-            return
+    // Given that a newline has been seen, advance over the newline
+    // and update the state.
+    #[inline]
+    fn consume_newline(&mut self) {
+        let byte = self.next_byte_unchecked();
+        debug_assert!(byte == b'\r' || byte == b'\n' || byte == b'\x0C');
+        self.position += 1;
+        if byte == b'\r' && self.next_byte() == Some(b'\n') {
+            self.position += 1;
         }
         self.current_line_start_position = self.position;
         self.current_line_number += 1;
     }
 
     #[inline]
     fn has_newline_at(&self, offset: usize) -> bool {
         self.position + offset < self.input.len() &&
         matches!(self.byte_at(offset), b'\n' | b'\r' | b'\x0C')
     }
 
     #[inline]
     fn consume_char(&mut self) -> char {
         let c = self.next_char();
-        self.position += c.len_utf8();
+        let len_utf8 = c.len_utf8();
+        self.position += len_utf8;
+        // Note that due to the special case for the 4-byte sequence
+        // intro, we must use wrapping add here.
+        self.current_line_start_position = self.current_line_start_position.wrapping_add(len_utf8 - c.len_utf16());
         c
     }
 
     #[inline]
     fn starts_with(&self, needle: &[u8]) -> bool {
         self.input.as_bytes()[self.position..].starts_with(needle)
     }
 
     pub fn skip_whitespace(&mut self) {
         while !self.is_eof() {
             match_byte! { self.next_byte_unchecked(),
                 b' ' | b'\t' => {
                     self.advance(1)
                 },
-                b'\n' | b'\x0C' => {
-                    self.advance(1);
-                    self.seen_newline(false);
-                },
-                b'\r' => {
-                    self.advance(1);
-                    self.seen_newline(true);
+                b'\n' | b'\x0C' | b'\r' => {
+                    self.consume_newline();
                 },
                 b'/' => {
                     if self.starts_with(b"/*") {
                         consume_comment(self);
                     } else {
                         return
                     }
                 }
@@ -441,23 +473,18 @@ impl<'a> Tokenizer<'a> {
     }
 
     pub fn skip_cdc_and_cdo(&mut self) {
         while !self.is_eof() {
             match_byte! { self.next_byte_unchecked(),
                 b' ' | b'\t' => {
                     self.advance(1)
                 },
-                b'\n' | b'\x0C' => {
-                    self.advance(1);
-                    self.seen_newline(false);
-                },
-                b'\r' => {
-                    self.advance(1);
-                    self.seen_newline(true);
+                b'\n' | b'\x0C' | b'\r' => {
+                    self.consume_newline();
                 },
                 b'/' => {
                     if self.starts_with(b"/*") {
                         consume_comment(self);
                     } else {
                         return
                     }
                 }
@@ -490,43 +517,41 @@ pub struct SourcePosition(pub(crate) usi
 
 /// The line and column number for a given position within the input.
 #[derive(PartialEq, Eq, Debug, Clone, Copy)]
 pub struct SourceLocation {
     /// The line number, starting at 0 for the first line, unless `with_first_line_number` was used.
     pub line: u32,
 
     /// The column number within a line, starting at 0 for the first character of the line.
+    /// Column numbers are in units of UTF-16 characters.
     pub column: u32,
 }
 
 
 fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
     if tokenizer.is_eof() {
         return Err(())
     }
     let b = tokenizer.next_byte_unchecked();
     let token = match_byte! { b,
         b' ' | b'\t' => {
-            consume_whitespace(tokenizer, false, false)
+            consume_whitespace(tokenizer, false)
         },
-        b'\n' | b'\x0C' => {
-            consume_whitespace(tokenizer, true, false)
-        },
-        b'\r' => {
-            consume_whitespace(tokenizer, true, true)
+        b'\n' | b'\x0C' | b'\r' => {
+            consume_whitespace(tokenizer, true)
         },
         b'"' => { consume_string(tokenizer, false) },
         b'#' => {
             tokenizer.advance(1);
             if is_ident_start(tokenizer) { IDHash(consume_name(tokenizer)) }
             else if !tokenizer.is_eof() && match tokenizer.next_byte_unchecked() {
-                b'a'...b'z' | b'A'...b'Z' | b'0'...b'9' | b'-' | b'_' => true,
-                b'\\' => !tokenizer.has_newline_at(1),
-                _ => !b.is_ascii(),
+                // Any other valid case here already resulted in IDHash.
+                b'0'...b'9' | b'-' => true,
+                _ => false,
             } { Hash(consume_name(tokenizer)) }
             else { Delim('#') }
         },
         b'$' => {
             if tokenizer.starts_with(b"$=") { tokenizer.advance(2); SuffixMatch }
             else { tokenizer.advance(1); Delim('$') }
         },
         b'\'' => { consume_string(tokenizer, true) },
@@ -637,35 +662,31 @@ fn next_token<'a>(tokenizer: &mut Tokeni
                 Delim(b as char)
             }
         },
     };
     Ok(token)
 }
 
 
-fn consume_whitespace<'a>(tokenizer: &mut Tokenizer<'a>, newline: bool, is_cr: bool) -> Token<'a> {
+fn consume_whitespace<'a>(tokenizer: &mut Tokenizer<'a>, newline: bool) -> Token<'a> {
     let start_position = tokenizer.position();
-    tokenizer.advance(1);
     if newline {
-        tokenizer.seen_newline(is_cr)
+        tokenizer.consume_newline();
+    } else {
+        tokenizer.advance(1);
     }
     while !tokenizer.is_eof() {
         let b = tokenizer.next_byte_unchecked();
         match_byte! { b,
             b' ' | b'\t' => {
                 tokenizer.advance(1);
             }
-            b'\n' | b'\x0C' => {
-                tokenizer.advance(1);
-                tokenizer.seen_newline(false);
-            }
-            b'\r' => {
-                tokenizer.advance(1);
-                tokenizer.seen_newline(true);
+            b'\n' | b'\x0C' | b'\r' => {
+                tokenizer.consume_newline();
             }
             _ => {
                 break
             }
         }
     }
     WhiteSpace(tokenizer.slice_from(start_position))
 }
@@ -695,25 +716,23 @@ fn consume_comment<'a>(tokenizer: &mut T
                 tokenizer.advance(1);
                 if tokenizer.next_byte() == Some(b'/') {
                     tokenizer.advance(1);
                     let contents = tokenizer.slice(start_position..end_position);
                     check_for_source_map(tokenizer, contents);
                     return contents
                 }
             }
-            b'\n' | b'\x0C' => {
-                tokenizer.advance(1);
-                tokenizer.seen_newline(false);
+            b'\n' | b'\x0C' | b'\r' => {
+                tokenizer.consume_newline();
             }
-            b'\r' => {
-                tokenizer.advance(1);
-                tokenizer.seen_newline(true);
-            }
+            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
             _ => {
+                // ASCII or other leading byte.
                 tokenizer.advance(1);
             }
         }
     }
     let contents = tokenizer.slice_from(start_position);
     check_for_source_map(tokenizer, contents);
     contents
 }
@@ -739,92 +758,96 @@ fn consume_quoted_string<'a>(tokenizer: 
         }
         match_byte! { tokenizer.next_byte_unchecked(),
             b'"' => {
                 if !single_quote {
                     let value = tokenizer.slice_from(start_pos);
                     tokenizer.advance(1);
                     return Ok(value.into())
                 }
+                tokenizer.advance(1);
             }
             b'\'' => {
                 if single_quote {
                     let value = tokenizer.slice_from(start_pos);
                     tokenizer.advance(1);
                     return Ok(value.into())
                 }
+                tokenizer.advance(1);
             }
             b'\\' | b'\0' => {
                 // * The tokenizer’s input is UTF-8 since it’s `&str`.
                 // * start_pos is at a code point boundary
                 // * so is the current position (which is before '\\' or '\0'
                 //
                 // So `string_bytes` is well-formed UTF-8.
                 string_bytes = tokenizer.slice_from(start_pos).as_bytes().to_owned();
                 break
             }
             b'\n' | b'\r' | b'\x0C' => {
                 return Err(tokenizer.slice_from(start_pos).into())
             },
-            _ => {}
+            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            _ => {
+                // ASCII or other leading byte.
+                tokenizer.advance(1);
+            }
         }
-        tokenizer.consume_byte();
     }
 
     while !tokenizer.is_eof() {
-        if matches!(tokenizer.next_byte_unchecked(), b'\n' | b'\r' | b'\x0C') {
-            return Err(
-                // string_bytes is well-formed UTF-8, see other comments.
-                unsafe {
-                    from_utf8_release_unchecked(string_bytes)
-                }.into()
-            );
-        }
-        let b = tokenizer.consume_byte();
+        let b = tokenizer.next_byte_unchecked();
         match_byte! { b,
+            b'\n' | b'\r' | b'\x0C' => {
+                return Err(
+                    // string_bytes is well-formed UTF-8, see other comments.
+                    unsafe {
+                        from_utf8_release_unchecked(string_bytes)
+                    }.into()
+                );
+            }
             b'"' => {
+                tokenizer.advance(1);
                 if !single_quote {
                     break;
                 }
             }
             b'\'' => {
+                tokenizer.advance(1);
                 if single_quote {
                     break;
                 }
             }
             b'\\' => {
+                tokenizer.advance(1);
                 if !tokenizer.is_eof() {
                     match tokenizer.next_byte_unchecked() {
                         // Escaped newline
-                        b'\n' | b'\x0C' => {
-                            tokenizer.advance(1);
-                            tokenizer.seen_newline(false);
-                        }
-                        b'\r' => {
-                            tokenizer.advance(1);
-                            if tokenizer.next_byte() == Some(b'\n') {
-                                tokenizer.advance(1);
-                            }
-                            // `is_cr = true` is useful to skip \r when the next iteration
-                            // of a loop will call `seen_newline` again for the following \n.
-                            // In this case we’re consuming both in this iteration, so passing `false`.
-                            tokenizer.seen_newline(false);
+                        b'\n' | b'\x0C' | b'\r' => {
+                            tokenizer.consume_newline();
                         }
                         // This pushes one well-formed code point
                         _ => consume_escape_and_write(tokenizer, &mut string_bytes)
                     }
                 }
                 // else: escaped EOF, do nothing.
                 continue;
             }
             b'\0' => {
+                tokenizer.advance(1);
                 string_bytes.extend("\u{FFFD}".as_bytes());
                 continue;
             }
-            _ => {},
+            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
+            _ => {
+                // ASCII or other leading byte.
+                tokenizer.advance(1);
+            },
         }
 
         // If this byte is part of a multi-byte code point,
         // we’ll end up copying the whole code point before this loop does something else.
         string_bytes.push(b);
     }
 
     Ok(
@@ -882,21 +905,21 @@ fn consume_name<'a>(tokenizer: &mut Toke
                 // * The tokenizer’s input is UTF-8 since it’s `&str`.
                 // * start_pos is at a code point boundary
                 // * so is the current position (which is before '\\' or '\0')
                 //
                 // So `value_bytes` is well-formed UTF-8.
                 value_bytes = tokenizer.slice_from(start_pos).as_bytes().to_owned();
                 break
             }
+            b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
+            b'\xC0'...b'\xEF' => { tokenizer.advance(1); }
+            b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
             b => {
-                if b.is_ascii() {
-                    return tokenizer.slice_from(start_pos).into();
-                }
-                tokenizer.advance(1);
+                return tokenizer.slice_from(start_pos).into();
             }
         }
     }
 
     while !tokenizer.is_eof() {
         let b = tokenizer.next_byte_unchecked();
         match_byte! { b,
             b'a'...b'z' | b'A'...b'Z' | b'0'...b'9' | b'_' | b'-'  => {
@@ -908,25 +931,36 @@ fn consume_name<'a>(tokenizer: &mut Toke
                 tokenizer.advance(1);
                 // This pushes one well-formed code point
                 consume_escape_and_write(tokenizer, &mut value_bytes)
             }
             b'\0' => {
                 tokenizer.advance(1);
                 value_bytes.extend("\u{FFFD}".as_bytes());
             },
-            _ => {
-                if b.is_ascii() {
-                    break;
-                }
-                tokenizer.advance(1);
+            b'\x80'...b'\xBF' => {
+                // This byte *is* part of a multi-byte code point;
+                // we’ll end up copying the whole code point before this loop does something else.
+                tokenizer.consume_continuation_byte();
+                value_bytes.push(b)
+            }
+            b'\xC0'...b'\xEF' => {
                 // This byte *is* part of a multi-byte code point;
                 // we’ll end up copying the whole code point before this loop does something else.
+                tokenizer.advance(1);
                 value_bytes.push(b)
             }
+            b'\xF0'...b'\xFF' => {
+                tokenizer.consume_4byte_intro();
+                value_bytes.push(b)
+            }
+            _ => {
+                // Any other byte is ASCII that cannot be part of a name, so stop.
+                break;
+            }
         }
     }
     // value_bytes is well-formed UTF-8, see other comments.
     unsafe { from_utf8_release_unchecked(value_bytes) }.into()
 }
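
The byte-range arms added throughout these hunks (b'\x80'...b'\xBF' for UTF-8 continuation bytes, b'\xF0'...b'\xFF' for four-byte leading bytes) all feed two helpers whose bodies fall outside the visible context. A minimal sketch of what they presumably do, assuming the tokenizer measures its column as position - current_line_start_position in UTF-16 code units (only the field names position, current_line_number, and current_line_start_position are confirmed by this diff):

    impl<'a> Tokenizer<'a> {
        fn consume_continuation_byte(&mut self) {
            // Assumed body. A continuation byte (0b10xx_xxxx) never starts a
            // code point, so it must not widen the UTF-16 column: shift the
            // line-start marker along with the byte offset. Wrapping arithmetic
            // avoids debug overflow panics when the marker is adjusted.
            self.current_line_start_position = self.current_line_start_position.wrapping_add(1);
            self.position += 1;
        }

        fn consume_4byte_intro(&mut self) {
            // Assumed body. A four-byte UTF-8 sequence encodes a code point
            // outside the BMP, which takes *two* UTF-16 code units: subtract one
            // here, and the three continuation bytes then each add one back,
            // leaving a net column growth of two units.
            self.current_line_start_position = self.current_line_start_position.wrapping_sub(1);
            self.position += 1;
        }
    }

This would also explain why the remaining catch-all arms can simply advance(1): ASCII bytes and two- or three-byte leading bytes each account for exactly one UTF-16 code unit.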
 
 fn byte_to_hex_digit(b: u8) -> Option<u32> {
     Some(match_byte! { b,
@@ -1040,17 +1074,16 @@ fn consume_numeric<'a>(tokenizer: &mut T
             unit_value: (value / 100.) as f32,
             int_value: int_value,
             has_sign: has_sign,
         }
     }
     let value = value as f32;
     if is_ident_start(tokenizer) {
         let unit = consume_name(tokenizer);
-        tokenizer.see_dimension(&unit);
         Dimension {
             value: value,
             int_value: int_value,
             has_sign: has_sign,
             unit: unit,
         }
     } else {
         Number {
@@ -1096,29 +1129,35 @@ fn consume_unquoted_url<'a>(tokenizer: &
             b'\r' => {
                 if from_start.as_bytes().get(offset + 1) != Some(&b'\n') {
                     newlines += 1;
                     last_newline = offset;
                 }
             }
             b'"' | b'\'' => { return Err(()) },  // Do not advance
             b')' => {
-                tokenizer.advance(offset + 1);
+                // Don't use advance, because we may be skipping
+                // newlines here, and we want to avoid the assert.
+                tokenizer.position += offset + 1;
                 break
             }
             _ => {
-                tokenizer.advance(offset);
+                // Don't use advance, because we may be skipping
+                // newlines here, and we want to avoid the assert.
+                tokenizer.position += offset;
                 found_printable_char = true;
                 break
             }
         }
     }
 
     if newlines > 0 {
         tokenizer.current_line_number += newlines;
+        // No need for wrapping_add here, because there's no possible
+        // way to wrap.
         tokenizer.current_line_start_position = start_position + last_newline + 1;
     }
 
     if found_printable_char {
         // This function only consumed ASCII (whitespace) bytes,
         // so the current position is a code point boundary.
         return Ok(consume_unquoted_url_internal(tokenizer))
     } else {
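
The two raw `tokenizer.position += …` writes in this hunk deliberately sidestep `advance`. The comments only say "avoid the assert"; a plausible contract, sketched here as an assumption rather than quoted source (the `input` field in particular is hypothetical), is that `advance` debug-asserts it never steps over a byte that needs line bookkeeping:

    impl<'a> Tokenizer<'a> {
        pub fn advance(&mut self, n: usize) {
            if cfg!(debug_assertions) {
                // Every newline must be routed through consume_newline() so that
                // current_line_number and current_line_start_position stay true.
                for &b in &self.input.as_bytes()[self.position..self.position + n] {
                    debug_assert!(b != b'\r' && b != b'\n' && b != b'\x0C');
                }
            }
            self.position += n;
        }
    }

Writing `position` directly is safe here only because the loop above has already counted the skipped newlines and patched current_line_number and current_line_start_position by hand.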
@@ -1152,101 +1191,124 @@ fn consume_unquoted_url<'a>(tokenizer: &
                     // * The tokenizer’s input is UTF-8 since it’s `&str`.
                     // * start_pos is at a code point boundary
                     // * so is the current position (which is before '\\' or '\0')
                     //
                     // So `string_bytes` is well-formed UTF-8.
                     string_bytes = tokenizer.slice_from(start_pos).as_bytes().to_owned();
                     break
                 }
+                b'\x80'...b'\xBF' => { tokenizer.consume_continuation_byte(); }
+                b'\xF0'...b'\xFF' => { tokenizer.consume_4byte_intro(); }
                 _ => {
+                    // ASCII or other leading byte.
                     tokenizer.advance(1);
                 }
             }
         }
         while !tokenizer.is_eof() {
-            match_byte! { tokenizer.consume_byte(),
+            let b = tokenizer.next_byte_unchecked();
+            match_byte! { b,
                 b' ' | b'\t' | b'\n' | b'\r' | b'\x0C' => {
                     // string_bytes is well-formed UTF-8, see other comments.
                     let string = unsafe { from_utf8_release_unchecked(string_bytes) }.into();
-                    tokenizer.position -= 1;
                     return consume_url_end(tokenizer, start_pos, string)
                 }
                 b')' => {
+                    tokenizer.advance(1);
                     break;
                 }
                 b'\x01'...b'\x08' | b'\x0B' | b'\x0E'...b'\x1F' | b'\x7F'  // non-printable
                     | b'"' | b'\'' | b'(' => {
+                    tokenizer.advance(1);
                     return consume_bad_url(tokenizer, start_pos);
                 }
                 b'\\' => {
+                    tokenizer.advance(1);
                     if tokenizer.has_newline_at(0) {
                         return consume_bad_url(tokenizer, start_pos)
                     }
 
                     // This pushes one well-formed code point to string_bytes
                     consume_escape_and_write(tokenizer, &mut string_bytes)
                 },
                 b'\0' => {
+                    tokenizer.advance(1);
                     string_bytes.extend("\u{FFFD}".as_bytes());
                 }
+                b'\x80'...b'\xBF' => {
+                    // We’ll end up copying the whole code point
+                    // before this loop does something else.
+                    tokenizer.consume_continuation_byte();
+                    string_bytes.push(b);
+                }
+                b'\xF0'...b'\xFF' => {
+                    // We’ll end up copying the whole code point
+                    // before this loop does something else.
+                    tokenizer.consume_4byte_intro();
+                    string_bytes.push(b);
+                }
                 // If this byte is part of a multi-byte code point,
                 // we’ll end up copying the whole code point before this loop does something else.
-                b => { string_bytes.push(b) }
+                b => {
+                    // ASCII or other leading byte.
+                    tokenizer.advance(1);
+                    string_bytes.push(b)
+                }
             }
         }
         UnquotedUrl(
             // string_bytes is well-formed UTF-8, see other comments.
             unsafe { from_utf8_release_unchecked(string_bytes) }.into()
         )
     }
 
     fn consume_url_end<'a>(tokenizer: &mut Tokenizer<'a>,
                            start_pos: SourcePosition,
                            string: CowRcStr<'a>)
                            -> Token<'a> {
         while !tokenizer.is_eof() {
-            match_byte! { tokenizer.consume_byte(),
+            match_byte! { tokenizer.next_byte_unchecked(),
                 b')' => {
+                    tokenizer.advance(1);
                     break
                 }
-                b' ' | b'\t' => {}
-                b'\n' | b'\x0C' => {
-                    tokenizer.seen_newline(false);
+                b' ' | b'\t' => { tokenizer.advance(1); }
+                b'\n' | b'\x0C' | b'\r' => {
+                    tokenizer.consume_newline();
                 }
-                b'\r' => {
-                    tokenizer.seen_newline(true);
-                }
-                _ => {
+                b => {
+                    tokenizer.consume_known_byte(b);
                     return consume_bad_url(tokenizer, start_pos);
                 }
             }
         }
         UnquotedUrl(string)
     }
 
     fn consume_bad_url<'a>(tokenizer: &mut Tokenizer<'a>, start_pos: SourcePosition) -> Token<'a> {
         // Consume up to the closing )
         while !tokenizer.is_eof() {
-            match_byte! { tokenizer.consume_byte(),
+            match_byte! { tokenizer.next_byte_unchecked(),
                 b')' => {
+                    tokenizer.advance(1);
                     break
                 }
                 b'\\' => {
+                    tokenizer.advance(1);
                     if matches!(tokenizer.next_byte(), Some(b')') | Some(b'\\')) {
                         tokenizer.advance(1); // Skip an escaped ')' or '\'
                     }
                 }
-                b'\n' | b'\x0C' => {
-                    tokenizer.seen_newline(false);
+                b'\n' | b'\x0C' | b'\r' => {
+                    tokenizer.consume_newline();
                 }
-                b'\r' => {
-                    tokenizer.seen_newline(true);
+                b => {
+                    tokenizer.consume_known_byte(b);
                 }
-                _ => {},
             }
         }
         BadUrl(tokenizer.slice_from(start_pos).into())
     }
 }
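
Two further helpers called above, consume_newline and consume_known_byte, are likewise defined outside the visible hunks. Under the same assumptions as the earlier sketches, their shape is roughly:

    impl<'a> Tokenizer<'a> {
        fn consume_newline(&mut self) {
            // Assumed body, replacing the old seen_newline(bool) dance: consume
            // \n, \x0C, or a full \r\n pair as one newline, then reset the
            // line bookkeeping.
            let byte = self.next_byte_unchecked();
            self.position += 1;
            if byte == b'\r' && self.next_byte() == Some(b'\n') {
                self.position += 1;
            }
            self.current_line_start_position = self.position;
            self.current_line_number += 1;
        }

        // Assumed body: advance over a byte the caller has already read, picking
        // the right bookkeeping for its class (newline, continuation byte,
        // four-byte intro, or anything else).
        fn consume_known_byte(&mut self, byte: u8) {
            match byte {
                b'\r' | b'\n' | b'\x0C' => self.consume_newline(),
                b'\x80'...b'\xBF' => self.consume_continuation_byte(),
                b'\xF0'...b'\xFF' => self.consume_4byte_intro(),
                _ => self.advance(1),
            }
        }
    }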
 
 // (value, number of digits up to 6)
 fn consume_hex_digits<'a>(tokenizer: &mut Tokenizer<'a>) -> (u32, u32) {
@@ -1280,26 +1342,18 @@ fn consume_escape(tokenizer: &mut Tokeni
     match_byte! { tokenizer.next_byte_unchecked(),
         b'0'...b'9' | b'A'...b'F' | b'a'...b'f' => {
             let (c, _) = consume_hex_digits(tokenizer);
             if !tokenizer.is_eof() {
                 match_byte! { tokenizer.next_byte_unchecked(),
                     b' ' | b'\t' => {
                         tokenizer.advance(1)
                     }
-                    b'\n' | b'\x0C' => {
-                        tokenizer.advance(1);
-                        tokenizer.seen_newline(false)
-                    }
-                    b'\r' => {
-                        tokenizer.advance(1);
-                        if !tokenizer.is_eof() && tokenizer.next_byte_unchecked() == b'\n' {
-                            tokenizer.advance(1);
-                        }
-                        tokenizer.seen_newline(false)
+                    b'\n' | b'\x0C' | b'\r' => {
+                        tokenizer.consume_newline();
                     }
                     _ => {}
                 }
             }
             static REPLACEMENT_CHAR: char = '\u{FFFD}';
             if c != 0 {
                 let c = char::from_u32(c);
                 c.unwrap_or(REPLACEMENT_CHAR)
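
The escape path above leans on consume_hex_digits and byte_to_hex_digit, both truncated by the diff context. Matching the "(value, number of digits up to 6)" comment and the CSS syntax rule that an escape takes at most six hex digits, a self-contained sketch (an assumption, not the committed bodies) would be:

    fn byte_to_hex_digit(b: u8) -> Option<u32> {
        Some(match b {
            b'0'...b'9' => b - b'0',
            b'a'...b'f' => b - b'a' + 10,
            b'A'...b'F' => b - b'A' + 10,
            _ => return None,
        } as u32)
    }

    fn consume_hex_digits<'a>(tokenizer: &mut Tokenizer<'a>) -> (u32, u32) {
        let mut value = 0;
        let mut digits = 0;
        while digits < 6 && !tokenizer.is_eof() {
            match byte_to_hex_digit(tokenizer.next_byte_unchecked()) {
                Some(digit) => {
                    value = value * 16 + digit;
                    digits += 1;
                    tokenizer.advance(1); // hex digits are ASCII, so advance is fine
                }
                None => break,
            }
        }
        (value, digits)
    }
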
--- a/toolkit/library/gtest/rust/Cargo.lock
+++ b/toolkit/library/gtest/rust/Cargo.lock
@@ -291,21 +291,22 @@ source = "registry+https://github.com/ru
 dependencies = [
  "core-foundation 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "core-graphics 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "cssparser"
-version = "0.19.5"
+version = "0.20.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cssparser-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "dtoa-short 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "matches 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)",
  "procedural-masquerade 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
  "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -542,17 +543,17 @@ dependencies = [
  "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "geckoservo"
 version = "0.0.1"
 dependencies = [
  "atomic_refcell 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "nsstring_vendor 0.1.0",
  "parking_lot 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "selectors 0.19.0",
  "servo_arc 0.0.1",
  "style 0.0.1",
@@ -1104,17 +1105,17 @@ name = "scopeguard"
 version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "selectors"
 version = "0.19.0"
 dependencies = [
  "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "matches 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)",
  "phf_codegen 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)",
  "precomputed-hash 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "servo_arc 0.0.1",
  "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1206,17 +1207,17 @@ dependencies = [
  "arraydeque 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "arrayvec 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)",
  "atomic_refcell 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "bindgen 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "bit-vec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "euclid 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "hashglobe 0.1.0",
  "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
  "itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1255,17 +1256,17 @@ dependencies = [
 ]
 
 [[package]]
 name = "style_traits"
 version = "0.0.1"
 dependencies = [
  "app_units 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "euclid 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "selectors 0.19.0",
 ]
 
 [[package]]
 name = "syn"
 version = "0.11.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1572,17 +1573,17 @@ dependencies = [
 "checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c"
 "checksum clang-sys 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)" = "611ec2e3a7623afd8a8c0d027887b6b55759d894abbf5fe11b9dc11b50d5b49a"
 "checksum clap 2.24.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6b8f69e518f967224e628896b54e41ff6acfb4dcfefc5076325c36525dac900f"
 "checksum coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c06169f5beb7e31c7c67ebf5540b8b472d23e3eade3b2ec7d1f5b504a85f91bd"
 "checksum core-foundation 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f51ce3b8ebe311c56de14231eb57572c15abebd2d32b3bcb99bcdb9c101f5ac3"
 "checksum core-foundation-sys 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "41115a6aa5d3e1e5ef98148373f25971d1fad53818553f216495f9e67e90a624"
 "checksum core-graphics 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a9f841e9637adec70838c537cae52cb4c751cc6514ad05669b51d107c2021c79"
 "checksum core-text 6.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "16ce16d9ed00181016c11ff48e561314bec92bfbce9fe48f319366618d4e5de6"
-"checksum cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)" = "dc476dc0960774aa1cabfd0044de7d4585a8f2f8a3ef72e6d9d1e16c1e2492b1"
+"checksum cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2334576d63647dd96a6238cc3fb1d51b2aae3eb98872de157ae35c0b2e358fd2"
 "checksum cssparser-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "079adec4af52bb5275eadd004292028c79eb3c5f5b4ee8086a36d4197032f6df"
 "checksum darling 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9861a8495606435477df581bc858ccf15a3469747edf175b94a4704fd9aaedac"
 "checksum darling_core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1486a8b00b45062c997f767738178b43219133dd0c8c826cb811e60563810821"
 "checksum darling_macro 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8a86ec160aa0c3dd492dd4a14ec8104ad8f1a9400a820624db857998cc1f80f9"
 "checksum dbghelp-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "97590ba53bcb8ac28279161ca943a924d1fd4a8fb3fa63302591647c4fc5b850"
 "checksum dtoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "09c3753c3db574d215cba4ea76018483895d7bff25a31b49ba45db21c48e50ab"
 "checksum dtoa-short 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fe6f727b406462fd57c95fed84d1b0dbfb5f0136fcac005adba9ea0367c05cc8"
 "checksum dwrote 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "36e3b27cd0b8a68e00f07e8d8e1e4f4d8a6b8b873290a734f63bd56d792d23e1"
--- a/toolkit/library/rust/Cargo.lock
+++ b/toolkit/library/rust/Cargo.lock
@@ -289,21 +289,22 @@ source = "registry+https://github.com/ru
 dependencies = [
  "core-foundation 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "core-graphics 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "cssparser"
-version = "0.19.5"
+version = "0.20.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cssparser-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "dtoa-short 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "matches 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)",
  "procedural-masquerade 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
  "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -540,17 +541,17 @@ dependencies = [
  "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "geckoservo"
 version = "0.0.1"
 dependencies = [
  "atomic_refcell 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "nsstring_vendor 0.1.0",
  "parking_lot 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "selectors 0.19.0",
  "servo_arc 0.0.1",
  "style 0.0.1",
@@ -1091,17 +1092,17 @@ name = "scopeguard"
 version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "selectors"
 version = "0.19.0"
 dependencies = [
  "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "matches 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)",
  "phf_codegen 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)",
  "precomputed-hash 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "servo_arc 0.0.1",
  "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1193,17 +1194,17 @@ dependencies = [
  "arraydeque 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "arrayvec 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)",
  "atomic_refcell 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "bindgen 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "bit-vec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "euclid 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "hashglobe 0.1.0",
  "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
  "itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1242,17 +1243,17 @@ dependencies = [
 ]
 
 [[package]]
 name = "style_traits"
 version = "0.0.1"
 dependencies = [
  "app_units 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "euclid 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "selectors 0.19.0",
 ]
 
 [[package]]
 name = "syn"
 version = "0.11.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1559,17 +1560,17 @@ dependencies = [
 "checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c"
 "checksum clang-sys 0.19.0 (registry+https://github.com/rust-lang/crates.io-index)" = "611ec2e3a7623afd8a8c0d027887b6b55759d894abbf5fe11b9dc11b50d5b49a"
 "checksum clap 2.24.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6b8f69e518f967224e628896b54e41ff6acfb4dcfefc5076325c36525dac900f"
 "checksum coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c06169f5beb7e31c7c67ebf5540b8b472d23e3eade3b2ec7d1f5b504a85f91bd"
 "checksum core-foundation 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f51ce3b8ebe311c56de14231eb57572c15abebd2d32b3bcb99bcdb9c101f5ac3"
 "checksum core-foundation-sys 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "41115a6aa5d3e1e5ef98148373f25971d1fad53818553f216495f9e67e90a624"
 "checksum core-graphics 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a9f841e9637adec70838c537cae52cb4c751cc6514ad05669b51d107c2021c79"
 "checksum core-text 6.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "16ce16d9ed00181016c11ff48e561314bec92bfbce9fe48f319366618d4e5de6"
-"checksum cssparser 0.19.5 (registry+https://github.com/rust-lang/crates.io-index)" = "dc476dc0960774aa1cabfd0044de7d4585a8f2f8a3ef72e6d9d1e16c1e2492b1"
+"checksum cssparser 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2334576d63647dd96a6238cc3fb1d51b2aae3eb98872de157ae35c0b2e358fd2"
 "checksum cssparser-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "079adec4af52bb5275eadd004292028c79eb3c5f5b4ee8086a36d4197032f6df"
 "checksum darling 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9861a8495606435477df581bc858ccf15a3469747edf175b94a4704fd9aaedac"
 "checksum darling_core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1486a8b00b45062c997f767738178b43219133dd0c8c826cb811e60563810821"
 "checksum darling_macro 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8a86ec160aa0c3dd492dd4a14ec8104ad8f1a9400a820624db857998cc1f80f9"
 "checksum dbghelp-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "97590ba53bcb8ac28279161ca943a924d1fd4a8fb3fa63302591647c4fc5b850"
 "checksum dtoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "09c3753c3db574d215cba4ea76018483895d7bff25a31b49ba45db21c48e50ab"
 "checksum dtoa-short 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fe6f727b406462fd57c95fed84d1b0dbfb5f0136fcac005adba9ea0367c05cc8"
 "checksum dwrote 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "36e3b27cd0b8a68e00f07e8d8e1e4f4d8a6b8b873290a734f63bd56d792d23e1"