Diffstat (limited to 'third_party/rust/cssparser/src')
-rw-r--r--  third_party/rust/cssparser/src/color.rs                  | 63
-rw-r--r--  third_party/rust/cssparser/src/cow_rc_str.rs             |  6
-rw-r--r--  third_party/rust/cssparser/src/macros.rs                 |  4
-rw-r--r--  third_party/rust/cssparser/src/nth.rs                    | 18
-rw-r--r--  third_party/rust/cssparser/src/parser.rs                 | 56
-rw-r--r--  third_party/rust/cssparser/src/rules_and_declarations.rs | 87
-rw-r--r--  third_party/rust/cssparser/src/serializer.rs             | 36
-rw-r--r--  third_party/rust/cssparser/src/tests.rs                  | 74
-rw-r--r--  third_party/rust/cssparser/src/tokenizer.rs              | 86
-rw-r--r--  third_party/rust/cssparser/src/unicode_range.rs          | 20
10 files changed, 237 insertions(+), 213 deletions(-)
diff --git a/third_party/rust/cssparser/src/color.rs b/third_party/rust/cssparser/src/color.rs
index d5f9a5c0e6..978936e01a 100644
--- a/third_party/rust/cssparser/src/color.rs
+++ b/third_party/rust/cssparser/src/color.rs
@@ -14,9 +14,8 @@
/// The opaque alpha value of 1.0.
pub const OPAQUE: f32 = 1.0;
-use crate::ToCss;
+use crate::{BasicParseError, Parser, ToCss, Token};
use std::fmt;
-use std::str::FromStr;
/// Clamp a 0..1 number to the 0..255 range of a u8.
///
@@ -76,7 +75,9 @@ pub fn serialize_color_alpha(
/// A Predefined color space specified in:
/// <https://drafts.csswg.org/css-color-4/#predefined>
-#[derive(Clone, Copy, PartialEq, Debug)]
+#[derive(Clone, Copy, Eq, PartialEq, Debug)]
+#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
+#[cfg_attr(feature = "serde", serde(tag = "type"))]
pub enum PredefinedColorSpace {
/// <https://drafts.csswg.org/css-color-4/#predefined-sRGB>
Srgb,
@@ -97,36 +98,21 @@ pub enum PredefinedColorSpace {
}
impl PredefinedColorSpace {
- /// Returns the string value of the predefined color space.
- pub fn as_str(&self) -> &str {
- match self {
- PredefinedColorSpace::Srgb => "srgb",
- PredefinedColorSpace::SrgbLinear => "srgb-linear",
- PredefinedColorSpace::DisplayP3 => "display-p3",
- PredefinedColorSpace::A98Rgb => "a98-rgb",
- PredefinedColorSpace::ProphotoRgb => "prophoto-rgb",
- PredefinedColorSpace::Rec2020 => "rec2020",
- PredefinedColorSpace::XyzD50 => "xyz-d50",
- PredefinedColorSpace::XyzD65 => "xyz-d65",
- }
- }
-}
-
-impl FromStr for PredefinedColorSpace {
- type Err = ();
+ /// Parse a PredefinedColorSpace from the given input.
+ pub fn parse<'i>(input: &mut Parser<'i, '_>) -> Result<Self, BasicParseError<'i>> {
+ let location = input.current_source_location();
- fn from_str(s: &str) -> Result<Self, Self::Err> {
- Ok(match_ignore_ascii_case! { s,
- "srgb" => PredefinedColorSpace::Srgb,
- "srgb-linear" => PredefinedColorSpace::SrgbLinear,
- "display-p3" => PredefinedColorSpace::DisplayP3,
- "a98-rgb" => PredefinedColorSpace::A98Rgb,
- "prophoto-rgb" => PredefinedColorSpace::ProphotoRgb,
- "rec2020" => PredefinedColorSpace::Rec2020,
- "xyz-d50" => PredefinedColorSpace::XyzD50,
- "xyz" | "xyz-d65" => PredefinedColorSpace::XyzD65,
-
- _ => return Err(()),
+ let ident = input.expect_ident()?;
+ Ok(match_ignore_ascii_case! { ident,
+ "srgb" => Self::Srgb,
+ "srgb-linear" => Self::SrgbLinear,
+ "display-p3" => Self::DisplayP3,
+ "a98-rgb" => Self::A98Rgb,
+ "prophoto-rgb" => Self::ProphotoRgb,
+ "rec2020" => Self::Rec2020,
+ "xyz-d50" => Self::XyzD50,
+ "xyz" | "xyz-d65" => Self::XyzD65,
+ _ => return Err(location.new_basic_unexpected_token_error(Token::Ident(ident.clone()))),
})
}
}
@@ -136,11 +122,21 @@ impl ToCss for PredefinedColorSpace {
where
W: fmt::Write,
{
- dest.write_str(self.as_str())
+ dest.write_str(match self {
+ Self::Srgb => "srgb",
+ Self::SrgbLinear => "srgb-linear",
+ Self::DisplayP3 => "display-p3",
+ Self::A98Rgb => "a98-rgb",
+ Self::ProphotoRgb => "prophoto-rgb",
+ Self::Rec2020 => "rec2020",
+ Self::XyzD50 => "xyz-d50",
+ Self::XyzD65 => "xyz-d65",
+ })
}
}
/// Parse a color hash, without the leading '#' character.
+#[allow(clippy::result_unit_err)]
#[inline]
pub fn parse_hash_color(value: &[u8]) -> Result<(u8, u8, u8, f32), ()> {
Ok(match value.len() {
@@ -328,6 +324,7 @@ ascii_case_insensitive_phf_map! {
/// Returns the named color with the given name.
/// <https://drafts.csswg.org/css-color-4/#typedef-named-color>
+#[allow(clippy::result_unit_err)]
#[inline]
pub fn parse_named_color(ident: &str) -> Result<(u8, u8, u8), ()> {
named_colors::get(ident).copied().ok_or(())
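
With this change, color-space parsing moves from `FromStr` (which returned a bare `Err(())`) to a `Parser`-driven entry point that reports the offending token and its location. A minimal sketch of the new call shape, assuming this vendored version and the `cssparser::color` module path:

    use cssparser::color::PredefinedColorSpace; // module path assumed for this version
    use cssparser::{Parser, ParserInput};

    fn main() {
        let mut input = ParserInput::new("display-p3");
        let mut parser = Parser::new(&mut input);
        // parse() consumes an <ident-token>; an unknown ident now yields a
        // BasicParseError carrying the token and its source location.
        match PredefinedColorSpace::parse(&mut parser) {
            Ok(space) => println!("parsed {:?}", space),
            Err(e) => println!("error at {}:{}", e.location.line, e.location.column),
        }
    }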
diff --git a/third_party/rust/cssparser/src/cow_rc_str.rs b/third_party/rust/cssparser/src/cow_rc_str.rs
index ecf14a0a75..2650848111 100644
--- a/third_party/rust/cssparser/src/cow_rc_str.rs
+++ b/third_party/rust/cssparser/src/cow_rc_str.rs
@@ -4,7 +4,7 @@
use std::borrow::{Borrow, Cow};
use std::rc::Rc;
-use std::{cmp, fmt, hash, marker, mem, ops, slice, str, ptr};
+use std::{cmp, fmt, hash, marker, mem, ops, ptr, slice, str};
/// A string that is either shared (heap-allocated and reference-counted) or borrowed.
///
@@ -23,9 +23,9 @@ pub struct CowRcStr<'a> {
phantom: marker::PhantomData<Result<&'a str, Rc<String>>>,
}
-fn _static_assert_same_size<'a>() {
+fn _static_assert_same_size() {
// "Instantiate" the generic function without calling it.
- let _ = mem::transmute::<CowRcStr<'a>, Option<CowRcStr<'a>>>;
+ let _ = mem::transmute::<CowRcStr<'_>, Option<CowRcStr<'_>>>;
}
impl<'a> From<Cow<'a, str>> for CowRcStr<'a> {
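
The size-assertion change above merely drops an unnecessary lifetime parameter; the underlying trick is worth spelling out: naming (not calling) a `transmute` instantiation only compiles when the two types have the same size, which proves `Option<CowRcStr>` costs no extra space. A standalone sketch of the same pattern using plain references:

    use std::mem;

    // Referencing transmute::<A, B> forces the size_of::<A>() == size_of::<B>()
    // check at compile time, with no runtime cost.
    fn _assert_option_is_free() {
        let _ = mem::transmute::<&str, Option<&str>>;
    }

    fn main() {
        // The non-null niche of the reference stores the None case.
        assert_eq!(mem::size_of::<&str>(), mem::size_of::<Option<&str>>());
    }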
diff --git a/third_party/rust/cssparser/src/macros.rs b/third_party/rust/cssparser/src/macros.rs
index fc4b77a194..67d8365884 100644
--- a/third_party/rust/cssparser/src/macros.rs
+++ b/third_party/rust/cssparser/src/macros.rs
@@ -182,7 +182,7 @@ pub fn _cssparser_internal_to_lowercase<'a>(
let input_bytes =
unsafe { &*(input.as_bytes() as *const [u8] as *const [MaybeUninit<u8>]) };
- buffer.copy_from_slice(&*input_bytes);
+ buffer.copy_from_slice(input_bytes);
// Same as above re layout, plus these bytes have been initialized:
let buffer = unsafe { &mut *(buffer as *mut [MaybeUninit<u8>] as *mut [u8]) };
@@ -195,7 +195,7 @@ pub fn _cssparser_internal_to_lowercase<'a>(
}
Some(
- match input.bytes().position(|byte| matches!(byte, b'A'..=b'Z')) {
+ match input.bytes().position(|byte| byte.is_ascii_uppercase()) {
Some(first_uppercase) => make_ascii_lowercase(buffer, input, first_uppercase),
// common case: input is already lower-case
None => input,
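
Both macros.rs hunks are behavior-preserving: the `position` scan is the fast path that avoids any copy when the input is already lower-case. A simplified sketch of that idea (hypothetical helper, not the crate's internal API):

    use std::borrow::Cow;

    // Allocate only when an ASCII uppercase byte is actually present; the
    // common already-lowercase case borrows the input untouched.
    fn to_lower_fast(input: &str) -> Cow<'_, str> {
        match input.bytes().position(|b| b.is_ascii_uppercase()) {
            None => Cow::Borrowed(input),
            Some(_) => Cow::Owned(input.to_ascii_lowercase()),
        }
    }

    fn main() {
        assert!(matches!(to_lower_fast("rgb"), Cow::Borrowed(_)));
        assert_eq!(to_lower_fast("RGB"), "rgb");
    }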
diff --git a/third_party/rust/cssparser/src/nth.rs b/third_party/rust/cssparser/src/nth.rs
index 518de4d9b4..4fe5a6bc04 100644
--- a/third_party/rust/cssparser/src/nth.rs
+++ b/third_party/rust/cssparser/src/nth.rs
@@ -7,8 +7,8 @@ use super::{BasicParseError, Parser, ParserInput, Token};
/// Parse the *An+B* notation, as found in the `:nth-child()` selector.
/// The input is typically the arguments of a function,
/// in which case the caller needs to check if the arguments’ parser is exhausted.
-/// Return `Ok((A, B))`, or `Err(())` for a syntax error.
-pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), BasicParseError<'i>> {
+/// Return `Ok((A, B))`, or an `Err(..)` for a syntax error.
+pub fn parse_nth<'i>(input: &mut Parser<'i, '_>) -> Result<(i32, i32), BasicParseError<'i>> {
match *input.next()? {
Token::Number {
int_value: Some(b), ..
@@ -22,7 +22,7 @@ pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), Basic
unit,
"n" => Ok(parse_b(input, a)?),
"n-" => Ok(parse_signless_b(input, a, -1)?),
- _ => match parse_n_dash_digits(&*unit) {
+ _ => match parse_n_dash_digits(unit) {
Ok(b) => Ok((a, b)),
Err(()) => {
let unit = unit.clone();
@@ -40,8 +40,8 @@ pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), Basic
"n-" => Ok(parse_signless_b(input, 1, -1)?),
"-n-" => Ok(parse_signless_b(input, -1, -1)?),
_ => {
- let (slice, a) = if value.starts_with("-") {
- (&value[1..], -1)
+ let (slice, a) = if let Some(stripped) = value.strip_prefix('-') {
+ (stripped, -1)
} else {
(&**value, 1)
};
@@ -81,7 +81,7 @@ pub fn parse_nth<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(i32, i32), Basic
}
}
-fn parse_b<'i, 't>(input: &mut Parser<'i, 't>, a: i32) -> Result<(i32, i32), BasicParseError<'i>> {
+fn parse_b<'i>(input: &mut Parser<'i, '_>, a: i32) -> Result<(i32, i32), BasicParseError<'i>> {
let start = input.state();
match input.next() {
Ok(&Token::Delim('+')) => parse_signless_b(input, a, 1),
@@ -98,8 +98,8 @@ fn parse_b<'i, 't>(input: &mut Parser<'i, 't>, a: i32) -> Result<(i32, i32), Bas
}
}
-fn parse_signless_b<'i, 't>(
- input: &mut Parser<'i, 't>,
+fn parse_signless_b<'i>(
+ input: &mut Parser<'i, '_>,
a: i32,
b_sign: i32,
) -> Result<(i32, i32), BasicParseError<'i>> {
@@ -118,7 +118,7 @@ fn parse_n_dash_digits(string: &str) -> Result<i32, ()> {
let bytes = string.as_bytes();
if bytes.len() >= 3
&& bytes[..2].eq_ignore_ascii_case(b"n-")
- && bytes[2..].iter().all(|&c| matches!(c, b'0'..=b'9'))
+ && bytes[2..].iter().all(|&c| c.is_ascii_digit())
{
Ok(parse_number_saturate(&string[1..]).unwrap()) // Include the minus sign
} else {
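
The nth.rs changes are lifetime elision plus clippy-suggested equivalents (`strip_prefix`, `is_ascii_digit`); parsing behavior is unchanged. For reference, a minimal use of the public entry point, assuming the crate-root re-export of `parse_nth`:

    use cssparser::{parse_nth, Parser, ParserInput};

    fn main() {
        // The :nth-child(2n+1) argument parses to the (A, B) pair.
        let mut input = ParserInput::new("2n+1");
        let mut parser = Parser::new(&mut input);
        assert_eq!(parse_nth(&mut parser), Ok((2, 1)));
    }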
diff --git a/third_party/rust/cssparser/src/parser.rs b/third_party/rust/cssparser/src/parser.rs
index dd7777a2d8..dd35fc50ed 100644
--- a/third_party/rust/cssparser/src/parser.rs
+++ b/third_party/rust/cssparser/src/parser.rs
@@ -53,7 +53,7 @@ impl ParserState {
///
/// Would need to scan the whole {} block to find a semicolon, only for parsing getting restarted
/// as a qualified rule later.
-#[derive(Clone, Copy, Debug, PartialEq)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ParseUntilErrorBehavior {
/// Consume until we see the relevant delimiter or the end of the stream.
Consume,
@@ -116,18 +116,30 @@ impl<'i, T> From<BasicParseError<'i>> for ParseError<'i, T> {
impl SourceLocation {
/// Create a new BasicParseError at this location for an unexpected token
#[inline]
- pub fn new_basic_unexpected_token_error<'i>(self, token: Token<'i>) -> BasicParseError<'i> {
+ pub fn new_basic_unexpected_token_error(self, token: Token<'_>) -> BasicParseError<'_> {
+ self.new_basic_error(BasicParseErrorKind::UnexpectedToken(token))
+ }
+
+ /// Create a new BasicParseError at this location
+ #[inline]
+ pub fn new_basic_error(self, kind: BasicParseErrorKind<'_>) -> BasicParseError<'_> {
BasicParseError {
- kind: BasicParseErrorKind::UnexpectedToken(token),
+ kind,
location: self,
}
}
/// Create a new ParseError at this location for an unexpected token
#[inline]
- pub fn new_unexpected_token_error<'i, E>(self, token: Token<'i>) -> ParseError<'i, E> {
+ pub fn new_unexpected_token_error<E>(self, token: Token<'_>) -> ParseError<'_, E> {
+ self.new_error(BasicParseErrorKind::UnexpectedToken(token))
+ }
+
+ /// Create a new basic ParseError at this location
+ #[inline]
+ pub fn new_error<E>(self, kind: BasicParseErrorKind<'_>) -> ParseError<'_, E> {
ParseError {
- kind: ParseErrorKind::Basic(BasicParseErrorKind::UnexpectedToken(token)),
+ kind: ParseErrorKind::Basic(kind),
location: self,
}
}
@@ -450,19 +462,13 @@ impl<'i: 't, 't> Parser<'i, 't> {
/// Create a new BasicParseError at the current location
#[inline]
pub fn new_basic_error(&self, kind: BasicParseErrorKind<'i>) -> BasicParseError<'i> {
- BasicParseError {
- kind,
- location: self.current_source_location(),
- }
+ self.current_source_location().new_basic_error(kind)
}
/// Create a new basic ParseError at the current location
#[inline]
pub fn new_error<E>(&self, kind: BasicParseErrorKind<'i>) -> ParseError<'i, E> {
- ParseError {
- kind: ParseErrorKind::Basic(kind),
- location: self.current_source_location(),
- }
+ self.current_source_location().new_error(kind)
}
/// Create a new custom BasicParseError at the current location
@@ -606,6 +612,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
/// See the `Parser::parse_nested_block` method to parse the content of functions or blocks.
///
/// This only returns a closing token when it is unmatched (and therefore an error).
+ #[allow(clippy::should_implement_trait)]
pub fn next(&mut self) -> Result<&Token<'i>, BasicParseError<'i>> {
self.skip_whitespace();
self.next_including_whitespace_and_comments()
@@ -652,9 +659,8 @@ impl<'i: 't, 't> Parser<'i, 't> {
let token = if using_cached_token {
let cached_token = self.input.cached_token.as_ref().unwrap();
self.input.tokenizer.reset(&cached_token.end_state);
- match cached_token.token {
- Token::Function(ref name) => self.input.tokenizer.see_function(name),
- _ => {}
+ if let Token::Function(ref name) = cached_token.token {
+ self.input.tokenizer.see_function(name)
}
&cached_token.token
} else {
@@ -678,7 +684,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
}
/// Have the given closure parse something, then check that the input is exhausted.
- /// The result is overridden to `Err(())` if some input remains.
+ /// The result is overridden to an `Err(..)` if some input remains.
///
/// This can help tell e.g. `color: green;` from `color: green 4px;`
#[inline]
@@ -699,7 +705,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
///
/// Successful results are accumulated in a vector.
///
- /// This method returns `Err(())` the first time that a closure call does,
+ /// This method returns an `Err(..)` the first time that a closure call does,
/// or if a closure call leaves some input before the next comma or the end
/// of the input.
#[inline]
@@ -748,7 +754,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
match self.parse_until_before(Delimiter::Comma, &mut parse_one) {
Ok(v) => values.push(v),
Err(e) if !ignore_errors => return Err(e),
- Err(_) => {},
+ Err(_) => {}
}
match self.next() {
Err(_) => return Ok(values),
@@ -768,7 +774,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
/// The given closure is called with a "delimited" parser
/// that stops at the end of the block or function (at the matching closing token).
///
- /// The result is overridden to `Err(())` if the closure leaves some input before that point.
+ /// The result is overridden to an `Err(..)` if the closure leaves some input before that point.
#[inline]
pub fn parse_nested_block<F, T, E>(&mut self, parse: F) -> Result<T, ParseError<'i, E>>
where
@@ -784,7 +790,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
/// that stops before the first character at this block/function nesting level
/// that matches the given set of delimiters, or at the end of the input.
///
- /// The result is overridden to `Err(())` if the closure leaves some input before that point.
+ /// The result is overridden to an `Err(..)` if the closure leaves some input before that point.
#[inline]
pub fn parse_until_before<F, T, E>(
&mut self,
@@ -835,7 +841,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
/// expect_ident, but clone the CowRcStr
#[inline]
pub fn expect_ident_cloned(&mut self) -> Result<CowRcStr<'i>, BasicParseError<'i>> {
- self.expect_ident().map(|s| s.clone())
+ self.expect_ident().cloned()
}
/// Parse a <ident-token> whose unescaped value is an ASCII-insensitive match for the given value.
@@ -860,7 +866,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
/// expect_string, but clone the CowRcStr
#[inline]
pub fn expect_string_cloned(&mut self) -> Result<CowRcStr<'i>, BasicParseError<'i>> {
- self.expect_string().map(|s| s.clone())
+ self.expect_string().cloned()
}
/// Parse either a <ident-token> or a <string-token>, and return the unescaped value.
@@ -879,7 +885,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
Token::UnquotedUrl(ref value) => Ok(value.clone()),
Token::Function(ref name) if name.eq_ignore_ascii_case("url") => {
self.parse_nested_block(|input| {
- input.expect_string().map_err(Into::into).map(|s| s.clone())
+ input.expect_string().map_err(Into::into).cloned()
})
.map_err(ParseError::<()>::basic)
}
@@ -894,7 +900,7 @@ impl<'i: 't, 't> Parser<'i, 't> {
Token::QuotedString(ref value) => Ok(value.clone()),
Token::Function(ref name) if name.eq_ignore_ascii_case("url") => {
self.parse_nested_block(|input| {
- input.expect_string().map_err(Into::into).map(|s| s.clone())
+ input.expect_string().map_err(Into::into).cloned()
})
.map_err(ParseError::<()>::basic)
}
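
The net effect of the parser.rs hunks is that error construction now lives on `SourceLocation`, with the `Parser` methods reduced to thin forwarders, so code holding only a location can build identical errors. A minimal sketch of the new helpers:

    use cssparser::{BasicParseErrorKind, Parser, ParserInput};

    fn main() {
        let mut input = ParserInput::new("green");
        let parser = Parser::new(&mut input);
        let location = parser.current_source_location();
        // Same error Parser::new_basic_error would now produce by forwarding.
        let err = location.new_basic_error(BasicParseErrorKind::EndOfInput);
        assert_eq!(err.location, location);
    }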
diff --git a/third_party/rust/cssparser/src/rules_and_declarations.rs b/third_party/rust/cssparser/src/rules_and_declarations.rs
index fb33a7d0cd..48da02b5cc 100644
--- a/third_party/rust/cssparser/src/rules_and_declarations.rs
+++ b/third_party/rust/cssparser/src/rules_and_declarations.rs
@@ -4,9 +4,7 @@
// https://drafts.csswg.org/css-syntax/#parsing
-use super::{
- BasicParseError, BasicParseErrorKind, Delimiter, Delimiters, ParseError, Parser, Token,
-};
+use super::{BasicParseError, BasicParseErrorKind, Delimiter, ParseError, Parser, Token};
use crate::cow_rc_str::CowRcStr;
use crate::parser::{parse_nested_block, parse_until_after, ParseUntilErrorBehavior, ParserState};
@@ -14,7 +12,7 @@ use crate::parser::{parse_nested_block, parse_until_after, ParseUntilErrorBehavi
///
/// Typical usage is `input.try_parse(parse_important).is_ok()`
/// at the end of a `DeclarationParser::parse_value` implementation.
-pub fn parse_important<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(), BasicParseError<'i>> {
+pub fn parse_important<'i>(input: &mut Parser<'i, '_>) -> Result<(), BasicParseError<'i>> {
input.expect_delim('!')?;
input.expect_ident_matching("important")
}
@@ -34,7 +32,7 @@ pub trait DeclarationParser<'i> {
///
/// Return the finished representation for the declaration
/// as returned by `DeclarationListParser::next`,
- /// or `Err(())` to ignore the entire declaration as invalid.
+ /// or an `Err(..)` to ignore the entire declaration as invalid.
///
/// Declaration name matching should be case-insensitive in the ASCII range.
/// This can be done with `std::ascii::Ascii::eq_ignore_ascii_case`,
@@ -78,7 +76,7 @@ pub trait AtRuleParser<'i> {
/// Parse the prelude of an at-rule with the given `name`.
///
/// Return the representation of the prelude and the type of at-rule,
- /// or `Err(())` to ignore the entire at-rule as invalid.
+ /// or an `Err(..)` to ignore the entire at-rule as invalid.
///
/// The prelude is the part after the at-keyword
/// and before the `;` semicolon or `{ /* ... */ }` block.
@@ -106,6 +104,7 @@ pub trait AtRuleParser<'i> {
/// This is only called when `parse_prelude` returned `WithoutBlock`, and
/// either the `;` semicolon indeed follows the prelude, or parser is at
/// the end of the input.
+ #[allow(clippy::result_unit_err)]
fn rule_without_block(
&mut self,
prelude: Self::Prelude,
@@ -122,7 +121,7 @@ pub trait AtRuleParser<'i> {
///
/// Return the finished representation of the at-rule
/// as returned by `RuleListParser::next` or `DeclarationListParser::next`,
- /// or `Err(())` to ignore the entire at-rule as invalid.
+ /// or an `Err(..)` to ignore the entire at-rule as invalid.
///
/// This is only called when `parse_prelude` returned `WithBlock`, and a block
/// was indeed found following the prelude.
@@ -161,7 +160,7 @@ pub trait QualifiedRuleParser<'i> {
/// Parse the prelude of a qualified rule. For style rules, this is a Selector list.
///
/// Return the representation of the prelude,
- /// or `Err(())` to ignore the entire at-rule as invalid.
+ /// or an `Err(..)` to ignore the entire qualified rule as invalid.
///
/// The prelude is the part before the `{ /* ... */ }` block.
///
@@ -180,7 +179,7 @@ pub trait QualifiedRuleParser<'i> {
///
/// Return the finished representation of the qualified rule
/// as returned by `RuleListParser::next`,
- /// or `Err(())` to ignore the entire at-rule as invalid.
+ /// or an `Err(..)` to ignore the entire qualified rule as invalid.
fn parse_block<'t>(
&mut self,
prelude: Self::Prelude,
@@ -253,10 +252,10 @@ where
self.input.skip_whitespace();
let start = self.input.state();
match self.input.next_including_whitespace_and_comments().ok()? {
- Token::CloseCurlyBracket |
- Token::WhiteSpace(..) |
- Token::Semicolon |
- Token::Comment(..) => continue,
+ Token::CloseCurlyBracket
+ | Token::WhiteSpace(..)
+ | Token::Semicolon
+ | Token::Comment(..) => continue,
Token::AtKeyword(ref name) => {
let name = name.clone();
return Some(parse_at_rule(&start, name, self.input, &mut *self.parser));
@@ -292,9 +291,9 @@ where
&start,
self.input,
&mut *self.parser,
- Delimiter::Semicolon | Delimiter::CurlyBracketBlock,
+ /* nested = */ true,
) {
- return Some(Ok(qual))
+ return Some(Ok(qual));
}
}
@@ -303,12 +302,8 @@ where
token => {
let result = if self.parser.parse_qualified() {
self.input.reset(&start);
- let delimiters = if self.parser.parse_declarations() {
- Delimiter::Semicolon | Delimiter::CurlyBracketBlock
- } else {
- Delimiter::CurlyBracketBlock
- };
- parse_qualified_rule(&start, self.input, &mut *self.parser, delimiters)
+ let nested = self.parser.parse_declarations();
+ parse_qualified_rule(&start, self.input, &mut *self.parser, nested)
} else {
let token = token.clone();
self.input.parse_until_after(Delimiter::Semicolon, |_| {
@@ -353,7 +348,7 @@ where
}
}
-/// `RuleListParser` is an iterator that yields `Ok(_)` for a rule or `Err(())` for an invalid one.
+/// `StyleSheetParser` is an iterator that yields `Ok(_)` for a rule or an `Err(..)` for an invalid one.
impl<'i, 't, 'a, R, P, E: 'i> Iterator for StyleSheetParser<'i, 't, 'a, P>
where
P: QualifiedRuleParser<'i, QualifiedRule = R, Error = E>
@@ -367,7 +362,7 @@ where
let start = self.input.state();
let at_keyword = match self.input.next_byte()? {
b'@' => match self.input.next_including_whitespace_and_comments() {
- Ok(&Token::AtKeyword(ref name)) => Some(name.clone()),
+ Ok(Token::AtKeyword(name)) => Some(name.clone()),
_ => {
self.input.reset(&start);
None
@@ -397,7 +392,7 @@ where
&start,
self.input,
&mut *self.parser,
- Delimiter::CurlyBracketBlock,
+ /* nested = */ false,
);
return Some(result.map_err(|e| (e, self.input.slice_from(start.position()))));
}
@@ -450,7 +445,7 @@ where
if let Some(name) = at_keyword {
parse_at_rule(&start, name, input, parser).map_err(|e| e.0)
} else {
- parse_qualified_rule(&start, input, parser, Delimiter::CurlyBracketBlock)
+ parse_qualified_rule(&start, input, parser, /* nested = */ false)
}
})
}
@@ -490,18 +485,54 @@ where
}
}
+// If the first two non-<whitespace-token> values of rule’s prelude are an <ident-token> whose
+// value starts with "--" followed by a <colon-token>, then...
+fn looks_like_a_custom_property(input: &mut Parser) -> bool {
+ let ident = match input.expect_ident() {
+ Ok(i) => i,
+ Err(..) => return false,
+ };
+ ident.starts_with("--") && input.expect_colon().is_ok()
+}
+
+// https://drafts.csswg.org/css-syntax/#consume-a-qualified-rule
fn parse_qualified_rule<'i, 't, P, E>(
start: &ParserState,
input: &mut Parser<'i, 't>,
parser: &mut P,
- delimiters: Delimiters,
+ nested: bool,
) -> Result<<P as QualifiedRuleParser<'i>>::QualifiedRule, ParseError<'i, E>>
where
P: QualifiedRuleParser<'i, Error = E>,
{
- let prelude = input.parse_until_before(delimiters, |input| parser.parse_prelude(input));
+ input.skip_whitespace();
+ let prelude = {
+ let state = input.state();
+ if looks_like_a_custom_property(input) {
+ // If nested is true, consume the remnants of a bad declaration from input, with
+ // nested set to true, and return nothing.
+ // If nested is false, consume a block from input, and return nothing.
+ let delimiters = if nested {
+ Delimiter::Semicolon
+ } else {
+ Delimiter::CurlyBracketBlock
+ };
+ let _: Result<(), ParseError<()>> = input.parse_until_after(delimiters, |_| Ok(()));
+ return Err(state
+ .source_location()
+ .new_error(BasicParseErrorKind::QualifiedRuleInvalid));
+ }
+ let delimiters = if nested {
+ Delimiter::Semicolon | Delimiter::CurlyBracketBlock
+ } else {
+ Delimiter::CurlyBracketBlock
+ };
+ input.reset(&state);
+ input.parse_until_before(delimiters, |input| parser.parse_prelude(input))
+ };
+
input.expect_curly_bracket_block()?;
// Do this here so that we consume the `{` even if the prelude is `Err`.
let prelude = prelude?;
- parse_nested_block(input, |input| parser.parse_block(prelude, &start, input))
+ parse_nested_block(input, |input| parser.parse_block(prelude, start, input))
}
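
`looks_like_a_custom_property` encodes the css-syntax rule that a qualified rule whose prelude starts with `--ident:` is invalid (it would otherwise mis-parse a malformed custom property as a selector). The helper itself relies on the caller saving and resetting parser state; a self-contained sketch of the same two-token lookahead using `try_parse`, which rewinds on failure:

    use cssparser::{BasicParseError, BasicParseErrorKind, Parser, ParserInput};

    // Lookahead for an <ident-token> starting with "--" followed by a
    // <colon-token>; try_parse restores the parser state whenever the
    // closure returns Err.
    fn starts_like_custom_property<'i>(input: &mut Parser<'i, '_>) -> bool {
        input
            .try_parse(|i| -> Result<(), BasicParseError<'i>> {
                let ident = i.expect_ident()?;
                if !ident.starts_with("--") {
                    // Arbitrary error kind; only the Err-ness matters here.
                    return Err(i.new_basic_error(BasicParseErrorKind::EndOfInput));
                }
                i.expect_colon()?;
                Ok(())
            })
            .is_ok()
    }

    fn main() {
        let mut input = ParserInput::new("--foo: bar");
        let mut parser = Parser::new(&mut input);
        assert!(starts_like_custom_property(&mut parser));
    }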
diff --git a/third_party/rust/cssparser/src/serializer.rs b/third_party/rust/cssparser/src/serializer.rs
index 09c224022d..3c6e31cb84 100644
--- a/third_party/rust/cssparser/src/serializer.rs
+++ b/third_party/rust/cssparser/src/serializer.rs
@@ -3,8 +3,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::match_byte;
-use dtoa_short::{self, Notation};
-use itoa;
+use dtoa_short::Notation;
use std::fmt::{self, Write};
use std::str;
@@ -49,10 +48,9 @@ where
dtoa_short::write(dest, value)?
};
- if int_value.is_none() && value.fract() == 0. {
- if !notation.decimal_point && !notation.scientific {
- dest.write_str(".0")?;
- }
+ if int_value.is_none() && value.fract() == 0. && !notation.decimal_point && !notation.scientific
+ {
+ dest.write_str(".0")?;
}
Ok(())
}
@@ -63,10 +61,10 @@ impl<'a> ToCss for Token<'a> {
W: fmt::Write,
{
match *self {
- Token::Ident(ref value) => serialize_identifier(&**value, dest)?,
+ Token::Ident(ref value) => serialize_identifier(value, dest)?,
Token::AtKeyword(ref value) => {
dest.write_str("@")?;
- serialize_identifier(&**value, dest)?;
+ serialize_identifier(value, dest)?;
}
Token::Hash(ref value) => {
dest.write_str("#")?;
@@ -74,12 +72,12 @@ impl<'a> ToCss for Token<'a> {
}
Token::IDHash(ref value) => {
dest.write_str("#")?;
- serialize_identifier(&**value, dest)?;
+ serialize_identifier(value, dest)?;
}
- Token::QuotedString(ref value) => serialize_string(&**value, dest)?,
+ Token::QuotedString(ref value) => serialize_string(value, dest)?,
Token::UnquotedUrl(ref value) => {
dest.write_str("url(")?;
- serialize_unquoted_url(&**value, dest)?;
+ serialize_unquoted_url(value, dest)?;
dest.write_str(")")?;
}
Token::Delim(value) => dest.write_char(value)?,
@@ -134,7 +132,7 @@ impl<'a> ToCss for Token<'a> {
Token::CDC => dest.write_str("-->")?,
Token::Function(ref name) => {
- serialize_identifier(&**name, dest)?;
+ serialize_identifier(name, dest)?;
dest.write_str("(")?;
}
Token::ParenthesisBlock => dest.write_str("(")?,
@@ -167,7 +165,7 @@ fn hex_escape<W>(ascii_byte: u8, dest: &mut W) -> fmt::Result
where
W: fmt::Write,
{
- static HEX_DIGITS: &'static [u8; 16] = b"0123456789abcdef";
+ static HEX_DIGITS: &[u8; 16] = b"0123456789abcdef";
let b3;
let b4;
let bytes = if ascii_byte > 0x0F {
@@ -179,7 +177,7 @@ where
b3 = [b'\\', HEX_DIGITS[ascii_byte as usize], b' '];
&b3[..]
};
- dest.write_str(unsafe { str::from_utf8_unchecked(&bytes) })
+ dest.write_str(unsafe { str::from_utf8_unchecked(bytes) })
}
fn char_escape<W>(ascii_byte: u8, dest: &mut W) -> fmt::Result
@@ -199,9 +197,9 @@ where
return Ok(());
}
- if value.starts_with("--") {
+ if let Some(value) = value.strip_prefix("--") {
dest.write_str("--")?;
- serialize_name(&value[2..], dest)
+ serialize_name(value, dest)
} else if value == "-" {
dest.write_str("\\-")
} else {
@@ -240,7 +238,7 @@ where
dest.write_str(&value[chunk_start..i])?;
if let Some(escaped) = escaped {
dest.write_str(escaped)?;
- } else if (b >= b'\x01' && b <= b'\x1F') || b == b'\x7F' {
+ } else if (b'\x01'..=b'\x1F').contains(&b) || b == b'\x7F' {
hex_escape(b, dest)?;
} else {
char_escape(b, dest)?;
@@ -340,7 +338,7 @@ where
macro_rules! impl_tocss_for_int {
($T: ty) => {
- impl<'a> ToCss for $T {
+ impl ToCss for $T {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result
where
W: fmt::Write,
@@ -363,7 +361,7 @@ impl_tocss_for_int!(u64);
macro_rules! impl_tocss_for_float {
($T: ty) => {
- impl<'a> ToCss for $T {
+ impl ToCss for $T {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result
where
W: fmt::Write,
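
The serializer.rs changes are deref and range-pattern cleanups plus dropped stale `'a` parameters; the output is byte-for-byte unchanged. For context, `hex_escape` emits a backslash, the code point in lowercase hex, and a trailing space so that a following hex digit is not absorbed into the escape. A hypothetical String-returning sketch of that shape:

    // Hypothetical helper mirroring the output shape of hex_escape.
    fn hex_escape_string(byte: u8) -> String {
        format!("\\{:x} ", byte)
    }

    fn main() {
        assert_eq!(hex_escape_string(0x7f), "\\7f "); // two-digit branch
        assert_eq!(hex_escape_string(0x0c), "\\c ");  // one-digit branch
    }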
diff --git a/third_party/rust/cssparser/src/tests.rs b/third_party/rust/cssparser/src/tests.rs
index f9dea19325..7389664de4 100644
--- a/third_party/rust/cssparser/src/tests.rs
+++ b/third_party/rust/cssparser/src/tests.rs
@@ -5,8 +5,7 @@
#[cfg(feature = "bench")]
extern crate test;
-use encoding_rs;
-use serde_json::{self, json, Map, Value};
+use serde_json::{json, Map, Value};
#[cfg(feature = "bench")]
use self::test::Bencher;
@@ -25,25 +24,23 @@ macro_rules! JArray {
}
fn almost_equals(a: &Value, b: &Value) -> bool {
- match (a, b) {
- (&Value::Number(ref a), &Value::Number(ref b)) => {
+ let var_name = match (a, b) {
+ (Value::Number(a), Value::Number(b)) => {
let a = a.as_f64().unwrap();
let b = b.as_f64().unwrap();
(a - b).abs() <= a.abs() * 1e-6
}
(&Value::Bool(a), &Value::Bool(b)) => a == b,
- (&Value::String(ref a), &Value::String(ref b)) => a == b,
- (&Value::Array(ref a), &Value::Array(ref b)) => {
- a.len() == b.len()
- && a.iter()
- .zip(b.iter())
- .all(|(ref a, ref b)| almost_equals(*a, *b))
+ (Value::String(a), Value::String(b)) => a == b,
+ (Value::Array(a), Value::Array(b)) => {
+ a.len() == b.len() && a.iter().zip(b.iter()).all(|(a, b)| almost_equals(a, b))
}
(&Value::Object(_), &Value::Object(_)) => panic!("Not implemented"),
(&Value::Null, &Value::Null) => true,
_ => false,
- }
+ };
+ var_name
}
fn normalize(json: &mut Value) {
@@ -77,7 +74,7 @@ fn assert_json_eq(results: Value, mut expected: Value, message: &str) {
}
}
-fn run_raw_json_tests<F: Fn(Value, Value) -> ()>(json_data: &str, run: F) {
+fn run_raw_json_tests<F: Fn(Value, Value)>(json_data: &str, run: F) {
let items = match serde_json::from_str(json_data) {
Ok(Value::Array(items)) => items,
other => panic!("Invalid JSON: {:?}", other),
@@ -242,7 +239,7 @@ fn stylesheet_from_bytes() {
fn get_string<'a>(map: &'a Map<String, Value>, key: &str) -> Option<&'a str> {
match map.get(key) {
- Some(&Value::String(ref s)) => Some(s),
+ Some(Value::String(s)) => Some(s),
Some(&Value::Null) => None,
None => None,
_ => panic!("Unexpected JSON"),
@@ -393,7 +390,7 @@ fn unicode_range() {
if input.is_exhausted() {
Ok(result)
} else {
- while let Ok(_) = input.next() {}
+ while input.next().is_ok() {}
Ok(None)
}
});
@@ -433,11 +430,9 @@ fn serializer(preserve_comments: bool) {
preserve_comments: bool,
) {
while let Ok(token) = if preserve_comments {
- input
- .next_including_whitespace_and_comments()
- .map(|t| t.clone())
+ input.next_including_whitespace_and_comments().cloned()
} else {
- input.next_including_whitespace().map(|t| t.clone())
+ input.next_including_whitespace().cloned()
} {
let token_type = token.serialization_type();
if !preserve_comments && previous_token.needs_separator_when_before(token_type)
@@ -593,8 +588,6 @@ fn line_numbers() {
#[test]
fn overflow() {
- use std::iter::repeat;
-
let css = r"
2147483646
2147483647
@@ -619,7 +612,7 @@ fn overflow() {
-3.402824e+38
"
- .replace("{309 zeros}", &repeat('0').take(309).collect::<String>());
+ .replace("{309 zeros}", &"0".repeat(309));
let mut input = ParserInput::new(&css);
let mut input = Parser::new(&mut input);
@@ -637,15 +630,13 @@ fn overflow() {
assert_eq!(input.expect_integer(), Ok(-2147483648));
assert_eq!(input.expect_integer(), Ok(-2147483648));
- assert_eq!(input.expect_number(), Ok(3.30282347e+38));
+ assert_eq!(input.expect_number(), Ok(3.302_823_5e38));
assert_eq!(input.expect_number(), Ok(f32::MAX));
assert_eq!(input.expect_number(), Ok(f32::INFINITY));
- assert!(f32::MAX != f32::INFINITY);
- assert_eq!(input.expect_number(), Ok(-3.30282347e+38));
+ assert_eq!(input.expect_number(), Ok(-3.302_823_5e38));
assert_eq!(input.expect_number(), Ok(f32::MIN));
assert_eq!(input.expect_number(), Ok(f32::NEG_INFINITY));
- assert!(f32::MIN != f32::NEG_INFINITY);
}
#[test]
@@ -784,7 +775,7 @@ where
impl<'a> ToJson for CowRcStr<'a> {
fn to_json(&self) -> Value {
- let s: &str = &*self;
+ let s: &str = self;
s.to_json()
}
}
@@ -847,7 +838,7 @@ fn no_stack_overflow_multiple_nested_blocks() {
}
let mut input = ParserInput::new(&input);
let mut input = Parser::new(&mut input);
- while let Ok(..) = input.next() {}
+ while input.next().is_ok() {}
}
impl<'i> DeclarationParser<'i> for JsonParser {
@@ -863,18 +854,16 @@ impl<'i> DeclarationParser<'i> for JsonParser {
let mut important = false;
loop {
let start = input.state();
- if let Ok(mut token) = input.next_including_whitespace().map(|t| t.clone()) {
+ if let Ok(mut token) = input.next_including_whitespace().cloned() {
// Hack to deal with css-parsing-tests assuming that
// `!important` in the middle of a declaration value is OK.
// This can never happen per spec
// (even CSS Variables forbid top-level `!`)
if token == Token::Delim('!') {
input.reset(&start);
- if parse_important(input).is_ok() {
- if input.is_exhausted() {
- important = true;
- break;
- }
+ if parse_important(input).is_ok() && input.is_exhausted() {
+ important = true;
+ break;
}
input.reset(&start);
token = input.next_including_whitespace().unwrap().clone();
@@ -905,7 +894,7 @@ impl<'i> AtRuleParser<'i> for JsonParser {
];
match_ignore_ascii_case! { &*name,
"charset" => {
- Err(input.new_error(BasicParseErrorKind::AtRuleInvalid(name.clone()).into()))
+ Err(input.new_error(BasicParseErrorKind::AtRuleInvalid(name.clone())))
},
_ => Ok(prelude),
}
@@ -968,7 +957,7 @@ impl<'i> RuleBodyItemParser<'i, Value, ()> for JsonParser {
fn component_values_to_json(input: &mut Parser) -> Vec<Value> {
let mut values = vec![];
- while let Ok(token) = input.next_including_whitespace().map(|t| t.clone()) {
+ while let Ok(token) = input.next_including_whitespace().cloned() {
values.push(one_component_value_to_json(token, input));
}
values
@@ -978,9 +967,9 @@ fn one_component_value_to_json(token: Token, input: &mut Parser) -> Value {
fn numeric(value: f32, int_value: Option<i32>, has_sign: bool) -> Vec<Value> {
vec![
Token::Number {
- value: value,
- int_value: int_value,
- has_sign: has_sign,
+ value,
+ int_value,
+ has_sign,
}
.to_css_string()
.to_json(),
@@ -1137,7 +1126,7 @@ fn parse_until_before_stops_at_delimiter_or_end_of_input() {
let ox = ix.next();
let oy = iy.next();
assert_eq!(ox, oy);
- if let Err(_) = ox {
+ if ox.is_err() {
break;
}
}
@@ -1223,7 +1212,7 @@ fn parse_sourcemapping_comments() {
for test in tests {
let mut input = ParserInput::new(test.0);
let mut parser = Parser::new(&mut input);
- while let Ok(_) = parser.next_including_whitespace() {}
+ while parser.next_including_whitespace().is_ok() {}
assert_eq!(parser.current_source_map_url(), test.1);
}
}
@@ -1247,7 +1236,7 @@ fn parse_sourceurl_comments() {
for test in tests {
let mut input = ParserInput::new(test.0);
let mut parser = Parser::new(&mut input);
- while let Ok(_) = parser.next_including_whitespace() {}
+ while parser.next_including_whitespace().is_ok() {}
assert_eq!(parser.current_source_url(), test.1);
}
}
@@ -1321,7 +1310,8 @@ fn utf16_columns() {
break;
}
Err(_) => {
- assert!(false);
+ // should this be an explicit panic instead?
+ unreachable!();
}
Ok(_) => {}
};
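
Most of the tests.rs churn is mechanical (`cloned()`, `is_ok()`, struct-field shorthand). The one function worth restating is `almost_equals`, which compares JSON numbers under a relative tolerance scaled by the first operand; a worked sketch with the same 1e-6 factor:

    // Relative tolerance scaled by |a|, as in almost_equals above.
    fn numbers_almost_equal(a: f64, b: f64) -> bool {
        (a - b).abs() <= a.abs() * 1e-6
    }

    fn main() {
        assert!(numbers_almost_equal(1_000_000.0, 1_000_000.5)); // slack is 1.0 here
        assert!(!numbers_almost_equal(1.0, 1.001)); // slack is only 1e-6 here
    }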
diff --git a/third_party/rust/cssparser/src/tokenizer.rs b/third_party/rust/cssparser/src/tokenizer.rs
index a3b700632d..ea173a5e45 100644
--- a/third_party/rust/cssparser/src/tokenizer.rs
+++ b/third_party/rust/cssparser/src/tokenizer.rs
@@ -255,10 +255,10 @@ impl<'a> Tokenizer<'a> {
#[inline]
pub fn see_function(&mut self, name: &str) {
- if self.var_or_env_functions == SeenStatus::LookingForThem {
- if name.eq_ignore_ascii_case("var") || name.eq_ignore_ascii_case("env") {
- self.var_or_env_functions = SeenStatus::SeenAtLeastOne;
- }
+ if self.var_or_env_functions == SeenStatus::LookingForThem
+ && (name.eq_ignore_ascii_case("var") || name.eq_ignore_ascii_case("env"))
+ {
+ self.var_or_env_functions = SeenStatus::SeenAtLeastOne;
}
}
@@ -322,10 +322,12 @@ impl<'a> Tokenizer<'a> {
pub fn current_source_line(&self) -> &'a str {
let current = self.position();
- let start = self.slice(SourcePosition(0)..current)
+ let start = self
+ .slice(SourcePosition(0)..current)
.rfind(|c| matches!(c, '\r' | '\n' | '\x0C'))
.map_or(0, |start| start + 1);
- let end = self.slice(current..SourcePosition(self.input.len()))
+ let end = self
+ .slice(current..SourcePosition(self.input.len()))
.find(|c| matches!(c, '\r' | '\n' | '\x0C'))
.map_or(self.input.len(), |end| current.0 + end);
self.slice(SourcePosition(start)..SourcePosition(end))
@@ -424,7 +426,10 @@ impl<'a> Tokenizer<'a> {
#[inline]
fn next_char(&self) -> char {
- unsafe { self.input.get_unchecked(self.position().0..) }.chars().next().unwrap()
+ unsafe { self.input.get_unchecked(self.position().0..) }
+ .chars()
+ .next()
+ .unwrap()
}
// Given that a newline has been seen, advance over the newline
@@ -561,11 +566,11 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
b'#' => {
tokenizer.advance(1);
if is_ident_start(tokenizer) { IDHash(consume_name(tokenizer)) }
- else if !tokenizer.is_eof() && match tokenizer.next_byte_unchecked() {
+ else if !tokenizer.is_eof() &&
+ matches!(tokenizer.next_byte_unchecked(), b'0'..=b'9' | b'-') {
// Any other valid case here already resulted in IDHash.
- b'0'..=b'9' | b'-' => true,
- _ => false,
- } { Hash(consume_name(tokenizer)) }
+ Hash(consume_name(tokenizer))
+ }
else { Delim('#') }
},
b'$' => {
@@ -582,11 +587,11 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
b'+' => {
if (
tokenizer.has_at_least(1)
- && matches!(tokenizer.byte_at(1), b'0'..=b'9')
+ && tokenizer.byte_at(1).is_ascii_digit()
) || (
tokenizer.has_at_least(2)
&& tokenizer.byte_at(1) == b'.'
- && matches!(tokenizer.byte_at(2), b'0'..=b'9')
+ && tokenizer.byte_at(2).is_ascii_digit()
) {
consume_numeric(tokenizer)
} else {
@@ -598,11 +603,11 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
b'-' => {
if (
tokenizer.has_at_least(1)
- && matches!(tokenizer.byte_at(1), b'0'..=b'9')
+ && tokenizer.byte_at(1).is_ascii_digit()
) || (
tokenizer.has_at_least(2)
&& tokenizer.byte_at(1) == b'.'
- && matches!(tokenizer.byte_at(2), b'0'..=b'9')
+ && tokenizer.byte_at(2).is_ascii_digit()
) {
consume_numeric(tokenizer)
} else if tokenizer.starts_with(b"-->") {
@@ -617,8 +622,7 @@ fn next_token<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>, ()> {
},
b'.' => {
if tokenizer.has_at_least(1)
- && matches!(tokenizer.byte_at(1), b'0'..=b'9'
- ) {
+ && tokenizer.byte_at(1).is_ascii_digit() {
consume_numeric(tokenizer)
} else {
tokenizer.advance(1);
@@ -1001,7 +1005,7 @@ fn byte_to_hex_digit(b: u8) -> Option<u32> {
}
fn byte_to_decimal_digit(b: u8) -> Option<u32> {
- if b >= b'0' && b <= b'9' {
+ if b.is_ascii_digit() {
Some((b - b'0') as u32)
} else {
None
@@ -1038,7 +1042,7 @@ fn consume_numeric<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> {
let mut fractional_part: f64 = 0.;
if tokenizer.has_at_least(1)
&& tokenizer.next_byte_unchecked() == b'.'
- && matches!(tokenizer.byte_at(1), b'0'..=b'9')
+ && tokenizer.byte_at(1).is_ascii_digit()
{
is_integer = false;
tokenizer.advance(1); // Consume '.'
@@ -1055,32 +1059,32 @@ fn consume_numeric<'a>(tokenizer: &mut Tokenizer<'a>) -> Token<'a> {
let mut value = sign * (integral_part + fractional_part);
- if tokenizer.has_at_least(1) && matches!(tokenizer.next_byte_unchecked(), b'e' | b'E') {
- if matches!(tokenizer.byte_at(1), b'0'..=b'9')
+ if tokenizer.has_at_least(1)
+ && matches!(tokenizer.next_byte_unchecked(), b'e' | b'E')
+ && (tokenizer.byte_at(1).is_ascii_digit()
|| (tokenizer.has_at_least(2)
&& matches!(tokenizer.byte_at(1), b'+' | b'-')
- && matches!(tokenizer.byte_at(2), b'0'..=b'9'))
- {
- is_integer = false;
+ && tokenizer.byte_at(2).is_ascii_digit()))
+ {
+ is_integer = false;
+ tokenizer.advance(1);
+ let (has_sign, sign) = match tokenizer.next_byte_unchecked() {
+ b'-' => (true, -1.),
+ b'+' => (true, 1.),
+ _ => (false, 1.),
+ };
+ if has_sign {
tokenizer.advance(1);
- let (has_sign, sign) = match tokenizer.next_byte_unchecked() {
- b'-' => (true, -1.),
- b'+' => (true, 1.),
- _ => (false, 1.),
- };
- if has_sign {
- tokenizer.advance(1);
- }
- let mut exponent: f64 = 0.;
- while let Some(digit) = byte_to_decimal_digit(tokenizer.next_byte_unchecked()) {
- exponent = exponent * 10. + digit as f64;
- tokenizer.advance(1);
- if tokenizer.is_eof() {
- break;
- }
+ }
+ let mut exponent: f64 = 0.;
+ while let Some(digit) = byte_to_decimal_digit(tokenizer.next_byte_unchecked()) {
+ exponent = exponent * 10. + digit as f64;
+ tokenizer.advance(1);
+ if tokenizer.is_eof() {
+ break;
}
- value *= f64::powf(10., sign * exponent);
}
+ value *= f64::powf(10., sign * exponent);
}
let int_value = if is_integer {
@@ -1339,7 +1343,7 @@ fn consume_unquoted_url<'a>(tokenizer: &mut Tokenizer<'a>) -> Result<Token<'a>,
}
// (value, number of digits up to 6)
-fn consume_hex_digits<'a>(tokenizer: &mut Tokenizer<'a>) -> (u32, u32) {
+fn consume_hex_digits(tokenizer: &mut Tokenizer<'_>) -> (u32, u32) {
let mut value = 0;
let mut digits = 0;
while digits < 6 && !tokenizer.is_eof() {
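
The consume_numeric rewrite folds the exponent test into a single condition and dedents the body; the arithmetic is identical: decimal digits after `e`/`E` accumulate into `exponent`, and the mantissa is scaled once at the end. A standalone sketch of that final step:

    // Accumulate exponent digits, then scale the mantissa once, matching
    // consume_numeric's `value *= f64::powf(10., sign * exponent)`.
    fn apply_exponent(mantissa: f64, sign: f64, exponent_digits: &[u32]) -> f64 {
        let mut exponent = 0.0;
        for &digit in exponent_digits {
            exponent = exponent * 10.0 + digit as f64;
        }
        mantissa * f64::powf(10.0, sign * exponent)
    }

    fn main() {
        // "1.5e-2": mantissa 1.5, exponent sign -1, exponent digits [2].
        let value = apply_exponent(1.5, -1.0, &[2]);
        assert!((value - 0.015).abs() < 1e-12);
    }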
diff --git a/third_party/rust/cssparser/src/unicode_range.rs b/third_party/rust/cssparser/src/unicode_range.rs
index b0a2017cbf..ce6bb3b5e7 100644
--- a/third_party/rust/cssparser/src/unicode_range.rs
+++ b/third_party/rust/cssparser/src/unicode_range.rs
@@ -24,7 +24,7 @@ pub struct UnicodeRange {
impl UnicodeRange {
/// https://drafts.csswg.org/css-syntax/#urange-syntax
- pub fn parse<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Self, BasicParseError<'i>> {
+ pub fn parse<'i>(input: &mut Parser<'i, '_>) -> Result<Self, BasicParseError<'i>> {
// <urange> =
// u '+' <ident-token> '?'* |
// u <dimension-token> '?'* |
@@ -57,7 +57,7 @@ impl UnicodeRange {
}
}
-fn parse_tokens<'i, 't>(input: &mut Parser<'i, 't>) -> Result<(), BasicParseError<'i>> {
+fn parse_tokens<'i>(input: &mut Parser<'i, '_>) -> Result<(), BasicParseError<'i>> {
match input.next_including_whitespace()?.clone() {
Token::Delim('+') => {
match *input.next_including_whitespace()? {
@@ -123,15 +123,13 @@ fn parse_concatenated(text: &[u8]) -> Result<UnicodeRange, ()> {
start: first_hex_value,
end: first_hex_value,
});
- } else {
- if let Some((&b'-', mut text)) = text.split_first() {
- let (second_hex_value, hex_digit_count) = consume_hex(&mut text);
- if hex_digit_count > 0 && hex_digit_count <= 6 && text.is_empty() {
- return Ok(UnicodeRange {
- start: first_hex_value,
- end: second_hex_value,
- });
- }
+ } else if let Some((&b'-', mut text)) = text.split_first() {
+ let (second_hex_value, hex_digit_count) = consume_hex(&mut text);
+ if hex_digit_count > 0 && hex_digit_count <= 6 && text.is_empty() {
+ return Ok(UnicodeRange {
+ start: first_hex_value,
+ end: second_hex_value,
+ });
}
}
Err(())
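
Finally, the unicode_range.rs edits are again lifetime elision and an `else if` collapse; `parse_concatenated` still requires one to six hex digits on each side of the `-`. A minimal usage sketch of the public parser, assuming the crate-root re-export of `UnicodeRange`:

    use cssparser::{Parser, ParserInput, UnicodeRange};

    fn main() {
        // The <urange> start-end form, as in `unicode-range: U+26-28`.
        let mut input = ParserInput::new("U+26-28");
        let mut parser = Parser::new(&mut input);
        let range = UnicodeRange::parse(&mut parser).unwrap();
        assert_eq!((range.start, range.end), (0x26, 0x28));
    }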