summaryrefslogtreecommitdiffstats
path: root/devtools/client/fronts/inspector/rule-rewriter.js
diff options
context:
space:
mode:
Diffstat (limited to 'devtools/client/fronts/inspector/rule-rewriter.js')
-rw-r--r--devtools/client/fronts/inspector/rule-rewriter.js100
1 file changed, 54 insertions, 46 deletions
diff --git a/devtools/client/fronts/inspector/rule-rewriter.js b/devtools/client/fronts/inspector/rule-rewriter.js
index 30d1cf88d2..9fd84ccd8c 100644
--- a/devtools/client/fronts/inspector/rule-rewriter.js
+++ b/devtools/client/fronts/inspector/rule-rewriter.js
@@ -12,7 +12,9 @@
"use strict";
-const { getCSSLexer } = require("resource://devtools/shared/css/lexer.js");
+const {
+ InspectorCSSParserWrapper,
+} = require("resource://devtools/shared/css/lexer.js");
const {
COMMENT_PARSING_HEURISTIC_BYPASS_CHAR,
escapeCSSComment,
@@ -194,7 +196,7 @@ RuleRewriter.prototype = {
// into "url(;)" by this code -- due to the way "url(...)" is
// parsed as a single token.
text = text.replace(/;$/, "");
- const lexer = getCSSLexer(text);
+ const lexer = new InspectorCSSParserWrapper(text, { trackEOFChars: true });
let result = "";
let previousOffset = 0;
@@ -210,7 +212,7 @@ RuleRewriter.prototype = {
// We set the location of the paren in a funny way, to handle
// the case where we've seen a function token, where the paren
// appears at the end.
- parenStack.push({ closer, offset: result.length - 1 });
+ parenStack.push({ closer, offset: result.length - 1, token });
previousOffset = token.endOffset;
};
@@ -223,6 +225,18 @@ RuleRewriter.prototype = {
return true;
}
+ // We need to handle a non-closed url function differently, because
+ // performEOFFixup will automatically close the missing parenthesis of `url(`.
+ // In that case, don't do anything here.
+ if (
+ paren.closer === ")" &&
+ closer == null &&
+ paren.token.tokenType === "Function" &&
+ paren.token.value === "url"
+ ) {
+ return true;
+ }
+
// Found a non-matching closing paren, so quote it. Note that
// these are processed in reverse order.
result =
@@ -234,50 +248,43 @@ RuleRewriter.prototype = {
return false;
};
- while (true) {
- const token = lexer.nextToken();
- if (!token) {
- break;
- }
-
- if (token.tokenType === "symbol") {
- switch (token.text) {
- case ";":
- // We simply drop the ";" here. This lets us cope with
- // declarations that don't have a ";" and also other
- // termination. The caller handles adding the ";" again.
+ let token;
+ while ((token = lexer.nextToken())) {
+ switch (token.tokenType) {
+ case "Semicolon":
+ // We simply drop the ";" here. This lets us cope with
+ // declarations that don't have a ";" and also other
+ // termination. The caller handles adding the ";" again.
+ result += text.substring(previousOffset, token.startOffset);
+ previousOffset = token.endOffset;
+ break;
+
+ case "CurlyBracketBlock":
+ pushParen(token, "}");
+ break;
+
+ case "ParenthesisBlock":
+ case "Function":
+ pushParen(token, ")");
+ break;
+
+ case "SquareBracketBlock":
+ pushParen(token, "]");
+ break;
+
+ case "CloseCurlyBracket":
+ case "CloseParenthesis":
+ case "CloseSquareBracket":
+ // Did we find an unmatched close bracket?
+ if (!popSomeParens(token.text)) {
+ // Copy out text from |previousOffset|.
result += text.substring(previousOffset, token.startOffset);
+ // Quote the offending symbol.
+ result += "\\" + token.text;
previousOffset = token.endOffset;
- break;
-
- case "{":
- pushParen(token, "}");
- break;
-
- case "(":
- pushParen(token, ")");
- break;
-
- case "[":
- pushParen(token, "]");
- break;
-
- case "}":
- case ")":
- case "]":
- // Did we find an unmatched close bracket?
- if (!popSomeParens(token.text)) {
- // Copy out text from |previousOffset|.
- result += text.substring(previousOffset, token.startOffset);
- // Quote the offending symbol.
- result += "\\" + token.text;
- previousOffset = token.endOffset;
- anySanitized = true;
- }
- break;
- }
- } else if (token.tokenType === "function") {
- pushParen(token, ")");
+ anySanitized = true;
+ }
+ break;
}
}
@@ -286,7 +293,8 @@ RuleRewriter.prototype = {
// Copy out any remaining text, then any needed terminators.
result += text.substring(previousOffset, text.length);
- const eofFixup = lexer.performEOFFixup("", true);
+
+ const eofFixup = lexer.performEOFFixup("");
if (eofFixup) {
anySanitized = true;
result += eofFixup;