summaryrefslogtreecommitdiffstats
path: root/testing/web-platform/tests/tools/lint
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 17:32:43 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 17:32:43 +0000
commit6bf0a5cb5034a7e684dcc3500e841785237ce2dd (patch)
treea68f146d7fa01f0134297619fbe7e33db084e0aa /testing/web-platform/tests/tools/lint
parentInitial commit. (diff)
downloadthunderbird-upstream.tar.xz
thunderbird-upstream.zip
Adding upstream version 1:115.7.0.upstream/1%115.7.0upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'testing/web-platform/tests/tools/lint')
-rw-r--r--testing/web-platform/tests/tools/lint/__init__.py1
-rw-r--r--testing/web-platform/tests/tools/lint/commands.json3
-rw-r--r--testing/web-platform/tests/tools/lint/fnmatch.py31
-rw-r--r--testing/web-platform/tests/tools/lint/lint.py996
-rw-r--r--testing/web-platform/tests/tools/lint/rules.py504
-rw-r--r--testing/web-platform/tests/tools/lint/tests/__init__.py0
-rw-r--r--testing/web-platform/tests/tools/lint/tests/base.py9
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/about_blank.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/broken.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/broken_ignored.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/a-ref.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/a.html4
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/a-ref.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/a.html4
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/support/a.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/support/tools/a.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/tools/a.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/not-match/a-ref.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/not-match/a.html4
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/not-match/support/a.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/not-match/tools/a.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/relative-testharness-interact.html5
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/relative-testharness.html4
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/selectors/a.html4
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/support/a.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/support/tools/a.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/tools/a.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/dependency.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/lint.ignore1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/okay.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/ref/absolute.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/ref/existent_relative-ref.html0
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/ref/existent_relative.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/ref/existent_root_relative.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/ref/non_existent_relative.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/ref/non_existent_root_relative.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/ref/same_file_empty.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/ref/same_file_path.html1
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/tests/dir1/a.html4
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/tests/dir1/a.js6
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/tests/dir1/a.xhtml5
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/tests/dir1/b.html4
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/tests/dir2/a.xhtml5
-rw-r--r--testing/web-platform/tests/tools/lint/tests/dummy/tests/relative-testharness-manual.html3
-rw-r--r--testing/web-platform/tests/tools/lint/tests/test_file_lints.py912
-rw-r--r--testing/web-platform/tests/tools/lint/tests/test_lint.py440
-rw-r--r--testing/web-platform/tests/tools/lint/tests/test_path_lints.py164
47 files changed, 3136 insertions, 0 deletions
diff --git a/testing/web-platform/tests/tools/lint/__init__.py b/testing/web-platform/tests/tools/lint/__init__.py
new file mode 100644
index 0000000000..f3d12ed9a7
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/__init__.py
@@ -0,0 +1 @@
+from . import lint # noqa: F401
diff --git a/testing/web-platform/tests/tools/lint/commands.json b/testing/web-platform/tests/tools/lint/commands.json
new file mode 100644
index 0000000000..a8e9844faf
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/commands.json
@@ -0,0 +1,3 @@
+{"lint":
+ {"path": "lint.py", "script": "main", "parser": "create_parser", "help": "Run the lint",
+ "virtualenv": false}}
diff --git a/testing/web-platform/tests/tools/lint/fnmatch.py b/testing/web-platform/tests/tools/lint/fnmatch.py
new file mode 100644
index 0000000000..4148ed75a9
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/fnmatch.py
@@ -0,0 +1,31 @@
+import fnmatch as _stdlib_fnmatch
+import os
+from typing import Iterable, List, Text
+
+
+__all__ = ["fnmatch", "fnmatchcase", "filter", "translate"]
+
+
def fnmatch(name: Text, pat: Text) -> bool:
    """Case-insensitive (platform-normalised) glob match.

    Mirrors stdlib ``fnmatch.fnmatch``: both operands are passed through
    ``os.path.normcase`` before the case-sensitive comparison.
    """
    return fnmatchcase(os.path.normcase(name), os.path.normcase(pat))
+
+
def fnmatchcase(name: Text, pat: Text) -> bool:
    """Case-sensitive glob match with fast paths for common patterns.

    Patterns without ``?``/``[`` metacharacters and with at most one ``*``
    (at either end) are answered with plain string operations; everything
    else is delegated to the stdlib implementation.
    """
    if "?" in pat or "[" in pat:
        return _stdlib_fnmatch.fnmatchcase(name, pat)
    star_count = pat.count("*")
    if star_count == 0:
        # No wildcards at all: exact comparison.
        return name == pat
    if star_count == 1:
        if pat.startswith("*"):
            return name.endswith(pat[1:])
        if pat.endswith("*"):
            return name.startswith(pat[:-1])
    return _stdlib_fnmatch.fnmatchcase(name, pat)
+
+
def filter(names: Iterable[Text], pat: Text) -> List[Text]:
    """Return the subset of *names* matching *pat* (case-normalised)."""
    return [candidate for candidate in names if fnmatch(candidate, pat)]


# translate has no fast path, so reuse the stdlib implementation directly.
translate = _stdlib_fnmatch.translate
diff --git a/testing/web-platform/tests/tools/lint/lint.py b/testing/web-platform/tests/tools/lint/lint.py
new file mode 100644
index 0000000000..9fc78d9b68
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/lint.py
@@ -0,0 +1,996 @@
+import abc
+import argparse
+import ast
+import json
+import logging
+import multiprocessing
+import os
+import re
+import subprocess
+import sys
+import tempfile
+from collections import defaultdict
+from typing import (Any, Callable, Dict, IO, Iterable, List, Optional, Sequence, Set, Text, Tuple,
+ Type, TypeVar)
+
+from urllib.parse import urlsplit, urljoin
+
+try:
+ from xml.etree import cElementTree as ElementTree
+except ImportError:
+ from xml.etree import ElementTree as ElementTree # type: ignore
+
+from . import fnmatch
+from . import rules
+from .. import localpaths
+from ..ci.tc.github_checks_output import get_gh_checks_outputter, GitHubChecksOutputter
+from ..gitignore.gitignore import PathFilter
+from ..wpt import testfiles
+from ..manifest.vcs import walk
+
+from ..manifest.sourcefile import SourceFile, js_meta_re, python_meta_re, space_chars, get_any_variants
+
+
+# The Ignorelist is a two level dictionary. The top level is indexed by
+# error names (e.g. 'TRAILING WHITESPACE'). Each of those then has a map of
+# file patterns (e.g. 'foo/*') to a set of specific line numbers for the
+# exception. The line numbers are optional; if missing the entire file
+# ignores the error.
+Ignorelist = Dict[str, Dict[str, Set[Optional[int]]]]
+
+# Define an arbitrary typevar
+T = TypeVar("T")
+
+
+logger: Optional[logging.Logger] = None
+
+
def setup_logging(prefix: bool = False) -> None:
    """Create and configure the module-level ``logger``.

    :param prefix: when True, use ``logging.BASIC_FORMAT`` (level/name
        prefix on each record); otherwise log bare messages.
    """
    global logger
    if logger is None:
        logger = logging.getLogger(os.path.basename(os.path.splitext(__file__)[0]))
        # Only add a handler if the parent logger is missing a handler;
        # otherwise records would be emitted twice via propagation.
        # (Fixed: previously a throwaway StreamHandler was constructed
        # unconditionally and discarded when this branch was not taken.)
        parent = logger.parent
        assert isinstance(parent, logging.Logger)
        if parent and len(parent.handlers) == 0:
            logger.addHandler(logging.StreamHandler(sys.stdout))
    # Renamed from `format` to avoid shadowing the builtin.
    log_format = logging.BASIC_FORMAT if prefix else "%(message)s"
    formatter = logging.Formatter(log_format)
    for handler in logger.handlers:
        handler.setFormatter(formatter)
    logger.setLevel(logging.DEBUG)


setup_logging()
+
+
+ERROR_MSG = """You must fix all errors; for details on how to fix them, see
+https://web-platform-tests.org/writing-tests/lint-tool.html
+
+However, instead of fixing a particular error, it's sometimes
+OK to add a line to the lint.ignore file in the root of the
+web-platform-tests directory to make the lint tool ignore it.
+
+For example, to make the lint tool ignore all '%s'
+errors in the %s file,
+you could add the following line to the lint.ignore file.
+
+%s: %s"""
+
+
def all_filesystem_paths(repo_root: Text, subdir: Optional[Text] = None) -> Iterable[Text]:
    """Yield every lintable path under *repo_root* (or *subdir*).

    Results are relative paths decoded as UTF-8.  Paths matched by the
    repository's gitignore rules (plus ``.git/`` itself) are filtered out
    by PathFilter.

    :param repo_root: root of the repository
    :param subdir: optional subdirectory to restrict the walk to
    """
    path_filter = PathFilter(repo_root.encode("utf8"),
                             extras=[b".git/"])
    if subdir:
        expanded_path = subdir.encode("utf8")
        subdir_str = expanded_path
    else:
        expanded_path = repo_root.encode("utf8")
    for dirpath, dirnames, filenames in path_filter(walk(expanded_path)):
        for filename, _ in filenames:
            path = os.path.join(dirpath, filename)
            if subdir:
                # walk() yields paths relative to the walk root, so re-prefix
                # the subdirectory to keep results relative to repo_root.
                path = os.path.join(subdir_str, path)
            assert not os.path.isabs(path), path
            yield path.decode("utf8")
+
+
def _all_files_equal(paths: Iterable[Text]) -> bool:
    """
    Checks all the paths are files that are byte-for-byte identical

    :param paths: the list of paths to compare
    :returns: True if they are all identical
    """
    paths = list(paths)
    if len(paths) < 2:
        return True

    first = paths.pop()
    size = os.path.getsize(first)
    # Cheap pre-check: files of different sizes cannot be identical.
    if any(os.path.getsize(path) != size for path in paths):
        return False

    # Chunk this to avoid eating up memory and file descriptors
    bufsize = 4096*4  # 16KB, a "reasonable" number of disk sectors
    groupsize = 8  # Hypothesised to be large enough in the common case that everything fits in one group
    with open(first, "rb") as first_f:
        for start in range(0, len(paths), groupsize):
            path_group = paths[start:start+groupsize]
            first_f.seek(0)
            # Open one file at a time inside the try so that if a later
            # open() raises, everything opened so far is still closed.
            # (Fixed: previously a list comprehension inside the try could
            # leave `files` unbound in the finally clause and leak the
            # already-opened descriptors.)
            files: List[IO[bytes]] = []
            try:
                for group_path in path_group:
                    files.append(open(group_path, "rb"))
                for _ in range(0, size, bufsize):
                    a = first_f.read(bufsize)
                    for f in files:
                        b = f.read(bufsize)
                        if a != b:
                            return False
            finally:
                for f in files:
                    f.close()

    return True
+
+
def check_path_length(repo_root: Text, path: Text) -> List[rules.Error]:
    """Flag paths whose reported length (len + 1) exceeds 150 characters."""
    reported_length = len(path) + 1
    if reported_length <= 150:
        return []
    return [rules.PathLength.error(path, (path, reported_length))]
+
+
def check_file_type(repo_root: Text, path: Text) -> List[rules.Error]:
    """Flag symlinks, which are not permitted in the test suite."""
    if not os.path.islink(path):
        return []
    return [rules.FileType.error(path, (path, "symlink"))]
+
+
def check_worker_collision(repo_root: Text, path: Text) -> List[rules.Error]:
    """Flag filenames that collide with URLs auto-generated from .any.js/.worker.js files."""
    collisions = ((".any.html", ".any.js"),
                  (".any.worker.html", ".any.js"),
                  (".worker.html", ".worker.js"))
    for suffix, generator_source in collisions:
        if path.endswith(suffix):
            return [rules.WorkerCollision.error(path, (suffix, generator_source))]
    return []
+
+
def check_gitignore_file(repo_root: Text, path: Text) -> List[rules.Error]:
    """Flag .gitignore files outside the locations where they are permitted."""
    parts = path.split(os.path.sep)

    # Only files literally named ".gitignore" are of interest, and one at
    # the repository root is always allowed.
    if parts[-1] != ".gitignore" or len(parts) == 1:
        return []

    # tools/, docs/ and resources/webidl2/ may carry their own ignore files.
    exempt = (parts[0] in ("tools", "docs") or
              parts[:2] == ["resources", "webidl2"])
    if exempt:
        return []

    return [rules.GitIgnoreFile.error(path)]
+
+
def check_mojom_js(repo_root: Text, path: Text) -> List[rules.Error]:
    """Flag checked-in .mojom.js files."""
    if not path.endswith(".mojom.js"):
        return []
    return [rules.MojomJSFile.error(path)]
+
+
def check_ahem_copy(repo_root: Text, path: Text) -> List[rules.Error]:
    """Flag extra copies of the Ahem font (any .ttf with 'ahem' in its path)."""
    lowered = path.lower()
    if lowered.endswith(".ttf") and "ahem" in lowered:
        return [rules.AhemCopy.error(path)]
    return []
+
+
def check_tentative_directories(repo_root: Text, path: Text) -> List[rules.Error]:
    """Flag directory names that contain 'tentative' without being exactly 'tentative'."""
    # Only directory components count; the filename itself is excluded.
    directories = path.split(os.path.sep)[:-1]
    if any("tentative" in name and name != "tentative" for name in directories):
        return [rules.TentativeDirectoryName.error(path)]
    return []
+
+
def check_git_ignore(repo_root: Text, paths: List[Text]) -> List[rules.Error]:
    """Flag paths that ``git check-ignore`` reports as ignored.

    Ignored test files would silently never run, so every match is an error
    unless the matching gitignore rule is itself a negation ("!...")
    special-case exception.
    """
    errors = []

    # Feed the candidate paths to `git check-ignore` on stdin via a temp file.
    with tempfile.TemporaryFile('w+', newline='') as f:
        for path in paths:
            f.write('%s\n' % os.path.join(repo_root, path))
        f.seek(0)
        try:
            matches = subprocess.check_output(
                ["git", "check-ignore", "--verbose", "--no-index", "--stdin"], stdin=f)
            for match in matches.strip().split(b'\n'):
                match_filter, path_bytes = match.split()
                _, _, filter_string = match_filter.split(b':')
                # If the matching filter reported by check-ignore is a special-case exception,
                # that's fine. Otherwise, it requires a new special-case exception.
                if filter_string[0:1] != b'!':
                    path = path_bytes.decode("utf8")
                    errors.append(rules.IgnoredPath.error(path, (path,)))
        except subprocess.CalledProcessError:
            # Nonzero return code means that no match exists.
            pass
    return errors
+
+
# Patterns recognising spec URLs (CSSWG drafts, w3.org TR/ and dev. space).
# NOTE(review): not referenced within this chunk — presumably used elsewhere
# in the file or by callers; confirm before removing.
drafts_csswg_re = re.compile(r"https?\:\/\/drafts\.csswg\.org\/([^/?#]+)")
w3c_tr_re = re.compile(r"https?\:\/\/www\.w3c?\.org\/TR\/([^/?#]+)")
w3c_dev_re = re.compile(r"https?\:\/\/dev\.w3c?\.org\/[^/?#]+\/([^/?#]+)")
+
+
def check_unique_testharness_basenames(repo_root: Text, paths: List[Text]) -> List[rules.Error]:
    """
    Checks that all testharness files have unique basename paths.

    The 'basename path' refers to the entire path excluding the extension. For
    example, 'foo/bar/baz.html' and 'foo/bar/baz.xhtml' have the same basename
    path, but 'foo/bar/baz.html' and 'foo/qux/baz.html' do not.

    Testharness files with identical basenames have caused issues in downstream
    infrastructure (see https://github.com/web-platform-tests/wpt/issues/7570),
    and may cause confusion in general.

    :param repo_root: the repository root
    :param paths: list of all paths
    :returns: a list of errors found in ``paths``
    """

    errors = []
    # Maps basename path -> list of extensions seen for it; more than one
    # extension for the same basename is a clash.
    file_dict = defaultdict(list)
    for path in paths:
        source_file = SourceFile(repo_root, path, "/")
        if "testharness" not in source_file.possible_types:
            continue
        file_name, file_extension = os.path.splitext(path)
        file_dict[file_name].append(file_extension)
    for k, v in file_dict.items():
        if len(v) == 1:
            continue
        context = (', '.join(v),)
        # Report once per clashing file so each appears in the output.
        for extension in v:
            errors.append(rules.DuplicateBasenamePath.error(k + extension, context))
    return errors
+
+
def check_unique_case_insensitive_paths(repo_root: Text, paths: List[Text]) -> List[rules.Error]:
    """Flag paths that differ only by case from an earlier path.

    Such pairs cannot coexist on case-insensitive filesystems.
    """
    errors = []
    first_seen: Dict[Text, Text] = {}
    for candidate in paths:
        folded = candidate.lower()
        previous = first_seen.get(folded)
        if previous is None:
            first_seen[folded] = candidate
        else:
            errors.append(rules.DuplicatePathCaseInsensitive.error(candidate, (previous,)))
    return errors
+
+
def parse_ignorelist(f: IO[Text]) -> Tuple["Ignorelist", Set[Text]]:
    """
    Parse the ignorelist file given by `f`, and return the parsed structure.

    :returns: a tuple of an Ignorelist and a set of files that are completely
              skipped by the linter (i.e. have a '*' entry).
    """
    data: "Ignorelist" = defaultdict(lambda: defaultdict(set))
    skipped_files: Set[Text] = set()

    for raw_line in f:
        entry = raw_line.strip()
        # Blank lines and '#' comments carry no rules.
        if not entry or entry.startswith("#"):
            continue

        fields = [field.strip() for field in entry.split(":")]
        if len(fields) == 2:
            rule_names, pattern = fields
            lineno: Optional[int] = None
        else:
            # Three fields: the final one restricts the entry to one line.
            rule_names, pattern, lineno_text = fields
            lineno = int(lineno_text)

        names = {name.strip() for name in rule_names.split(",")}
        pattern = os.path.normcase(pattern)

        if "*" in names:
            # A '*' entry disables linting for the whole file.
            skipped_files.add(pattern)
        else:
            for name in names:
                data[name][pattern].add(lineno)

    return data, skipped_files
+
+
def filter_ignorelist_errors(data: "Ignorelist", errors: Sequence["rules.Error"]) -> List["rules.Error"]:
    """
    Filter out those errors that are ignored in `data`.

    :param data: parsed ignorelist (rule name -> path pattern -> line numbers)
    :param errors: (rule, message, path, line) tuples to filter
    :returns: the errors not covered by any ignorelist entry
    """

    if not errors:
        return []

    # Idiom fix: build the flag list by repetition rather than a
    # comprehension over range(len(...)).
    skipped = [False] * len(errors)

    for i, (error_type, msg, path, line) in enumerate(errors):
        normpath = os.path.normcase(path)
        # Allow skipping all lint errors except the IGNORED PATH lint,
        # which explains how to fix it correctly and shouldn't be skipped.
        if error_type in data and error_type != "IGNORED PATH":
            wl_files = data[error_type]
            for file_match, allowed_lines in wl_files.items():
                # An entry with no line number (None) ignores the whole file.
                if None in allowed_lines or line in allowed_lines:
                    if fnmatch.fnmatchcase(normpath, file_match):
                        skipped[i] = True

    return [item for i, item in enumerate(errors) if not skipped[i]]
+
+
# Instances of every line-level regexp lint rule; check_regexp_line matches
# each of these against every line of the files it applies to.
regexps = [item() for item in  # type: ignore
           [rules.TrailingWhitespaceRegexp,
            rules.TabsRegexp,
            rules.CRRegexp,
            rules.SetTimeoutRegexp,
            rules.W3CTestOrgRegexp,
            rules.WebPlatformTestRegexp,
            rules.Webidl2Regexp,
            rules.ConsoleRegexp,
            rules.GenerateTestsRegexp,
            rules.PrintRegexp,
            rules.LayoutTestsRegexp,
            rules.MissingDepsRegexp,
            rules.SpecialPowersRegexp,
            rules.AssertThrowsRegexp,
            rules.PromiseRejectsRegexp,
            rules.AssertPreconditionRegexp]]
+
+
def check_regexp_line(repo_root: Text, path: Text, f: IO[bytes]) -> List[rules.Error]:
    """Run every applicable line-regexp rule against each line of *f*."""
    # Filter the rule set once per file rather than once per line.
    active_rules = [regexp for regexp in regexps if regexp.applies(path)]

    errors: List[rules.Error] = []
    for lineno, line in enumerate(f, start=1):
        errors.extend((regexp.name, regexp.description, path, lineno)
                      for regexp in active_rules
                      if regexp.search(line))
    return errors
+
+
def check_parsed(repo_root: Text, path: Text, f: IO[bytes]) -> List[rules.Error]:
    """Run lints over the parsed (markup-level) representation of a test.

    Covers spec links for css/ tests, reftest references, timeout metadata,
    testharness/testdriver script inclusion and ordering, variant metadata,
    and script src paths.

    :param repo_root: the repository root
    :param path: the path of the file within the repository
    :param f: a file-like object with the file contents
    :returns: a list of errors found in ``f``
    """
    source_file = SourceFile(repo_root, path, "/", contents=f.read())

    errors: List[rules.Error] = []

    # css/ tests must link to the spec they test, unless they are support
    # files, references, tentative tests or crashtests.
    if path.startswith("css/"):
        if (source_file.type != "support" and
            not source_file.name_is_reference and
            not source_file.name_is_tentative and
            not source_file.name_is_crashtest and
            not source_file.spec_links):
            return [rules.MissingLink.error(path)]

    if source_file.name_is_non_test:
        return []

    if source_file.markup_type is None:
        return []

    if source_file.root is None:
        return [rules.ParseFailed.error(path)]

    # Filename convention must agree with the detected test type.
    if source_file.type == "manual" and not source_file.name_is_manual:
        errors.append(rules.ContentManual.error(path))

    if source_file.type == "visual" and not source_file.name_is_visual:
        errors.append(rules.ContentVisual.error(path))

    # --- reftest references ---
    about_blank_parts = urlsplit("about:blank")
    for reftest_node in source_file.reftest_nodes:
        href = reftest_node.attrib.get("href", "").strip(space_chars)
        parts = urlsplit(href)

        # about:blank is always an acceptable reference.
        if parts == about_blank_parts:
            continue

        if (parts.scheme or parts.netloc):
            errors.append(rules.AbsoluteUrlRef.error(path, (href,)))
            continue

        ref_url = urljoin(source_file.url, href)
        ref_parts = urlsplit(ref_url)

        if source_file.url == ref_url:
            errors.append(rules.SameFileRef.error(path))
            continue

        assert ref_parts.path != ""

        reference_file = os.path.join(repo_root, ref_parts.path[1:])
        reference_rel = reftest_node.attrib.get("rel", "")

        if not os.path.isfile(reference_file):
            errors.append(rules.NonexistentRef.error(path,
                                                     (reference_rel, href)))

    # --- timeout metadata ---
    if len(source_file.timeout_nodes) > 1:
        errors.append(rules.MultipleTimeout.error(path))

    for timeout_node in source_file.timeout_nodes:
        timeout_value = timeout_node.attrib.get("content", "").lower()
        if timeout_value != "long":
            errors.append(rules.InvalidTimeout.error(path, (timeout_value,)))

    # Elements whose document order is validated in the single pass below.
    required_elements: List[Text] = []

    # --- testharness script inclusion ---
    testharnessreport_nodes: List[ElementTree.Element] = []
    if source_file.testharness_nodes:
        test_type = source_file.manifest_items()[0]
        if test_type not in ("testharness", "manual"):
            errors.append(rules.TestharnessInOtherType.error(path, (test_type,)))
        if len(source_file.testharness_nodes) > 1:
            errors.append(rules.MultipleTestharness.error(path))

        testharnessreport_nodes = source_file.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testharnessreport.js']")
        if not testharnessreport_nodes:
            errors.append(rules.MissingTestharnessReport.error(path))
        else:
            if len(testharnessreport_nodes) > 1:
                errors.append(rules.MultipleTestharnessReport.error(path))

        # Variant metadata must start with '?' or '#' and be non-trivial.
        for element in source_file.variant_nodes:
            if "content" not in element.attrib:
                errors.append(rules.VariantMissing.error(path))
            else:
                variant = element.attrib["content"]
                if variant != "":
                    if (variant[0] not in ("?", "#") or
                        len(variant) == 1 or
                        (variant[0] == "?" and variant[1] == "#")):
                        errors.append(rules.MalformedVariant.error(path, (path,)))

        required_elements.extend(key for key, value in {"testharness": True,
                                                        "testharnessreport": len(testharnessreport_nodes) > 0,
                                                        "timeout": len(source_file.timeout_nodes) > 0}.items()
                                 if value)

    # --- testdriver script inclusion ---
    testdriver_vendor_nodes: List[ElementTree.Element] = []
    if source_file.testdriver_nodes:
        if len(source_file.testdriver_nodes) > 1:
            errors.append(rules.MultipleTestdriver.error(path))

        testdriver_vendor_nodes = source_file.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testdriver-vendor.js']")
        if not testdriver_vendor_nodes:
            errors.append(rules.MissingTestdriverVendor.error(path))
        else:
            if len(testdriver_vendor_nodes) > 1:
                errors.append(rules.MultipleTestdriverVendor.error(path))

        required_elements.append("testdriver")
        if len(testdriver_vendor_nodes) > 0:
            required_elements.append("testdriver-vendor")

    # --- relative ordering of the required script/meta elements ---
    # Walk the document once in order; record each required element as seen
    # and flag elements that appear out of their expected order.
    if required_elements:
        seen_elements = defaultdict(bool)

        for elem in source_file.root.iter():
            if source_file.timeout_nodes and elem == source_file.timeout_nodes[0]:
                seen_elements["timeout"] = True
                if seen_elements["testharness"]:
                    errors.append(rules.LateTimeout.error(path))

            elif source_file.testharness_nodes and elem == source_file.testharness_nodes[0]:
                seen_elements["testharness"] = True

            elif testharnessreport_nodes and elem == testharnessreport_nodes[0]:
                seen_elements["testharnessreport"] = True
                if not seen_elements["testharness"]:
                    errors.append(rules.EarlyTestharnessReport.error(path))

            elif source_file.testdriver_nodes and elem == source_file.testdriver_nodes[0]:
                seen_elements["testdriver"] = True

            elif testdriver_vendor_nodes and elem == testdriver_vendor_nodes[0]:
                seen_elements["testdriver-vendor"] = True
                if not seen_elements["testdriver"]:
                    errors.append(rules.EarlyTestdriverVendor.error(path))

            # Stop once everything required has been seen.
            if all(seen_elements[name] for name in required_elements):
                break

    # --- script src path validation ---
    for element in source_file.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src]"):
        src = element.attrib["src"]

        def incorrect_path(script: Text, src: Text) -> bool:
            # The canonical location is /resources/<script>; anything else
            # referencing <script> (including a bare relative name) is wrong.
            return (script == src or
                    ("/%s" % script in src and src != "/resources/%s" % script))

        if incorrect_path("testharness.js", src):
            errors.append(rules.TestharnessPath.error(path))

        if incorrect_path("testharnessreport.js", src):
            errors.append(rules.TestharnessReportPath.error(path))

        if incorrect_path("testdriver.js", src):
            errors.append(rules.TestdriverPath.error(path))

        if incorrect_path("testdriver-vendor.js", src):
            errors.append(rules.TestdriverVendorPath.error(path))

        script_path = None
        try:
            script_path = urlsplit(urljoin(source_file.url, src)).path
        except ValueError:
            # This happens if the contents of src isn't something that looks like a URL to Python
            pass
        if (script_path == "/common/reftest-wait.js" and
            "reftest-wait" not in source_file.root.attrib.get("class", "").split()):
            errors.append(rules.MissingReftestWait.error(path))

    return errors
+
class ASTCheck(metaclass=abc.ABCMeta):
    """Base class for AST-level lints.

    Subclasses provide ``rule`` (the lint rule to report) and ``check``
    (returning the offending line numbers within a parsed module).
    """

    # abc.abstractproperty is deprecated since Python 3.3; the modern
    # spelling is @property stacked on @abc.abstractmethod.
    @property
    @abc.abstractmethod
    def rule(self) -> Type[rules.Rule]:
        """The lint rule this checker reports."""

    @abc.abstractmethod
    def check(self, root: ast.AST) -> List[int]:
        """Return the line numbers in *root* that violate ``rule``."""
+
class OpenModeCheck(ASTCheck):
    """Flags calls to open()/file() that rely on the implicit default mode."""

    rule = rules.OpenNoMode

    def check(self, root: ast.AST) -> List[int]:
        flagged = []
        for node in ast.walk(root):
            if not isinstance(node, ast.Call):
                continue
            callee = getattr(node.func, "id", None)
            if callee not in ("open", "file"):
                continue
            # Fewer than two positional args and no mode= keyword means the
            # mode was left implicit.
            if len(node.args) < 2 and not any(kw.arg == "mode" for kw in node.keywords):
                flagged.append(node.lineno)
        return flagged


ast_checkers = [OpenModeCheck()]
+
def check_python_ast(repo_root: Text, path: Text, f: IO[bytes]) -> List[rules.Error]:
    """Parse a Python file and run every registered AST checker over it."""
    # Unlike other lints, this one only makes sense for Python files.
    if not path.endswith(".py"):
        return []

    try:
        tree = ast.parse(f.read())
    except SyntaxError as e:
        return [rules.ParseFailed.error(path, line_no=e.lineno)]

    found = []
    for checker in ast_checkers:
        found.extend(checker.rule.error(path, line_no=lineno)
                     for lineno in checker.check(tree))
    return found
+
+
# Looser patterns than js_meta_re/python_meta_re: in check_script_metadata a
# line matching one of these but not the strict pattern is reported as
# broken metadata.
broken_js_metadata = re.compile(br"//\s*META:")
broken_python_metadata = re.compile(br"#\s*META:")
+
+
def check_global_metadata(value: bytes) -> Iterable[Tuple[Type[rules.Rule], Tuple[Any, ...]]]:
    """Yield a rule for each unrecognised token in a ``META: global=`` value."""
    stripped = (chunk.strip() for chunk in value.split(b","))
    global_values = {token.decode("utf8") for token in stripped if token}

    # TODO: this could check for duplicates and such
    for global_value in global_values:
        if not get_any_variants(global_value):
            yield (rules.UnknownGlobalMetadata, ())
+
+
def check_script_metadata(repo_root: Text, path: Text, f: IO[bytes]) -> List[rules.Error]:
    """Lint the ``// META:`` / ``# META:`` block at the top of a script.

    Applies to .worker.js/.any.js and .py files.  Metadata lines must form an
    uninterrupted block at the start of the file; anything resembling
    metadata after the first non-metadata line is flagged as stray, indented
    or broken.
    """
    if path.endswith((".worker.js", ".any.js")):
        meta_re = js_meta_re
        broken_metadata = broken_js_metadata
    elif path.endswith(".py"):
        meta_re = python_meta_re
        broken_metadata = broken_python_metadata
    else:
        return []

    # done flips to True at the first non-metadata line and stays True.
    done = False
    errors = []
    for idx, line in enumerate(f):
        assert isinstance(line, bytes), line

        m = meta_re.match(line)
        if m:
            key, value = m.groups()
            if key == b"global":
                for rule_class, context in check_global_metadata(value):
                    errors.append(rule_class.error(path, context, idx + 1))
            elif key == b"timeout":
                if value != b"long":
                    errors.append(rules.UnknownTimeoutMetadata.error(path,
                                                                     line_no=idx + 1))
            elif key not in (b"title", b"script", b"variant", b"quic"):
                errors.append(rules.UnknownMetadata.error(path,
                                                          line_no=idx + 1))
        else:
            done = True

        if done:
            # Metadata-shaped lines after the block: exact match at the start
            # of the line is stray, a match elsewhere is indented, and the
            # looser broken_metadata pattern catches malformed comments.
            if meta_re.match(line):
                errors.append(rules.StrayMetadata.error(path, line_no=idx + 1))
            elif meta_re.search(line):
                errors.append(rules.IndentedMetadata.error(path,
                                                           line_no=idx + 1))
            elif broken_metadata.search(line):
                errors.append(rules.BrokenMetadata.error(path, line_no=idx + 1))

    return errors
+
+
# Matches any CSS declaration that sets a font to Ahem.
ahem_font_re = re.compile(br"font.*:.*ahem", flags=re.IGNORECASE)
# Ahem can appear either in the global location or in the support
# directory for legacy Mozilla imports
ahem_stylesheet_re = re.compile(br"\/fonts\/ahem\.css|support\/ahem.css",
                                flags=re.IGNORECASE)
+
+
def check_ahem_system_font(repo_root: Text, path: Text, f: IO[bytes]) -> List[rules.Error]:
    """Flag markup files that use the Ahem font without linking its stylesheet."""
    if not path.endswith((".html", ".htm", ".xht", ".xhtml")):
        return []
    contents = f.read()
    uses_ahem = ahem_font_re.search(contents) is not None
    links_stylesheet = ahem_stylesheet_re.search(contents) is not None
    if uses_ahem and not links_stylesheet:
        return [rules.AhemSystemFont.error(path)]
    return []
+
+
def check_path(repo_root: Text, path: Text) -> List[rules.Error]:
    """
    Runs lints that check the file path.

    :param repo_root: the repository root
    :param path: the path of the file within the repository
    :returns: a list of errors found in ``path``
    """
    found: List[rules.Error] = []
    for lint in path_lints:
        found.extend(lint(repo_root, path))
    return found
+
+
def check_all_paths(repo_root: Text, paths: List[Text]) -> List[rules.Error]:
    """
    Runs lints that check all paths globally.

    :param repo_root: the repository root
    :param paths: a list of all the paths within the repository
    :returns: a list of errors found in ``paths``
    """
    # Docstring fix: previously said the errors were "found in ``f``", but
    # this function has no ``f`` parameter (copy-paste from
    # check_file_contents).
    errors = []
    for paths_fn in all_paths_lints:
        errors.extend(paths_fn(repo_root, paths))
    return errors
+
+
def check_file_contents(repo_root: Text, path: Text, f: Optional[IO[bytes]] = None) -> List[rules.Error]:
    """
    Runs lints that check the file contents.

    :param repo_root: the repository root
    :param path: the path of the file within the repository
    :param f: a file-like object with the file contents; opened from disk
        when omitted
    :returns: a list of errors found in ``f``
    """
    if f is None:
        f = open(os.path.join(repo_root, path), 'rb')
    with f:
        errors = []
        for file_fn in file_lints:
            errors.extend(file_fn(repo_root, path, f))
            # Each lint consumes the stream, so rewind before the next one.
            f.seek(0)
    return errors
+
+
def check_file_contents_apply(args: Tuple[Text, Text]) -> List[rules.Error]:
    """Unpacking adapter so check_file_contents can be used with pool.map."""
    repo_root, path = args
    return check_file_contents(repo_root, path)
+
+
def output_errors_text(log: Callable[[Any], None], errors: List["rules.Error"]) -> None:
    """Emit one ``path[:line]: message (RULE)`` line per error via *log*."""
    for rule_name, message, err_path, lineno in errors:
        location = f"{err_path}:{lineno}" if lineno else err_path
        log(f"{location}: {message} ({rule_name})")
+
+
def output_errors_markdown(log: Callable[[Any], None], errors: List["rules.Error"]) -> None:
    """Emit the errors as a markdown table via *log*, one line per call."""
    if not errors:
        return
    # Table header, logged line by line so *log* always receives whole lines.
    log("Got lint errors:")
    log("")
    log("| Error Type | Position | Message |")
    log("|------------|----------|---------|")
    for rule_name, message, err_path, lineno in errors:
        location = f"{err_path}:{lineno}" if lineno else err_path
        log(f"{rule_name} | {location} | {message} |")
+
+
def output_errors_json(log: Callable[[Any], None], errors: List[rules.Error]) -> None:
    """Emit one JSON object per error directly on stdout.

    NOTE(review): *log* is unused here — presumably kept for signature
    parity with the other output_errors_* functions; confirm.
    """
    for error_type, error, path, line_number in errors:
        # We use 'print' rather than the log function to ensure that the output
        # is valid JSON (e.g. with no logger preamble).
        print(json.dumps({"path": path, "lineno": line_number,
                          "rule": error_type, "message": error}))
+
+
def output_errors_github_checks(outputter: GitHubChecksOutputter, errors: List[rules.Error], first_reported: bool) -> None:
    """Output errors to the GitHub Checks output markdown format.

    :param outputter: the GitHub Checks outputter
    :param errors: a list of error tuples (error type, message, path, line number)
    :param first_reported: True if these are the first reported errors
    """
    if first_reported:
        outputter.output(
            "\nChanges in this PR contain lint errors, listed below. These "
            "errors must either be fixed or added to the list of ignored "
            "errors; see [the documentation]("
            "https://web-platform-tests.org/writing-tests/lint-tool.html). "
            "For help, please tag `@web-platform-tests/wpt-core-team` in a "
            "comment.\n")
        # NOTE(review): the opening code fence has no matching closer in this
        # function — presumably the caller emits it after the last batch;
        # confirm.
        outputter.output("```")
    output_errors_text(outputter.output, errors)
+
+
def output_error_count(error_count: Dict[Text, int]) -> None:
    """Log a summary line with the total error count and per-rule breakdown."""
    if not error_count:
        return

    assert logger is not None
    breakdown = " ".join("%s: %d" % pair for pair in error_count.items())
    total = sum(error_count.values())
    logger.info("")
    if total == 1:
        logger.info(f"There was 1 error ({breakdown})")
    else:
        logger.info("There were %d errors (%s)" % (total, breakdown))
+
+
def changed_files(wpt_root: Text) -> List[Text]:
    """Return paths (relative to *wpt_root*) changed against the default
    revish, including uncommitted and newly added files."""
    revish = testfiles.get_revish(revish=None)
    changed, _ = testfiles.files_changed(revish, None, include_uncommitted=True, include_new=True)
    return [os.path.relpath(item, wpt_root) for item in changed]
+
+
def lint_paths(kwargs: Dict[Text, Any], wpt_root: Text) -> List[Text]:
    """Resolve the set of paths to lint from the parsed command-line options.

    Precedence: explicit positional paths, then ``--all``, then
    ``--paths-file``, otherwise the files changed relative to the default
    revision.  If the lint configuration itself changed, everything is
    re-linted.
    """
    if kwargs.get("paths"):
        paths = []
        for path in kwargs.get("paths", []):
            if os.path.isdir(path):
                path_dir = list(all_filesystem_paths(wpt_root, path))
                paths.extend(path_dir)
            elif os.path.isfile(path):
                # Store paths relative to the repository root.
                paths.append(os.path.relpath(os.path.abspath(path), wpt_root))
    elif kwargs["all"]:
        paths = list(all_filesystem_paths(wpt_root))
    elif kwargs["paths_file"]:
        paths = []
        with open(kwargs["paths_file"], 'r', newline='') as f:
            for line in f.readlines():
                path = line.strip()
                if os.path.isdir(path):
                    path_dir = list(all_filesystem_paths(wpt_root, path))
                    paths.extend(path_dir)
                elif os.path.isfile(path):
                    paths.append(os.path.relpath(os.path.abspath(path), wpt_root))
    else:
        changed_paths = changed_files(wpt_root)
        force_all = False
        for path in changed_paths:
            path = path.replace(os.path.sep, "/")
            # Changes to the lint tool or its ignore list invalidate
            # incremental linting: fall back to linting everything.
            if path == "lint.ignore" or path.startswith("tools/lint/"):
                force_all = True
                break
        paths = (list(changed_paths) if not force_all
                 else list(all_filesystem_paths(wpt_root)))

    return paths
+
+
def create_parser() -> argparse.ArgumentParser:
    """Build the command-line argument parser for the lint tool."""
    p = argparse.ArgumentParser()
    # Positional arguments.
    p.add_argument("paths", nargs="*", help="List of paths to lint")
    # Output-format options (mutually exclusive, enforced in main()).
    p.add_argument("--json", action="store_true",
                   help="Output machine-readable JSON format")
    p.add_argument("--markdown", action="store_true",
                   help="Output markdown")
    # Repository / file-selection options.
    p.add_argument("--repo-root", type=str,
                   help="The WPT directory. Use this option if the lint script exists outside the repository")
    p.add_argument("--ignore-glob", type=str, action="append",
                   help="Additional file glob to ignore (repeat to add more). Globs are matched against paths relative to REPO_ROOT using fnmatch, except that path separators are normalized.")
    p.add_argument("--all", action="store_true",
                   help="If no paths are passed, try to lint the whole working directory, not just files that changed")
    p.add_argument("--github-checks-text-file", type=str,
                   help="Path to GitHub checks output file for Taskcluster runs")
    p.add_argument("-j", "--jobs", type=int, default=0,
                   help="Level to parallelism to use (defaults to 0, which detects the number of CPUs)")
    p.add_argument("--paths-file",
                   help="File containing a list of files to lint, one per line")
    return p
+
+
def main(**kwargs: Any) -> int:
    """Command-line entry point; returns the total number of lint errors."""
    assert logger is not None
    # --json and --markdown are mutually exclusive output modes.
    if kwargs.get("json") and kwargs.get("markdown"):
        logger.critical("Cannot specify --json and --markdown")
        sys.exit(2)

    repo_root = kwargs.get('repo_root') or localpaths.repo_root

    # Exactly one of the two flags may be set at this point.
    if kwargs.get("json", False):
        output_format = "json"
    elif kwargs.get("markdown", False):
        output_format = "markdown"
    else:
        output_format = "normal"

    if output_format == "markdown":
        setup_logging(True)

    paths = lint_paths(kwargs, repo_root)
    ignore_glob = kwargs.get("ignore_glob", [])
    github_checks_outputter = get_gh_checks_outputter(kwargs["github_checks_text_file"])
    jobs = kwargs.get("jobs", 0)

    return lint(repo_root, paths, output_format, ignore_glob, github_checks_outputter, jobs)
+
+
# best experimental guess at a decent cut-off for using the parallel path
# (below this many files, process start-up and IPC overhead outweigh the win)
MIN_FILES_FOR_PARALLEL = 80
+
+
def lint(repo_root: Text,
         paths: List[Text],
         output_format: Text,
         ignore_glob: Optional[List[Text]] = None,
         github_checks_outputter: Optional[GitHubChecksOutputter] = None,
         jobs: int = 0) -> int:
    """Run all lints over *paths* and return the total number of errors.

    :param repo_root: absolute path of the repository root
    :param paths: paths to check, relative to *repo_root*
    :param output_format: "json", "markdown" or "normal"
    :param ignore_glob: extra file globs to skip, in addition to lint.ignore
    :param github_checks_outputter: optional GitHub Checks markdown outputter
    :param jobs: worker process count; 0 means auto-detect from CPU count
    :returns: total error count across all checked paths
    """
    error_count: Dict[Text, int] = defaultdict(int)
    # Tracks (error type, path) of the most recent error, used for the
    # closing hint message below.
    last = None

    if jobs == 0:
        jobs = multiprocessing.cpu_count()
        if sys.platform == 'win32':
            # Using too many child processes in Python 3 hits either hangs or a
            # ValueError exception, and, has diminishing returns. Clamp to 56 to
            # give margin for error.
            jobs = min(jobs, 56)

    with open(os.path.join(repo_root, "lint.ignore")) as f:
        ignorelist, skipped_files = parse_ignorelist(f)

    if ignore_glob:
        skipped_files |= set(ignore_glob)

    output_errors = {"json": output_errors_json,
                     "markdown": output_errors_markdown,
                     "normal": output_errors_text}[output_format]

    def process_errors(errors: List[rules.Error]) -> Optional[Tuple[Text, Text]]:
        """
        Filters and prints the errors, and updates the ``error_count`` object.

        :param errors: a list of error tuples (error type, message, path, line number)
        :returns: ``None`` if there were no errors, or
                  a tuple of the error type and the path otherwise
        """

        errors = filter_ignorelist_errors(ignorelist, errors)
        if not errors:
            return None

        assert logger is not None
        output_errors(logger.error, errors)

        if github_checks_outputter:
            # The preamble/code fence is only emitted before the very first
            # batch of reported errors.
            first_output = len(error_count) == 0
            output_errors_github_checks(github_checks_outputter, errors, first_output)

        for error_type, error, path, line in errors:
            error_count[error_type] += 1

        return (errors[-1][0], path)

    to_check_content = []
    skip = set()

    # First pass: run the cheap per-path lints and collect the files whose
    # contents still need checking.
    for path in paths:
        abs_path = os.path.join(repo_root, path)
        if not os.path.exists(abs_path):
            skip.add(path)
            continue

        if any(fnmatch.fnmatch(path, file_match) for file_match in skipped_files):
            skip.add(path)
            continue

        errors = check_path(repo_root, path)
        last = process_errors(errors) or last

        if not os.path.isdir(abs_path):
            to_check_content.append((repo_root, path))

    paths = [p for p in paths if p not in skip]

    if jobs > 1 and len(to_check_content) >= MIN_FILES_FOR_PARALLEL:
        pool = multiprocessing.Pool(jobs)
        # submit this job first, as it's the longest running
        all_paths_result = pool.apply_async(check_all_paths, (repo_root, paths))
        # each item tends to be quick, so pass things in large chunks to avoid too much IPC overhead
        errors_it = pool.imap_unordered(check_file_contents_apply, to_check_content, chunksize=40)
        pool.close()
        for errors in errors_it:
            last = process_errors(errors) or last

        errors = all_paths_result.get()
        pool.join()
        last = process_errors(errors) or last
    else:
        # Serial fallback for small runs or jobs <= 1.
        for item in to_check_content:
            errors = check_file_contents(*item)
            last = process_errors(errors) or last

        errors = check_all_paths(repo_root, paths)
        last = process_errors(errors) or last

    if output_format in ("normal", "markdown"):
        output_error_count(error_count)
        if error_count:
            assert last is not None
            assert logger is not None
            for line in (ERROR_MSG % (last[0], last[1], last[0], last[1])).split("\n"):
                logger.info(line)

    # Close the markdown code fence opened by output_errors_github_checks.
    if error_count and github_checks_outputter:
        github_checks_outputter.output("```")

    return sum(error_count.values())
+
+
# Lints that examine a single path (name/type only, no file contents).
path_lints = [check_file_type, check_path_length, check_worker_collision, check_ahem_copy,
              check_mojom_js, check_tentative_directories, check_gitignore_file]
# Lints that need to see the full set of paths at once.
all_paths_lints = [check_unique_testharness_basenames,
                   check_unique_case_insensitive_paths]
# Lints that inspect a file's contents.
file_lints = [check_regexp_line, check_parsed, check_python_ast, check_script_metadata,
              check_ahem_system_font]

# Don't break users of the lint that don't have git installed.
try:
    subprocess.check_output(["git", "--version"])
    all_paths_lints += [check_git_ignore]
except (subprocess.CalledProcessError, FileNotFoundError):
    print('No git present; skipping .gitignore lint.')
+
if __name__ == "__main__":
    args = create_parser().parse_args()
    error_count = main(**vars(args))
    # Exit non-zero so CI treats any reported lint error as a failure.
    if error_count > 0:
        sys.exit(1)
diff --git a/testing/web-platform/tests/tools/lint/rules.py b/testing/web-platform/tests/tools/lint/rules.py
new file mode 100644
index 0000000000..88b7b6d80c
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/rules.py
@@ -0,0 +1,504 @@
+import abc
+import inspect
+import os
+import re
+from typing import Any, List, Match, Optional, Pattern, Text, Tuple, cast
+
+
# A single lint error: (error type, message, path, optional line number).
Error = Tuple[str, str, str, Optional[int]]

def collapse(text: Text) -> Text:
    """Dedent *text* and collapse it onto a single line."""
    cleaned = inspect.cleandoc(str(text))
    return cleaned.replace("\n", " ")
+
+
class Rule(metaclass=abc.ABCMeta):
    """Abstract base class for a lint rule.

    Subclasses provide ``name`` (the error identifier shown to users) and
    ``description`` (a %-style format string), and may set ``to_fix`` with
    advice on resolving the error.
    """

    # abc.abstractproperty has been deprecated since Python 3.3; the
    # equivalent modern spelling is @property stacked on @abc.abstractmethod.
    @property
    @abc.abstractmethod
    def name(self) -> Text:
        pass

    @property
    @abc.abstractmethod
    def description(self) -> Text:
        pass

    # Optional human-readable advice on how to fix the error.
    to_fix: Optional[Text] = None

    @classmethod
    def error(cls, path: Text, context: Tuple[Any, ...] = (), line_no: Optional[int] = None) -> Error:
        """Build an Error tuple for *path*, interpolating *context* into the description."""
        name = cast(str, cls.name)
        description = cast(str, cls.description) % context
        return (name, description, path, line_no)
+
+
# ---------------------------------------------------------------------------
# Concrete lint rules. Each is pure data: a ``name``, a %-style
# ``description`` format string, and optional ``to_fix`` advice.
# ---------------------------------------------------------------------------

class MissingLink(Rule):
    name = "MISSING-LINK"
    description = "Testcase file must have a link to a spec"
    to_fix = """
        Ensure that there is a `<link rel="help" href="[url]">` for the spec.
        `MISSING-LINK` is designed to ensure that the CSS build tool can find
        the tests. Note that the CSS build system is primarily used by
        [test.csswg.org/](http://test.csswg.org/), which doesn't use
        `wptserve`, so `*.any.js` and similar tests won't work there; stick
        with the `.html` equivalent.
    """


class PathLength(Rule):
    name = "PATH LENGTH"
    # context supplies (path, actual length); the 150-character limit is fixed.
    description = "/%s longer than maximum path length (%d > 150)"
    to_fix = "use shorter filename to rename the test file"


class FileType(Rule):
    name = "FILE TYPE"
    description = "/%s is an unsupported file type (%s)"


class WorkerCollision(Rule):
    name = "WORKER COLLISION"
    description = collapse("""
        path ends with %s which collides with generated tests from %s files
    """)


class GitIgnoreFile(Rule):
    name = "GITIGNORE"
    description = ".gitignore found outside the root"


class MojomJSFile(Rule):
    name = "MOJOM-JS"
    description = "Don't check *.mojom.js files into WPT"
    to_fix = """
        Check if the file is already included in mojojs.zip:
        https://source.chromium.org/chromium/chromium/src/+/master:chrome/tools/build/linux/FILES.cfg
        If yes, use `loadMojoResources` from `resources/test-only-api.js` to load
        it; if not, contact ecosystem-infra@chromium.org for adding new files
        to mojojs.zip.
    """


class AhemCopy(Rule):
    name = "AHEM COPY"
    description = "Don't add extra copies of Ahem, use /fonts/Ahem.ttf"


class AhemSystemFont(Rule):
    name = "AHEM SYSTEM FONT"
    description = "Don't use Ahem as a system font, use /fonts/ahem.css"


# TODO: Add tests for this rule
class IgnoredPath(Rule):
    name = "IGNORED PATH"
    description = collapse("""
        %s matches an ignore filter in .gitignore - please add a .gitignore
        exception
    """)


class ParseFailed(Rule):
    name = "PARSE-FAILED"
    description = "Unable to parse file"
    to_fix = """
        examine the file to find the causes of any parse errors, and fix them.
    """


class ContentManual(Rule):
    name = "CONTENT-MANUAL"
    description = "Manual test whose filename doesn't end in '-manual'"


class ContentVisual(Rule):
    name = "CONTENT-VISUAL"
    description = "Visual test whose filename doesn't end in '-visual'"


class AbsoluteUrlRef(Rule):
    name = "ABSOLUTE-URL-REF"
    description = collapse("""
        Reference test with a reference file specified via an absolute URL:
        '%s'
    """)


class SameFileRef(Rule):
    name = "SAME-FILE-REF"
    description = "Reference test which points at itself as a reference"


class NonexistentRef(Rule):
    name = "NON-EXISTENT-REF"
    description = collapse("""
        Reference test with a non-existent '%s' relationship reference: '%s'
    """)


class MultipleTimeout(Rule):
    name = "MULTIPLE-TIMEOUT"
    description = "More than one meta name='timeout'"
    to_fix = """
        ensure each test file has only one instance of a `<meta
        name="timeout"...>` element
    """


class InvalidTimeout(Rule):
    name = "INVALID-TIMEOUT"
    description = collapse("""
        Test file with `<meta name='timeout'...>` element that has a `content`
        attribute whose value is not `long`: %s
    """)
    to_fix = "replace the value of the `content` attribute with `long`"
+
class MultipleTestharness(Rule):
    name = "MULTIPLE-TESTHARNESS"
    description = "More than one `<script src='/resources/testharness.js'>`"
    # The to_fix advice previously referred to testharnessreport.js, but this
    # rule flags duplicate *testharness.js* includes.
    to_fix = """
        ensure each test has only one `<script
        src='/resources/testharness.js'>` instance
    """
+
+
class MissingReftestWait(Rule):
    name = "MISSING-REFTESTWAIT"
    description = "Missing `class=reftest-wait`"
    to_fix = """
        ensure tests that include reftest-wait.js also use class=reftest-wait on the root element.
    """


class MissingTestharnessReport(Rule):
    name = "MISSING-TESTHARNESSREPORT"
    description = "Missing `<script src='/resources/testharnessreport.js'>`"
    to_fix = """
        ensure each test file contains `<script
        src='/resources/testharnessreport.js'>`
    """


class MultipleTestharnessReport(Rule):
    name = "MULTIPLE-TESTHARNESSREPORT"
    description = "More than one `<script src='/resources/testharnessreport.js'>`"


class VariantMissing(Rule):
    name = "VARIANT-MISSING"
    description = collapse("""
        Test file with a `<meta name='variant'...>` element that's missing a
        `content` attribute
    """)
    to_fix = """
        add a `content` attribute with an appropriate value to the `<meta
        name='variant'...>` element
    """


class MalformedVariant(Rule):
    name = "MALFORMED-VARIANT"
    description = collapse("""
        %s `<meta name=variant>` 'content' attribute must be the empty string
        or start with '?' or '#'
    """)
+
+
class LateTimeout(Rule):
    name = "LATE-TIMEOUT"
    # The original assigned ``description`` twice; the first (short) string
    # was dead code, immediately overwritten by the collapsed long form.
    description = collapse("""
        Test file with `<meta name='timeout'...>` element after `<script
        src='/resources/testharnessreport.js'>` element
    """)
    to_fix = """
        move the `<meta name="timeout"...>` element to precede the `script`
        element.
    """
+
+
# Ordering and duplication rules for the harness/testdriver script includes,
# followed by metadata-comment and path-uniqueness rules.

class EarlyTestharnessReport(Rule):
    name = "EARLY-TESTHARNESSREPORT"
    description = collapse("""
        Test file has an instance of
        `<script src='/resources/testharnessreport.js'>` prior to
        `<script src='/resources/testharness.js'>`
    """)
    to_fix = "flip the order"


class EarlyTestdriverVendor(Rule):
    name = "EARLY-TESTDRIVER-VENDOR"
    description = collapse("""
        Test file has an instance of
        `<script src='/resources/testdriver-vendor.js'>` prior to
        `<script src='/resources/testdriver.js'>`
    """)
    to_fix = "flip the order"


class MultipleTestdriver(Rule):
    name = "MULTIPLE-TESTDRIVER"
    description = "More than one `<script src='/resources/testdriver.js'>`"


class MissingTestdriverVendor(Rule):
    name = "MISSING-TESTDRIVER-VENDOR"
    description = "Missing `<script src='/resources/testdriver-vendor.js'>`"


class MultipleTestdriverVendor(Rule):
    name = "MULTIPLE-TESTDRIVER-VENDOR"
    description = "More than one `<script src='/resources/testdriver-vendor.js'>`"


class TestharnessPath(Rule):
    name = "TESTHARNESS-PATH"
    description = "testharness.js script seen with incorrect path"


class TestharnessReportPath(Rule):
    name = "TESTHARNESSREPORT-PATH"
    description = "testharnessreport.js script seen with incorrect path"


class TestdriverPath(Rule):
    name = "TESTDRIVER-PATH"
    description = "testdriver.js script seen with incorrect path"


class TestdriverVendorPath(Rule):
    name = "TESTDRIVER-VENDOR-PATH"
    description = "testdriver-vendor.js script seen with incorrect path"


class OpenNoMode(Rule):
    name = "OPEN-NO-MODE"
    description = "File opened without providing an explicit mode (note: binary files must be read with 'b' in the mode flags)"


class UnknownGlobalMetadata(Rule):
    name = "UNKNOWN-GLOBAL-METADATA"
    description = "Unexpected value for global metadata"


class BrokenGlobalMetadata(Rule):
    name = "BROKEN-GLOBAL-METADATA"
    description = "Invalid global metadata: %s"


class UnknownTimeoutMetadata(Rule):
    name = "UNKNOWN-TIMEOUT-METADATA"
    description = "Unexpected value for timeout metadata"


class UnknownMetadata(Rule):
    name = "UNKNOWN-METADATA"
    description = "Unexpected kind of metadata"


class StrayMetadata(Rule):
    name = "STRAY-METADATA"
    description = "Metadata comments should start the file"


class IndentedMetadata(Rule):
    name = "INDENTED-METADATA"
    description = "Metadata comments should start the line"


class BrokenMetadata(Rule):
    name = "BROKEN-METADATA"
    description = "Metadata comment is not formatted correctly"


class TestharnessInOtherType(Rule):
    name = "TESTHARNESS-IN-OTHER-TYPE"
    description = "testharness.js included in a %s test"


class DuplicateBasenamePath(Rule):
    name = "DUPLICATE-BASENAME-PATH"
    description = collapse("""
        File has identical basename path (path excluding extension) as
        other file(s) (found extensions: %s)
    """)
    to_fix = "rename files so they have unique basename paths"


class DuplicatePathCaseInsensitive(Rule):
    name = "DUPLICATE-CASE-INSENSITIVE-PATH"
    description = collapse("""
        Path differs from path %s only in case
    """)
    to_fix = "rename files so they are unique irrespective of case"


class TentativeDirectoryName(Rule):
    name = "TENTATIVE-DIRECTORY-NAME"
    description = "Directories for tentative tests must be named exactly 'tentative'"
    to_fix = "rename directory to be called 'tentative'"
+
+
class Regexp(metaclass=abc.ABCMeta):
    """Abstract base class for a line-based regexp lint rule.

    Subclasses provide a bytes ``pattern``, a ``name`` and a ``description``;
    ``file_extensions`` optionally restricts which files the rule applies to.
    """

    # abc.abstractproperty has been deprecated since Python 3.3; use
    # @property stacked on @abc.abstractmethod instead.
    @property
    @abc.abstractmethod
    def pattern(self) -> bytes:
        pass

    @property
    @abc.abstractmethod
    def name(self) -> Text:
        pass

    @property
    @abc.abstractmethod
    def description(self) -> Text:
        pass

    # If set, the rule only applies to files with one of these extensions.
    file_extensions: Optional[List[Text]] = None

    # Optional advice on how to fix the error; declared here (mirroring
    # Rule.to_fix) since several subclasses set it.
    to_fix: Optional[Text] = None

    def __init__(self) -> None:
        # Compile once per instance; search() is called per line.
        self._re: Pattern[bytes] = re.compile(self.pattern)

    def applies(self, path: Text) -> bool:
        """Return True if this rule should be run against *path*."""
        return (self.file_extensions is None or
                os.path.splitext(path)[1] in self.file_extensions)

    def search(self, line: bytes) -> Optional[Match[bytes]]:
        """Return a match object if *line* violates this rule, else None."""
        return self._re.search(line)
+
+
# ---------------------------------------------------------------------------
# Line-based regexp rules. Patterns are bytes because files are scanned as
# raw bytes, line by line.
# ---------------------------------------------------------------------------

class TabsRegexp(Regexp):
    pattern = b"^\t"
    name = "INDENT TABS"
    description = "Test-file line starts with one or more tab characters"
    to_fix = "use spaces to replace any tab characters at beginning of lines"


class CRRegexp(Regexp):
    pattern = b"\r$"
    name = "CR AT EOL"
    description = "Test-file line ends with CR (U+000D) character"
    to_fix = """
        reformat file so each line just has LF (U+000A) line ending (standard,
        cross-platform "Unix" line endings instead of, e.g., DOS line endings).
    """


class SetTimeoutRegexp(Regexp):
    pattern = br"setTimeout\s*\("
    name = "SET TIMEOUT"
    file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
    description = "setTimeout used"
    to_fix = """
        replace all `setTimeout(...)` calls with `step_timeout(...)` calls
    """


class W3CTestOrgRegexp(Regexp):
    pattern = br"w3c\-test\.org"
    name = "W3C-TEST.ORG"
    description = "Test-file line has the string `w3c-test.org`"
    to_fix = """
        either replace the `w3c-test.org` string with the expression
        `{{host}}:{{ports[http][0]}}` or a generic hostname like `example.org`
    """


class WebPlatformTestRegexp(Regexp):
    pattern = br"web\-platform\.test"
    name = "WEB-PLATFORM.TEST"
    description = "Internal web-platform.test domain used"
    to_fix = """
        use [server-side substitution](https://web-platform-tests.org/writing-tests/server-pipes.html#sub),
        along with the [`.sub` filename-flag](https://web-platform-tests.org/writing-tests/file-names.html#test-features),
        to replace web-platform.test with `{{domains[]}}`
    """


class Webidl2Regexp(Regexp):
    pattern = br"webidl2\.js"
    name = "WEBIDL2.JS"
    description = "Legacy webidl2.js script used"


class ConsoleRegexp(Regexp):
    pattern = br"console\.[a-zA-Z]+\s*\("
    name = "CONSOLE"
    file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
    description = "Test-file line has a `console.*(...)` call"
    to_fix = """
        remove the `console.*(...)` call (and in some cases, consider adding an
        `assert_*` of some kind in place of it)
    """


class GenerateTestsRegexp(Regexp):
    pattern = br"generate_tests\s*\("
    name = "GENERATE_TESTS"
    file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
    description = "Test file line has a generate_tests call"
    to_fix = "remove the call and call `test()` a number of times instead"


class PrintRegexp(Regexp):
    pattern = br"print(?:\s|\s*\()"
    name = "PRINT STATEMENT"
    file_extensions = [".py"]
    description = collapse("""
        A server-side python support file contains a `print` statement
    """)
    to_fix = """
        remove the `print` statement or replace it with something else that
        achieves the intended effect (e.g., a logging call)
    """


class LayoutTestsRegexp(Regexp):
    pattern = br"(eventSender|testRunner|internals)\."
    name = "LAYOUTTESTS APIS"
    file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
    description = "eventSender/testRunner/internals used; these are LayoutTests-specific APIs (WebKit/Blink)"


class MissingDepsRegexp(Regexp):
    pattern = br"[^\w]/gen/"
    name = "MISSING DEPENDENCY"
    file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
    description = "Chromium-specific content referenced"
    to_fix = "Reimplement the test to use well-documented testing interfaces"


class SpecialPowersRegexp(Regexp):
    pattern = b"SpecialPowers"
    name = "SPECIALPOWERS API"
    file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
    description = "SpecialPowers used; this is gecko-specific and not supported in wpt"


class TrailingWhitespaceRegexp(Regexp):
    name = "TRAILING WHITESPACE"
    description = "Whitespace at EOL"
    pattern = b"[ \t\f\v]$"
    to_fix = """Remove trailing whitespace from all lines in the file."""


class AssertThrowsRegexp(Regexp):
    # NOTE(review): the leading [^.] requires a preceding character, so a call
    # at the very start of a line appears not to match — confirm intended.
    pattern = br"[^.]assert_throws\("
    name = "ASSERT_THROWS"
    file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
    description = "Test-file line has an `assert_throws(...)` call"
    to_fix = """Replace with `assert_throws_dom` or `assert_throws_js` or `assert_throws_exactly`"""


class PromiseRejectsRegexp(Regexp):
    pattern = br"promise_rejects\("
    name = "PROMISE_REJECTS"
    file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
    description = "Test-file line has a `promise_rejects(...)` call"
    to_fix = """Replace with promise_rejects_dom or promise_rejects_js or `promise_rejects_exactly`"""


class AssertPreconditionRegexp(Regexp):
    pattern = br"[^.]assert_precondition\("
    name = "ASSERT-PRECONDITION"
    file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
    description = "Test-file line has an `assert_precondition(...)` call"
    to_fix = """Replace with `assert_implements` or `assert_implements_optional`"""
diff --git a/testing/web-platform/tests/tools/lint/tests/__init__.py b/testing/web-platform/tests/tools/lint/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/__init__.py
diff --git a/testing/web-platform/tests/tools/lint/tests/base.py b/testing/web-platform/tests/tools/lint/tests/base.py
new file mode 100644
index 0000000000..7740bb25ca
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/base.py
@@ -0,0 +1,9 @@
+# mypy: allow-untyped-defs
+
def check_errors(errors):
    """Assert every error is a well-formed (type, description, path, line) tuple."""
    for error_type, description, path, line_number in errors:
        assert isinstance(error_type, str)
        assert isinstance(description, str)
        assert isinstance(path, str)
        assert line_number is None or isinstance(line_number, int)
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/about_blank.html b/testing/web-platform/tests/tools/lint/tests/dummy/about_blank.html
new file mode 100644
index 0000000000..8f940b540f
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/about_blank.html
@@ -0,0 +1 @@
+<link rel="match" href=about:blank>
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/broken.html b/testing/web-platform/tests/tools/lint/tests/dummy/broken.html
new file mode 100644
index 0000000000..74793c43ca
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/broken.html
@@ -0,0 +1 @@
+THIS LINE HAS TRAILING WHITESPACE
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/broken_ignored.html b/testing/web-platform/tests/tools/lint/tests/dummy/broken_ignored.html
new file mode 100644
index 0000000000..74793c43ca
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/broken_ignored.html
@@ -0,0 +1 @@
+THIS LINE HAS TRAILING WHITESPACE
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/a-ref.html b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/a-ref.html
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/a-ref.html
@@ -0,0 +1 @@
+1
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/a.html b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/a.html
new file mode 100644
index 0000000000..73c5d0bc37
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/a.html
@@ -0,0 +1,4 @@
+<link rel="help" href="https://www.w3.org/TR/CSS21/aural.html#propdef-stress">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+1
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/a-ref.html b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/a-ref.html
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/a-ref.html
@@ -0,0 +1 @@
+1
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/a.html b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/a.html
new file mode 100644
index 0000000000..73c5d0bc37
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/a.html
@@ -0,0 +1,4 @@
+<link rel="help" href="https://www.w3.org/TR/CSS21/aural.html#propdef-stress">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+1
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/support/a.html b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/support/a.html
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/support/a.html
@@ -0,0 +1 @@
+1
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/support/tools/a.html b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/support/tools/a.html
new file mode 100644
index 0000000000..0cfbf08886
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/support/tools/a.html
@@ -0,0 +1 @@
+2
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/tools/a.html b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/tools/a.html
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/match/tools/a.html
@@ -0,0 +1 @@
+1
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/not-match/a-ref.html b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/not-match/a-ref.html
new file mode 100644
index 0000000000..0cfbf08886
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/not-match/a-ref.html
@@ -0,0 +1 @@
+2
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/not-match/a.html b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/not-match/a.html
new file mode 100644
index 0000000000..4b0ce383a2
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/not-match/a.html
@@ -0,0 +1,4 @@
+<link rel="help" href="https://www.w3.org/TR/CSS21/aural.html#propdef-stress">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+2
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/not-match/support/a.html b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/not-match/support/a.html
new file mode 100644
index 0000000000..0cfbf08886
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/not-match/support/a.html
@@ -0,0 +1 @@
+2
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/not-match/tools/a.html b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/not-match/tools/a.html
new file mode 100644
index 0000000000..0cfbf08886
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/not-match/tools/a.html
@@ -0,0 +1 @@
+2
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/relative-testharness-interact.html b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/relative-testharness-interact.html
new file mode 100644
index 0000000000..a50ef983a9
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/relative-testharness-interact.html
@@ -0,0 +1,5 @@
+<!doctype html>
+<link rel=help href="https://www.w3.org/TR/CSS2/visufx.html#overflow-clipping">
+<meta name=flags content=interact>
+<script src="../../../resources/testharness.js"></script>
+<script src="../../../resources/testharnessreport.js"></script>
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/relative-testharness.html b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/relative-testharness.html
new file mode 100644
index 0000000000..f15649e3aa
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/relative-testharness.html
@@ -0,0 +1,4 @@
+<!doctype html>
+<link rel=help href="https://www.w3.org/TR/CSS2/visufx.html#overflow-clipping">
+<script src="../../../resources/testharness.js"></script>
+<script src="../../../resources/testharnessreport.js"></script>
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/selectors/a.html b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/selectors/a.html
new file mode 100644
index 0000000000..0d63c6bfed
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/selectors/a.html
@@ -0,0 +1,4 @@
+<link rel="help" href="https://drafts.csswg.org/selectors-3/#type-selectors">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+1
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/support/a.html b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/support/a.html
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/support/a.html
@@ -0,0 +1 @@
+1
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/support/tools/a.html b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/support/tools/a.html
new file mode 100644
index 0000000000..0cfbf08886
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/support/tools/a.html
@@ -0,0 +1 @@
+2
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/tools/a.html b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/tools/a.html
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/css/css-unique/tools/a.html
@@ -0,0 +1 @@
+1
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/dependency.html b/testing/web-platform/tests/tools/lint/tests/dummy/dependency.html
new file mode 100644
index 0000000000..29296f4c58
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/dependency.html
@@ -0,0 +1 @@
+This file is used to demonstrate acceptance of root-relative reftest references.
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/lint.ignore b/testing/web-platform/tests/tools/lint/tests/dummy/lint.ignore
new file mode 100644
index 0000000000..a763e4432e
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/lint.ignore
@@ -0,0 +1 @@
+*:broken_ignored.html
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/okay.html b/testing/web-platform/tests/tools/lint/tests/dummy/okay.html
new file mode 100644
index 0000000000..a3178a3c83
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/okay.html
@@ -0,0 +1 @@
+THIS LINE HAS NO TRAILING WHITESPACE
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/ref/absolute.html b/testing/web-platform/tests/tools/lint/tests/dummy/ref/absolute.html
new file mode 100644
index 0000000000..4b47bc8836
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/ref/absolute.html
@@ -0,0 +1 @@
+<link rel="match" href="http://example.com/reference.html">
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/ref/existent_relative-ref.html b/testing/web-platform/tests/tools/lint/tests/dummy/ref/existent_relative-ref.html
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/ref/existent_relative-ref.html
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/ref/existent_relative.html b/testing/web-platform/tests/tools/lint/tests/dummy/ref/existent_relative.html
new file mode 100644
index 0000000000..29364ee5fb
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/ref/existent_relative.html
@@ -0,0 +1 @@
+<link rel="match" href="existent_relative-ref.html">
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/ref/existent_root_relative.html b/testing/web-platform/tests/tools/lint/tests/dummy/ref/existent_root_relative.html
new file mode 100644
index 0000000000..2bedc2d309
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/ref/existent_root_relative.html
@@ -0,0 +1 @@
+<link rel="match" href="/dependency.html">
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/ref/non_existent_relative.html b/testing/web-platform/tests/tools/lint/tests/dummy/ref/non_existent_relative.html
new file mode 100644
index 0000000000..009a1d5eb0
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/ref/non_existent_relative.html
@@ -0,0 +1 @@
+<link rel="match" href="non_existent_file.html">
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/ref/non_existent_root_relative.html b/testing/web-platform/tests/tools/lint/tests/dummy/ref/non_existent_root_relative.html
new file mode 100644
index 0000000000..b1812013aa
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/ref/non_existent_root_relative.html
@@ -0,0 +1 @@
+<link rel="match" href="/non_existent_file.html">
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/ref/same_file_empty.html b/testing/web-platform/tests/tools/lint/tests/dummy/ref/same_file_empty.html
new file mode 100644
index 0000000000..eaa18e9c01
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/ref/same_file_empty.html
@@ -0,0 +1 @@
+<link rel="match" href="">
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/ref/same_file_path.html b/testing/web-platform/tests/tools/lint/tests/dummy/ref/same_file_path.html
new file mode 100644
index 0000000000..6a80c1f20d
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/ref/same_file_path.html
@@ -0,0 +1 @@
+<link rel="match" href="same_file_path.html">
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/tests/dir1/a.html b/testing/web-platform/tests/tools/lint/tests/dummy/tests/dir1/a.html
new file mode 100644
index 0000000000..f412593b04
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/tests/dir1/a.html
@@ -0,0 +1,4 @@
+<!DOCTYPE html>
+<meta charset="utf-8">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/tests/dir1/a.js b/testing/web-platform/tests/tools/lint/tests/dummy/tests/dir1/a.js
new file mode 100644
index 0000000000..a855dab6a7
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/tests/dir1/a.js
@@ -0,0 +1,6 @@
+// This is a dummy JavaScript file, meant to indicate a 'support' file of
+// sorts that may be included by some test.
+
+function helloWorld() {
+ return 'Hello, world!';
+}
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/tests/dir1/a.xhtml b/testing/web-platform/tests/tools/lint/tests/dummy/tests/dir1/a.xhtml
new file mode 100644
index 0000000000..c8b4cc2e52
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/tests/dir1/a.xhtml
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="utf-8"?>
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+</html>
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/tests/dir1/b.html b/testing/web-platform/tests/tools/lint/tests/dummy/tests/dir1/b.html
new file mode 100644
index 0000000000..f412593b04
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/tests/dir1/b.html
@@ -0,0 +1,4 @@
+<!DOCTYPE html>
+<meta charset="utf-8">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/tests/dir2/a.xhtml b/testing/web-platform/tests/tools/lint/tests/dummy/tests/dir2/a.xhtml
new file mode 100644
index 0000000000..c8b4cc2e52
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/tests/dir2/a.xhtml
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="utf-8"?>
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+</html>
diff --git a/testing/web-platform/tests/tools/lint/tests/dummy/tests/relative-testharness-manual.html b/testing/web-platform/tests/tools/lint/tests/dummy/tests/relative-testharness-manual.html
new file mode 100644
index 0000000000..15693b990c
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/dummy/tests/relative-testharness-manual.html
@@ -0,0 +1,3 @@
+<!doctype html>
+<script src="../../../resources/testharness.js"></script>
+<script src="../../../resources/testharnessreport.js"></script>
diff --git a/testing/web-platform/tests/tools/lint/tests/test_file_lints.py b/testing/web-platform/tests/tools/lint/tests/test_file_lints.py
new file mode 100644
index 0000000000..9a49d6e7a2
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/test_file_lints.py
@@ -0,0 +1,912 @@
+# mypy: allow-untyped-defs
+
+from ..lint import check_file_contents
+from .base import check_errors
+import io
+import os
+import pytest
+
+INTERESTING_FILE_NAMES = {
+ "python": [
+ "test.py",
+ ],
+ "js": [
+ "test.js",
+ ],
+ "web-lax": [
+ "test.htm",
+ "test.html",
+ ],
+ "web-strict": [
+ "test.svg",
+ "test.xht",
+ "test.xhtml",
+ ],
+}
+
+def check_with_files(input_bytes):
+ return {
+ filename: (check_file_contents("", filename, io.BytesIO(input_bytes)), kind)
+ for (filename, kind) in
+ (
+ (os.path.join("html", filename), kind)
+ for (kind, filenames) in INTERESTING_FILE_NAMES.items()
+ for filename in filenames
+ )
+ }
+
+
+def test_trailing_whitespace():
+ error_map = check_with_files(b"test; ")
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ expected = [("TRAILING WHITESPACE", "Whitespace at EOL", filename, 1)]
+ if kind == "web-strict":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
+ assert errors == expected
+
+
+def test_indent_tabs():
+ error_map = check_with_files(b"def foo():\n\x09pass")
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ expected = [("INDENT TABS", "Test-file line starts with one or more tab characters", filename, 2)]
+ if kind == "web-strict":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
+ assert errors == expected
+
+
+def test_cr_not_at_eol():
+ error_map = check_with_files(b"line1\rline2\r")
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ expected = [("CR AT EOL", "Test-file line ends with CR (U+000D) character", filename, 1)]
+ if kind == "web-strict":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
+ assert errors == expected
+
+
+def test_cr_at_eol():
+ error_map = check_with_files(b"line1\r\nline2\r\n")
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ expected = [
+ ("CR AT EOL", "Test-file line ends with CR (U+000D) character", filename, 1),
+ ("CR AT EOL", "Test-file line ends with CR (U+000D) character", filename, 2),
+ ]
+ if kind == "web-strict":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
+ assert errors == expected
+
+
+def test_w3c_test_org():
+ error_map = check_with_files(b"import('http://www.w3c-test.org/')")
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ expected = [("W3C-TEST.ORG", "Test-file line has the string `w3c-test.org`", filename, 1)]
+ if kind == "python":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, 1))
+ elif kind == "web-strict":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
+ assert errors == expected
+
+def test_web_platform_test():
+ error_map = check_with_files(b"import('http://web-platform.test/')")
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ expected = [("WEB-PLATFORM.TEST", "Internal web-platform.test domain used", filename, 1)]
+ if kind == "python":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, 1))
+ elif kind == "web-strict":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
+ assert errors == expected
+
+
+def test_webidl2_js():
+ error_map = check_with_files(b"<script src=/resources/webidl2.js>")
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ expected = [("WEBIDL2.JS", "Legacy webidl2.js script used", filename, 1)]
+ if kind == "python":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, 1))
+ elif kind == "web-strict":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, None))
+ assert errors == expected
+
+
+def test_console():
+ error_map = check_with_files(b"<script>\nconsole.log('error');\nconsole.error ('log')\n</script>")
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind in ["web-lax", "web-strict", "js"]:
+ assert errors == [
+ ("CONSOLE", "Test-file line has a `console.*(...)` call", filename, 2),
+ ("CONSOLE", "Test-file line has a `console.*(...)` call", filename, 3),
+ ]
+ else:
+ assert errors == [("PARSE-FAILED", "Unable to parse file", filename, 1)]
+
+
+def test_setTimeout():
+ error_map = check_with_files(b"<script>setTimeout(() => 1, 10)</script>")
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind == "python":
+ assert errors == [("PARSE-FAILED", "Unable to parse file", filename, 1)]
+ else:
+ assert errors == [('SET TIMEOUT',
+ 'setTimeout used',
+ filename,
+ 1)]
+
+
+def test_eventSender():
+ error_map = check_with_files(b"<script>eventSender.mouseDown()</script>")
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind == "python":
+ assert errors == [("PARSE-FAILED", "Unable to parse file", filename, 1)]
+ else:
+ assert errors == [('LAYOUTTESTS APIS',
+ 'eventSender/testRunner/internals used; these are LayoutTests-specific APIs (WebKit/Blink)',
+ filename,
+ 1)]
+
+
+def test_testRunner():
+ error_map = check_with_files(b"<script>if (window.testRunner) { testRunner.waitUntilDone(); }</script>")
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind == "python":
+ assert errors == [("PARSE-FAILED", "Unable to parse file", filename, 1)]
+ else:
+ assert errors == [('LAYOUTTESTS APIS',
+ 'eventSender/testRunner/internals used; these are LayoutTests-specific APIs (WebKit/Blink)',
+ filename,
+ 1)]
+
+
+def test_internals():
+ error_map = check_with_files(b"<script>if (window.internals) { internals.doAThing(); }</script>")
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind == "python":
+ assert errors == [("PARSE-FAILED", "Unable to parse file", filename, 1)]
+ else:
+ assert errors == [('LAYOUTTESTS APIS',
+ 'eventSender/testRunner/internals used; these are LayoutTests-specific APIs (WebKit/Blink)',
+ filename,
+ 1)]
+
+
+def test_missing_deps():
+ error_map = check_with_files(b"<script src='/gen/foo.js'></script>")
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind == "python":
+ assert errors == [("PARSE-FAILED", "Unable to parse file", filename, 1)]
+ else:
+ assert errors == [('MISSING DEPENDENCY',
+ 'Chromium-specific content referenced',
+ filename,
+ 1)]
+
+
+def test_no_missing_deps():
+ error_map = check_with_files(b"""<head>
+<script src='/foo/gen/foo.js'></script>
+<script src='/gens/foo.js'></script>
+</head>""")
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind == "python":
+ assert errors == [("PARSE-FAILED", "Unable to parse file", filename, 1)]
+ else:
+ assert errors == []
+
+
+def test_meta_timeout():
+ code = b"""
+<html xmlns="http://www.w3.org/1999/xhtml">
+<meta name="timeout" />
+<meta name="timeout" content="short" />
+<meta name="timeout" content="long" />
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind in ["web-lax", "web-strict"]:
+ assert errors == [
+ ("MULTIPLE-TIMEOUT", "More than one meta name='timeout'", filename, None),
+ ("INVALID-TIMEOUT",
+ "Test file with `<meta name='timeout'...>` element that has a `content` attribute whose value is not `long`: ",
+ filename,
+ None),
+ ("INVALID-TIMEOUT",
+ "Test file with `<meta name='timeout'...>` element that has a `content` attribute whose value is not `long`: short",
+ filename,
+ None),
+ ]
+ elif kind == "python":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, 2),
+ ]
+
+
+def test_early_testharnessreport():
+ code = b"""
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharnessreport.js"></script>
+<script src="/resources/testharness.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind in ["web-lax", "web-strict"]:
+ assert errors == [
+ ("EARLY-TESTHARNESSREPORT",
+ "Test file has an instance of "
+ "`<script src='/resources/testharnessreport.js'>` "
+ "prior to `<script src='/resources/testharness.js'>`",
+ filename,
+ None),
+ ]
+ elif kind == "python":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, 2),
+ ]
+
+
+def test_multiple_testharness():
+ code = b"""
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharness.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind in ["web-lax", "web-strict"]:
+ assert errors == [
+ ("MULTIPLE-TESTHARNESS", "More than one `<script src='/resources/testharness.js'>`", filename, None),
+ ("MISSING-TESTHARNESSREPORT", "Missing `<script src='/resources/testharnessreport.js'>`", filename, None),
+ ]
+ elif kind == "python":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, 2),
+ ]
+
+
+def test_multiple_testharnessreport():
+ code = b"""
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind in ["web-lax", "web-strict"]:
+ assert errors == [
+ ("MULTIPLE-TESTHARNESSREPORT", "More than one `<script src='/resources/testharnessreport.js'>`", filename, None),
+ ]
+ elif kind == "python":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, 2),
+ ]
+
+
+def test_early_testdriver_vendor():
+ code = b"""
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testdriver-vendor.js"></script>
+<script src="/resources/testdriver.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind in ["web-lax", "web-strict"]:
+ assert errors == [
+ ("EARLY-TESTDRIVER-VENDOR",
+ "Test file has an instance of "
+ "`<script src='/resources/testdriver-vendor.js'>` "
+ "prior to `<script src='/resources/testdriver.js'>`",
+ filename,
+ None),
+ ]
+ elif kind == "python":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, 2),
+ ]
+
+
+def test_multiple_testdriver():
+ code = b"""
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/resources/testdriver.js"></script>
+<script src="/resources/testdriver.js"></script>
+<script src="/resources/testdriver-vendor.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind in ["web-lax", "web-strict"]:
+ assert errors == [
+ ("MULTIPLE-TESTDRIVER", "More than one `<script src='/resources/testdriver.js'>`", filename, None),
+ ]
+ elif kind == "python":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, 2),
+ ]
+
+
+def test_multiple_testdriver_vendor():
+ code = b"""
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/resources/testdriver.js"></script>
+<script src="/resources/testdriver-vendor.js"></script>
+<script src="/resources/testdriver-vendor.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind in ["web-lax", "web-strict"]:
+ assert errors == [
+ ("MULTIPLE-TESTDRIVER-VENDOR", "More than one `<script src='/resources/testdriver-vendor.js'>`", filename, None),
+ ]
+ elif kind == "python":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, 2),
+ ]
+
+
+def test_missing_testdriver_vendor():
+ code = b"""
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/resources/testdriver.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind in ["web-lax", "web-strict"]:
+ assert errors == [
+ ("MISSING-TESTDRIVER-VENDOR", "Missing `<script src='/resources/testdriver-vendor.js'>`", filename, None),
+ ]
+ elif kind == "python":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, 2),
+ ]
+
+
+def test_testharness_path():
+ code = b"""\
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="testharness.js"></script>
+<script src="resources/testharness.js"></script>
+<script src="../resources/testharness.js"></script>
+<script src="http://w3c-test.org/resources/testharness.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ expected = [("W3C-TEST.ORG", "Test-file line has the string `w3c-test.org`", filename, 5)]
+ if kind == "python":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, 1))
+ elif kind in ["web-lax", "web-strict"]:
+ expected.extend([
+ ("TESTHARNESS-PATH", "testharness.js script seen with incorrect path", filename, None),
+ ("TESTHARNESS-PATH", "testharness.js script seen with incorrect path", filename, None),
+ ("TESTHARNESS-PATH", "testharness.js script seen with incorrect path", filename, None),
+ ("TESTHARNESS-PATH", "testharness.js script seen with incorrect path", filename, None),
+ ])
+ assert errors == expected
+
+
+def test_testharnessreport_path():
+ code = b"""\
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="testharnessreport.js"></script>
+<script src="resources/testharnessreport.js"></script>
+<script src="../resources/testharnessreport.js"></script>
+<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ expected = [("W3C-TEST.ORG", "Test-file line has the string `w3c-test.org`", filename, 5)]
+ if kind == "python":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, 1))
+ elif kind in ["web-lax", "web-strict"]:
+ expected.extend([
+ ("TESTHARNESSREPORT-PATH", "testharnessreport.js script seen with incorrect path", filename, None),
+ ("TESTHARNESSREPORT-PATH", "testharnessreport.js script seen with incorrect path", filename, None),
+ ("TESTHARNESSREPORT-PATH", "testharnessreport.js script seen with incorrect path", filename, None),
+ ("TESTHARNESSREPORT-PATH", "testharnessreport.js script seen with incorrect path", filename, None),
+ ])
+ assert errors == expected
+
+
+def test_testdriver_path():
+ code = b"""\
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="testdriver.js"></script>
+<script src="/elsewhere/testdriver.js"></script>
+<script src="/elsewhere/resources/testdriver.js"></script>
+<script src="/resources/elsewhere/testdriver.js"></script>
+<script src="../resources/testdriver.js"></script>
+<script src="/resources/testdriver-vendor.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ expected = []
+ if kind == "python":
+ expected.append(("PARSE-FAILED", "Unable to parse file", filename, 1))
+ elif kind in ["web-lax", "web-strict"]:
+ expected.extend([
+ ("TESTDRIVER-PATH", "testdriver.js script seen with incorrect path", filename, None),
+ ("TESTDRIVER-PATH", "testdriver.js script seen with incorrect path", filename, None),
+ ("TESTDRIVER-PATH", "testdriver.js script seen with incorrect path", filename, None),
+ ("TESTDRIVER-PATH", "testdriver.js script seen with incorrect path", filename, None),
+ ("TESTDRIVER-PATH", "testdriver.js script seen with incorrect path", filename, None)
+ ])
+ assert errors == expected
+
+
+def test_testdriver_vendor_path():
+ code = b"""\
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/resources/testdriver.js"></script>
+<script src="testdriver-vendor.js"></script>
+<script src="/elsewhere/testdriver-vendor.js"></script>
+<script src="/elsewhere/resources/testdriver-vendor.js"></script>
+<script src="/resources/elsewhere/testdriver-vendor.js"></script>
+<script src="../resources/testdriver-vendor.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind == "python":
+ expected = {("PARSE-FAILED", "Unable to parse file", filename, 1)}
+ elif kind in ["web-lax", "web-strict"]:
+ expected = {
+ ("MISSING-TESTDRIVER-VENDOR", "Missing `<script src='/resources/testdriver-vendor.js'>`", filename, None),
+ ("TESTDRIVER-VENDOR-PATH", "testdriver-vendor.js script seen with incorrect path", filename, None),
+ ("TESTDRIVER-VENDOR-PATH", "testdriver-vendor.js script seen with incorrect path", filename, None),
+ ("TESTDRIVER-VENDOR-PATH", "testdriver-vendor.js script seen with incorrect path", filename, None),
+ ("TESTDRIVER-VENDOR-PATH", "testdriver-vendor.js script seen with incorrect path", filename, None),
+ ("TESTDRIVER-VENDOR-PATH", "testdriver-vendor.js script seen with incorrect path", filename, None)
+ }
+ else:
+ expected = set()
+
+ assert set(errors) == expected
+
+
+def test_not_testharness_path():
+ code = b"""\
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="resources/webperftestharness.js"></script>
+<script src="/resources/testdriver.js"></script>
+<script src="/resources/testdriver-vendor.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind == "python":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, 1),
+ ]
+ else:
+ assert errors == []
+
+
+def test_variant_missing():
+ code = b"""\
+<html xmlns="http://www.w3.org/1999/xhtml">
+<meta name="variant">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind == "python":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, 1),
+ ]
+ elif kind == "web-lax":
+ assert errors == [
+ ("VARIANT-MISSING",
+ "Test file with a `<meta name='variant'...>` element that's missing a `content` attribute",
+ filename,
+ None)
+ ]
+
+
+# A corresponding "positive" test cannot be written because the manifest
+# SourceFile implementation raises a runtime exception for the condition this
+# linting rule describes
+@pytest.mark.parametrize("content", ["",
+                                     "?foo",
+                                     "#bar"])
+def test_variant_malformed_negative(content):
+ code = """\
+<html xmlns="http://www.w3.org/1999/xhtml">
+<meta name="variant" content="{}">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+</html>
+""".format(content).encode("utf-8")
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind == "python":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, 1),
+ ]
+ elif kind == "web-lax":
+ assert errors == []
+
+
+def test_late_timeout():
+ code = b"""\
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharness.js"></script>
+<meta name="timeout" content="long">
+<script src="/resources/testharnessreport.js"></script>
+</html>
+"""
+ error_map = check_with_files(code)
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind == "python":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, 1),
+ ]
+ elif kind == "web-lax":
+ assert errors == [
+ ("LATE-TIMEOUT",
+ "Test file with `<meta name='timeout'...>` element after `<script src='/resources/testharnessreport.js'>` element",
+ filename,
+ None)
+ ]
+
+
+def test_print_function():
+ error_map = check_with_files(b"def foo():\n print('function')\n")
+
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if kind == "python":
+ assert errors == [
+ ("PRINT STATEMENT", "A server-side python support file contains a `print` statement", filename, 2),
+ ]
+ elif kind == "web-strict":
+ assert errors == [
+ ("PARSE-FAILED", "Unable to parse file", filename, None),
+ ]
+ else:
+ assert errors == []
+
+
+def test_ahem_system_font():
+ code = b"""\
+<html>
+<style>
+body {
+ font-family: aHEm, sans-serif;
+}
+</style>
+</html>
+"""
+ error_map = check_with_files(code)
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if filename.endswith((".htm", ".html", ".xht", ".xhtml")):
+ assert errors == [
+ ("AHEM SYSTEM FONT", "Don't use Ahem as a system font, use /fonts/ahem.css", filename, None)
+ ]
+
+
+def test_ahem_web_font():
+ code = b"""\
+<html>
+<link rel="stylesheet" type="text/css" href="/fonts/ahem.css" />
+<style>
+body {
+ font-family: aHEm, sans-serif;
+}
+</style>
+</html>
+"""
+ error_map = check_with_files(code)
+ for (filename, (errors, kind)) in error_map.items():
+ check_errors(errors)
+
+ if filename.endswith((".htm", ".html", ".xht", ".xhtml")):
+ assert errors == []
+
+
+open_mode_code = """
+def first():
+ return {0}("test.png")
+
+def second():
+ return {0}("test.png", "r")
+
+def third():
+ return {0}("test.png", "rb")
+
+def fourth():
+ return {0}("test.png", encoding="utf-8")
+
+def fifth():
+ return {0}("test.png", mode="rb")
+"""
+
+
+def test_open_mode():
+ for method in ["open", "file"]:
+ code = open_mode_code.format(method).encode("utf-8")
+ errors = check_file_contents("", "test.py", io.BytesIO(code))
+ check_errors(errors)
+
+ message = ("File opened without providing an explicit mode (note: " +
+ "binary files must be read with 'b' in the mode flags)")
+
+ assert errors == [
+ ("OPEN-NO-MODE", message, "test.py", 3),
+ ("OPEN-NO-MODE", message, "test.py", 12),
+ ]
+
+
+def test_css_missing_file_in_css():
+ code = b"""\
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+</html>
+"""
+ errors = check_file_contents("", "css/foo/bar.html", io.BytesIO(code))
+ check_errors(errors)
+
+ assert errors == [
+ ('MISSING-LINK',
+ 'Testcase file must have a link to a spec',
+ "css/foo/bar.html",
+ None),
+ ]
+
+
+def test_css_missing_file_manual():
+ errors = check_file_contents("", "css/foo/bar-manual.html", io.BytesIO(b""))
+ check_errors(errors)
+
+ assert errors == [
+ ('MISSING-LINK',
+ 'Testcase file must have a link to a spec',
+ "css/foo/bar-manual.html",
+ None),
+ ]
+
+
+def test_css_missing_file_tentative():
+ code = b"""\
+<html xmlns="http://www.w3.org/1999/xhtml">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+</html>
+"""
+
+ # The tentative flag covers tests that make assertions 'not yet required by
+ # any specification', so they need not have a specification link.
+ errors = check_file_contents("", "css/foo/bar.tentative.html", io.BytesIO(code))
+ assert not errors
+
+
+@pytest.mark.parametrize("filename", [
+ "foo.worker.js",
+ "foo.any.js",
+])
+@pytest.mark.parametrize("input,error", [
+ (b"""//META: title=foo\n""", None),
+ (b"""//META: timeout=long\n""", None),
+ (b"""// META: timeout=long\n""", None),
+ (b"""// META: timeout=long\n""", None),
+ (b"""// META: script=foo.js\n""", None),
+ (b"""// META: variant=\n""", None),
+ (b"""// META: variant=?wss\n""", None),
+ (b"""# META:\n""", None),
+ (b"""\n// META: timeout=long\n""", (2, "STRAY-METADATA")),
+ (b""" // META: timeout=long\n""", (1, "INDENTED-METADATA")),
+ (b"""// META: timeout=long\n// META: timeout=long\n""", None),
+ (b"""// META: timeout=long\n\n// META: timeout=long\n""", (3, "STRAY-METADATA")),
+ (b"""// META: timeout=long\n// Start of the test\n// META: timeout=long\n""", (3, "STRAY-METADATA")),
+ (b"""// META:\n""", (1, "BROKEN-METADATA")),
+ (b"""// META: foobar\n""", (1, "BROKEN-METADATA")),
+ (b"""// META: foo=bar\n""", (1, "UNKNOWN-METADATA")),
+ (b"""// META: timeout=bar\n""", (1, "UNKNOWN-TIMEOUT-METADATA")),
+])
+def test_script_metadata(filename, input, error):
+ errors = check_file_contents("", filename, io.BytesIO(input))
+ check_errors(errors)
+
+ if error is not None:
+ line, kind = error
+ messages = {
+ "STRAY-METADATA": "Metadata comments should start the file",
+ "INDENTED-METADATA": "Metadata comments should start the line",
+ "BROKEN-METADATA": "Metadata comment is not formatted correctly",
+ "UNKNOWN-TIMEOUT-METADATA": "Unexpected value for timeout metadata",
+ "UNKNOWN-METADATA": "Unexpected kind of metadata",
+ }
+ assert errors == [
+ (kind,
+ messages[kind],
+ filename,
+ line),
+ ]
+ else:
+ assert errors == []
+
+
+@pytest.mark.parametrize("globals,error", [
+ (b"", None),
+ (b"default", "UNKNOWN-GLOBAL-METADATA"),
+ (b"!default", "UNKNOWN-GLOBAL-METADATA"),
+ (b"window", None),
+ (b"!window", "UNKNOWN-GLOBAL-METADATA"),
+ (b"!dedicatedworker", "UNKNOWN-GLOBAL-METADATA"),
+ (b"window, !window", "UNKNOWN-GLOBAL-METADATA"),
+ (b"!serviceworker", "UNKNOWN-GLOBAL-METADATA"),
+ (b"serviceworker, !serviceworker", "UNKNOWN-GLOBAL-METADATA"),
+ (b"worker, !dedicatedworker", "UNKNOWN-GLOBAL-METADATA"),
+ (b"worker, !serviceworker", "UNKNOWN-GLOBAL-METADATA"),
+ (b"!worker", "UNKNOWN-GLOBAL-METADATA"),
+ (b"foo", "UNKNOWN-GLOBAL-METADATA"),
+ (b"!foo", "UNKNOWN-GLOBAL-METADATA"),
+])
+def test_script_globals_metadata(globals, error):
+ filename = "foo.any.js"
+ input = b"""// META: global=%s\n""" % globals
+ errors = check_file_contents("", filename, io.BytesIO(input))
+ check_errors(errors)
+
+ if error is not None:
+ errors = [(k, f, l) for (k, _, f, l) in errors]
+ assert errors == [
+ (error,
+ filename,
+ 1),
+ ]
+ else:
+ assert errors == []
+
+
+@pytest.mark.parametrize("input,error", [
+ (b"""#META: timeout=long\n""", None),
+ (b"""# META: timeout=long\n""", None),
+ (b"""# META: timeout=long\n""", None),
+ (b""""// META:"\n""", None),
+ (b"""\n# META: timeout=long\n""", (2, "STRAY-METADATA")),
+ (b""" # META: timeout=long\n""", (1, "INDENTED-METADATA")),
+ (b"""# META: timeout=long\n# META: timeout=long\n""", None),
+ (b"""# META: timeout=long\n\n# META: timeout=long\n""", (3, "STRAY-METADATA")),
+ (b"""# META: timeout=long\n# Start of the test\n# META: timeout=long\n""", (3, "STRAY-METADATA")),
+ (b"""# META:\n""", (1, "BROKEN-METADATA")),
+ (b"""# META: foobar\n""", (1, "BROKEN-METADATA")),
+ (b"""# META: foo=bar\n""", (1, "UNKNOWN-METADATA")),
+ (b"""# META: timeout=bar\n""", (1, "UNKNOWN-TIMEOUT-METADATA")),
+])
+def test_python_metadata(input, error):
+ filename = "test.py"
+ errors = check_file_contents("", filename, io.BytesIO(input))
+ check_errors(errors)
+
+ if error is not None:
+ line, kind = error
+ messages = {
+ "STRAY-METADATA": "Metadata comments should start the file",
+ "INDENTED-METADATA": "Metadata comments should start the line",
+ "BROKEN-METADATA": "Metadata comment is not formatted correctly",
+ "UNKNOWN-TIMEOUT-METADATA": "Unexpected value for timeout metadata",
+ "UNKNOWN-METADATA": "Unexpected kind of metadata",
+ }
+ assert errors == [
+ (kind,
+ messages[kind],
+ filename,
+ line),
+ ]
+ else:
+ assert errors == []
diff --git a/testing/web-platform/tests/tools/lint/tests/test_lint.py b/testing/web-platform/tests/tools/lint/tests/test_lint.py
new file mode 100644
index 0000000000..8a304bd369
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/test_lint.py
@@ -0,0 +1,440 @@
+# mypy: allow-untyped-defs
+
+import io
+import os
+import sys
+from unittest import mock
+
+from ...localpaths import repo_root
+from .. import lint as lint_mod
+from ..lint import filter_ignorelist_errors, parse_ignorelist, lint, create_parser
+
+_dummy_repo = os.path.join(os.path.dirname(__file__), "dummy")
+
def _mock_lint(name, **kwargs):
    """Patch ``lint_mod.<name>`` with a wrapping mock so call counts can be asserted."""
    original = getattr(lint_mod, name)
    target = f"{lint_mod.__name__}.{name}"
    return mock.patch(target, wraps=original, **kwargs)
+
+
def test_filter_ignorelist_errors():
    """filter_ignorelist_errors drops errors matched by the ignorelist.

    A rule mapped to a specific line number only suppresses errors on that
    line; a rule mapped to ``{None}`` suppresses the error anywhere in a
    matching file.
    """
    ignorelist = {
        'CONSOLE': {
            'svg/*': {12}
        },
        'INDENT TABS': {
            'svg/*': {None}
        }
    }
    # parse_ignorelist normalises the case/path of the match string so need to do the same
    ignorelist = {e: {os.path.normcase(k): v for k, v in p.items()}
                  for e, p in ignorelist.items()}
    # paths passed into filter_ignorelist_errors are always Unix style
    filteredfile = 'svg/test.html'
    unfilteredfile = 'html/test.html'
    # Tests for passing no errors (the original repeated this check twice
    # verbatim; one copy suffices)
    filtered = filter_ignorelist_errors(ignorelist, [])
    assert filtered == []
    # Tests for filtering on file and line number
    filtered = filter_ignorelist_errors(ignorelist, [['CONSOLE', '', filteredfile, 12]])
    assert filtered == []
    filtered = filter_ignorelist_errors(ignorelist, [['CONSOLE', '', unfilteredfile, 12]])
    assert filtered == [['CONSOLE', '', unfilteredfile, 12]]
    filtered = filter_ignorelist_errors(ignorelist, [['CONSOLE', '', filteredfile, 11]])
    assert filtered == [['CONSOLE', '', filteredfile, 11]]
    # Tests for filtering on just file
    filtered = filter_ignorelist_errors(ignorelist, [['INDENT TABS', '', filteredfile, 12]])
    assert filtered == []
    filtered = filter_ignorelist_errors(ignorelist, [['INDENT TABS', '', filteredfile, 11]])
    assert filtered == []
    filtered = filter_ignorelist_errors(ignorelist, [['INDENT TABS', '', unfilteredfile, 11]])
    assert filtered == [['INDENT TABS', '', unfilteredfile, 11]]
+
+
def test_parse_ignorelist():
    """parse_ignorelist reads the ignore file into per-rule path→lines maps.

    Comma-separated rule names fan out to each rule; ``*`` rules mark whole
    files as skipped rather than suppressing individual errors.
    """
    input_buffer = io.StringIO("""
# Comment
CR AT EOL: svg/import/*
CR AT EOL: streams/resources/test-utils.js

INDENT TABS: .gitmodules
INDENT TABS: app-uri/*
INDENT TABS: svg/*

TRAILING WHITESPACE: app-uri/*

CONSOLE:streams/resources/test-utils.js: 12

CR AT EOL, INDENT TABS: html/test.js

CR AT EOL, INDENT TABS: html/test2.js: 42

*:*.pdf
*:resources/*

*, CR AT EOL: *.png
""")

    expected_data = {
        'INDENT TABS': {
            '.gitmodules': {None},
            'app-uri/*': {None},
            'svg/*': {None},
            'html/test.js': {None},
            'html/test2.js': {42},
        },
        'TRAILING WHITESPACE': {
            'app-uri/*': {None},
        },
        'CONSOLE': {
            'streams/resources/test-utils.js': {12},
        },
        'CR AT EOL': {
            'streams/resources/test-utils.js': {None},
            'svg/import/*': {None},
            'html/test.js': {None},
            'html/test2.js': {42},
        }
    }
    # parse_ignorelist normalises the case of match strings, so mirror that here.
    expected_data = {e: {os.path.normcase(k): v for k, v in p.items()}
                     for e, p in expected_data.items()}
    expected_skipped = {os.path.normcase(x) for x in {"*.pdf", "resources/*", "*.png"}}
    data, skipped_files = parse_ignorelist(input_buffer)
    assert data == expected_data
    assert skipped_files == expected_skipped
+
+
def test_lint_no_files(caplog):
    """Linting an empty path list succeeds silently."""
    assert lint(_dummy_repo, [], "normal") == 0
    assert caplog.text == ""
+
+
def test_lint_ignored_file(caplog):
    """Files matched by the ignorelist are skipped without being checked."""
    with _mock_lint("check_path") as path_mock, \
         _mock_lint("check_file_contents") as contents_mock:
        status = lint(_dummy_repo, ["broken_ignored.html"], "normal")
    assert status == 0
    assert not path_mock.called
    assert not contents_mock.called
    assert caplog.text == ""
+
+
def test_lint_not_existing_file(caplog):
    """Paths that do not exist on disk are silently skipped."""
    with _mock_lint("check_path") as path_mock, \
         _mock_lint("check_file_contents") as contents_mock:
        # really long path-linted filename
        missing = "a" * 256 + ".html"
        status = lint(_dummy_repo, [missing], "normal")
    assert status == 0
    assert not path_mock.called
    assert not contents_mock.called
    assert caplog.text == ""
+
+
def test_lint_passing(caplog):
    """A clean file lints with no errors and no log output."""
    with _mock_lint("check_path") as path_mock, \
         _mock_lint("check_file_contents") as contents_mock:
        status = lint(_dummy_repo, ["okay.html"], "normal")
    assert status == 0
    assert path_mock.call_count == 1
    assert contents_mock.call_count == 1
    assert caplog.text == ""
+
+
def test_lint_failing(caplog):
    """A broken file yields a non-zero error count and a logged error."""
    with _mock_lint("check_path") as path_mock, \
         _mock_lint("check_file_contents") as contents_mock:
        status = lint(_dummy_repo, ["broken.html"], "normal")
    assert status == 1
    assert path_mock.call_count == 1
    assert contents_mock.call_count == 1
    assert "TRAILING WHITESPACE" in caplog.text
    assert "broken.html:1" in caplog.text
+
+
def test_ref_existent_relative(caplog):
    """A relative reference link that resolves to a real file is accepted."""
    with _mock_lint("check_path") as path_mock, \
         _mock_lint("check_file_contents") as contents_mock:
        status = lint(_dummy_repo, ["ref/existent_relative.html"], "normal")
    assert status == 0
    assert path_mock.call_count == 1
    assert contents_mock.call_count == 1
    assert caplog.text == ""
+
+
def test_ref_existent_root_relative(caplog):
    """A root-relative reference link that resolves to a real file is accepted."""
    with _mock_lint("check_path") as path_mock, \
         _mock_lint("check_file_contents") as contents_mock:
        status = lint(_dummy_repo, ["ref/existent_root_relative.html"], "normal")
    assert status == 0
    assert path_mock.call_count == 1
    assert contents_mock.call_count == 1
    assert caplog.text == ""
+
+
def test_ref_non_existent_relative(caplog):
    """A relative reference to a missing file reports NON-EXISTENT-REF."""
    with _mock_lint("check_path") as path_mock, \
         _mock_lint("check_file_contents") as contents_mock:
        status = lint(_dummy_repo, ["ref/non_existent_relative.html"], "normal")
    assert status == 1
    assert path_mock.call_count == 1
    assert contents_mock.call_count == 1
    for fragment in ("NON-EXISTENT-REF",
                     "ref/non_existent_relative.html",
                     "non_existent_file.html"):
        assert fragment in caplog.text
+
+
def test_ref_non_existent_root_relative(caplog):
    """A root-relative reference to a missing file reports NON-EXISTENT-REF."""
    with _mock_lint("check_path") as path_mock, \
         _mock_lint("check_file_contents") as contents_mock:
        status = lint(_dummy_repo, ["ref/non_existent_root_relative.html"], "normal")
    assert status == 1
    assert path_mock.call_count == 1
    assert contents_mock.call_count == 1
    for fragment in ("NON-EXISTENT-REF",
                     "ref/non_existent_root_relative.html",
                     "/non_existent_file.html"):
        assert fragment in caplog.text
+
+
+
def test_ref_absolute_url(caplog):
    """Reference links must not be absolute URLs (ABSOLUTE-URL-REF)."""
    with _mock_lint("check_path") as path_mock, \
         _mock_lint("check_file_contents") as contents_mock:
        status = lint(_dummy_repo, ["ref/absolute.html"], "normal")
    assert status == 1
    assert path_mock.call_count == 1
    assert contents_mock.call_count == 1
    for fragment in ("ABSOLUTE-URL-REF",
                     "http://example.com/reference.html",
                     "ref/absolute.html"):
        assert fragment in caplog.text
+
+
def test_about_blank_as_ref(caplog):
    """about:blank is an allowed reference target."""
    with _mock_lint("check_path"), \
         _mock_lint("check_file_contents") as contents_mock:
        status = lint(_dummy_repo, ["about_blank.html"], "normal")
    assert status == 0
    assert contents_mock.call_count == 1
    assert caplog.text == ""
+
+
def test_ref_same_file_empty(caplog):
    """An empty reference link resolving to the test file itself is rejected."""
    with _mock_lint("check_path") as path_mock, \
         _mock_lint("check_file_contents") as contents_mock:
        status = lint(_dummy_repo, ["ref/same_file_empty.html"], "normal")
    assert status == 1
    assert path_mock.call_count == 1
    assert contents_mock.call_count == 1
    assert "SAME-FILE-REF" in caplog.text
    assert "same_file_empty.html" in caplog.text
+
+
def test_ref_same_file_path(caplog):
    """A reference link whose path points back at the test file itself is rejected."""
    with _mock_lint("check_path") as path_mock, \
         _mock_lint("check_file_contents") as contents_mock:
        status = lint(_dummy_repo, ["ref/same_file_path.html"], "normal")
    assert status == 1
    assert path_mock.call_count == 1
    assert contents_mock.call_count == 1
    assert "SAME-FILE-REF" in caplog.text
    assert "same_file_path.html" in caplog.text
+
+
def test_manual_path_testharness(caplog):
    """A -manual test must not load the testharness scripts (two errors)."""
    status = lint(_dummy_repo, ["tests/relative-testharness-manual.html"], "normal")
    assert status == 2
    for fragment in ("TESTHARNESS-PATH", "TESTHARNESSREPORT-PATH"):
        assert fragment in caplog.text
+
+
def test_css_visual_path_testharness(caplog):
    """A CSS visual test must not load the testharness scripts (three errors)."""
    status = lint(_dummy_repo, ["css/css-unique/relative-testharness.html"], "normal")
    assert status == 3
    for fragment in ("CONTENT-VISUAL", "TESTHARNESS-PATH", "TESTHARNESSREPORT-PATH"):
        assert fragment in caplog.text
+
+
def test_css_manual_path_testharness(caplog):
    """A CSS manual test must not load the testharness scripts (three errors)."""
    status = lint(_dummy_repo, ["css/css-unique/relative-testharness-interact.html"], "normal")
    assert status == 3
    for fragment in ("CONTENT-MANUAL", "TESTHARNESS-PATH", "TESTHARNESSREPORT-PATH"):
        assert fragment in caplog.text
+
+
def test_lint_passing_and_failing(caplog):
    """In a mixed batch, errors are reported only for the failing file."""
    with _mock_lint("check_path") as path_mock, \
         _mock_lint("check_file_contents") as contents_mock:
        status = lint(_dummy_repo, ["broken.html", "okay.html"], "normal")
    assert status == 1
    assert path_mock.call_count == 2
    assert contents_mock.call_count == 2
    assert "TRAILING WHITESPACE" in caplog.text
    assert "broken.html:1" in caplog.text
    assert "okay.html" not in caplog.text
+
+
def test_check_unique_testharness_basename_same_basename(caplog):
    """Testharness files whose names differ only by extension conflict."""
    # Precondition: there are testharness files with conflicting basename paths.
    for name in ('a.html', 'a.xhtml'):
        assert os.path.exists(os.path.join(_dummy_repo, 'tests', 'dir1', name))

    with _mock_lint("check_path") as path_mock, \
         _mock_lint("check_file_contents") as contents_mock:
        status = lint(_dummy_repo, ["tests/dir1/a.html", "tests/dir1/a.xhtml"], "normal")
    # There will be one failure for each file.
    assert status == 2
    assert path_mock.call_count == 2
    assert contents_mock.call_count == 2
    assert "DUPLICATE-BASENAME-PATH" in caplog.text
+
+
def test_check_unique_testharness_basename_different_name(caplog):
    """Distinct basenames in the same directory do not conflict."""
    # Precondition: two testharness files in the same directory, different names.
    for name in ('a.html', 'b.html'):
        assert os.path.exists(os.path.join(_dummy_repo, 'tests', 'dir1', name))

    with _mock_lint("check_path") as path_mock, \
         _mock_lint("check_file_contents") as contents_mock:
        status = lint(_dummy_repo, ["tests/dir1/a.html", "tests/dir1/b.html"], "normal")
    assert status == 0
    assert path_mock.call_count == 2
    assert contents_mock.call_count == 2
    assert caplog.text == ""
+
+
def test_check_unique_testharness_basename_different_dir(caplog):
    """The same basename in different directories does not conflict."""
    # Precondition: two testharness files in different directories, same basename.
    assert os.path.exists(os.path.join(_dummy_repo, 'tests', 'dir1', 'a.html'))
    assert os.path.exists(os.path.join(_dummy_repo, 'tests', 'dir2', 'a.xhtml'))

    with _mock_lint("check_path") as path_mock, \
         _mock_lint("check_file_contents") as contents_mock:
        status = lint(_dummy_repo, ["tests/dir1/a.html", "tests/dir2/a.xhtml"], "normal")
    assert status == 0
    assert path_mock.call_count == 2
    assert contents_mock.call_count == 2
    assert caplog.text == ""
+
+
def test_check_unique_testharness_basename_not_testharness(caplog):
    """The basename-collision rule only applies to testharness files."""
    # Precondition: non-testharness files with conflicting basename paths.
    for name in ('a.html', 'a.js'):
        assert os.path.exists(os.path.join(_dummy_repo, 'tests', 'dir1', name))

    with _mock_lint("check_path") as path_mock, \
         _mock_lint("check_file_contents") as contents_mock:
        status = lint(_dummy_repo, ["tests/dir1/a.html", "tests/dir1/a.js"], "normal")
    assert status == 0
    assert path_mock.call_count == 2
    assert contents_mock.call_count == 2
    assert caplog.text == ""
+
+
def test_ignore_glob(caplog):
    """ignore_glob patterns exclude matching files from the lint run."""
    files = ["broken.html", "ref/absolute.html", "ref/existent_relative.html"]
    # With globs that match broken.html and absolute.html, only the one clean
    # file is checked and no errors appear.
    with _mock_lint("check_path") as path_mock, \
         _mock_lint("check_file_contents") as contents_mock:
        status = lint(_dummy_repo, files, "normal", ["broken*", "*solu*"])
        assert status == 0
        # Also confirm that only one file is checked
        assert path_mock.call_count == 1
        assert contents_mock.call_count == 1
        assert caplog.text == ""
    # However, linting the same files without ignore_glob yields lint errors.
    with _mock_lint("check_path") as path_mock, \
         _mock_lint("check_file_contents") as contents_mock:
        status = lint(_dummy_repo, files, "normal")
        assert status == 2
        assert path_mock.call_count == 3
        assert contents_mock.call_count == 3
        assert "TRAILING WHITESPACE" in caplog.text
        assert "ABSOLUTE-URL-REF" in caplog.text
+
+
def test_all_filesystem_paths():
    """all_filesystem_paths yields files relative to the root, recursing into subdirectories."""
    fake_walk = [
        (b'', [(b'dir_a', None), (b'dir_b', None)], [(b'file_a', None), (b'file_b', None)]),
        (b'dir_a', [], [(b'file_c', None), (b'file_d', None)]),
    ]
    with mock.patch('tools.lint.lint.walk', return_value=fake_walk):
        paths = list(lint_mod.all_filesystem_paths('.'))
    assert paths == ['file_a',
                     'file_b',
                     os.path.join('dir_a', 'file_c'),
                     os.path.join('dir_a', 'file_d')]
+
+
def test_filesystem_paths_subdir():
    """A subdir argument is prefixed onto every path all_filesystem_paths yields."""
    fake_walk = [
        (b'', [(b'dir_a', None), (b'dir_b', None)], [(b'file_a', None), (b'file_b', None)]),
        (b'dir_a', [], [(b'file_c', None), (b'file_d', None)]),
    ]
    with mock.patch('tools.lint.lint.walk', return_value=fake_walk):
        paths = list(lint_mod.all_filesystem_paths('.', 'dir'))
    assert paths == [os.path.join('dir', 'file_a'),
                     os.path.join('dir', 'file_b'),
                     os.path.join('dir', 'dir_a', 'file_c'),
                     os.path.join('dir', 'dir_a', 'file_d')]
+
+
def test_main_with_args():
    """main() resolves positional path arguments relative to the repo root."""
    # patch.object restores sys.argv on exit, replacing the manual try/finally.
    with mock.patch.object(sys, 'argv', ['./lint', 'a', 'b', 'c']):
        with mock.patch(lint_mod.__name__ + ".os.path.isfile", return_value=True):
            with _mock_lint('lint', return_value=True) as lint_stub:
                lint_mod.main(**vars(create_parser().parse_args()))
            expected_paths = [os.path.relpath(os.path.join(os.getcwd(), name), repo_root)
                              for name in ['a', 'b', 'c']]
            lint_stub.assert_called_once_with(repo_root,
                                              expected_paths,
                                              "normal",
                                              None,
                                              None,
                                              0)
+
+
def test_main_no_args():
    """With no arguments, main() lints the changed files."""
    # patch.object restores sys.argv on exit, replacing the manual try/finally.
    with mock.patch.object(sys, 'argv', ['./lint']):
        with _mock_lint('lint', return_value=True) as lint_stub:
            with _mock_lint('changed_files', return_value=['foo', 'bar']):
                lint_mod.main(**vars(create_parser().parse_args()))
            lint_stub.assert_called_once_with(repo_root, ['foo', 'bar'], "normal", None, None, 0)
+
+
def test_main_all():
    """--all lints every file in the repository."""
    # patch.object restores sys.argv on exit, replacing the manual try/finally.
    with mock.patch.object(sys, 'argv', ['./lint', '--all']):
        with _mock_lint('lint', return_value=True) as lint_stub:
            with _mock_lint('all_filesystem_paths', return_value=['foo', 'bar']):
                lint_mod.main(**vars(create_parser().parse_args()))
            lint_stub.assert_called_once_with(repo_root, ['foo', 'bar'], "normal", None, None, 0)
diff --git a/testing/web-platform/tests/tools/lint/tests/test_path_lints.py b/testing/web-platform/tests/tools/lint/tests/test_path_lints.py
new file mode 100644
index 0000000000..99c8f7dce6
--- /dev/null
+++ b/testing/web-platform/tests/tools/lint/tests/test_path_lints.py
@@ -0,0 +1,164 @@
+# mypy: allow-untyped-defs
+
+import os
+from unittest import mock
+
+from ..lint import check_path, check_unique_case_insensitive_paths
+from .base import check_errors
+import pytest
+
+
def test_allowed_path_length():
    """Paths within the 150-character budget produce no PATH LENGTH error."""
    base = 29 * "test/"  # 145 characters

    for extra in range(5):
        candidate = base + extra * "a"
        errors = check_path("/foo/", candidate)
        check_errors(errors)
        assert errors == []
+
+
def test_forbidden_path_length():
    """Paths over 150 characters are reported with their actual length."""
    base = 29 * "test/"  # 145 characters

    for extra in range(5, 10):
        candidate = base + extra * "a"
        message = f"/(unknown) longer than maximum path length ({146 + extra} > 150)"
        errors = check_path("/foo/", candidate)
        check_errors(errors)
        assert errors == [("PATH LENGTH", message, candidate, None)]
+
+
@pytest.mark.parametrize("path_ending,generated", [(".worker.html", ".worker.js"),
                                                   (".any.worker.html", ".any.js"),
                                                   (".any.html", ".any.js")])
def test_forbidden_path_endings(path_ending, generated):
    """Endings that collide with names of generated tests are rejected."""
    path = "test/test" + path_ending
    message = (f"path ends with {path_ending} which collides with "
               f"generated tests from {generated} files")
    errors = check_path("/foo/", path)
    check_errors(errors)
    assert errors == [("WORKER COLLISION", message, path, None)]
+
+
def test_file_type():
    """Symlinks are an unsupported file type and produce a FILE TYPE error."""
    path = "test/test"

    message = f"/{path} is an unsupported file type (symlink)"

    # ``return_value`` (not ``returnvalue``) is the keyword unittest.mock
    # recognises; the original typo merely set an unused ``returnvalue``
    # attribute on the mock and only passed because the default Mock return
    # value happens to be truthy.
    with mock.patch("os.path.islink", return_value=True):
        errors = check_path("/foo/", path)

    assert errors == [("FILE TYPE", message, path, None)]
+
+
@pytest.mark.parametrize("path", ["ahem.ttf",
                                  "Ahem.ttf",
                                  "ahem.tTf",
                                  "not-ahem.ttf",
                                  "support/ahem.ttf",
                                  "ahem/other.ttf"])
def test_ahem_copy(path):
    """Extra copies of the Ahem font are flagged; the canonical copy is /fonts/Ahem.ttf."""
    errors = check_path("/foo/", path)
    assert errors == [("AHEM COPY",
                       "Don't add extra copies of Ahem, use /fonts/Ahem.ttf",
                       path,
                       None)]
+
+
@pytest.mark.parametrize("path", ["ahem.woff",
                                  "ahem.ttff",
                                  "support/ahem.woff",
                                  "ahem/other.woff"])
def test_ahem_copy_negative(path):
    """Files that merely resemble an Ahem copy are not flagged."""
    assert check_path("/foo/", path) == []
+
+
def test_mojom_js_file():
    """Generated *.mojom.js bindings must not be checked into WPT."""
    path = "resources/fake_device.mojom.js"
    expected = ("MOJOM-JS", "Don't check *.mojom.js files into WPT", path, None)
    assert check_path("/foo/", path) == [expected]
+
+
@pytest.mark.parametrize("path", ["css/foo.tentative/bar.html",
                                  "css/.tentative/bar.html",
                                  "css/tentative.bar/baz.html",
                                  "css/bar-tentative/baz.html"])
def test_tentative_directories(path):
    """Directories merely containing 'tentative' in their name are rejected."""
    path = os.path.join(*path.split("/"))
    errors = check_path("/foo/", path)
    assert errors == [("TENTATIVE-DIRECTORY-NAME",
                       "Directories for tentative tests must be named exactly 'tentative'",
                       path,
                       None)]
+
+
@pytest.mark.parametrize("path", ["css/bar.html",
                                  "css/tentative/baz.html"])
def test_tentative_directories_negative(path):
    """A directory named exactly 'tentative' (or none at all) is accepted."""
    path = os.path.join(*path.split("/"))
    assert check_path("/foo/", path) == []
+
+
@pytest.mark.parametrize("path", ["elsewhere/.gitignore",
                                  "else/where/.gitignore",
                                  "elsewhere/tools/.gitignore",
                                  "elsewhere/docs/.gitignore",
                                  "elsewhere/resources/webidl2/.gitignore"])
def test_gitignore_file(path):
    """Any .gitignore outside the allowed root locations is rejected.

    The original list was missing a comma after "else/where/.gitignore",
    so Python's implicit string-literal concatenation silently fused two
    intended parametrize cases into one bogus path.
    """
    path = os.path.join(*path.split("/"))

    expected_error = ("GITIGNORE",
                      ".gitignore found outside the root",
                      path,
                      None)

    errors = check_path("/foo/", path)

    assert errors == [expected_error]
+
+
@pytest.mark.parametrize("path", [".gitignore",
                                  "elsewhere/.gitignores",
                                  "elsewhere/name.gitignore",
                                  "tools/.gitignore",
                                  "tools/elsewhere/.gitignore",
                                  "docs/.gitignore",
                                  "docs/elsewhere/.gitignore",
                                  "resources/webidl2/.gitignore",
                                  "resources/webidl2/elsewhere/.gitignore"])
def test_gitignore_negative(path):
    """.gitignore files in the root or allowed subtrees are accepted.

    The original list was missing a comma after "docs/.gitignore", so
    Python's implicit string-literal concatenation silently fused two
    intended parametrize cases into one bogus path.
    """
    path = os.path.join(*path.split("/"))

    errors = check_path("/foo/", path)

    assert errors == []
+
+
@pytest.mark.parametrize("paths,errors",
                         [(["a/b.html", "a/B.html"], ["a/B.html"]),
                          (["A/b.html", "a/b.html"], ["a/b.html"]),
                          (["a/b.html", "a/c.html"], [])])
def test_unique_case_insensitive_paths(paths, errors):
    """Paths differing only in case are flagged, reporting the later duplicate."""
    got_errors = check_unique_case_insensitive_paths(None, paths)
    reported = [(err[0], err[2]) for err in got_errors]
    assert reported == [("DUPLICATE-CASE-INSENSITIVE-PATH", expected_path)
                        for expected_path in errors]