author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-28 14:29:10 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-28 14:29:10 +0000
commit     2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
tree       b80bf8bf13c3766139fbacc530efd0dd9d54394c /tools/lint/perfdocs
parent     Initial commit. (diff)
Adding upstream version 86.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/lint/perfdocs')
-rw-r--r--  tools/lint/perfdocs.yml                       10
-rw-r--r--  tools/lint/perfdocs/__init__.py               13
-rw-r--r--  tools/lint/perfdocs/framework_gatherers.py   258
-rw-r--r--  tools/lint/perfdocs/gatherer.py              142
-rw-r--r--  tools/lint/perfdocs/generator.py             251
-rw-r--r--  tools/lint/perfdocs/logger.py                 82
-rw-r--r--  tools/lint/perfdocs/perfdocs.py               79
-rw-r--r--  tools/lint/perfdocs/templates/index.rst       14
-rw-r--r--  tools/lint/perfdocs/utils.py                  83
-rw-r--r--  tools/lint/perfdocs/verifier.py              310

10 files changed, 1242 insertions(+), 0 deletions(-)
diff --git a/tools/lint/perfdocs.yml b/tools/lint/perfdocs.yml new file mode 100644 index 0000000000..51a35e65a9 --- /dev/null +++ b/tools/lint/perfdocs.yml @@ -0,0 +1,10 @@ +--- +perfdocs: + description: Performance Documentation linter + # This task handles its own search, so just include cwd + include: ['testing/raptor', 'python/mozperftest', 'testing/talos'] + exclude: [] + extensions: ['rst', 'ini', 'yml'] + support-files: [] + type: structured_log + payload: perfdocs:lint diff --git a/tools/lint/perfdocs/__init__.py b/tools/lint/perfdocs/__init__.py new file mode 100644 index 0000000000..1194d38624 --- /dev/null +++ b/tools/lint/perfdocs/__init__.py @@ -0,0 +1,13 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +import os + +from perfdocs import perfdocs + +here = os.path.abspath(os.path.dirname(__file__)) +PERFDOCS_REQUIREMENTS_PATH = os.path.join(here, "requirements.txt") + + +def lint(paths, config, logger, fix=False, **lintargs): + return perfdocs.run_perfdocs(config, logger=logger, paths=paths, generate=fix) diff --git a/tools/lint/perfdocs/framework_gatherers.py b/tools/lint/perfdocs/framework_gatherers.py new file mode 100644 index 0000000000..29d34c2d00 --- /dev/null +++ b/tools/lint/perfdocs/framework_gatherers.py @@ -0,0 +1,258 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +from __future__ import absolute_import + +import collections +import os +import pathlib +import re + +from manifestparser import TestManifest +from mozperftest.script import ScriptInfo +from perfdocs.utils import read_yaml +from perfdocs.logger import PerfDocLogger + +""" +This file is for framework specific gatherers since manifests +might be parsed differently in each of them. The gatherers +must implement the FrameworkGatherer class. +""" + + +class FrameworkGatherer(object): + """ + Abstract class for framework gatherers. + """ + + def __init__(self, yaml_path, workspace_dir): + """ + Generic initialization for a framework gatherer. + """ + self.workspace_dir = workspace_dir + self._yaml_path = yaml_path + self._suite_list = {} + self._test_list = {} + self._urls = {} + self._manifest_path = "" + self._manifest = None + self.script_infos = {} + + def get_manifest_path(self): + """ + Returns the path to the manifest based on the + manifest entry in the frameworks YAML configuration + file. + + :return str: Path to the manifest. + """ + if self._manifest_path: + return self._manifest_path + + yaml_content = read_yaml(self._yaml_path) + self._manifest_path = os.path.join(self.workspace_dir, yaml_content["manifest"]) + return self._manifest_path + + def get_suite_list(self): + """ + Each framework gatherer must return a dictionary with + the following structure. Note that the test names must + be relative paths so that issues can be correctly issued + by the reviewbot. + + :return dict: A dictionary with the following structure: { + "suite_name": [ + 'testing/raptor/test1', + 'testing/raptor/test2' + ] + } + """ + raise NotImplementedError + + def _build_section_with_header(self, title, content, header_type=None): + """ + Adds a section to the documentation with the title as the type mentioned + and paragraph as content mentioned. 
+ :param title: title of the section + :param content: content of section paragraph + :param header_type: type of the title heading + """ + heading_map = {"H3": "=", "H4": "-", "H5": "^"} + return [title, heading_map.get(header_type, "^") * len(title), content, ""] + + +class RaptorGatherer(FrameworkGatherer): + """ + Gatherer for the Raptor framework. + """ + + def get_suite_list(self): + """ + Returns a dictionary containing a mapping from suites + to the tests they contain. + + :return dict: A dictionary with the following structure: { + "suite_name": [ + 'testing/raptor/test1', + 'testing/raptor/test2' + ] + } + """ + if self._suite_list: + return self._suite_list + + manifest_path = self.get_manifest_path() + + # Get the tests from the manifest + test_manifest = TestManifest([manifest_path], strict=False) + test_list = test_manifest.active_tests(exists=False, disabled=False) + + # Parse the tests into the expected dictionary + for test in test_list: + # Get the top-level suite + s = os.path.basename(test["here"]) + if s not in self._suite_list: + self._suite_list[s] = [] + + # Get the individual test + fpath = re.sub(".*testing", "testing", test["manifest"]) + + if fpath not in self._suite_list[s]: + self._suite_list[s].append(fpath) + + return self._suite_list + + def _get_subtests_from_ini(self, manifest_path, suite_name): + """ + Returns a list of (sub)tests from an ini file containing the test definitions. + + :param str manifest_path: path to the ini file + :return list: the list of the tests + """ + test_manifest = TestManifest([manifest_path], strict=False) + test_list = test_manifest.active_tests(exists=False, disabled=False) + subtest_list = {} + for subtest in test_list: + subtest_list[subtest["name"]] = subtest["manifest"] + self._urls[subtest["name"]] = { + "type": suite_name, + "url": subtest["test_url"], + } + + self._urls = collections.OrderedDict( + sorted(self._urls.items(), key=lambda t: len(t[0])) + ) + + return subtest_list + + def get_test_list(self): + """ + Returns a dictionary containing the tests in every suite ini file. + + :return dict: A dictionary with the following structure: { + "suite_name": { + 'raptor_test1', + 'raptor_test2' + }, + } + """ + if self._test_list: + return self._test_list + + suite_list = self.get_suite_list() + + # Iterate over each manifest path from suite_list[suite_name] + # and place the subtests into self._test_list under the same key + for suite_name, manifest_paths in suite_list.items(): + if not self._test_list.get(suite_name): + self._test_list[suite_name] = {} + for i, manifest_path in enumerate(manifest_paths, 1): + subtest_list = self._get_subtests_from_ini(manifest_path, suite_name) + self._test_list[suite_name].update(subtest_list) + + return self._test_list + + def build_test_description(self, title, test_description="", suite_name=""): + matcher = set() + for name, val in self._urls.items(): + if title == name and suite_name == val["type"]: + matcher.add(val["url"]) + break + + if len(matcher) == 0: + for name, val in self._urls.items(): + if title in name and suite_name == val["type"]: + matcher.add(val["url"]) + break + + return [ + "* `" + + title + + " (" + + test_description + + ") " + + "<" + + matcher.pop() + + ">`__" + ] + + def build_suite_section(self, title, content): + return self._build_section_with_header( + title.capitalize(), content, header_type="H4" + ) + + +class MozperftestGatherer(FrameworkGatherer): + """ + Gatherer for the Mozperftest framework. 
+ """ + + def get_test_list(self): + """ + Returns a dictionary containing the tests that are in perftest.ini manifest. + + :return dict: A dictionary with the following structure: { + "suite_name": { + 'perftest_test1', + 'perftest_test2', + }, + } + """ + for path in pathlib.Path(self.workspace_dir).rglob("perftest.ini"): + suite_name = re.sub(self.workspace_dir, "", os.path.dirname(path)) + + # If the workspace dir doesn't end with a forward-slash, + # the substitution above won't work completely + if suite_name.startswith("/") or suite_name.startswith("\\"): + suite_name = suite_name[1:] + + # We have to add new paths to the logger as we search + # because mozperftest tests exist in multiple places in-tree + PerfDocLogger.PATHS.append(suite_name) + + # Get the tests from perftest.ini + test_manifest = TestManifest([str(path)], strict=False) + test_list = test_manifest.active_tests(exists=False, disabled=False) + for test in test_list: + si = ScriptInfo(test["path"]) + self.script_infos[si["name"]] = si + self._test_list.setdefault(suite_name, {}).update( + {si["name"]: str(path)} + ) + + return self._test_list + + def build_test_description(self, title, test_description="", suite_name=""): + return [str(self.script_infos[title])] + + def build_suite_section(self, title, content): + return self._build_section_with_header(title, content, header_type="H4") + + +class TalosGatherer(FrameworkGatherer): + """ + Gatherer for the Talos framework. + TODO - Bug 1674220 + """ + + pass diff --git a/tools/lint/perfdocs/gatherer.py b/tools/lint/perfdocs/gatherer.py new file mode 100644 index 0000000000..2f6a0b5e91 --- /dev/null +++ b/tools/lint/perfdocs/gatherer.py @@ -0,0 +1,142 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +from __future__ import absolute_import + +import os +import pathlib +import re + +from perfdocs.logger import PerfDocLogger +from perfdocs.utils import read_yaml +from perfdocs.framework_gatherers import ( + RaptorGatherer, + MozperftestGatherer, + TalosGatherer, +) + +logger = PerfDocLogger() + +# TODO: Implement decorator/searcher to find the classes. +frameworks = { + "raptor": RaptorGatherer, + "mozperftest": MozperftestGatherer, + "Talos": TalosGatherer, +} + + +class Gatherer(object): + """ + Gatherer produces the tree of the perfdoc's entries found + and can obtain manifest-based test lists. Used by the Verifier. + """ + + def __init__(self, workspace_dir): + """ + Initialzie the Gatherer. + + :param str workspace_dir: Path to the gecko checkout. + """ + self.workspace_dir = workspace_dir + self._perfdocs_tree = [] + self._test_list = [] + self.framework_gatherers = {} + + @property + def perfdocs_tree(self): + """ + Returns the perfdocs_tree, and computes it + if it doesn't exist. + + :return dict: The perfdocs tree containing all + framework perfdoc entries. See `fetch_perfdocs_tree` + for information on the data structure. + """ + if self._perfdocs_tree: + return self._perfdocs_tree + else: + self.fetch_perfdocs_tree() + return self._perfdocs_tree + + def fetch_perfdocs_tree(self): + """ + Creates the perfdocs tree with the following structure: + [ + { + "path": Path to the perfdocs directory. + "yml": Name of the configuration YAML file. + "rst": Name of the RST file. + "static": Name of the static file. + }, ... + ] + + This method doesn't return anything. 
The result can be found in + the perfdocs_tree attribute. + """ + exclude_dir = ["tools/lint", ".hg", "testing/perfdocs"] + + for path in pathlib.Path(self.workspace_dir).rglob("perfdocs"): + if any(re.search(d, str(path)) for d in exclude_dir): + continue + files = [f for f in os.listdir(path)] + matched = {"path": str(path), "yml": "", "rst": "", "static": []} + + for file in files: + # Add the yml/rst/static file to its key if re finds the searched file + if file == "config.yml" or file == "config.yaml": + matched["yml"] = file + elif file == "index.rst": + matched["rst"] = file + elif file.endswith(".rst"): + matched["static"].append(file) + + # Append to structdocs if all the searched files were found + if all(val for val in matched.values() if not type(val) == list): + self._perfdocs_tree.append(matched) + + logger.log( + "Found {} perfdocs directories in {}".format( + len(self._perfdocs_tree), [d["path"] for d in self._perfdocs_tree] + ) + ) + + def get_test_list(self, sdt_entry): + """ + Use a perfdocs_tree entry to find the test list for + the framework that was found. + + :return: A framework info dictionary with fields: { + 'yml_path': Path to YAML, + 'yml_content': Content of YAML, + 'name': Name of framework, + 'test_list': Test list found for the framework + } + """ + + # If it was computed before, return it + yaml_path = os.path.join(sdt_entry["path"], sdt_entry["yml"]) + for entry in self._test_list: + if entry["yml_path"] == yaml_path: + return entry + + # Set up framework entry with meta data + yaml_content = read_yaml(yaml_path) + framework = { + "yml_content": yaml_content, + "yml_path": yaml_path, + "name": yaml_content["name"], + "test_list": {}, + } + + # Get and then store the frameworks tests + self.framework_gatherers[framework["name"]] = frameworks[framework["name"]]( + framework["yml_path"], self.workspace_dir + ) + + if not yaml_content["static-only"]: + framework["test_list"] = self.framework_gatherers[ + framework["name"] + ].get_test_list() + + self._test_list.append(framework) + return framework diff --git a/tools/lint/perfdocs/generator.py b/tools/lint/perfdocs/generator.py new file mode 100644 index 0000000000..6d3c99d00b --- /dev/null +++ b/tools/lint/perfdocs/generator.py @@ -0,0 +1,251 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +from __future__ import absolute_import + +import os +import re +import shutil +import tempfile + +from perfdocs.logger import PerfDocLogger +from perfdocs.utils import are_dirs_equal, read_file, read_yaml, save_file + +logger = PerfDocLogger() + + +class Generator(object): + """ + After each perfdocs directory was validated, the generator uses the templates + for each framework, fills them with the test descriptions in config and saves + the perfdocs in the form index.rst as index file and suite_name.rst for + each suite of tests in the framework. + """ + + def __init__(self, verifier, workspace, generate=False): + """ + Initialize the Generator. + + :param verifier: Verifier object. It should not be a fresh Verifier object, + but an initialized one with validate_tree() method already called + :param workspace: Path to the top-level checkout directory. 
+ :param generate: Flag for generating the documentation + """ + self._workspace = workspace + if not self._workspace: + raise Exception("PerfDocs Generator requires a workspace directory.") + # Template documents without added information reside here + self.templates_path = os.path.join( + self._workspace, "tools", "lint", "perfdocs", "templates" + ) + self.perfdocs_path = os.path.join( + self._workspace, "testing", "perfdocs", "generated" + ) + + self._generate = generate + self._verifier = verifier + self._perfdocs_tree = self._verifier._gatherer.perfdocs_tree + + def build_perfdocs_from_tree(self): + """ + Builds up a document for each framework that was found. + + :return dict: A dictionary containing a mapping from each framework + to the document that was built for it, i.e: + { + framework_name: framework_document, + ... + } + """ + + # Using the verified `perfdocs_tree`, build up the documentation. + frameworks_info = {} + for framework in self._perfdocs_tree: + yaml_content = read_yaml(os.path.join(framework["path"], framework["yml"])) + rst_content = read_file( + os.path.join(framework["path"], framework["rst"]), stringify=True + ) + + # Gather all tests and descriptions and format them into + # documentation content + documentation = [] + suites = yaml_content["suites"] + for suite_name in sorted(suites.keys()): + suite_info = suites[suite_name] + + # Add the suite section + documentation.extend( + self._verifier._gatherer.framework_gatherers[ + yaml_content["name"] + ].build_suite_section(suite_name, suite_info["description"]) + ) + + tests = suite_info.get("tests", {}) + for test_name in sorted(tests.keys()): + documentation.extend( + self._verifier._gatherer.framework_gatherers[ + yaml_content["name"] + ].build_test_description( + test_name, tests[test_name], suite_name + ) + ) + documentation.append("") + + # Insert documentation into `.rst` file + framework_rst = re.sub( + r"{documentation}", os.linesep.join(documentation), rst_content + ) + frameworks_info[yaml_content["name"]] = { + "dynamic": framework_rst, + "static": [], + } + + # For static `.rst` file + for static_file in framework["static"]: + frameworks_info[yaml_content["name"]]["static"].append( + { + "file": static_file, + "content": read_file( + os.path.join(framework["path"], static_file), stringify=True + ), + } + ) + + return frameworks_info + + def _create_temp_dir(self): + """ + Create a temp directory as preparation of saving the documentation tree. + :return: str the location of perfdocs_tmpdir + """ + # Build the directory that will contain the final result (a tmp dir + # that will be moved to the final location afterwards) + try: + tmpdir = tempfile.mkdtemp() + perfdocs_tmpdir = os.path.join(tmpdir, "generated") + os.mkdir(perfdocs_tmpdir) + except OSError as e: + logger.critical("Error creating temp file: {}".format(e)) + + success = False or os.path.isdir(perfdocs_tmpdir) + if success: + return perfdocs_tmpdir + else: + return success + + def _create_perfdocs(self): + """ + Creates the perfdocs documentation. + :return: str path of the temp dir it is saved in + """ + # All directories that are kept in the perfdocs tree are valid, + # so use it to build up the documentation. 
+ framework_docs = self.build_perfdocs_from_tree() + perfdocs_tmpdir = self._create_temp_dir() + + # Save the documentation files + frameworks = [] + for framework_name in sorted(framework_docs.keys()): + frameworks.append(framework_name) + save_file( + framework_docs[framework_name]["dynamic"], + os.path.join(perfdocs_tmpdir, framework_name), + ) + + for static_name in framework_docs[framework_name]["static"]: + save_file( + static_name["content"], + os.path.join(perfdocs_tmpdir, static_name["file"].split(".")[0]), + ) + + # Get the main page and add the framework links to it + mainpage = read_file( + os.path.join(self.templates_path, "index.rst"), stringify=True + ) + fmt_frameworks = os.linesep.join( + [" * :doc:`%s`" % name for name in frameworks] + ) + fmt_mainpage = re.sub(r"{test_documentation}", fmt_frameworks, mainpage) + save_file(fmt_mainpage, os.path.join(perfdocs_tmpdir, "index")) + + return perfdocs_tmpdir + + def _save_perfdocs(self, perfdocs_tmpdir): + """ + Copies the perfdocs tree after it was saved into the perfdocs_tmpdir + :param perfdocs_tmpdir: str location of the temp dir where the + perfdocs was saved + """ + # Remove the old docs and copy the new version there without + # checking if they need to be regenerated. + logger.log("Regenerating perfdocs...") + + if os.path.exists(self.perfdocs_path): + shutil.rmtree(self.perfdocs_path) + + try: + saved = shutil.copytree(perfdocs_tmpdir, self.perfdocs_path) + if saved: + logger.log( + "Documentation saved to {}/".format( + re.sub(".*testing", "testing", self.perfdocs_path) + ) + ) + except Exception as e: + logger.critical( + "There was an error while saving the documentation: {}".format(e) + ) + + def generate_perfdocs(self): + """ + Generate the performance documentation. + + If `self._generate` is True, then the documentation will be regenerated + without any checks. Otherwise, if it is False, the new documentation will be + prepare and compare with the existing documentation to determine if + it should be regenerated. + + :return bool: True/False - For True, if `self._generate` is True, then the + docs were regenerated. If `self._generate` is False, then True will mean + that the docs should be regenerated, and False means that they do not + need to be regenerated. + """ + + def get_possibly_changed_files(): + """ + Returns files that might have been modified + (used to output a linter warning for regeneration) + :return: list - files that might have been modified + """ + # Returns files that might have been modified + # (used to output a linter warning for regeneration) + files = [] + for entry in self._perfdocs_tree: + files.extend( + [ + os.path.join(entry["path"], entry["yml"]), + os.path.join(entry["path"], entry["rst"]), + ] + ) + return files + + # Throw a warning if there's no need for generating + if not os.path.exists(self.perfdocs_path) and not self._generate: + # If they don't exist and we are not generating, then throw + # a linting error and exit. + logger.warning( + "PerfDocs need to be regenerated.", files=get_possibly_changed_files() + ) + return True + + perfdocs_tmpdir = self._create_perfdocs() + if self._generate: + self._save_perfdocs(perfdocs_tmpdir) + else: + # If we are not generating, then at least check if they + # should be regenerated by comparing the directories. 
+ if not are_dirs_equal(perfdocs_tmpdir, self.perfdocs_path): + logger.warning( + "PerfDocs are outdated, run ./mach lint -l perfdocs --fix` to update them.", + files=get_possibly_changed_files(), + ) diff --git a/tools/lint/perfdocs/logger.py b/tools/lint/perfdocs/logger.py new file mode 100644 index 0000000000..ff6321a7f1 --- /dev/null +++ b/tools/lint/perfdocs/logger.py @@ -0,0 +1,82 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +from __future__ import absolute_import + +import re + + +class PerfDocLogger(object): + """ + Logger for the PerfDoc tooling. Handles the warnings by outputting + them into through the StructuredLogger provided by lint. + """ + + TOP_DIR = "" + PATHS = [] + LOGGER = None + FAILED = False + + def __init__(self): + """Initializes the PerfDocLogger.""" + + # Set up class attributes for all logger instances + if not PerfDocLogger.LOGGER: + raise Exception( + "Missing linting LOGGER instance for PerfDocLogger initialization" + ) + if not PerfDocLogger.PATHS: + raise Exception("Missing PATHS for PerfDocLogger initialization") + self.logger = PerfDocLogger.LOGGER + + def log(self, msg): + """ + Log an info message. + + :param str msg: Message to log. + """ + self.logger.info(msg) + + def warning(self, msg, files): + """ + Logs a validation warning message. The warning message is + used as the error message that is output in the reviewbot. + + :param str msg: Message to log, it's also used as the error message + for the issue that is output by the reviewbot. + :param list/str files: The file(s) that this warning is about. + """ + if type(files) != list: + files = [files] + + # Add a reviewbot error for each file that is given + for file in files: + # Get a relative path (reviewbot can't handle absolute paths) + fpath = re.sub(PerfDocLogger.TOP_DIR, "", file) + + # Filter out any issues that do not relate to the paths + # that are being linted + for path in PerfDocLogger.PATHS: + if path not in file: + continue + + # Output error entry + self.logger.lint_error( + message=msg, + lineno=0, + column=None, + path=fpath, + linter="perfdocs", + rule="Flawless performance docs.", + ) + + PerfDocLogger.FAILED = True + break + + def critical(self, msg): + """ + Log a critical message. + + :param str msg: Message to log. + """ + self.logger.critical(msg) diff --git a/tools/lint/perfdocs/perfdocs.py b/tools/lint/perfdocs/perfdocs.py new file mode 100644 index 0000000000..3df01595c2 --- /dev/null +++ b/tools/lint/perfdocs/perfdocs.py @@ -0,0 +1,79 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +from __future__ import absolute_import, print_function + +import os +import re + + +def run_perfdocs(config, logger=None, paths=None, generate=True): + """ + Build up performance testing documentation dynamically by combining + text data from YAML files that reside in `perfdoc` folders + across the `testing` directory. Each directory is expected to have + an `index.rst` file along with `config.yml` YAMLs defining what needs + to be added to the documentation. + + The YAML must also define the name of the "framework" that should be + used in the main index.rst for the performance testing documentation. 
+ + The testing documentation list will be ordered alphabetically once + it's produced (to avoid unwanted shifts because of unordered dicts + and path searching). + + Note that the suite name headings will be given the H4 (---) style so it + is suggested that you use H3 (===) style as the heading for your + test section. H5 will be used be used for individual tests within each + suite. + + Usage for verification: ./mach lint -l perfdocs + Usage for generation: ./mach lint -l perfdocs --fix + + For validation, see the Verifier class for a description of how + it works. + + The run will fail if the valid result from validate_tree is not + False, implying some warning/problem was logged. + + :param dict config: The configuration given by mozlint. + :param StructuredLogger logger: The StructuredLogger instance to be used to + output the linting warnings/errors. + :param list paths: The paths that are being tested. Used to filter + out errors from files outside of these paths. + :param bool generate: If true, the docs will be (re)generated. + """ + from perfdocs.logger import PerfDocLogger + + top_dir = os.environ.get("WORKSPACE", None) + if not top_dir: + floc = os.path.abspath(__file__) + top_dir = floc.split("tools")[0] + top_dir = top_dir.replace("\\", "\\\\") + + PerfDocLogger.LOGGER = logger + PerfDocLogger.TOP_DIR = top_dir + + # Convert all the paths to relative ones + rel_paths = [re.sub(top_dir, "", path) for path in paths] + PerfDocLogger.PATHS = rel_paths + + target_dir = [os.path.join(top_dir, i) for i in rel_paths] + for path in target_dir: + if not os.path.exists(path): + raise Exception("Cannot locate directory at %s" % path) + + # Late import because logger isn't defined until later + from perfdocs.generator import Generator + from perfdocs.verifier import Verifier + + # Run the verifier first + verifier = Verifier(top_dir) + verifier.validate_tree() + + if not PerfDocLogger.FAILED: + # Even if the tree is valid, we need to check if the documentation + # needs to be regenerated, and if it does, we throw a linting error. + # `generate` dictates whether or not the documentation is generated. + generator = Generator(verifier, generate=generate, workspace=top_dir) + generator.generate_perfdocs() diff --git a/tools/lint/perfdocs/templates/index.rst b/tools/lint/perfdocs/templates/index.rst new file mode 100644 index 0000000000..cd6e258980 --- /dev/null +++ b/tools/lint/perfdocs/templates/index.rst @@ -0,0 +1,14 @@ +################### +Performance Testing +################### + +Performance tests are designed to catch performance regressions before they reach our +end users. At this time, there is no unified approach for these types of tests, +but `mozperftest </testing/perfdocs/mozperftest.html>`_ aims to provide this in the future. + +For more detailed information about each test suite, see each projects' documentation: + +{test_documentation} + +For more information about the performance testing team, +`visit the wiki page <https://wiki.mozilla.org/TestEngineering/Performance>`_. diff --git a/tools/lint/perfdocs/utils.py b/tools/lint/perfdocs/utils.py new file mode 100644 index 0000000000..99b304260b --- /dev/null +++ b/tools/lint/perfdocs/utils.py @@ -0,0 +1,83 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+from __future__ import absolute_import + +import filecmp +import os +import yaml +from perfdocs.logger import PerfDocLogger + +logger = PerfDocLogger() + + +def save_file(file_content, path, extension="rst"): + """ + Saves data into a file. + + :param str path: Location and name of the file being saved + (without an extension). + :param str data: Content to write into the file. + :param str extension: Extension to save the file as. + """ + with open("{}.{}".format(path, extension), "w") as f: + f.write(file_content) + + +def read_file(path, stringify=False): + """ + Opens a file and returns its contents. + + :param str path: Path to the file. + :return list: List containing the lines in the file. + """ + with open(path, "r") as f: + return f.read() if stringify else f.readlines() + + +def read_yaml(yaml_path): + """ + Opens a YAML file and returns the contents. + + :param str yaml_path: Path to the YAML to open. + :return dict: Dictionary containing the YAML content. + """ + contents = {} + try: + with open(yaml_path, "r") as f: + contents = yaml.safe_load(f) + except Exception as e: + logger.warning("Error opening file {}: {}".format(yaml_path, str(e)), yaml_path) + + return contents + + +def are_dirs_equal(dir_1, dir_2): + """ + Compare two directories to see if they are equal. Files in each + directory are assumed to be equal if their names and contents + are equal. + + :param dir_1: First directory path + :param dir_2: Second directory path + :return: True if the directory trees are the same and False otherwise. + """ + + dirs_cmp = filecmp.dircmp(dir_1, dir_2) + if dirs_cmp.left_only or dirs_cmp.right_only or dirs_cmp.funny_files: + return False + + _, mismatch, errors = filecmp.cmpfiles( + dir_1, dir_2, dirs_cmp.common_files, shallow=False + ) + + if mismatch or errors: + return False + + for common_dir in dirs_cmp.common_dirs: + subdir_1 = os.path.join(dir_1, common_dir) + subdir_2 = os.path.join(dir_2, common_dir) + if not are_dirs_equal(subdir_1, subdir_2): + return False + + return True diff --git a/tools/lint/perfdocs/verifier.py b/tools/lint/perfdocs/verifier.py new file mode 100644 index 0000000000..a9a34478eb --- /dev/null +++ b/tools/lint/perfdocs/verifier.py @@ -0,0 +1,310 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +from __future__ import absolute_import + +import jsonschema +import os +import re + +from perfdocs.logger import PerfDocLogger +from perfdocs.utils import read_file, read_yaml +from perfdocs.gatherer import Gatherer + +logger = PerfDocLogger() + +""" +Schema for the config.yml file. +Expecting a YAML file with a format such as this: + +name: raptor +manifest: testing/raptor/raptor/raptor.ini +static-only: False +suites: + desktop: + description: "Desktop tests." + tests: + raptor-tp6: "Raptor TP6 tests." + mobile: + description: "Mobile tests" + benchmarks: + description: "Benchmark tests." + tests: + wasm: "All wasm tests." 
+ +""" +CONFIG_SCHEMA = { + "type": "object", + "properties": { + "name": {"type": "string"}, + "manifest": {"type": "string"}, + "static-only": {"type": "boolean"}, + "suites": { + "type": "object", + "properties": { + "suite_name": { + "type": "object", + "properties": { + "tests": { + "type": "object", + "properties": {"test_name": {"type": "string"}}, + }, + "description": {"type": "string"}, + }, + "required": ["description"], + } + }, + }, + }, + "required": ["name", "manifest", "static-only", "suites"], +} + + +class Verifier(object): + """ + Verifier is used for validating the perfdocs folders/tree. In the future, + the generator will make use of this class to obtain a validated set of + descriptions that can be used to build up a document. + """ + + def __init__(self, workspace_dir): + """ + Initialize the Verifier. + + :param str workspace_dir: Path to the top-level checkout directory. + """ + self.workspace_dir = workspace_dir + self._gatherer = Gatherer(workspace_dir) + + def validate_descriptions(self, framework_info): + """ + Cross-validate the tests found in the manifests and the YAML + test definitions. This function doesn't return a valid flag. Instead, + the StructDocLogger.VALIDATION_LOG is used to determine validity. + + The validation proceeds as follows: + 1. Check that all tests/suites in the YAML exist in the manifests. + - At the same time, build a list of global descriptions which + define descriptions for groupings of tests. + 2. Check that all tests/suites found in the manifests exist in the YAML. + - For missing tests, check if a global description for them exists. + + As the validation is completed, errors are output into the validation log + for any issues that are found. + + :param dict framework_info: Contains information about the framework. See + `Gatherer.get_test_list` for information about its structure. + """ + yaml_content = framework_info["yml_content"] + + # Check for any bad test/suite names in the yaml config file + global_descriptions = {} + for suite, ytests in yaml_content["suites"].items(): + # Find the suite, then check against the tests within it + if framework_info["test_list"].get(suite): + global_descriptions[suite] = [] + if not ytests.get("tests"): + # It's possible a suite entry has no tests + continue + + # Suite found - now check if any tests in YAML + # definitions doesn't exist + ytests = ytests["tests"] + for test_name in ytests: + foundtest = False + for t in framework_info["test_list"][suite]: + tb = os.path.basename(t) + tb = re.sub("\..*", "", tb) + if test_name == tb: + # Found an exact match for the test_name + foundtest = True + break + if test_name in tb: + # Found a 'fuzzy' match for the test_name + # i.e. 
'wasm' could exist for all raptor wasm tests + global_descriptions[suite].append(test_name) + foundtest = True + break + if not foundtest: + logger.warning( + "Could not find an existing test for {} - bad test name?".format( + test_name + ), + framework_info["yml_path"], + ) + else: + logger.warning( + "Could not find an existing suite for {} - bad suite name?".format( + suite + ), + framework_info["yml_path"], + ) + + # Check for any missing tests/suites + for suite, test_list in framework_info["test_list"].items(): + if not yaml_content["suites"].get(suite): + # Description doesn't exist for the suite + logger.warning( + "Missing suite description for {}".format(suite), + yaml_content["manifest"], + ) + continue + + # If only a description is provided for the suite, assume + # that this is a suite-wide description and don't check for + # it's tests + stests = yaml_content["suites"][suite].get("tests", None) + if not stests: + continue + + tests_found = 0 + missing_tests = [] + test_to_manifest = {} + for test_name, manifest_path in test_list.items(): + tb = os.path.basename(manifest_path) + tb = re.sub("\..*", "", tb) + if ( + stests.get(tb, None) is not None + or stests.get(test_name, None) is not None + ): + # Test description exists, continue with the next test + tests_found += 1 + continue + test_to_manifest[test_name] = manifest_path + missing_tests.append(test_name) + + # Check if global test descriptions exist (i.e. + # ones that cover all of tp6) for the missing tests + new_mtests = [] + for mt in missing_tests: + found = False + for test_name in global_descriptions[suite]: + # Global test exists for this missing test + if mt.startswith(test_name): + found = True + break + if test_name in mt: + found = True + break + if not found: + new_mtests.append(mt) + + if len(new_mtests): + # Output an error for each manifest with a missing + # test description + for test_name in new_mtests: + logger.warning( + "Could not find a test description for {}".format(test_name), + test_to_manifest[test_name], + ) + + def validate_yaml(self, yaml_path): + """ + Validate that the YAML file has all the fields that are + required and parse the descriptions into strings in case + some are give as relative file paths. + + :param str yaml_path: Path to the YAML to validate. + :return bool: True/False => Passed/Failed Validation + """ + + def _get_description(desc): + """ + Recompute the description in case it's a file. + """ + desc_path = os.path.join(self.workspace_dir, desc) + if os.path.exists(desc_path) and os.path.isfile(desc_path): + with open(desc_path, "r") as f: + desc = f.readlines() + return desc + + def _parse_descriptions(content): + for suite, sinfo in content.items(): + desc = sinfo["description"] + sinfo["description"] = _get_description(desc) + + # It's possible that the suite has no tests and + # only a description. If they exist, then parse them. + if "tests" in sinfo: + for test, desc in sinfo["tests"].items(): + sinfo["tests"][test] = _get_description(desc) + + valid = False + yaml_content = read_yaml(yaml_path) + + try: + jsonschema.validate(instance=yaml_content, schema=CONFIG_SCHEMA) + _parse_descriptions(yaml_content["suites"]) + valid = True + except Exception as e: + logger.warning("YAML ValidationError: {}".format(str(e)), yaml_path) + + return valid + + def validate_rst_content(self, rst_path): + """ + Validate that the index file given has a {documentation} entry + so that the documentation can be inserted there. + + :param str rst_path: Path to the RST file. 
+ :return bool: True/False => Passed/Failed Validation + """ + rst_content = read_file(rst_path) + + # Check for a {documentation} entry in some line, + # if we can't find one, then the validation fails. + valid = False + docs_match = re.compile(".*{documentation}.*") + for line in rst_content: + if docs_match.search(line): + valid = True + break + if not valid: + logger.warning( + "Cannot find a '{documentation}' entry in the given index file", + rst_path, + ) + + return valid + + def _check_framework_descriptions(self, item): + """ + Helper method for validating descriptions + """ + framework_info = self._gatherer.get_test_list(item) + self.validate_descriptions(framework_info) + + def validate_tree(self): + """ + Validate the `perfdocs` directory that was found. + Returns True if it is good, false otherwise. + + :return bool: True/False => Passed/Failed Validation + """ + found_good = 0 + + # For each framework, check their files and validate descriptions + for matched in self._gatherer.perfdocs_tree: + # Get the paths to the YAML and RST for this framework + matched_yml = os.path.join(matched["path"], matched["yml"]) + matched_rst = os.path.join(matched["path"], matched["rst"]) + + _valid_files = { + "yml": self.validate_yaml(matched_yml), + "rst": True, + } + if not read_yaml(matched_yml)["static-only"]: + _valid_files["rst"] = self.validate_rst_content(matched_rst) + + # Log independently the errors found for the matched files + for file_format, valid in _valid_files.items(): + if not valid: + logger.log("File validation error: {}".format(file_format)) + if not all(_valid_files.values()): + continue + found_good += 1 + + self._check_framework_descriptions(matched) + + if not found_good: + raise Exception("No valid perfdocs directories found") |
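
The CONFIG_SCHEMA defined in verifier.py is easiest to understand with a concrete round trip through jsonschema. The following is a minimal sketch, not part of the patch: it assumes tools/lint is on sys.path so that perfdocs.verifier.CONFIG_SCHEMA is importable, and the example configuration is adapted from the comment block that precedes the schema (the suite and test names are illustrative, not authoritative).

    # Minimal sketch: validate an example config.yml structure against
    # CONFIG_SCHEMA from perfdocs/verifier.py. Assumes tools/lint is on
    # sys.path; the example data mirrors the raptor sample in the comment
    # above CONFIG_SCHEMA and is illustrative only.
    import jsonschema

    from perfdocs.verifier import CONFIG_SCHEMA

    example_config = {
        "name": "raptor",
        "manifest": "testing/raptor/raptor/raptor.ini",
        "static-only": False,
        "suites": {
            "desktop": {
                "description": "Desktop tests.",
                "tests": {"raptor-tp6": "Raptor TP6 tests."},
            },
            "benchmarks": {
                "description": "Benchmark tests.",
                "tests": {"wasm": "All wasm tests."},
            },
        },
    }

    # jsonschema.validate() raises ValidationError on a malformed config;
    # Verifier.validate_yaml() catches that exception and reports it
    # through PerfDocLogger as a linting warning.
    jsonschema.validate(instance=example_config, schema=CONFIG_SCHEMA)
    print("example config passes CONFIG_SCHEMA validation")

In the linter itself this check is reached via ./mach lint -l perfdocs (verification) or ./mach lint -l perfdocs --fix (regeneration), as documented in the run_perfdocs docstring above.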