author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-12 05:43:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-12 05:43:14 +0000
commit     8dd16259287f58f9273002717ec4d27e97127719 (patch)
tree       3863e62a53829a84037444beab3abd4ed9dfc7d0 /tools/lint
parent     Releasing progress-linux version 126.0.1-1~progress7.99u1. (diff)
Merging upstream version 127.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/lint')
-rw-r--r--  tools/lint/condprof-addons.yml                                                      |   2
-rw-r--r--  tools/lint/condprof-addons/__init__.py                                              |   2
-rw-r--r--  tools/lint/eslint/eslint-plugin-mozilla/lib/configs/recommended.js                  |   2
-rw-r--r--  tools/lint/eslint/eslint-plugin-mozilla/lib/environments/privileged.js              |   1
-rw-r--r--  tools/lint/eslint/eslint-plugin-mozilla/lib/environments/special-powers-sandbox.js  |   1
-rw-r--r--  tools/lint/license.yml                                                              |  53
-rw-r--r--  tools/lint/perfdocs.yml                                                             |   2
-rw-r--r--  tools/lint/perfdocs/framework_gatherers.py                                          | 135
-rw-r--r--  tools/lint/perfdocs/gatherer.py                                                     |   5
-rw-r--r--  tools/lint/perfdocs/generator.py                                                    |  66
-rw-r--r--  tools/lint/perfdocs/verifier.py                                                     | 136
-rw-r--r--  tools/lint/python/l10n_lint.py                                                      |  19
-rw-r--r--  tools/lint/rejected-words.yml                                                       |   2
-rw-r--r--  tools/lint/test/conftest.py                                                         |   8
-rw-r--r--  tools/lint/test/test_perfdocs.py                                                    |  59
-rw-r--r--  tools/lint/test/test_perfdocs_generation.py                                         |  68
-rw-r--r--  tools/lint/trojan-source.yml                                                        |   2
17 files changed, 355 insertions, 208 deletions
diff --git a/tools/lint/condprof-addons.yml b/tools/lint/condprof-addons.yml
index a62c1fb6b9..b85deda7bc 100644
--- a/tools/lint/condprof-addons.yml
+++ b/tools/lint/condprof-addons.yml
@@ -5,6 +5,6 @@ condprof-addons:
- 'testing/condprofile/condprof/customization'
exclude: []
extensions: ['json']
- support-files: ['taskcluster/ci/fetch/browsertime.yml']
+ support-files: ['taskcluster/kinds/fetch/browsertime.yml']
type: structured_log
payload: condprof-addons:lint
diff --git a/tools/lint/condprof-addons/__init__.py b/tools/lint/condprof-addons/__init__.py
index f17ab26f3f..c569a4d8dc 100644
--- a/tools/lint/condprof-addons/__init__.py
+++ b/tools/lint/condprof-addons/__init__.py
@@ -13,7 +13,7 @@ import requests
import yaml
from mozlint.pathutils import expand_exclusions
-BROWSERTIME_FETCHES_PATH = Path("taskcluster/ci/fetch/browsertime.yml")
+BROWSERTIME_FETCHES_PATH = Path("taskcluster/kinds/fetch/browsertime.yml")
CUSTOMIZATIONS_PATH = Path("testing/condprofile/condprof/customization/")
DOWNLOAD_TIMEOUT = 30
ERR_FETCH_TASK_MISSING = "firefox-addons taskcluster fetch config section not found"
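
For context on the constant above: condprof-addons validates the add-ons referenced by conditioned-profile customizations against the browsertime fetch definitions, which moved from taskcluster/ci/ to taskcluster/kinds/. A minimal standalone sketch of reading the relocated file follows; the "firefox-addons" key is inferred from the ERR_FETCH_TASK_MISSING message, and the helper name is illustrative rather than the linter's real API.

import yaml
from pathlib import Path

BROWSERTIME_FETCHES_PATH = Path("taskcluster/kinds/fetch/browsertime.yml")
ERR_FETCH_TASK_MISSING = "firefox-addons taskcluster fetch config section not found"

def load_firefox_addons_fetch(topsrcdir):
    # Parse the fetch kind config and return the firefox-addons section,
    # failing loudly if the section has been moved or renamed.
    with (Path(topsrcdir) / BROWSERTIME_FETCHES_PATH).open() as f:
        fetches = yaml.safe_load(f)
    section = fetches.get("firefox-addons")  # assumed section name
    if section is None:
        raise ValueError(ERR_FETCH_TASK_MISSING)
    return section
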
diff --git a/tools/lint/eslint/eslint-plugin-mozilla/lib/configs/recommended.js b/tools/lint/eslint/eslint-plugin-mozilla/lib/configs/recommended.js
index 036ed1bda3..003311f623 100644
--- a/tools/lint/eslint/eslint-plugin-mozilla/lib/configs/recommended.js
+++ b/tools/lint/eslint/eslint-plugin-mozilla/lib/configs/recommended.js
@@ -55,7 +55,6 @@ module.exports = {
files: ["**/*.sys.mjs", "**/*.jsm"],
rules: {
"mozilla/lazy-getter-object-name": "error",
- "mozilla/reject-chromeutils-import": "error",
"mozilla/reject-eager-module-in-lazy-getter": "error",
"mozilla/reject-global-this": "error",
"mozilla/reject-globalThis-modification": "error",
@@ -180,6 +179,7 @@ module.exports = {
"mozilla/prefer-boolean-length-check": "error",
"mozilla/prefer-formatValues": "error",
"mozilla/reject-addtask-only": "error",
+ "mozilla/reject-chromeutils-import": "error",
"mozilla/reject-chromeutils-import-params": "error",
"mozilla/reject-importGlobalProperties": ["error", "allownonwebidl"],
"mozilla/reject-multiple-getters-calls": "error",
diff --git a/tools/lint/eslint/eslint-plugin-mozilla/lib/environments/privileged.js b/tools/lint/eslint/eslint-plugin-mozilla/lib/environments/privileged.js
index 7e6437ce7a..f0bd1aa8b8 100644
--- a/tools/lint/eslint/eslint-plugin-mozilla/lib/environments/privileged.js
+++ b/tools/lint/eslint/eslint-plugin-mozilla/lib/environments/privileged.js
@@ -317,6 +317,7 @@ module.exports = {
ImageData: false,
ImageDocument: false,
InputEvent: false,
+ InspectorCSSParser: false,
InspectorFontFace: false,
InspectorUtils: false,
InstallTriggerImpl: false,
diff --git a/tools/lint/eslint/eslint-plugin-mozilla/lib/environments/special-powers-sandbox.js b/tools/lint/eslint/eslint-plugin-mozilla/lib/environments/special-powers-sandbox.js
index 5a28c91883..2ef47d89e0 100644
--- a/tools/lint/eslint/eslint-plugin-mozilla/lib/environments/special-powers-sandbox.js
+++ b/tools/lint/eslint/eslint-plugin-mozilla/lib/environments/special-powers-sandbox.js
@@ -35,6 +35,7 @@ module.exports = {
assert: false,
Assert: false,
BrowsingContext: false,
+ InspectorCSSParser: false,
InspectorUtils: false,
ok: false,
is: false,
diff --git a/tools/lint/license.yml b/tools/lint/license.yml
index 34e1eb817c..00490b3028 100644
--- a/tools/lint/license.yml
+++ b/tools/lint/license.yml
@@ -26,13 +26,6 @@ license:
- config/external/nspr/_pr_bld.h
# Unknown origin
- gfx/2d/MMIHelpers.h
- # might not work with license
- - gradle.properties
- # might not work with license
- - gradle/wrapper/gradle-wrapper.properties
- - mobile/android/android-components/gradle/wrapper/gradle-wrapper.properties
- - mobile/android/fenix/gradle/wrapper/gradle-wrapper.properties
- - mobile/android/focus-android/gradle/wrapper/gradle-wrapper.properties
# ICU4X data
- intl/icu_segmenter_data
# Imported code that is dual Apache2 / MIT licensed
@@ -44,52 +37,8 @@ license:
- mobile/android/geckoview_example/src/main
- testing/webcompat/interventions/
- testing/webcompat/shims/
- # TODO - Bug 1881094: temporarily ignored for firefox-android migration
- - mobile/android/android-components/components/browser/engine-system/src/main/res/
- - mobile/android/android-components/components/browser/errorpages/src/main/res/
- - mobile/android/android-components/components/browser/menu/src/main/res/
- - mobile/android/android-components/components/browser/menu2/src/main/res/
- - mobile/android/android-components/components/browser/toolbar/src/main/res/
- - mobile/android/android-components/components/compose/awesomebar/src/main/res/
- - mobile/android/android-components/components/compose/browser-toolbar/src/main/res/
- - mobile/android/android-components/components/compose/cfr/src/main/res/
- - mobile/android/android-components/components/compose/tabstray/src/main/res/
- - mobile/android/android-components/components/feature/addons/src/main/res/
- - mobile/android/android-components/components/feature/app-links/src/main/res/
- - mobile/android/android-components/components/feature/autofill/src/main/res/
- - mobile/android/android-components/components/feature/awesomebar/src/main/res/
- - mobile/android/android-components/components/feature/contextmenu/src/main/res/
- - mobile/android/android-components/components/feature/customtabs/src/main/res/
- - mobile/android/android-components/components/feature/downloads/src/main/res/
- - mobile/android/android-components/components/feature/findinpage/src/main/res/
- - mobile/android/android-components/components/feature/fxsuggest/src/main/res/
- - mobile/android/android-components/components/feature/media/src/main/res/
- - mobile/android/android-components/components/feature/privatemode/src/main/res/
- - mobile/android/android-components/components/feature/prompts/src/main/res/
- - mobile/android/android-components/components/feature/pwa/src/main/res/
- - mobile/android/android-components/components/feature/qr/src/main/res/
- - mobile/android/android-components/components/feature/readerview/src/main/res/
- - mobile/android/android-components/components/feature/search/src/main/res/
- - mobile/android/android-components/components/feature/sitepermissions/
- - mobile/android/android-components/components/feature/tabs/src/main/res/
- - mobile/android/android-components/components/feature/webcompat/src/main/assets/extensions/webcompat/injections/
- - mobile/android/android-components/components/feature/webnotifications/src/main/res/
- - mobile/android/android-components/components/lib/crash/src/main/res/
- - mobile/android/android-components/components/service/nimbus/src/main/res/
- - mobile/android/android-components/components/support/base/src/main/res/
- - mobile/android/android-components/components/support/ktx/src/main/res/
- - mobile/android/android-components/components/support/utils/src/main/res/
- - mobile/android/android-components/components/ui/tabcounter/src/main/res/
- - mobile/android/android-components/components/ui/widgets/src/main/res/
+ # TODO - Bug 1882443: temporarily ignored for firefox-android migration
- mobile/android/android-components/docs/
- - mobile/android/fenix/app/src/main/res/
- - mobile/android/fenix/benchmark/src/main/AndroidManifest.xml
- - mobile/android/focus-android/app/lint-baseline.xml
- - mobile/android/focus-android/app/src/main/res/
- # might not work with license
- - mobile/android/gradle/dotgradle-offline/gradle.properties
- # might not work with license
- - mobile/android/gradle/dotgradle-online/gradle.properties
# Almost empty file
- modules/libpref/greprefs.js
- parser/html/java/named-character-references.html
diff --git a/tools/lint/perfdocs.yml b/tools/lint/perfdocs.yml
index 175781ad00..62d6b40897 100644
--- a/tools/lint/perfdocs.yml
+++ b/tools/lint/perfdocs.yml
@@ -12,7 +12,7 @@ perfdocs:
'dom/indexedDB/test',
]
exclude: []
- extensions: ['rst', 'ini', 'yml']
+ extensions: []
support-files: []
type: structured_log
payload: perfdocs:lint
diff --git a/tools/lint/perfdocs/framework_gatherers.py b/tools/lint/perfdocs/framework_gatherers.py
index 3c0a4026d9..c875b2a2c8 100644
--- a/tools/lint/perfdocs/framework_gatherers.py
+++ b/tools/lint/perfdocs/framework_gatherers.py
@@ -51,6 +51,38 @@ class FrameworkGatherer(object):
self._task_list = {}
self._task_match_pattern = re.compile(r"([\w\W]*/[pgo|opt]*)-([\w\W]*)")
+ def _build_section_with_header(self, title, content, header_type=None):
+ """
+ Adds a section to the documentation, rendering the given title as a
+ heading of the requested type and the given content as its paragraph.
+ :param title: title of the section
+ :param content: content of section paragraph
+ :param header_type: type of the title heading
+ """
+ heading_map = {"H2": "*", "H3": "=", "H4": "-", "H5": "^"}
+ return [title, heading_map.get(header_type, "^") * len(title), content, ""]
+
+ def _get_metric_heading(self, metric, metrics_info):
+ """
+ Gets the heading of a specific metric.
+
+ :param str metric: The metric to search for.
+ :param dict metrics_info: The information of all the
+ metrics that were documented.
+ :return str: The heading to use for the given metric.
+ """
+ for metric_heading, metric_info in metrics_info.items():
+ if metric == metric_heading or any(
+ metric == alias for alias in metric_info.get("aliases", [])
+ ):
+ return metric_heading
+ if metric_info.get("matcher"):
+ match = re.search(metric_info["matcher"], metric)
+ if match:
+ return metric_heading
+
+ raise Exception(f"Could not find a metric heading for `{metric}`")
+
def get_task_match(self, task_name):
return re.search(self._task_match_pattern, task_name)
@@ -85,16 +117,23 @@ class FrameworkGatherer(object):
"""
raise NotImplementedError
- def _build_section_with_header(self, title, content, header_type=None):
+ def build_metrics_documentation(self, yaml_content):
"""
- Adds a section to the documentation with the title as the type mentioned
- and paragraph as content mentioned.
- :param title: title of the section
- :param content: content of section paragraph
- :param header_type: type of the title heading
+ Each framework that provides a page with descriptions about the
+ metrics it produces must implement this method. The metrics defined
+ for the framework can be found in the `yaml_content` variable.
+
+ The framework gatherer is expected to produce the full documentation
+ for all the metrics defined in the yaml_content at once. This is done
+ to allow differentiation between how metrics are displayed between
+ the different frameworks.
+
+ :param dict yaml_content: A dictionary of the YAML config file for
+ the specific framework.
+ :return list: A list of all the lines being added to the metrics
+ documentation.
"""
- heading_map = {"H2": "*", "H3": "=", "H4": "-", "H5": "^"}
- return [title, heading_map.get(header_type, "^") * len(title), content, ""]
+ raise NotImplementedError
class RaptorGatherer(FrameworkGatherer):
@@ -167,7 +206,9 @@ class RaptorGatherer(FrameworkGatherer):
:return list: the list of the tests
"""
desc_exclusion = ["here", "manifest_relpath", "path", "relpath"]
- test_manifest = TestManifest([str(manifest_path)], strict=False, document=True)
+ test_manifest = TestManifest(
+ [str(manifest_path)], strict=False, document=True, add_line_no=True
+ )
test_list = test_manifest.active_tests(exists=False, disabled=False)
subtests = {}
for subtest in test_list:
@@ -206,6 +247,21 @@ class RaptorGatherer(FrameworkGatherer):
return subtests
+ def _get_metric_heading(self, metric, metrics_info):
+ """
+ Finds, and returns the correct heading for a metric to target in a reference link.
+
+ :param str metric: The metric to search for.
+ :param dict metrics_info: The information of all the
+ metrics that were documented.
+ :return str: A formatted string containing the reference link to the
+ documented metric.
+ """
+ metric_heading = super(RaptorGatherer, self)._get_metric_heading(
+ metric, metrics_info
+ )
+ return f"`{metric} <raptor-metrics.html#{metric_heading.lower().replace(' ', '-')}>`__"
+
def get_test_list(self):
"""
Returns a dictionary containing the tests in every suite ini file.
@@ -235,7 +291,9 @@ class RaptorGatherer(FrameworkGatherer):
return self._test_list
- def build_test_description(self, title, test_description="", suite_name=""):
+ def build_test_description(
+ self, title, test_description="", suite_name="", metrics_info=None
+ ):
matcher = []
browsers = [
"firefox",
@@ -292,6 +350,18 @@ class RaptorGatherer(FrameworkGatherer):
f" * **{sub_title}**: "
f"{description[key].replace('{subtest}', description['name'])}\n"
)
+ elif key == "alert_on":
+ result += (
+ f" * **{sub_title}**: "
+ + ", ".join(
+ self._get_metric_heading(metric.strip(), metrics_info)
+ for metric in description[key]
+ .replace("\n", " ")
+ .replace(",", " ")
+ .split()
+ )
+ + "\n"
+ )
else:
if "\n" in description[key]:
description[key] = description[key].replace("\n", " ")
@@ -328,6 +398,39 @@ class RaptorGatherer(FrameworkGatherer):
title.capitalize(), content, header_type="H4"
)
+ def build_metrics_documentation(self, parsed_metrics):
+ metrics_documentation = []
+ for metric, metric_info in sorted(
+ parsed_metrics.items(), key=lambda item: item[0]
+ ):
+ metric_content = metric_info["description"] + "\n\n"
+
+ metric_content += (
+ f" * **Aliases**: {', '.join(sorted(metric_info['aliases']))}\n"
+ )
+ metric_content += " * **Tests using it**:\n"
+
+ for suite, tests in sorted(
+ metric_info["location"].items(), key=lambda item: item[0]
+ ):
+ metric_content += f" * **{suite.capitalize()}**: "
+
+ test_links = []
+ for test in sorted(tests):
+ test_links.append(
+ f"`{test} <raptor.html#{test}-{suite.lower()[0]}>`__"
+ )
+
+ metric_content += ", ".join(test_links) + "\n"
+
+ metrics_documentation.extend(
+ self._build_section_with_header(
+ metric, metric_content, header_type="H3"
+ )
+ )
+
+ return metrics_documentation
+
class MozperftestGatherer(FrameworkGatherer):
"""
@@ -371,7 +474,9 @@ class MozperftestGatherer(FrameworkGatherer):
return self._test_list
- def build_test_description(self, title, test_description="", suite_name=""):
+ def build_test_description(
+ self, title, test_description="", suite_name="", metrics_info=None
+ ):
return [str(self.script_infos[title])]
def build_suite_section(self, title, content):
@@ -428,7 +533,9 @@ class TalosGatherer(FrameworkGatherer):
return self._test_list
- def build_test_description(self, title, test_description="", suite_name=""):
+ def build_test_description(
+ self, title, test_description="", suite_name="", metrics_info=None
+ ):
result = f".. dropdown:: {title}\n"
result += f" :class-container: anchor-id-{title}\n\n"
@@ -548,7 +655,9 @@ class AwsyGatherer(FrameworkGatherer):
title.capitalize(), content, header_type="H4"
)
- def build_test_description(self, title, test_description="", suite_name=""):
+ def build_test_description(
+ self, title, test_description="", suite_name="", metrics_info=None
+ ):
dropdown_suite_name = suite_name.replace(" ", "-")
result = f".. dropdown:: {title} ({test_description})\n"
result += f" :class-container: anchor-id-{title}-{dropdown_suite_name}\n\n"
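
The two helpers hoisted into the FrameworkGatherer base class above are small enough to sketch standalone. This simplified re-implementation (plain functions instead of methods, no mozilla-central imports) shows the RST underline each heading level produces and the order in which a metric heading is resolved: exact name first, then aliases, then an optional regex matcher.

import re

def build_section_with_header(title, content, header_type=None):
    # RST heading underlines: H2 -> '*', H3 -> '=', H4 -> '-', default -> '^'.
    heading_map = {"H2": "*", "H3": "=", "H4": "-", "H5": "^"}
    return [title, heading_map.get(header_type, "^") * len(title), content, ""]

def get_metric_heading(metric, metrics_info):
    # Resolve a measured metric name to the heading it is documented under.
    for heading, info in metrics_info.items():
        if metric == heading or metric in info.get("aliases", []):
            return heading
        if info.get("matcher") and re.search(info["matcher"], metric):
            return heading
    raise Exception(f"Could not find a metric heading for `{metric}`")

metrics_info = {"FirstPaint": {"aliases": ["fcp"], "matcher": None}}
print(build_section_with_header("FirstPaint", "a description", header_type="H3"))
# ['FirstPaint', '==========', 'a description', '']
print(get_metric_heading("fcp", metrics_info))
# FirstPaint

RaptorGatherer's override then wraps the resolved heading into a raptor-metrics.html reference link, as shown in the hunk above.
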
diff --git a/tools/lint/perfdocs/gatherer.py b/tools/lint/perfdocs/gatherer.py
index 828c2f7f2b..89f03cfdbb 100644
--- a/tools/lint/perfdocs/gatherer.py
+++ b/tools/lint/perfdocs/gatherer.py
@@ -87,6 +87,9 @@ class Gatherer(object):
if any(d in str(path.resolve()) for d in exclude_dir):
continue
files = [f for f in os.listdir(path)]
+
+ # Metrics are optional, so a "metrics" entry is only added to `matched`
+ # when a `metrics.rst` file is found in the perfdocs folder
matched = {"path": str(path), "yml": "", "rst": "", "static": []}
for file in files:
@@ -95,6 +98,8 @@ class Gatherer(object):
matched["yml"] = file
elif file == "index.rst":
matched["rst"] = file
+ elif file == "metrics.rst":
+ matched["metrics"] = file
elif file.split(".")[-1] in ALLOWED_STATIC_FILETYPES:
matched["static"].append(file)
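
In other words, after this change a framework's matched entry only carries a "metrics" key when a metrics.rst file actually exists, and downstream code probes it with .get("metrics"). A toy illustration of the resulting shape (file list and the yml condition simplified for the example):

files = ["config.yml", "index.rst", "metrics.rst", "logo.png"]
matched = {"path": "testing/raptor/perfdocs", "yml": "", "rst": "", "static": []}
for file in files:
    if file.endswith(".yml"):  # simplified stand-in for the real yml check
        matched["yml"] = file
    elif file == "index.rst":
        matched["rst"] = file
    elif file == "metrics.rst":
        matched["metrics"] = file  # optional key, present only when the file exists
    else:
        matched["static"].append(file)  # real code also filters by filetype

assert matched.get("metrics") == "metrics.rst"
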
diff --git a/tools/lint/perfdocs/generator.py b/tools/lint/perfdocs/generator.py
index 3f3a0acefa..240468d62c 100644
--- a/tools/lint/perfdocs/generator.py
+++ b/tools/lint/perfdocs/generator.py
@@ -70,6 +70,17 @@ class Generator(object):
rst_content = read_file(
pathlib.Path(framework["path"], framework["rst"]), stringify=True
)
+ gatherer = self._verifier._gatherer.framework_gatherers[
+ yaml_content["name"]
+ ]
+
+ metrics_rst_content = None
+ metrics_info = self._verifier.metrics_info[yaml_content["name"]]
+ if framework.get("metrics"):
+ metrics_rst_content = read_file(
+ pathlib.Path(framework["path"], framework["metrics"]),
+ stringify=True,
+ )
# Gather all tests and descriptions and format them into
# documentation content
@@ -87,21 +98,41 @@ class Generator(object):
tests = suite_info.get("tests", {})
for test_name in sorted(tests.keys()):
- gatherer = self._verifier._gatherer.framework_gatherers[
- yaml_content["name"]
- ]
test_description = gatherer.build_test_description(
- test_name, tests[test_name], suite_name
+ test_name,
+ test_description=tests[test_name],
+ suite_name=suite_name,
+ metrics_info=metrics_info,
)
documentation.extend(test_description)
documentation.append("")
+ # For each framework, we may want to organize the metrics differently
+ # so delegate the complete setup of the metric documentation to the
+ # framework-specific gatherers
+ metrics_rst = ""
+ if metrics_rst_content:
+ metrics_documentation = gatherer.build_metrics_documentation(
+ metrics_info
+ )
+ metrics_rst = re.sub(
+ r"{metrics_documentation}",
+ "\n".join(metrics_documentation),
+ metrics_rst_content,
+ )
+ rst_content = re.sub(
+ r"{metrics_rst_name}",
+ f"{yaml_content['name']}-metrics",
+ rst_content,
+ )
+
# Insert documentation into `.rst` file
framework_rst = re.sub(
r"{documentation}", "\n".join(documentation), rst_content
)
frameworks_info[yaml_content["name"]] = {
"dynamic": framework_rst,
+ "metrics": metrics_rst,
"static": [],
}
@@ -165,6 +196,12 @@ class Generator(object):
pathlib.Path(perfdocs_tmpdir, framework_name),
)
+ if framework_docs[framework_name]["metrics"]:
+ save_file(
+ framework_docs[framework_name]["metrics"],
+ pathlib.Path(perfdocs_tmpdir, f"{framework_name}-metrics"),
+ )
+
for static_name in framework_docs[framework_name]["static"]:
if static_name["file"].endswith(".rst"):
# XXX Replace this with a shutil.copy call (like below)
@@ -266,16 +303,15 @@ class Generator(object):
perfdocs_tmpdir = self._create_perfdocs()
if self._generate:
self._save_perfdocs(perfdocs_tmpdir)
- else:
+ elif not are_dirs_equal(perfdocs_tmpdir, self.perfdocs_path):
# If we are not generating, then at least check if they
# should be regenerated by comparing the directories.
- if not are_dirs_equal(perfdocs_tmpdir, self.perfdocs_path):
- logger.warning(
- "PerfDocs are outdated, run ./mach lint -l perfdocs --fix .` "
- + "to update them. You can also apply the "
- + f"{'perfdocs.diff' if ON_TRY else 'diff.txt'} patch file "
- + f"{'produced from this reviewbot test ' if ON_TRY else ''}"
- + "to fix the issue.",
- files=get_changed_files(self._workspace),
- restricted=False,
- )
+ logger.warning(
+ "PerfDocs are outdated, run `./mach lint -l perfdocs --fix .` "
+ + "to update them. You can also apply the "
+ + f"{'perfdocs.diff' if ON_TRY else 'diff.txt'} patch file "
+ + f"{'produced from this reviewbot test ' if ON_TRY else ''}"
+ + "to fix the issue.",
+ files=get_changed_files(self._workspace),
+ restricted=False,
+ )
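
Condensed, the new template flow adds two placeholder substitutions alongside the existing {documentation} one: the metrics documentation is rendered into the framework's metrics.rst template, and the index picks up a resolved {metrics_rst_name} link target. A standalone sketch with inlined sample inputs in place of the Generator's real file handling:

import re

rst_content = "Raptor\n{metrics_rst_name}\n{documentation}\n"
metrics_rst_content = "Metrics\n{metrics_documentation}\n"
documentation = [".. dropdown:: Example"]
metrics_documentation = ["FirstPaint", "==========", "a description", ""]

# Fill the metrics template, then point the index at the generated page.
metrics_rst = re.sub(
    r"{metrics_documentation}", "\n".join(metrics_documentation), metrics_rst_content
)
rst_content = re.sub(r"{metrics_rst_name}", "raptor-metrics", rst_content)
framework_rst = re.sub(r"{documentation}", "\n".join(documentation), rst_content)
print(framework_rst)
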
diff --git a/tools/lint/perfdocs/verifier.py b/tools/lint/perfdocs/verifier.py
index 0f39e3a047..5f0b62cbf2 100644
--- a/tools/lint/perfdocs/verifier.py
+++ b/tools/lint/perfdocs/verifier.py
@@ -63,12 +63,10 @@ CONFIG_SCHEMA = {
"type": "object",
"properties": {
"test_name": {"type": "string"},
- "metrics": {"$ref": "#/definitions/metrics_schema"},
},
},
"description": {"type": "string"},
"owner": {"type": "string"},
- "metrics": {"$ref": "#/definitions/metrics_schema"},
},
"required": ["description"],
}
@@ -93,6 +91,7 @@ class Verifier(object):
:param str workspace_dir: Path to the top-level checkout directory.
"""
self.workspace_dir = workspace_dir
+ self.metrics_info = {}
self._gatherer = Gatherer(workspace_dir, taskgraph)
self._compiled_matchers = {}
@@ -242,104 +241,32 @@ class Verifier(object):
the test harness as real metrics. Failures here suggest that a metric
changed name, is missing an alias, is misnamed, duplicated, or was removed.
"""
- yaml_suite = yaml_content["suites"][suite]
- suite_metrics = yaml_suite.get("metrics", {})
-
- # Check to make sure all the metrics with given descriptions
- # are actually being measured. Add the metric to the "verified" field in
- # global_metrics to use it later for "global" metrics that can
- # have their descriptions removed. Start from the test level.
- for test_name, test_info in yaml_suite.get("tests", {}).items():
- if not isinstance(test_info, dict):
- continue
- test_metrics_info = test_info.get("metrics", {})
-
- # Find all tests that match with this name in case they measure
- # different things
- measured_metrics = []
- for t in framework_info["test_list"][suite]:
- if not self._is_yaml_test_match(t, test_name):
- # Check to make sure we are checking against the right
- # test. Skip the metric check if we can't find the test.
- continue
- measured_metrics.extend(
- framework_info["test_list"][suite][t].get("metrics", [])
- )
-
- if len(measured_metrics) == 0:
- continue
-
- # Check if all the test metrics documented exist
- for metric_name, metric_info in test_metrics_info.items():
+ for global_metric_name, global_metric_info in global_metrics["global"].items():
+ for test, test_info in framework_info["test_list"][suite].items():
verified_metrics = self._match_metrics(
- metric_name, metric_info, measured_metrics
+ global_metric_name, global_metric_info, test_info.get("metrics", [])
)
- if len(verified_metrics) > 0:
+ if len(verified_metrics) == 0:
+ continue
+
+ if global_metric_info.get("verified", False):
+ # We already verified this global metric, but add any
+ # extra verified metrics here
+ global_metrics["verified"].extend(verified_metrics)
+ else:
+ global_metric_info["verified"] = True
global_metrics["yaml-verified"].extend(
- [metric_name] + metric_info["aliases"]
+ [global_metric_name] + global_metric_info["aliases"]
)
global_metrics["verified"].extend(
- [metric_name] + metric_info["aliases"] + verified_metrics
- )
- else:
- logger.warning(
- (
- "Cannot find documented metric `{}` "
- "being used in the specified test `{}`."
- ).format(metric_name, test_name),
- framework_info["yml_path"],
+ [global_metric_name]
+ + global_metric_info["aliases"]
+ + verified_metrics
)
- # Check the suite level now
- for suite_metric_name, suite_metric_info in suite_metrics.items():
- measured_metrics = []
- for _, test_info in framework_info["test_list"][suite].items():
- measured_metrics.extend(test_info.get("metrics", []))
-
- verified_metrics = self._match_metrics(
- suite_metric_name, suite_metric_info, measured_metrics
- )
- if len(verified_metrics) > 0:
- global_metrics["yaml-verified"].extend(
- [suite_metric_name] + suite_metric_info["aliases"]
- )
- global_metrics["verified"].extend(
- [suite_metric_name]
- + suite_metric_info["aliases"]
- + verified_metrics
- )
- else:
- logger.warning(
- (
- "Cannot find documented metric `{}` "
- "being used in the specified suite `{}`."
- ).format(suite_metric_name, suite),
- framework_info["yml_path"],
- )
-
- # Finally check the global level (output failures later)
- all_measured_metrics = []
- for _, test_info in framework_info["test_list"][suite].items():
- all_measured_metrics.extend(test_info.get("metrics", []))
- for global_metric_name, global_metric_info in global_metrics["global"].items():
- verified_metrics = self._match_metrics(
- global_metric_name, global_metric_info, all_measured_metrics
- )
- if global_metric_info.get("verified", False):
- # We already verified this global metric, but add any
- # extra verified metrics here
- global_metrics["verified"].extend(verified_metrics)
- continue
- if len(verified_metrics) > 0:
- global_metric_info["verified"] = True
- global_metrics["yaml-verified"].extend(
- [global_metric_name] + global_metric_info["aliases"]
- )
- global_metrics["verified"].extend(
- [global_metric_name]
- + global_metric_info["aliases"]
- + verified_metrics
- )
+ global_metric_info.setdefault("location", {}).setdefault(
+ suite, []
+ ).append(test)
def _validate_metrics_harness_direction(
self, suite, test_list, yaml_content, global_metrics
@@ -484,6 +411,8 @@ class Verifier(object):
suite, test_list, yaml_content, global_metrics
)
+ self.metrics_info[framework_info["name"]] = global_metrics["global"]
+
def validate_yaml(self, yaml_path):
"""
Validate that the YAML file has all the fields that are
@@ -532,10 +461,10 @@ class Verifier(object):
return valid
- def validate_rst_content(self, rst_path):
+ def validate_rst_content(self, rst_path, expected_str):
"""
- Validate that the index file given has a {documentation} entry
- so that the documentation can be inserted there.
+ Validate that a given RST file contains the expected string so
+ that the generated documentation can be inserted there.
:param str rst_path: Path to the RST file.
:return bool: True/False => Passed/Failed Validation
@@ -545,14 +474,14 @@ class Verifier(object):
# Check for a {documentation} entry in some line,
# if we can't find one, then the validation fails.
valid = False
- docs_match = re.compile(".*{documentation}.*")
+ docs_match = re.compile(f".*{expected_str}.*")
for line in rst_content:
if docs_match.search(line):
valid = True
break
if not valid:
logger.warning( # noqa: PLE1205
- "Cannot find a '{documentation}' entry in the given index file",
+ f"Cannot find a '{expected_str}' entry in the given index file",
rst_path,
)
@@ -583,9 +512,18 @@ class Verifier(object):
_valid_files = {
"yml": self.validate_yaml(matched_yml),
"rst": True,
+ "metrics": True,
}
if not read_yaml(matched_yml)["static-only"]:
- _valid_files["rst"] = self.validate_rst_content(matched_rst)
+ _valid_files["rst"] = self.validate_rst_content(
+ matched_rst, "{documentation}"
+ )
+
+ if matched.get("metrics"):
+ _valid_files["metrics"] = self.validate_rst_content(
+ pathlib.Path(matched["path"], matched["metrics"]),
+ "{metrics_documentation}",
+ )
# Log independently the errors found for the matched files
for file_format, valid in _valid_files.items():
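
The generalized check boils down to scanning the file for a line that mentions the expected template string. A simplified sketch (plain print in place of the PerfDocLogger warning used above):

import re

def validate_rst_content(rst_path, expected_str):
    # Pass as soon as any line contains the expected template placeholder.
    docs_match = re.compile(f".*{expected_str}.*")
    with open(rst_path) as f:
        for line in f:
            if docs_match.search(line):
                return True
    print(f"Cannot find a '{expected_str}' entry in the given index file: {rst_path}")
    return False

# validate_rst_content("perfdocs/index.rst", "{documentation}")
# validate_rst_content("perfdocs/metrics.rst", "{metrics_documentation}")
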
diff --git a/tools/lint/python/l10n_lint.py b/tools/lint/python/l10n_lint.py
index 158cb5f7e6..2ba2278d22 100644
--- a/tools/lint/python/l10n_lint.py
+++ b/tools/lint/python/l10n_lint.py
@@ -40,15 +40,14 @@ def lint_strings(name, paths, lintconfig, **lintargs):
extensions = lintconfig.get("extensions")
# Load l10n.toml configs
- l10nconfigs = load_configs(lintconfig, root, l10n_base, name)
+ l10nconfigs = load_configs(lintconfig["l10n_configs"], root, l10n_base, name)
- # Check include paths in l10n.yml if it's in our given paths
- # Only the l10n.yml will show up here, but if the l10n.toml files
- # change, we also get the l10n.yml as the toml files are listed as
- # support files.
+ # If l10n.yml is included in the provided paths, validate it against the
+ # TOML files, then remove it to avoid parsing it as a localizable resource.
if lintconfig["path"] in paths:
results = validate_linter_includes(lintconfig, l10nconfigs, lintargs)
paths.remove(lintconfig["path"])
+ lintconfig["include"].remove(mozpath.relpath(lintconfig["path"], root))
else:
results = []
@@ -60,8 +59,7 @@ def lint_strings(name, paths, lintconfig, **lintargs):
all_files.append(fileobj.path)
if fp.isfile:
all_files.append(p)
- # Filter again, our directories might have picked up files the
- # explicitly excluded in the l10n.yml configuration.
+ # Filter out files explicitly excluded in the l10n.yml configuration.
# `browser/locales/en-US/firefox-l10n.js` is a good example.
all_files, _ = pathutils.filterpaths(
lintargs["root"],
@@ -70,7 +68,8 @@ def lint_strings(name, paths, lintconfig, **lintargs):
exclude=exclude,
extensions=extensions,
)
- # These should be excluded in l10n.yml
+ # Filter again, our directories might have picked up files that should be
+ # excluded in l10n.yml
skips = {p for p in all_files if not parser.hasParser(p)}
results.extend(
result.from_config(
@@ -142,11 +141,11 @@ def strings_repo_setup(repo: str, name: str):
fh.flush()
-def load_configs(lintconfig, root, l10n_base, locale):
+def load_configs(l10n_configs, root, l10n_base, locale):
"""Load l10n configuration files specified in the linter configuration."""
configs = []
env = {"l10n_base": l10n_base}
- for toml in lintconfig["l10n_configs"]:
+ for toml in l10n_configs:
cfg = TOMLParser().parse(
mozpath.join(root, toml), env=env, ignore_missing_includes=True
)
diff --git a/tools/lint/rejected-words.yml b/tools/lint/rejected-words.yml
index ed9e8c9d60..6a809930ec 100644
--- a/tools/lint/rejected-words.yml
+++ b/tools/lint/rejected-words.yml
@@ -213,7 +213,7 @@ avoid-blacklist-and-whitelist:
- security/sandbox/linux/SandboxFilter.cpp
- security/sandbox/linux/SandboxFilterUtil.h
- security/sandbox/linux/Sandbox.h
- - taskcluster/ci/docker-image/kind.yml
+ - taskcluster/kinds/docker-image/kind.yml
- taskcluster/gecko_taskgraph/actions/create_interactive.py
- taskcluster/gecko_taskgraph/transforms/test/other.py
- taskcluster/gecko_taskgraph/try_option_syntax.py
diff --git a/tools/lint/test/conftest.py b/tools/lint/test/conftest.py
index ad88f8aa97..94ac511804 100644
--- a/tools/lint/test/conftest.py
+++ b/tools/lint/test/conftest.py
@@ -260,6 +260,7 @@ def perfdocs_sample():
DYNAMIC_SAMPLE_CONFIG,
SAMPLE_CONFIG,
SAMPLE_INI,
+ SAMPLE_METRICS_CONFIG,
SAMPLE_TEST,
temp_dir,
temp_file,
@@ -291,7 +292,11 @@ def perfdocs_sample():
) as tmpconfig, temp_file(
"config_2.yml", tempdir=perfdocs_dir, content=DYNAMIC_SAMPLE_CONFIG
) as tmpconfig_2, temp_file(
- "index.rst", tempdir=perfdocs_dir, content="{documentation}"
+ "config_metrics.yml", tempdir=perfdocs_dir, content=SAMPLE_METRICS_CONFIG
+ ) as tmpconfig_metrics, temp_file(
+ "index.rst",
+ tempdir=perfdocs_dir,
+ content="{metrics_rst_name}{documentation}",
) as tmpindex:
yield {
"top_dir": tmpdir,
@@ -301,5 +306,6 @@ def perfdocs_sample():
"test": tmptest,
"config": tmpconfig,
"config_2": tmpconfig_2,
+ "config_metrics": tmpconfig_metrics,
"index": tmpindex,
}
diff --git a/tools/lint/test/test_perfdocs.py b/tools/lint/test/test_perfdocs.py
index 4ee834ad68..5a12c82d4d 100644
--- a/tools/lint/test/test_perfdocs.py
+++ b/tools/lint/test/test_perfdocs.py
@@ -85,11 +85,32 @@ suites:
SAMPLE_METRICS_CONFIG = """
name: raptor
+manifest: "None"
+metrics:
+ 'test':
+ aliases: [t1, t2]
+ description: a description
+ matcher: f.*|S.*
+static-only: False
+suites:
+ suite:
+ description: "Performance tests from the 'suite' folder."
+ tests:
+ Example: "Performance test Example from another_suite."
+ another_suite:
+ description: "Performance tests from the 'another_suite' folder."
+ tests:
+ Example: "Performance test Example from another_suite."
+"""
+
+
+DYNAMIC_METRICS_CONFIG = """
+name: raptor
manifest: "None"{}
static-only: False
suites:
suite:
- description: "Performance tests from the 'suite' folder."{}
+ description: "Performance tests from the 'suite' folder."
tests:
Example: "Performance test Example from another_suite."
another_suite:
@@ -324,7 +345,9 @@ def test_perfdocs_verifier_validate_rst_pass(
from perfdocs.verifier import Verifier
- valid = Verifier(top_dir).validate_rst_content(pathlib.Path(rst_path))
+ valid = Verifier(top_dir).validate_rst_content(
+ pathlib.Path(rst_path), expected_str="{documentation}"
+ )
assert valid
@@ -347,7 +370,7 @@ def test_perfdocs_verifier_invalid_rst(logger, structured_logger, perfdocs_sampl
from perfdocs.verifier import Verifier
verifier = Verifier("top_dir")
- valid = verifier.validate_rst_content(rst_path)
+ valid = verifier.validate_rst_content(rst_path, expected_str="{documentation}")
expected = (
"Cannot find a '{documentation}' entry in the given index file",
@@ -532,7 +555,7 @@ def test_perfdocs_verifier_nonexistent_documented_metrics(
setup_sample_logger(logger, structured_logger, top_dir)
with open(perfdocs_sample["config"], "w", newline="\n") as f:
- f.write(SAMPLE_METRICS_CONFIG.format(metric_definitions, ""))
+ f.write(DYNAMIC_METRICS_CONFIG.format(metric_definitions, ""))
with open(perfdocs_sample["manifest"]["path"], "w", newline="\n") as f:
f.write(manifest)
@@ -587,7 +610,7 @@ def test_perfdocs_verifier_undocumented_metrics(
setup_sample_logger(logger, structured_logger, top_dir)
with open(perfdocs_sample["config"], "w", newline="\n") as f:
- f.write(SAMPLE_METRICS_CONFIG.format(metric_definitions, ""))
+ f.write(DYNAMIC_METRICS_CONFIG.format(metric_definitions, ""))
with open(perfdocs_sample["manifest"]["path"], "w", newline="\n") as f:
f.write(manifest)
@@ -619,6 +642,13 @@ metrics:
aliases:
- fcp
- SpeedIndex
+ - SpeedIndex2
+ description: "Example"
+ "FirstPaint2":
+ aliases:
+ - fcp
+ - SpeedIndex
+ - SpeedIndex2
description: "Example" """,
3,
],
@@ -629,12 +659,20 @@ metrics:
FirstPaint:
aliases:
- fcp
+ - SpeedIndex3
+ - SpeedIndex
description: Example
SpeedIndex:
aliases:
- speedindex
- si
description: Example
+ SpeedIndex3:
+ aliases:
+ - speedindex
+ - si
+ - fcp
+ description: Example
""",
5,
],
@@ -648,10 +686,7 @@ def test_perfdocs_verifier_duplicate_metrics(
setup_sample_logger(logger, structured_logger, top_dir)
with open(perfdocs_sample["config"], "w", newline="\n") as f:
- indented_defs = "\n".join(
- [(" " * 8) + metric_line for metric_line in metric_definitions.split("\n")]
- )
- f.write(SAMPLE_METRICS_CONFIG.format(metric_definitions, indented_defs))
+ f.write(DYNAMIC_METRICS_CONFIG.format(metric_definitions))
with open(perfdocs_sample["manifest"]["path"], "w", newline="\n") as f:
f.write(manifest)
@@ -710,7 +745,7 @@ def test_perfdocs_verifier_valid_metrics(
setup_sample_logger(logger, structured_logger, top_dir)
with open(perfdocs_sample["config"], "w", newline="\n") as f:
- f.write(SAMPLE_METRICS_CONFIG.format(metric_definitions, ""))
+ f.write(DYNAMIC_METRICS_CONFIG.format(metric_definitions, ""))
with open(perfdocs_sample["manifest"]["path"], "w", newline="\n") as f:
f.write(manifest)
@@ -836,7 +871,9 @@ def test_perfdocs_framework_gatherers_urls(logger, structured_logger, perfdocs_s
for test_name in tests.keys():
desc = gn._verifier._gatherer.framework_gatherers[
"raptor"
- ].build_test_description(fg, test_name, tests[test_name], suite_name)
+ ].build_test_description(
+ fg, test_name, tests[test_name], suite_name, {"fcp": {}}
+ )
assert f"**test url**: `<{url[0]['test_url']}>`__" in desc[0]
assert f"**expected**: {url[0]['expected']}" in desc[0]
assert test_name in desc[0]
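
One wrinkle in the fixtures above: SAMPLE_METRICS_CONFIG is now a complete, static YAML document, while DYNAMIC_METRICS_CONFIG stays a str.format template whose bare {} (after manifest: "None") is where each test splices its metric definitions. A toy illustration, with an invented metric block:

template = 'name: raptor\nmanifest: "None"{}\nstatic-only: False\n'
metric_definitions = """
metrics:
  FirstPaint:
    aliases:
      - fcp
    description: Example"""
print(template.format(metric_definitions))
# str.format ignores unused positional arguments, which is why calls such as
# DYNAMIC_METRICS_CONFIG.format(metric_definitions, "") above also work.
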
diff --git a/tools/lint/test/test_perfdocs_generation.py b/tools/lint/test/test_perfdocs_generation.py
index b9b540d234..7966ed0f12 100644
--- a/tools/lint/test/test_perfdocs_generation.py
+++ b/tools/lint/test/test_perfdocs_generation.py
@@ -51,6 +51,72 @@ def test_perfdocs_generator_generate_perfdocs_pass(
@mock.patch("perfdocs.logger.PerfDocLogger")
+def test_perfdocs_generator_generate_perfdocs_metrics_pass(
+ logger, structured_logger, perfdocs_sample
+):
+ from test_perfdocs import temp_file
+
+ top_dir = perfdocs_sample["top_dir"]
+ setup_sample_logger(logger, structured_logger, top_dir)
+
+ templates_dir = pathlib.Path(top_dir, "tools", "lint", "perfdocs", "templates")
+ templates_dir.mkdir(parents=True, exist_ok=True)
+
+ from perfdocs.generator import Generator
+ from perfdocs.verifier import Verifier
+
+ sample_gatherer_result = {
+ "suite": {"Example": {"metrics": ["fcp", "SpeedIndex"]}},
+ "another_suite": {"Example": {}},
+ }
+ sample_test_list_result = {
+ "suite": [{"metrics": ["fcp", "SpeedIndex"], "name": "Example"}],
+ "another_suite": [{"name": "Example"}],
+ }
+
+ with temp_file(
+ "metrics.rst",
+ tempdir=pathlib.Path(top_dir, "perfdocs"),
+ content="{metrics_documentation}",
+ ):
+ with mock.patch(
+ "perfdocs.framework_gatherers.RaptorGatherer.get_test_list"
+ ) as m:
+ m.return_value = sample_gatherer_result
+ with perfdocs_sample["config"].open("w") as f1, perfdocs_sample[
+ "config_metrics"
+ ].open("r") as f2:
+ # Overwrite content of config.yml with metrics config
+ f1.write(f2.read())
+
+ verifier = Verifier(top_dir)
+ verifier.validate_tree()
+
+ verifier._gatherer.framework_gatherers[
+ "raptor"
+ ]._descriptions = sample_test_list_result
+
+ generator = Generator(verifier, generate=True, workspace=top_dir)
+ with temp_file(
+ "index.rst", tempdir=templates_dir, content="{test_documentation}"
+ ):
+ generator.generate_perfdocs()
+
+ with pathlib.Path(generator.perfdocs_path, "raptor-metrics.rst").open() as f:
+ metrics_content = f.read()
+ assert "{metrics_documentation}" not in metrics_content
+ assert "a description" in metrics_content
+ assert "**Tests using it**" in metrics_content
+
+ with pathlib.Path(generator.perfdocs_path, "raptor.rst").open() as f:
+ raptor_index_content = f.read()
+ assert "{metrics_rst_name}" not in raptor_index_content
+ assert "raptor-metrics" in raptor_index_content
+
+ assert logger.warning.call_count == 0
+
+
+@mock.patch("perfdocs.logger.PerfDocLogger")
def test_perfdocs_generator_needed_regeneration(
logger, structured_logger, perfdocs_sample
):
@@ -227,7 +293,7 @@ def test_perfdocs_generator_build_perfdocs(logger, structured_logger, perfdocs_s
generator = Generator(verifier, generate=True, workspace=top_dir)
frameworks_info = generator.build_perfdocs_from_tree()
- expected = ["dynamic", "static"]
+ expected = ["dynamic", "metrics", "static"]
for framework in sorted(frameworks_info.keys()):
for i, framework_info in enumerate(frameworks_info[framework]):
diff --git a/tools/lint/trojan-source.yml b/tools/lint/trojan-source.yml
index a273626b58..7a0ac594c3 100644
--- a/tools/lint/trojan-source.yml
+++ b/tools/lint/trojan-source.yml
@@ -16,10 +16,10 @@ trojan-source:
- third_party/rust/clap_builder/src/output/textwrap/core.rs
- third_party/rust/textwrap/src/core.rs
- third_party/rust/icu_provider/src/hello_world.rs
- - third_party/rust/uniffi-example-rondpoint/tests/bindings/test_rondpoint.py
- third_party/rust/error-chain/tests/tests.rs
- third_party/rust/unicode-width/src/tests.rs
- security/nss/gtests/mozpkix_gtest/pkixnames_tests.cpp
+ - toolkit/components/uniffi-fixtures/rondpoint/tests/bindings/test_rondpoint.py
extensions:
- .c
- .cc