Diffstat (limited to 'tools/lint/test')

-rw-r--r--  tools/lint/test/conftest.py                  |  8
-rw-r--r--  tools/lint/test/test_perfdocs.py             | 59
-rw-r--r--  tools/lint/test/test_perfdocs_generation.py  | 68

3 files changed, 122 insertions(+), 13 deletions(-)
diff --git a/tools/lint/test/conftest.py b/tools/lint/test/conftest.py
index ad88f8aa97..94ac511804 100644
--- a/tools/lint/test/conftest.py
+++ b/tools/lint/test/conftest.py
@@ -260,6 +260,7 @@ def perfdocs_sample():
         DYNAMIC_SAMPLE_CONFIG,
         SAMPLE_CONFIG,
         SAMPLE_INI,
+        SAMPLE_METRICS_CONFIG,
         SAMPLE_TEST,
         temp_dir,
         temp_file,
@@ -291,7 +292,11 @@
     ) as tmpconfig, temp_file(
         "config_2.yml", tempdir=perfdocs_dir, content=DYNAMIC_SAMPLE_CONFIG
     ) as tmpconfig_2, temp_file(
-        "index.rst", tempdir=perfdocs_dir, content="{documentation}"
+        "config_metrics.yml", tempdir=perfdocs_dir, content=SAMPLE_METRICS_CONFIG
+    ) as tmpconfig_metrics, temp_file(
+        "index.rst",
+        tempdir=perfdocs_dir,
+        content="{metrics_rst_name}{documentation}",
     ) as tmpindex:
         yield {
             "top_dir": tmpdir,
@@ -301,5 +306,6 @@
             "test": tmptest,
             "config": tmpconfig,
             "config_2": tmpconfig_2,
+            "config_metrics": tmpconfig_metrics,
             "index": tmpindex,
         }
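
The fixture above chains several temp_file() context managers imported from
test_perfdocs. For orientation, a minimal sketch of what such a helper could
look like (hypothetical reconstruction; the real helper lives in
tools/lint/test/test_perfdocs.py and may differ):

    import contextlib
    import pathlib
    import tempfile

    @contextlib.contextmanager
    def temp_file(name, tempdir=None, content=None):
        # Create `name` under `tempdir` (or a fresh temporary directory),
        # write `content` into it, yield the path, then clean up on exit.
        with contextlib.ExitStack() as stack:
            if tempdir is None:
                tempdir = stack.enter_context(tempfile.TemporaryDirectory())
            path = pathlib.Path(tempdir, name)
            path.write_text(content or "")
            try:
                yield path
            finally:
                path.unlink()
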
diff --git a/tools/lint/test/test_perfdocs.py b/tools/lint/test/test_perfdocs.py
index 4ee834ad68..5a12c82d4d 100644
--- a/tools/lint/test/test_perfdocs.py
+++ b/tools/lint/test/test_perfdocs.py
@@ -85,11 +85,32 @@ suites:
 
 SAMPLE_METRICS_CONFIG = """
 name: raptor
+manifest: "None"
+metrics:
+    'test':
+        aliases: [t1, t2]
+        description: a description
+        matcher: f.*|S.*
+static-only: False
+suites:
+    suite:
+        description: "Performance tests from the 'suite' folder."
+        tests:
+            Example: "Performance test Example from another_suite."
+    another_suite:
+        description: "Performance tests from the 'another_suite' folder."
+        tests:
+            Example: "Performance test Example from another_suite."
+"""
+
+
+DYNAMIC_METRICS_CONFIG = """
+name: raptor
 manifest: "None"{}
 static-only: False
 suites:
     suite:
-        description: "Performance tests from the 'suite' folder."{}
+        description: "Performance tests from the 'suite' folder."
         tests:
             Example: "Performance test Example from another_suite."
     another_suite:
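
A note on the new matcher field: it is a regular expression, and given the
metric names used throughout these tests (fcp, SpeedIndex), f.*|S.* is
presumably matched against metric names. A quick illustration of that
assumption:

    import re

    matcher = re.compile("f.*|S.*")
    for metric in ("fcp", "SpeedIndex", "loadtime"):
        print(metric, bool(matcher.match(metric)))
    # fcp True / SpeedIndex True / loadtime False
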
@@ -324,7 +345,9 @@ def test_perfdocs_verifier_validate_rst_pass(
 
     from perfdocs.verifier import Verifier
 
-    valid = Verifier(top_dir).validate_rst_content(pathlib.Path(rst_path))
+    valid = Verifier(top_dir).validate_rst_content(
+        pathlib.Path(rst_path), expected_str="{documentation}"
+    )
 
     assert valid
@@ -347,7 +370,7 @@ def test_perfdocs_verifier_invalid_rst(logger, structured_logger, perfdocs_sample):
     from perfdocs.verifier import Verifier
 
     verifier = Verifier("top_dir")
-    valid = verifier.validate_rst_content(rst_path)
+    valid = verifier.validate_rst_content(rst_path, expected_str="{documentation}")
 
     expected = (
         "Cannot find a '{documentation}' entry in the given index file",
@@ -532,7 +555,7 @@ def test_perfdocs_verifier_nonexistent_documented_metrics(
     setup_sample_logger(logger, structured_logger, top_dir)
 
     with open(perfdocs_sample["config"], "w", newline="\n") as f:
-        f.write(SAMPLE_METRICS_CONFIG.format(metric_definitions, ""))
+        f.write(DYNAMIC_METRICS_CONFIG.format(metric_definitions, ""))
     with open(perfdocs_sample["manifest"]["path"], "w", newline="\n") as f:
         f.write(manifest)
@@ -587,7 +610,7 @@ def test_perfdocs_verifier_undocumented_metrics(
     setup_sample_logger(logger, structured_logger, top_dir)
 
     with open(perfdocs_sample["config"], "w", newline="\n") as f:
-        f.write(SAMPLE_METRICS_CONFIG.format(metric_definitions, ""))
+        f.write(DYNAMIC_METRICS_CONFIG.format(metric_definitions, ""))
     with open(perfdocs_sample["manifest"]["path"], "w", newline="\n") as f:
         f.write(manifest)
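
Note that DYNAMIC_METRICS_CONFIG now carries a single {} placeholder (the
second one was removed from the suite description above), yet these call
sites still pass two arguments. That is safe because str.format silently
ignores surplus positional arguments:

    template = 'manifest: "None"{}\nstatic-only: False'
    print(template.format("\nmetrics: ...", ""))  # the trailing "" is ignored
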
@@ -619,6 +642,13 @@ metrics:
         aliases:
             - fcp
             - SpeedIndex
+            - SpeedIndex2
+        description: "Example"
+    "FirstPaint2":
+        aliases:
+            - fcp
+            - SpeedIndex
+            - SpeedIndex2
         description: "Example" """,
             3,
         ],
@@ -629,12 +659,20 @@ metrics:
     FirstPaint:
         aliases:
             - fcp
+            - SpeedIndex3
+            - SpeedIndex
         description: Example
     SpeedIndex:
         aliases:
             - speedindex
             - si
         description: Example
+    SpeedIndex3:
+        aliases:
+            - speedindex
+            - si
+            - fcp
+        description: Example
 """,
             5,
         ],
@@ -648,10 +686,7 @@ def test_perfdocs_verifier_duplicate_metrics(
     setup_sample_logger(logger, structured_logger, top_dir)
 
     with open(perfdocs_sample["config"], "w", newline="\n") as f:
-        indented_defs = "\n".join(
-            [(" " * 8) + metric_line for metric_line in metric_definitions.split("\n")]
-        )
-        f.write(SAMPLE_METRICS_CONFIG.format(metric_definitions, indented_defs))
+        f.write(DYNAMIC_METRICS_CONFIG.format(metric_definitions))
     with open(perfdocs_sample["manifest"]["path"], "w", newline="\n") as f:
         f.write(manifest)
@@ -710,7 +745,7 @@ def test_perfdocs_verifier_valid_metrics(
    setup_sample_logger(logger, structured_logger, top_dir)
 
     with open(perfdocs_sample["config"], "w", newline="\n") as f:
-        f.write(SAMPLE_METRICS_CONFIG.format(metric_definitions, ""))
+        f.write(DYNAMIC_METRICS_CONFIG.format(metric_definitions, ""))
     with open(perfdocs_sample["manifest"]["path"], "w", newline="\n") as f:
         f.write(manifest)
@@ -836,7 +871,9 @@ def test_perfdocs_framework_gatherers_urls(logger, structured_logger, perfdocs_sample):
         for test_name in tests.keys():
             desc = gn._verifier._gatherer.framework_gatherers[
                 "raptor"
-            ].build_test_description(fg, test_name, tests[test_name], suite_name)
+            ].build_test_description(
+                fg, test_name, tests[test_name], suite_name, {"fcp": {}}
+            )
             assert f"**test url**: `<{url[0]['test_url']}>`__" in desc[0]
             assert f"**expected**: {url[0]['expected']}" in desc[0]
             assert test_name in desc[0]
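
The assertions above lean on reStructuredText conventions in the generated
description: **text** renders bold, and `<URL>`__ is an anonymous inline
hyperlink. A passing description therefore presumably contains lines like
(URL and expected value hypothetical):

    **test url**: `<https://example.com/browsertime-test>`__
    **expected**: pass
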
diff --git a/tools/lint/test/test_perfdocs_generation.py b/tools/lint/test/test_perfdocs_generation.py
index b9b540d234..7966ed0f12 100644
--- a/tools/lint/test/test_perfdocs_generation.py
+++ b/tools/lint/test/test_perfdocs_generation.py
@@ -51,6 +51,72 @@ def test_perfdocs_generator_generate_perfdocs_pass(
 
 
 @mock.patch("perfdocs.logger.PerfDocLogger")
+def test_perfdocs_generator_generate_perfdocs_metrics_pass(
+    logger, structured_logger, perfdocs_sample
+):
+    from test_perfdocs import temp_file
+
+    top_dir = perfdocs_sample["top_dir"]
+    setup_sample_logger(logger, structured_logger, top_dir)
+
+    templates_dir = pathlib.Path(top_dir, "tools", "lint", "perfdocs", "templates")
+    templates_dir.mkdir(parents=True, exist_ok=True)
+
+    from perfdocs.generator import Generator
+    from perfdocs.verifier import Verifier
+
+    sample_gatherer_result = {
+        "suite": {"Example": {"metrics": ["fcp", "SpeedIndex"]}},
+        "another_suite": {"Example": {}},
+    }
+    sample_test_list_result = {
+        "suite": [{"metrics": ["fcp", "SpeedIndex"], "name": "Example"}],
+        "another_suite": [{"name": "Example"}],
+    }
+
+    with temp_file(
+        "metrics.rst",
+        tempdir=pathlib.Path(top_dir, "perfdocs"),
+        content="{metrics_documentation}",
+    ):
+        with mock.patch(
+            "perfdocs.framework_gatherers.RaptorGatherer.get_test_list"
+        ) as m:
+            m.return_value = sample_gatherer_result
+            with perfdocs_sample["config"].open("w") as f1, perfdocs_sample[
+                "config_metrics"
+            ].open("r") as f2:
+                # Overwrite content of config.yml with metrics config
+                f1.write(f2.read())
+
+            verifier = Verifier(top_dir)
+            verifier.validate_tree()
+
+            verifier._gatherer.framework_gatherers[
+                "raptor"
+            ]._descriptions = sample_test_list_result
+
+            generator = Generator(verifier, generate=True, workspace=top_dir)
+            with temp_file(
+                "index.rst", tempdir=templates_dir, content="{test_documentation}"
+            ):
+                generator.generate_perfdocs()
+
+    with pathlib.Path(generator.perfdocs_path, "raptor-metrics.rst").open() as f:
+        metrics_content = f.read()
+        assert "{metrics_documentation}" not in metrics_content
+        assert "a description" in metrics_content
+        assert "**Tests using it**" in metrics_content
+
+    with pathlib.Path(generator.perfdocs_path, "raptor.rst").open() as f:
+        raptor_index_content = f.read()
+        assert "{metrics_rst_name}" not in raptor_index_content
+        assert "raptor-metrics" in raptor_index_content
+
+    assert logger.warning.call_count == 0
+
+
+@mock.patch("perfdocs.logger.PerfDocLogger")
 def test_perfdocs_generator_needed_regeneration(
     logger, structured_logger, perfdocs_sample
 ):
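
Both template files in this test use brace placeholders ({metrics_documentation}
in metrics.rst, and {metrics_rst_name}{documentation} in the sample index.rst
from conftest.py), and the assertions only require that generation consumed
every placeholder. A minimal sketch of the str.format-style substitution the
test implies (values hypothetical; the generator's actual API may differ):

    index_template = "{metrics_rst_name}{documentation}"
    rendered = index_template.format(
        metrics_rst_name="raptor-metrics\n",  # hypothetical value
        documentation="example_pages\n",  # hypothetical value
    )
    assert "{metrics_rst_name}" not in rendered
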
@@ -227,7 +293,7 @@ def test_perfdocs_generator_build_perfdocs(logger, structured_logger, perfdocs_sample):
     generator = Generator(verifier, generate=True, workspace=top_dir)
     frameworks_info = generator.build_perfdocs_from_tree()
 
-    expected = ["dynamic", "static"]
+    expected = ["dynamic", "metrics", "static"]
     for framework in sorted(frameworks_info.keys()):
         for i, framework_info in enumerate(frameworks_info[framework]):
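
With config_metrics.yml now part of the sample tree (see the conftest.py
change above), the tree walk yields three configs, and the expected names are
checked in alphabetical order, which is what sorted() would produce:

    assert sorted(["static", "dynamic", "metrics"]) == ["dynamic", "metrics", "static"]
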