 tests/benchmark/conftest.py | 35 +++++++++++++++++++++++++++++++----
 1 file changed, 31 insertions(+), 4 deletions(-)
diff --git a/tests/benchmark/conftest.py b/tests/benchmark/conftest.py
index 61f2fa1..04ce54c 100644
--- a/tests/benchmark/conftest.py
+++ b/tests/benchmark/conftest.py
@@ -4,12 +4,14 @@
 """Fixtures for benchmarking ANTA."""
 
 import logging
+from collections import defaultdict
 
 import pytest
 import respx
 from _pytest.terminal import TerminalReporter
 
 from anta.catalog import AntaCatalog
+from anta.result_manager import ResultManager
 
 from .utils import AntaMockEnvironment
 
@@ -17,6 +19,12 @@ logger = logging.getLogger(__name__)
 
 TEST_CASE_COUNT = None
 
+# Used to globally configure the benchmarks by specifying parameters for inventories
+BENCHMARK_PARAMETERS = [
+    pytest.param({"count": 1, "disable_cache": True, "reachable": True}, id="1-device"),
+    pytest.param({"count": 2, "disable_cache": True, "reachable": True}, id="2-devices"),
+]
+
@pytest.fixture(name="anta_mock_env", scope="session") # We want this fixture to have a scope set to session to avoid reparsing all the unit tests data.
def anta_mock_env_fixture() -> AntaMockEnvironment:
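
With indirect=True (used in pytest_generate_tests below), pytest hands each parameter dict to the fixture named in the parametrize call rather than injecting it into the test directly; the fixture reads it from request.param. A minimal, self-contained sketch of that pattern, using a toy inventory fixture that simply returns the dict (ANTA's real inventory fixture is not part of this diff, and the test name is invented for illustration):

import pytest

# Same shape as BENCHMARK_PARAMETERS above; the ids become the test ids.
PARAMS = [
    pytest.param({"count": 1, "disable_cache": True, "reachable": True}, id="1-device"),
    pytest.param({"count": 2, "disable_cache": True, "reachable": True}, id="2-devices"),
]


@pytest.fixture
def inventory(request: pytest.FixtureRequest) -> dict:
    # With indirect=True the parameter lands here, not in the test function.
    return request.param


@pytest.mark.parametrize("inventory", PARAMS, indirect=True)
def test_inventory_params(inventory: dict) -> None:
    assert inventory["count"] in (1, 2)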
@@ -35,6 +43,22 @@ def catalog(anta_mock_env: AntaMockEnvironment) -> AntaCatalog:
     return anta_mock_env.catalog
 
 
+@pytest.fixture(name="session_results", scope="session") # We want this fixture to be reused across test modules within tests/benchmark
+def session_results_fixture() -> defaultdict[str, ResultManager]:
+ """Return a dictionary of ResultManger objects for the benchmarks.
+
+ The key is the test id as defined in the pytest_generate_tests in this module.
+ Used to pass a populated ResultManager from one benchmark to another.
+ """
+ return defaultdict(lambda: ResultManager())
+
+
+@pytest.fixture
+def results(request: pytest.FixtureRequest, session_results: defaultdict[str, ResultManager]) -> ResultManager:
+    """Return the unique ResultManager object for the current benchmark parameter."""
+    return session_results[request.node.callspec.id]
+
+
 def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None:
     """Display the total number of ANTA unit test cases used to benchmark."""
     terminalreporter.write_sep("=", f"{TEST_CASE_COUNT} ANTA test cases")
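
Because the results fixture keys session_results by request.node.callspec.id, any two tests parametrized with the same id share a single ResultManager instance for the whole session, which is what lets one benchmark hand its populated results to another. A minimal sketch of that handoff, using plain lists in place of ResultManager and invented names (session_store, store, test_producer, test_consumer), just to show the mechanics:

from collections import defaultdict

import pytest

PARAMS = [pytest.param(None, id="1-device")]


@pytest.fixture(scope="session")
def session_store() -> defaultdict[str, list]:
    # Same pattern as session_results_fixture, with lists instead of ResultManager.
    return defaultdict(list)


@pytest.fixture
def store(request: pytest.FixtureRequest, session_store: defaultdict[str, list]) -> list:
    return session_store[request.node.callspec.id]


@pytest.mark.parametrize("store", PARAMS, indirect=True)
def test_producer(store: list) -> None:
    store.append("report")  # stands in for a benchmark populating its ResultManager


@pytest.mark.parametrize("store", PARAMS, indirect=True)
def test_consumer(store: list) -> None:
    # Defined after test_producer, so it runs later and sees the same list,
    # because both tests carry the same callspec id ("1-device").
    assert store == ["report"]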
@@ -49,9 +73,12 @@ def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
             return
         metafunc.parametrize(
             "inventory",
-            [
-                pytest.param({"count": 1, "disable_cache": True, "reachable": True}, id="1-device"),
-                pytest.param({"count": 2, "disable_cache": True, "reachable": True}, id="2-devices"),
-            ],
+            BENCHMARK_PARAMETERS,
+            indirect=True,
+        )
+    elif "results" in metafunc.fixturenames:
+        metafunc.parametrize(
+            "results",
+            BENCHMARK_PARAMETERS,
             indirect=True,
         )
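
Both branches draw from the same BENCHMARK_PARAMETERS list, so the ids generated for inventory-based and results-based tests line up and session_results is keyed consistently across them. A scratch check (not part of this diff; fixture and test names are invented) that the callspec id seen by a fixture is exactly the id string passed to pytest.param:

import pytest


@pytest.fixture
def tagged(request: pytest.FixtureRequest) -> str:
    return request.node.callspec.id


@pytest.mark.parametrize("tagged", [pytest.param(None, id="1-device")], indirect=True)
def test_callspec_id(tagged: str, request: pytest.FixtureRequest) -> None:
    # The node name embeds the same id, e.g. "test_callspec_id[1-device]".
    assert request.node.name == f"test_callspec_id[{tagged}]"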