author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-10-15 20:30:44 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-10-15 20:30:44 +0000
commit    4a398db99d88dd17dabc408fb2b58c610792bc1e (patch)
tree      e5404d6d19a4d67a9428b3d10f886717b9756352 /tests/units/cli
parent    Adding upstream version 1.0.0. (diff)
Adding upstream version 1.1.0. (tag: upstream/1.1.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tests/units/cli')
-rw-r--r--  tests/units/cli/conftest.py            | 133
-rw-r--r--  tests/units/cli/debug/test_commands.py |  13
-rw-r--r--  tests/units/cli/exec/test_utils.py     | 223
-rw-r--r--  tests/units/cli/get/test_commands.py   |   3
-rw-r--r--  tests/units/cli/get/test_utils.py      |   1
-rw-r--r--  tests/units/cli/nrfu/test__init__.py   |  20
-rw-r--r--  tests/units/cli/nrfu/test_commands.py  | 111
7 files changed, 459 insertions(+), 45 deletions(-)
diff --git a/tests/units/cli/conftest.py b/tests/units/cli/conftest.py
new file mode 100644
index 0000000..e63e60e
--- /dev/null
+++ b/tests/units/cli/conftest.py
@@ -0,0 +1,133 @@
+# Copyright (c) 2023-2024 Arista Networks, Inc.
+# Use of this source code is governed by the Apache License 2.0
+# that can be found in the LICENSE file.
+"""See https://docs.pytest.org/en/stable/reference/fixtures.html#conftest-py-sharing-fixtures-across-multiple-files."""
+
+from __future__ import annotations
+
+import logging
+import shutil
+from typing import TYPE_CHECKING, Any
+from unittest.mock import patch
+
+import pytest
+from click.testing import CliRunner, Result
+
+import asynceapi
+from anta.cli.console import console
+
+if TYPE_CHECKING:
+ from collections.abc import Iterator
+ from pathlib import Path
+
+
+logger = logging.getLogger(__name__)
+
+
+MOCK_CLI_JSON: dict[str, asynceapi.EapiCommandError | dict[str, Any]] = {
+ "show version": {
+ "modelName": "DCS-7280CR3-32P4-F",
+ "version": "4.31.1F",
+ },
+ "enable": {},
+ "clear counters": {},
+ "clear hardware counter drop": {},
+ "undefined": asynceapi.EapiCommandError(
+ passed=[],
+ failed="show version",
+ errors=["Authorization denied for command 'show version'"],
+ errmsg="Invalid command",
+ not_exec=[],
+ ),
+}
+
+MOCK_CLI_TEXT: dict[str, asynceapi.EapiCommandError | str] = {
+ "show version": "Arista cEOSLab",
+ "bash timeout 10 ls -1t /mnt/flash/schedule/tech-support": "dummy_tech-support_2023-12-01.1115.log.gz\ndummy_tech-support_2023-12-01.1015.log.gz",
+ "bash timeout 10 ls -1t /mnt/flash/schedule/tech-support | head -1": "dummy_tech-support_2023-12-01.1115.log.gz",
+ "show running-config | include aaa authorization exec default": "aaa authorization exec default local",
+}
+
+
+@pytest.fixture
+def temp_env(anta_env: dict[str, str], tmp_path: Path) -> dict[str, str]:
+    """Fixture that creates a temporary copy of the ANTA inventory.
+
+    Tests can safely modify the copy; the returned environment variables point to it.
+    """
+ anta_inventory = str(anta_env["ANTA_INVENTORY"])
+ temp_inventory = tmp_path / "test_inventory.yml"
+ shutil.copy(anta_inventory, temp_inventory)
+ anta_env["ANTA_INVENTORY"] = str(temp_inventory)
+ return anta_env
+
+
+@pytest.fixture
+# Disabling C901 - too complex, but we want the runner structured this way
+def click_runner(capsys: pytest.CaptureFixture[str], anta_env: dict[str, str]) -> Iterator[CliRunner]: # noqa: C901
+ """Return a click.CliRunner for cli testing."""
+
+ class AntaCliRunner(CliRunner):
+ """Override CliRunner to inject specific variables for ANTA."""
+
+ def invoke(self, *args: Any, **kwargs: Any) -> Result: # noqa: ANN401
+ # Inject default env vars if not provided
+ kwargs["env"] = anta_env | kwargs.get("env", {})
+ # Deterministic terminal width
+ kwargs["env"]["COLUMNS"] = "165"
+
+ kwargs["auto_envvar_prefix"] = "ANTA"
+            # Workaround for https://github.com/pallets/click/issues/824
+ with capsys.disabled():
+ result = super().invoke(*args, **kwargs)
+ # disabling T201 as we want to print here
+ print("--- CLI Output ---") # noqa: T201
+ print(result.output) # noqa: T201
+ return result
+
+ def cli(
+ command: str | None = None,
+ commands: list[dict[str, Any]] | None = None,
+ ofmt: str = "json",
+ _version: int | str | None = "latest",
+ **_kwargs: Any, # noqa: ANN401
+ ) -> dict[str, Any] | list[dict[str, Any]]:
+ def get_output(command: str | dict[str, Any]) -> dict[str, Any]:
+ if isinstance(command, dict):
+ command = command["cmd"]
+ mock_cli: dict[str, Any]
+ if ofmt == "json":
+ mock_cli = MOCK_CLI_JSON
+ elif ofmt == "text":
+ mock_cli = MOCK_CLI_TEXT
+ for mock_cmd, output in mock_cli.items():
+ if command == mock_cmd:
+ logger.info("Mocking command %s", mock_cmd)
+ if isinstance(output, asynceapi.EapiCommandError):
+ raise output
+ return output
+ message = f"Command '{command}' is not mocked"
+ logger.critical(message)
+ raise NotImplementedError(message)
+
+ res: dict[str, Any] | list[dict[str, Any]]
+ if command is not None:
+ logger.debug("Mock input %s", command)
+ res = get_output(command)
+ if commands is not None:
+ logger.debug("Mock input %s", commands)
+ res = list(map(get_output, commands))
+ logger.debug("Mock output %s", res)
+ return res
+
+ # Patch asynceapi methods used by AsyncEOSDevice. See tests/units/test_device.py
+ with (
+ patch("asynceapi.device.Device.check_connection", return_value=True),
+ patch("asynceapi.device.Device.cli", side_effect=cli),
+ patch("asyncssh.connect"),
+ patch(
+ "asyncssh.scp",
+ ),
+ ):
+ console._color_system = None
+ yield AntaCliRunner()
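
For context, a minimal sketch of how a test could consume the click_runner fixture above. The test name is hypothetical; the invocation pattern mirrors the nrfu tests later in this diff, and the eAPI responses come from MOCK_CLI_JSON.

    from anta.cli import anta
    from anta.cli.utils import ExitCode
    from click.testing import CliRunner

    def test_nrfu_with_mocked_eapi(click_runner: CliRunner) -> None:
        """Hypothetical test: run `anta nrfu table` against the mocked eAPI."""
        result = click_runner.invoke(anta, ["nrfu", "table"])
        assert result.exit_code == ExitCode.OK
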
diff --git a/tests/units/cli/debug/test_commands.py b/tests/units/cli/debug/test_commands.py
index 039e09e..c802b0d 100644
--- a/tests/units/cli/debug/test_commands.py
+++ b/tests/units/cli/debug/test_commands.py
@@ -19,12 +19,12 @@ if TYPE_CHECKING:
@pytest.mark.parametrize(
("command", "ofmt", "version", "revision", "device", "failed"),
[
- pytest.param("show version", "json", None, None, "dummy", False, id="json command"),
- pytest.param("show version", "text", None, None, "dummy", False, id="text command"),
- pytest.param("show version", None, "latest", None, "dummy", False, id="version-latest"),
- pytest.param("show version", None, "1", None, "dummy", False, id="version"),
- pytest.param("show version", None, None, 3, "dummy", False, id="revision"),
- pytest.param("undefined", None, None, None, "dummy", True, id="command fails"),
+ pytest.param("show version", "json", None, None, "leaf1", False, id="json command"),
+ pytest.param("show version", "text", None, None, "leaf1", False, id="text command"),
+ pytest.param("show version", None, "latest", None, "leaf1", False, id="version-latest"),
+ pytest.param("show version", None, "1", None, "leaf1", False, id="version"),
+ pytest.param("show version", None, None, 3, "leaf1", False, id="revision"),
+ pytest.param("undefined", None, None, None, "leaf1", True, id="command fails"),
pytest.param("undefined", None, None, None, "doesnotexist", True, id="Device does not exist"),
],
)
@@ -38,7 +38,6 @@ def test_run_cmd(
failed: bool,
) -> None:
"""Test `anta debug run-cmd`."""
- # pylint: disable=too-many-arguments
cli_args = ["-l", "debug", "debug", "run-cmd", "--command", command, "--device", device]
# ofmt
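
The hunk above is truncated where cli_args is extended from the parametrized options; a hedged sketch of one resulting invocation (the --ofmt flag is inferred from the `# ofmt` comment and the test parameters, so treat it as an assumption):

    from anta.cli import anta
    from anta.cli.utils import ExitCode
    from click.testing import CliRunner

    def test_run_cmd_json(click_runner: CliRunner) -> None:
        """Hypothetical case mirroring the 'json command' pytest.param above."""
        cli_args = ["-l", "debug", "debug", "run-cmd", "--command", "show version", "--device", "leaf1", "--ofmt", "json"]
        result = click_runner.invoke(anta, cli_args)
        assert result.exit_code == ExitCode.OK
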
diff --git a/tests/units/cli/exec/test_utils.py b/tests/units/cli/exec/test_utils.py
index ad1a78a..503327a 100644
--- a/tests/units/cli/exec/test_utils.py
+++ b/tests/units/cli/exec/test_utils.py
@@ -5,17 +5,19 @@
from __future__ import annotations
+import logging
+from pathlib import Path
from typing import TYPE_CHECKING, Any
from unittest.mock import call, patch
import pytest
+import respx
-from anta.cli.exec.utils import (
- clear_counters,
-)
+from anta.cli.exec.utils import clear_counters, collect_commands
from anta.models import AntaCommand
+from anta.tools import safe_command
-# , collect_commands, collect_scheduled_show_tech
+# collect_scheduled_show_tech
if TYPE_CHECKING:
from anta.device import AntaDevice
@@ -23,55 +25,59 @@ if TYPE_CHECKING:
# TODO: complete test cases
-@pytest.mark.asyncio()
@pytest.mark.parametrize(
- ("inventory_state", "per_device_command_output", "tags"),
+ ("inventory", "inventory_state", "per_device_command_output", "tags"),
[
pytest.param(
+ {"count": 3},
{
- "dummy": {"is_online": False},
- "dummy2": {"is_online": False},
- "dummy3": {"is_online": False},
+ "device-0": {"is_online": False},
+ "device-1": {"is_online": False},
+ "device-2": {"is_online": False},
},
{},
None,
id="no_connected_device",
),
pytest.param(
+ {"count": 3},
{
- "dummy": {"is_online": True, "hw_model": "cEOSLab"},
- "dummy2": {"is_online": True, "hw_model": "vEOS-lab"},
- "dummy3": {"is_online": False},
+ "device-0": {"is_online": True, "hw_model": "cEOSLab"},
+ "device-1": {"is_online": True, "hw_model": "vEOS-lab"},
+ "device-2": {"is_online": False},
},
{},
None,
id="cEOSLab and vEOS-lab devices",
),
pytest.param(
+ {"count": 3},
{
- "dummy": {"is_online": True},
- "dummy2": {"is_online": True},
- "dummy3": {"is_online": False},
+ "device-0": {"is_online": True},
+ "device-1": {"is_online": True},
+ "device-2": {"is_online": False},
},
- {"dummy": None}, # None means the command failed to collect
+ {"device-0": None}, # None means the command failed to collect
None,
id="device with error",
),
pytest.param(
+ {"count": 3},
{
- "dummy": {"is_online": True},
- "dummy2": {"is_online": True},
- "dummy3": {"is_online": True},
+ "device-0": {"is_online": True},
+ "device-1": {"is_online": True},
+ "device-2": {"is_online": True},
},
{},
["spine"],
id="tags",
),
],
+ indirect=["inventory"],
)
async def test_clear_counters(
caplog: pytest.LogCaptureFixture,
- test_inventory: AntaInventory,
+ inventory: AntaInventory,
inventory_state: dict[str, Any],
per_device_command_output: dict[str, Any],
tags: set[str] | None,
@@ -80,12 +86,12 @@ async def test_clear_counters(
async def mock_connect_inventory() -> None:
"""Mock connect_inventory coroutine."""
- for name, device in test_inventory.items():
+ for name, device in inventory.items():
device.is_online = inventory_state[name].get("is_online", True)
device.established = inventory_state[name].get("established", device.is_online)
device.hw_model = inventory_state[name].get("hw_model", "dummy")
- async def collect(self: AntaDevice, command: AntaCommand, *args: Any, **kwargs: Any) -> None: # noqa: ARG001, ANN401 #pylint: disable=unused-argument
+ async def collect(self: AntaDevice, command: AntaCommand, *args: Any, **kwargs: Any) -> None: # noqa: ARG001, ANN401
"""Mock collect coroutine."""
command.output = per_device_command_output.get(self.name, "")
@@ -97,10 +103,10 @@ async def test_clear_counters(
side_effect=mock_connect_inventory,
) as mocked_connect_inventory,
):
- await clear_counters(test_inventory, tags=tags)
+ await clear_counters(inventory, tags=tags)
mocked_connect_inventory.assert_awaited_once()
- devices_established = test_inventory.get_inventory(established_only=True, tags=tags).devices
+ devices_established = inventory.get_inventory(established_only=True, tags=tags).devices
if devices_established:
# Building the list of calls
calls = []
@@ -142,3 +148,172 @@ async def test_clear_counters(
assert f"Could not clear counters on device {key}: []" in caplog.text
else:
mocked_collect.assert_not_awaited()
+
+
+# TODO: test with changing root_dir, test with failing to write (OSError)
+@pytest.mark.parametrize(
+ ("inventory", "inventory_state", "commands", "tags"),
+ [
+ pytest.param(
+ {"count": 1},
+ {
+ "device-0": {"is_online": False},
+ },
+ {"json_format": ["show version"]},
+ None,
+ id="no_connected_device",
+ ),
+ pytest.param(
+ {"count": 3},
+ {
+ "device-0": {"is_online": True},
+ "device-1": {"is_online": True},
+ "device-2": {"is_online": False},
+ },
+ {"json_format": ["show version", "show ip interface brief"]},
+ None,
+ id="JSON commands",
+ ),
+ pytest.param(
+ {"count": 3},
+ {
+ "device-0": {"is_online": True},
+ "device-1": {"is_online": True},
+ "device-2": {"is_online": False},
+ },
+ {"json_format": ["show version"], "text_format": ["show running-config", "show ip interface"]},
+ None,
+ id="Text commands",
+ ),
+ pytest.param(
+ {"count": 2},
+ {
+ "device-0": {"is_online": True, "tags": {"spine"}},
+ "device-1": {"is_online": True},
+ },
+ {"json_format": ["show version"]},
+ {"spine"},
+ id="tags",
+ ),
+        pytest.param(  # TODO: This test should not be here; wrong user input should be caught with pydantic.
+ {"count": 1},
+ {
+ "device-0": {"is_online": True},
+ },
+ {"blah_format": ["42"]},
+ None,
+ id="bad-input",
+ ),
+ pytest.param(
+ {"count": 1},
+ {
+ "device-0": {"is_online": True},
+ },
+ {"json_format": ["undefined command", "show version"]},
+ None,
+ id="command-failed-to-be-collected",
+ ),
+ pytest.param(
+ {"count": 1},
+ {
+ "device-0": {"is_online": True},
+ },
+ {"json_format": ["uncaught exception"]},
+ None,
+ id="uncaught-exception",
+ ),
+ ],
+ indirect=["inventory"],
+)
+async def test_collect_commands(
+ caplog: pytest.LogCaptureFixture,
+ tmp_path: Path,
+ inventory: AntaInventory,
+ inventory_state: dict[str, Any],
+ commands: dict[str, list[str]],
+ tags: set[str] | None,
+) -> None:
+ """Test anta.cli.exec.utils.collect_commands."""
+ caplog.set_level(logging.INFO)
+ root_dir = tmp_path
+
+ async def mock_connect_inventory() -> None:
+ """Mock connect_inventory coroutine."""
+ for name, device in inventory.items():
+ device.is_online = inventory_state[name].get("is_online", True)
+ device.established = inventory_state[name].get("established", device.is_online)
+ device.hw_model = inventory_state[name].get("hw_model", "dummy")
+ device.tags = inventory_state[name].get("tags", set())
+
+ # Need to patch the child device class
+ # ruff: noqa: C901
+ with (
+ respx.mock,
+ patch(
+ "anta.inventory.AntaInventory.connect_inventory",
+ side_effect=mock_connect_inventory,
+ ) as mocked_connect_inventory,
+ ):
+ # Mocking responses from devices
+ respx.post(path="/command-api", headers={"Content-Type": "application/json-rpc"}, json__params__cmds__0__cmd="show version").respond(
+ json={"result": [{"toto": 42}]}
+ )
+ respx.post(path="/command-api", headers={"Content-Type": "application/json-rpc"}, json__params__cmds__0__cmd="show ip interface brief").respond(
+ json={"result": [{"toto": 42}]}
+ )
+ respx.post(path="/command-api", headers={"Content-Type": "application/json-rpc"}, json__params__cmds__0__cmd="show running-config").respond(
+ json={"result": [{"output": "blah"}]}
+ )
+ respx.post(path="/command-api", headers={"Content-Type": "application/json-rpc"}, json__params__cmds__0__cmd="show ip interface").respond(
+ json={"result": [{"output": "blah"}]}
+ )
+ respx.post(path="/command-api", headers={"Content-Type": "application/json-rpc"}, json__params__cmds__0__cmd="undefined command").respond(
+ json={
+ "error": {
+ "code": 1002,
+ "message": "CLI command 1 of 1 'undefined command' failed: invalid command",
+ "data": [{"errors": ["Invalid input (at token 0: 'undefined')"]}],
+ }
+ }
+ )
+ await collect_commands(inventory, commands, root_dir, tags=tags)
+
+ mocked_connect_inventory.assert_awaited_once()
+ devices_established = inventory.get_inventory(established_only=True, tags=tags or None).devices
+ if not devices_established:
+ assert "INFO" in caplog.text
+ assert "No online device found. Exiting" in caplog.text
+ return
+
+ for device in devices_established:
+ # Verify tags selection
+ assert device.tags.intersection(tags) != {} if tags else True
+ json_path = root_dir / device.name / "json"
+ text_path = root_dir / device.name / "text"
+ if "json_format" in commands:
+ # Handle undefined command
+ if "undefined command" in commands["json_format"]:
+ assert "ERROR" in caplog.text
+ assert "Command 'undefined command' failed on device-0: Invalid input (at token 0: 'undefined')" in caplog.text
+ # Verify we don't claim it was collected
+ assert f"Collected command 'undefined command' from device {device.name}" not in caplog.text
+ commands["json_format"].remove("undefined command")
+ # Handle uncaught exception
+ elif "uncaught exception" in commands["json_format"]:
+ assert "ERROR" in caplog.text
+ assert "Error when collecting commands: " in caplog.text
+ # Verify we don't claim it was collected
+ assert f"Collected command 'uncaught exception' from device {device.name}" not in caplog.text
+ commands["json_format"].remove("uncaught exception")
+
+ assert json_path.is_dir()
+ assert len(list(Path.iterdir(json_path))) == len(commands["json_format"])
+ for command in commands["json_format"]:
+ assert Path.is_file(json_path / f"{safe_command(command)}.json")
+ assert f"Collected command '{command}' from device {device.name}" in caplog.text
+ if "text_format" in commands:
+ assert text_path.is_dir()
+ assert len(list(text_path.iterdir())) == len(commands["text_format"])
+ for command in commands["text_format"]:
+ assert Path.is_file(text_path / f"{safe_command(command)}.log")
+ assert f"Collected command '{command}' from device {device.name}" in caplog.text
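
The respx routes above match on a nested field of the JSON-RPC request body via double-underscore lookups. A standalone sketch of that pattern, with an illustrative URL and payload:

    import httpx
    import respx

    @respx.mock
    def fetch_show_version() -> dict:
        # Match any POST whose JSON body has params.cmds[0].cmd == "show version"
        respx.post("https://switch/command-api", json__params__cmds__0__cmd="show version").respond(
            json={"result": [{"version": "4.31.1F"}]}
        )
        payload = {"jsonrpc": "2.0", "method": "runCmds", "params": {"cmds": [{"cmd": "show version"}]}}
        return httpx.post("https://switch/command-api", json=payload).json()
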
diff --git a/tests/units/cli/get/test_commands.py b/tests/units/cli/get/test_commands.py
index 1e8c6e9..ff3d922 100644
--- a/tests/units/cli/get/test_commands.py
+++ b/tests/units/cli/get/test_commands.py
@@ -42,7 +42,6 @@ def test_from_cvp(
cv_token_failure: bool,
cvp_connect_failure: bool,
) -> None:
- # pylint: disable=too-many-arguments
# ruff: noqa: C901
"""Test `anta get from-cvp`.
@@ -144,7 +143,6 @@ def test_from_ansible(
expected_exit: int,
expected_log: str | None,
) -> None:
- # pylint: disable=too-many-arguments
"""Test `anta get from-ansible`.
This test verifies:
@@ -230,7 +228,6 @@ def test_from_ansible_overwrite(
expected_exit: int,
expected_log: str | None,
) -> None:
- # pylint: disable=too-many-arguments
"""Test `anta get from-ansible` overwrite mechanism.
The test uses a static ansible-inventory and output as these are tested in other functions
diff --git a/tests/units/cli/get/test_utils.py b/tests/units/cli/get/test_utils.py
index e105f94..46ce14f 100644
--- a/tests/units/cli/get/test_utils.py
+++ b/tests/units/cli/get/test_utils.py
@@ -144,7 +144,6 @@ def test_create_inventory_from_ansible(
expected_inv_length: int,
) -> None:
"""Test anta.get.utils.create_inventory_from_ansible."""
- # pylint: disable=R0913
target_file = tmp_path / "inventory.yml"
inventory_file_path = DATA_DIR / inventory_filename
diff --git a/tests/units/cli/nrfu/test__init__.py b/tests/units/cli/nrfu/test__init__.py
index a9dcd9c..d08499c 100644
--- a/tests/units/cli/nrfu/test__init__.py
+++ b/tests/units/cli/nrfu/test__init__.py
@@ -9,7 +9,6 @@ from typing import TYPE_CHECKING
from anta.cli import anta
from anta.cli.utils import ExitCode
-from tests.lib.utils import default_anta_env
if TYPE_CHECKING:
from click.testing import CliRunner
@@ -49,10 +48,16 @@ def test_anta_nrfu_dry_run(click_runner: CliRunner) -> None:
assert "Dry-run" in result.output
+def test_anta_nrfu_wrong_catalog_format(click_runner: CliRunner) -> None:
+    """Test anta nrfu --dry-run with an invalid --catalog-format value."""
+ result = click_runner.invoke(anta, ["nrfu", "--dry-run", "--catalog-format", "toto"])
+ assert result.exit_code == ExitCode.USAGE_ERROR
+ assert "Invalid value for '--catalog-format': 'toto' is not one of 'yaml', 'json'." in result.output
+
+
def test_anta_password_required(click_runner: CliRunner) -> None:
"""Test that password is provided."""
- env = default_anta_env()
- env["ANTA_PASSWORD"] = None
+ env = {"ANTA_PASSWORD": None}
result = click_runner.invoke(anta, ["nrfu"], env=env)
assert result.exit_code == ExitCode.USAGE_ERROR
@@ -61,8 +66,7 @@ def test_anta_password_required(click_runner: CliRunner) -> None:
def test_anta_password(click_runner: CliRunner) -> None:
"""Test that password can be provided either via --password or --prompt."""
- env = default_anta_env()
- env["ANTA_PASSWORD"] = None
+ env = {"ANTA_PASSWORD": None}
result = click_runner.invoke(anta, ["nrfu", "--password", "secret"], env=env)
assert result.exit_code == ExitCode.OK
result = click_runner.invoke(anta, ["nrfu", "--prompt"], input="password\npassword\n", env=env)
@@ -113,3 +117,9 @@ def test_disable_cache(click_runner: CliRunner) -> None:
if "disable_cache" in line:
assert "True" in line
assert result.exit_code == ExitCode.OK
+
+
+def test_hide(click_runner: CliRunner) -> None:
+ """Test the `--hide` option of the `anta nrfu` command."""
+ result = click_runner.invoke(anta, ["nrfu", "--hide", "success", "text"])
+ assert "SUCCESS" not in result.output
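
The env handling in these tests relies on two behaviors: AntaCliRunner merges the per-test env over the anta_env fixture defaults (see conftest.py above), and click's CliRunner removes variables whose value is None during isolation. A small worked example with hypothetical defaults:

    # Hypothetical values standing in for the anta_env fixture.
    defaults = {"ANTA_USERNAME": "admin", "ANTA_PASSWORD": "secret"}
    override = {"ANTA_PASSWORD": None}
    merged = defaults | override
    assert merged == {"ANTA_USERNAME": "admin", "ANTA_PASSWORD": None}
    # click unsets ANTA_PASSWORD, so `anta nrfu` exits with a usage error.
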
diff --git a/tests/units/cli/nrfu/test_commands.py b/tests/units/cli/nrfu/test_commands.py
index e2b5031..6a2624c 100644
--- a/tests/units/cli/nrfu/test_commands.py
+++ b/tests/units/cli/nrfu/test_commands.py
@@ -8,7 +8,8 @@ from __future__ import annotations
import json
import re
from pathlib import Path
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Any
+from unittest.mock import patch
from anta.cli import anta
from anta.cli.utils import ExitCode
@@ -51,7 +52,7 @@ def test_anta_nrfu_table(click_runner: CliRunner) -> None:
"""Test anta nrfu, catalog is given via env."""
result = click_runner.invoke(anta, ["nrfu", "table"])
assert result.exit_code == ExitCode.OK
- assert "dummy │ VerifyEOSVersion │ success" in result.output
+ assert "leaf1 │ VerifyEOSVersion │ success" in result.output
def test_anta_nrfu_table_group_by_device(click_runner: CliRunner) -> None:
@@ -72,7 +73,7 @@ def test_anta_nrfu_text(click_runner: CliRunner) -> None:
"""Test anta nrfu, catalog is given via env."""
result = click_runner.invoke(anta, ["nrfu", "text"])
assert result.exit_code == ExitCode.OK
- assert "dummy :: VerifyEOSVersion :: SUCCESS" in result.output
+ assert "leaf1 :: VerifyEOSVersion :: SUCCESS" in result.output
def test_anta_nrfu_json(click_runner: CliRunner) -> None:
@@ -84,13 +85,113 @@ def test_anta_nrfu_json(click_runner: CliRunner) -> None:
assert match is not None
result_list = json.loads(match.group())
for res in result_list:
- if res["name"] == "dummy":
+ if res["name"] == "leaf1":
assert res["test"] == "VerifyEOSVersion"
assert res["result"] == "success"
+def test_anta_nrfu_json_output(click_runner: CliRunner, tmp_path: Path) -> None:
+ """Test anta nrfu json with output file."""
+ json_output = tmp_path / "test.json"
+ result = click_runner.invoke(anta, ["nrfu", "json", "--output", str(json_output)])
+
+ # Making sure the output is not printed to stdout
+ match = re.search(r"\[\n {2}{[\s\S]+ {2}}\n\]", result.output)
+ assert match is None
+
+ assert result.exit_code == ExitCode.OK
+ assert "JSON results saved to" in result.output
+ assert json_output.exists()
+
+
+def test_anta_nrfu_json_output_failure(click_runner: CliRunner, tmp_path: Path) -> None:
+    """Test anta nrfu json with output file when saving fails."""
+ json_output = tmp_path / "test.json"
+
+ original_open = Path.open
+
+ def mock_path_open(*args: Any, **kwargs: Any) -> Path: # noqa: ANN401
+ """Mock Path.open only for the json_output file of this test."""
+ if args[0] == json_output:
+ msg = "Simulated OSError"
+ raise OSError(msg)
+
+ # If not the json_output file, call the original Path.open
+ return original_open(*args, **kwargs)
+
+ with patch("pathlib.Path.open", mock_path_open):
+ result = click_runner.invoke(anta, ["nrfu", "json", "--output", str(json_output)])
+
+ assert result.exit_code == ExitCode.USAGE_ERROR
+ assert "Failed to save JSON results to" in result.output
+ assert not json_output.exists()
+
+
def test_anta_nrfu_template(click_runner: CliRunner) -> None:
"""Test anta nrfu, catalog is given via env."""
result = click_runner.invoke(anta, ["nrfu", "tpl-report", "--template", str(DATA_DIR / "template.j2")])
assert result.exit_code == ExitCode.OK
- assert "* VerifyEOSVersion is SUCCESS for dummy" in result.output
+ assert "* VerifyEOSVersion is SUCCESS for leaf1" in result.output
+
+
+def test_anta_nrfu_csv(click_runner: CliRunner, tmp_path: Path) -> None:
+ """Test anta nrfu csv."""
+ csv_output = tmp_path / "test.csv"
+ result = click_runner.invoke(anta, ["nrfu", "csv", "--csv-output", str(csv_output)])
+ assert result.exit_code == ExitCode.OK
+ assert "CSV report saved to" in result.output
+ assert csv_output.exists()
+
+
+def test_anta_nrfu_csv_failure(click_runner: CliRunner, tmp_path: Path) -> None:
+    """Test anta nrfu csv when the report generation fails."""
+ csv_output = tmp_path / "test.csv"
+ with patch("anta.reporter.csv_reporter.ReportCsv.generate", side_effect=OSError()):
+ result = click_runner.invoke(anta, ["nrfu", "csv", "--csv-output", str(csv_output)])
+ assert result.exit_code == ExitCode.USAGE_ERROR
+ assert "Failed to save CSV report to" in result.output
+ assert not csv_output.exists()
+
+
+def test_anta_nrfu_md_report(click_runner: CliRunner, tmp_path: Path) -> None:
+ """Test anta nrfu md-report."""
+ md_output = tmp_path / "test.md"
+ result = click_runner.invoke(anta, ["nrfu", "md-report", "--md-output", str(md_output)])
+ assert result.exit_code == ExitCode.OK
+ assert "Markdown report saved to" in result.output
+ assert md_output.exists()
+
+
+def test_anta_nrfu_md_report_failure(click_runner: CliRunner, tmp_path: Path) -> None:
+ """Test anta nrfu md-report failure."""
+ md_output = tmp_path / "test.md"
+ with patch("anta.reporter.md_reporter.MDReportGenerator.generate", side_effect=OSError()):
+ result = click_runner.invoke(anta, ["nrfu", "md-report", "--md-output", str(md_output)])
+
+ assert result.exit_code == ExitCode.USAGE_ERROR
+ assert "Failed to save Markdown report to" in result.output
+ assert not md_output.exists()
+
+
+def test_anta_nrfu_md_report_with_hide(click_runner: CliRunner, tmp_path: Path) -> None:
+ """Test anta nrfu md-report with the `--hide` option."""
+ md_output = tmp_path / "test.md"
+ result = click_runner.invoke(anta, ["nrfu", "--hide", "success", "md-report", "--md-output", str(md_output)])
+
+ assert result.exit_code == ExitCode.OK
+ assert "Markdown report saved to" in result.output
+ assert md_output.exists()
+
+ with md_output.open("r", encoding="utf-8") as f:
+ content = f.read()
+
+ # Use regex to find the "Total Tests Success" value
+ match = re.search(r"\| (\d+) \| (\d+) \| \d+ \| \d+ \| \d+ \|", content)
+
+ assert match is not None
+
+ total_tests = int(match.group(1))
+ total_tests_success = int(match.group(2))
+
+ assert total_tests == 0
+ assert total_tests_success == 0
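
As a worked example, the summary-row regex above applied to a hypothetical md-report line; the five columns are assumed to start with Total Tests and Total Tests Success, followed by three further counters:

    import re

    row = "| 0 | 0 | 0 | 0 | 0 |"  # hypothetical summary row after --hide success
    match = re.search(r"\| (\d+) \| (\d+) \| \d+ \| \d+ \| \d+ \|", row)
    assert match is not None
    assert int(match.group(1)) == 0  # Total Tests
    assert int(match.group(2)) == 0  # Total Tests Success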