author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2025-01-14 10:18:19 +0000
---|---|---
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2025-01-14 10:18:29 +0000
commit | 6818d016122ee845a2011b94bbdad0ed28a9aae7 (patch)
tree | e9865932680acf05b8c353347cf362ab3fd10ff0
parent | Releasing debian version 1.1.0-1. (diff)
download | anta-6818d016122ee845a2011b94bbdad0ed28a9aae7.tar.xz anta-6818d016122ee845a2011b94bbdad0ed28a9aae7.zip
Merging upstream version 1.2.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
154 files changed, 7131 insertions, 4765 deletions
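
Among the changes merged below, `anta/decorators.py` gains a `deprecated_test_class` decorator and `anta/models.py` now derives a test's `name` and `description` from the class name and docstring. The following is a minimal sketch, not part of the commit, of how the new decorator could be applied to an `AntaTest` subclass; `VerifyExampleLegacy` and `VerifyExampleReplacement` are hypothetical names used for illustration only.

```python
# Minimal sketch assuming the ANTA 1.2.0 code merged in this commit.
# VerifyExampleLegacy / VerifyExampleReplacement are hypothetical test names.
from anta.decorators import deprecated_test_class
from anta.models import AntaCommand, AntaTest


@deprecated_test_class(new_tests=["VerifyExampleReplacement"], removal_in_version="v2.0.0")
class VerifyExampleLegacy(AntaTest):
    """Verify a condition that the (hypothetical) VerifyExampleReplacement test now covers."""

    categories = ["example"]
    commands = [AntaCommand(command="show version")]

    @AntaTest.anta_test
    def test(self) -> None:
        """Set the result; the deprecation warning itself is logged when the class is instantiated."""
        self.result.is_success()
```

Note that with the `__init_subclass__` change in this merge, `name` defaults to the class name and `description` to the first line of the docstring, so neither needs to be set explicitly.
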
diff --git a/.github/workflows/code-testing.yml b/.github/workflows/code-testing.yml index 3a66c5c..de2e6bc 100644 --- a/.github/workflows/code-testing.yml +++ b/.github/workflows/code-testing.yml @@ -46,7 +46,7 @@ jobs: runs-on: ubuntu-20.04 strategy: matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] needs: file-changes steps: - uses: actions/checkout@v4 @@ -108,7 +108,7 @@ jobs: needs: [lint-python, type-python] strategy: matrix: - python: ["3.9", "3.10", "3.11", "3.12"] + python: ["3.9", "3.10", "3.11", "3.12", "3.13"] steps: - uses: actions/checkout@v4 - name: Setup Python @@ -119,6 +119,23 @@ jobs: run: pip install tox tox-gh-actions - name: "Run pytest via tox for ${{ matrix.python }}" run: tox + test-python-windows: + name: Pytest on 3.12 for windows + runs-on: windows-2022 + needs: [lint-python, type-python] + env: + # Required to prevent asyncssh to fail. + USERNAME: WindowsUser + steps: + - uses: actions/checkout@v4 + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.12 + - name: Install dependencies + run: pip install tox tox-gh-actions + - name: Run pytest via tox for 3.12 on Windows + run: tox test-documentation: name: Build offline documentation for testing runs-on: ubuntu-20.04 @@ -149,4 +166,4 @@ jobs: uses: CodSpeedHQ/action@v3 with: token: ${{ secrets.CODSPEED_TOKEN }} - run: pytest --codspeed --no-cov --log-cli-level INFO tests/benchmark
\ No newline at end of file + run: pytest --codspeed --no-cov --log-cli-level INFO tests/benchmark diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d32be46..4b3b357 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -7,8 +7,13 @@ on: jobs: pypi: - name: Publish version to Pypi servers + name: Publish Python 🐍 distribution 📦 to PyPI runs-on: ubuntu-latest + environment: + name: production + url: https://pypi.org/p/anta + permissions: + id-token: write steps: - name: Checkout code uses: actions/checkout@v4 @@ -19,11 +24,8 @@ jobs: - name: Build package run: | python -m build - - name: Publish package to Pypi + - name: Publish distribution 📦 to PyPI uses: pypa/gh-action-pypi-publish@release/v1 - with: - user: __token__ - password: ${{ secrets.PYPI_API_TOKEN }} release-coverage: name: Updated ANTA release coverage badge diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0d6cd18..f33db65 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -46,7 +46,7 @@ repos: - '<!--| ~| -->' - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.9 + rev: v0.8.4 hooks: - id: ruff name: Run Ruff linter @@ -55,7 +55,7 @@ repos: name: Run Ruff formatter - repo: https://github.com/pycqa/pylint - rev: "v3.3.1" + rev: "v3.3.2" hooks: - id: pylint name: Check code style with pylint @@ -85,7 +85,7 @@ repos: types: [text] - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.11.2 + rev: v1.14.0 hooks: - id: mypy name: Check typing with mypy @@ -100,7 +100,7 @@ repos: files: ^(anta|tests)/ - repo: https://github.com/igorshubovych/markdownlint-cli - rev: v0.42.0 + rev: v0.43.0 hooks: - id: markdownlint name: Check Markdown files style. @@ -108,3 +108,19 @@ repos: - --config=.github/markdownlint.yaml - --ignore-path=.github/markdownlintignore - --fix + + - repo: local + hooks: + - id: examples-test + name: Generate examples/tests.yaml + entry: >- + sh -c "docs/scripts/generate_examples_tests.py" + language: python + types: [python] + files: anta/ + verbose: true + pass_filenames: false + additional_dependencies: + - anta[cli] + # TODO: next can go once we have it added to anta properly + - numpydoc diff --git a/anta/cli/__init__.py b/anta/cli/__init__.py index b542a6d..90be5c7 100644 --- a/anta/cli/__init__.py +++ b/anta/cli/__init__.py @@ -35,7 +35,7 @@ except ImportError as exc: cli = build_cli(exc) -__all__ = ["cli", "anta"] +__all__ = ["anta", "cli"] if __name__ == "__main__": cli() diff --git a/anta/cli/exec/commands.py b/anta/cli/exec/commands.py index 531614a..ff36e56 100644 --- a/anta/cli/exec/commands.py +++ b/anta/cli/exec/commands.py @@ -84,7 +84,10 @@ def snapshot(inventory: AntaInventory, tags: set[str] | None, commands_list: Pat ) @click.option( "--configure", - help="Ensure devices have 'aaa authorization exec default local' configured (required for SCP on EOS). THIS WILL CHANGE THE CONFIGURATION OF YOUR NETWORK.", + help=( + "[DEPRECATED] Ensure devices have 'aaa authorization exec default local' configured (required for SCP on EOS). " + "THIS WILL CHANGE THE CONFIGURATION OF YOUR NETWORK." 
+ ), default=False, is_flag=True, show_default=True, diff --git a/anta/cli/exec/utils.py b/anta/cli/exec/utils.py index ce13622..33a0222 100644 --- a/anta/cli/exec/utils.py +++ b/anta/cli/exec/utils.py @@ -128,6 +128,13 @@ async def collect_show_tech(inv: AntaInventory, root_dir: Path, *, configure: bo logger.error("Unable to collect tech-support on %s: configuration 'aaa authorization exec default local' is not present", device.name) return + # TODO: ANTA 2.0.0 + msg = ( + "[DEPRECATED] Using '--configure' for collecting show-techs is deprecated and will be removed in ANTA 2.0.0. " + "Please add the required configuration on your devices before running this command from ANTA." + ) + logger.warning(msg) + commands = [] # TODO: @mtache - add `config` field to `AntaCommand` object to handle this use case. # Otherwise mypy complains about enable as it is only implemented for AsyncEOSDevice diff --git a/anta/cli/get/__init__.py b/anta/cli/get/__init__.py index abc7b38..8763b35 100644 --- a/anta/cli/get/__init__.py +++ b/anta/cli/get/__init__.py @@ -17,3 +17,4 @@ get.add_command(commands.from_cvp) get.add_command(commands.from_ansible) get.add_command(commands.inventory) get.add_command(commands.tags) +get.add_command(commands.tests) diff --git a/anta/cli/get/commands.py b/anta/cli/get/commands.py index ea1cc75..3cc9126 100644 --- a/anta/cli/get/commands.py +++ b/anta/cli/get/commands.py @@ -22,7 +22,7 @@ from anta.cli.console import console from anta.cli.get.utils import inventory_output_options from anta.cli.utils import ExitCode, inventory_options -from .utils import create_inventory_from_ansible, create_inventory_from_cvp, get_cv_token +from .utils import create_inventory_from_ansible, create_inventory_from_cvp, explore_package, get_cv_token if TYPE_CHECKING: from anta.inventory import AntaInventory @@ -75,7 +75,11 @@ def from_cvp(ctx: click.Context, output: Path, host: str, username: str, passwor # Get devices under a container logger.info("Getting inventory for container %s from CloudVision instance '%s'", container, host) cvp_inventory = clnt.api.get_devices_in_container(container) - create_inventory_from_cvp(cvp_inventory, output) + try: + create_inventory_from_cvp(cvp_inventory, output) + except OSError as e: + logger.error(str(e)) + ctx.exit(ExitCode.USAGE_ERROR) @click.command @@ -101,7 +105,7 @@ def from_ansible(ctx: click.Context, output: Path, ansible_group: str, ansible_i output=output, ansible_group=ansible_group, ) - except ValueError as e: + except (ValueError, OSError) as e: logger.error(str(e)) ctx.exit(ExitCode.USAGE_ERROR) @@ -132,3 +136,25 @@ def tags(inventory: AntaInventory, **kwargs: Any) -> None: tags.update(device.tags) console.print("Tags found:") console.print_json(json.dumps(sorted(tags), indent=2)) + + +@click.command +@click.pass_context +@click.option("--module", help="Filter tests by module name.", default="anta.tests", show_default=True) +@click.option("--test", help="Filter by specific test name. 
If module is specified, searches only within that module.", type=str) +@click.option("--short", help="Display test names without their inputs.", is_flag=True, default=False) +@click.option("--count", help="Print only the number of tests found.", is_flag=True, default=False) +def tests(ctx: click.Context, module: str, test: str | None, *, short: bool, count: bool) -> None: + """Show all builtin ANTA tests with an example output retrieved from each test documentation.""" + try: + tests_found = explore_package(module, test_name=test, short=short, count=count) + if tests_found == 0: + console.print(f"""No test {f"'{test}' " if test else ""}found in '{module}'.""") + elif count: + if tests_found == 1: + console.print(f"There is 1 test available in '{module}'.") + else: + console.print(f"There are {tests_found} tests available in '{module}'.") + except ValueError as e: + logger.error(str(e)) + ctx.exit(ExitCode.USAGE_ERROR) diff --git a/anta/cli/get/utils.py b/anta/cli/get/utils.py index 8f11676..d21dc54 100644 --- a/anta/cli/get/utils.py +++ b/anta/cli/get/utils.py @@ -6,8 +6,14 @@ from __future__ import annotations import functools +import importlib +import inspect import json import logging +import pkgutil +import re +import sys +import textwrap from pathlib import Path from sys import stdin from typing import Any, Callable @@ -17,9 +23,11 @@ import requests import urllib3 import yaml +from anta.cli.console import console from anta.cli.utils import ExitCode from anta.inventory import AntaInventory from anta.inventory.models import AntaInventoryHost, AntaInventoryInput +from anta.models import AntaTest urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) @@ -114,11 +122,28 @@ def get_cv_token(cvp_ip: str, cvp_username: str, cvp_password: str, *, verify_ce def write_inventory_to_file(hosts: list[AntaInventoryHost], output: Path) -> None: - """Write a file inventory from pydantic models.""" + """Write a file inventory from pydantic models. + + Parameters + ---------- + hosts: + the list of AntaInventoryHost to write to an inventory file + output: + the Path where the inventory should be written. + + Raises + ------ + OSError + When anything goes wrong while writing the file. + """ i = AntaInventoryInput(hosts=hosts) - with output.open(mode="w", encoding="UTF-8") as out_fd: - out_fd.write(yaml.dump({AntaInventory.INVENTORY_ROOT_KEY: yaml.safe_load(i.yaml())})) - logger.info("ANTA inventory file has been created: '%s'", output) + try: + with output.open(mode="w", encoding="UTF-8") as out_fd: + out_fd.write(yaml.dump({AntaInventory.INVENTORY_ROOT_KEY: yaml.safe_load(i.yaml())})) + logger.info("ANTA inventory file has been created: '%s'", output) + except OSError as exc: + msg = f"Could not write inventory to path '{output}'." + raise OSError(msg) from exc def create_inventory_from_cvp(inv: list[dict[str, Any]], output: Path) -> None: @@ -204,3 +229,148 @@ def create_inventory_from_ansible(inventory: Path, output: Path, ansible_group: raise ValueError(msg) ansible_hosts = deep_yaml_parsing(ansible_inventory) write_inventory_to_file(ansible_hosts, output) + + +def explore_package(module_name: str, test_name: str | None = None, *, short: bool = False, count: bool = False) -> int: + """Parse ANTA test submodules recursively and print AntaTest examples. + + Parameters + ---------- + module_name + Name of the module to explore (e.g., 'anta.tests.routing.bgp'). + test_name + If provided, only show tests starting with this name. + short + If True, only print test names without their inputs. 
+ count + If True, only count the tests. + + Returns + ------- + int: + The number of tests found. + """ + try: + module_spec = importlib.util.find_spec(module_name) + except ModuleNotFoundError: + # Relying on module_spec check below. + module_spec = None + except ImportError as e: + msg = "`anta get tests --module <module>` does not support relative imports" + raise ValueError(msg) from e + + # Giving a second chance adding CWD to PYTHONPATH + if module_spec is None: + try: + logger.info("Could not find module `%s`, injecting CWD in PYTHONPATH and retrying...", module_name) + sys.path = [str(Path.cwd()), *sys.path] + module_spec = importlib.util.find_spec(module_name) + except ImportError: + module_spec = None + + if module_spec is None or module_spec.origin is None: + msg = f"Module `{module_name}` was not found!" + raise ValueError(msg) + + tests_found = 0 + if module_spec.submodule_search_locations: + for _, sub_module_name, ispkg in pkgutil.walk_packages(module_spec.submodule_search_locations): + qname = f"{module_name}.{sub_module_name}" + if ispkg: + tests_found += explore_package(qname, test_name=test_name, short=short, count=count) + continue + tests_found += find_tests_examples(qname, test_name, short=short, count=count) + + else: + tests_found += find_tests_examples(module_spec.name, test_name, short=short, count=count) + + return tests_found + + +def find_tests_examples(qname: str, test_name: str | None, *, short: bool = False, count: bool = False) -> int: + """Print tests from `qname`, filtered by `test_name` if provided. + + Parameters + ---------- + qname + Name of the module to explore (e.g., 'anta.tests.routing.bgp'). + test_name + If provided, only show tests starting with this name. + short + If True, only print test names without their inputs. + count + If True, only count the tests. + + Returns + ------- + int: + The number of tests found. + """ + try: + qname_module = importlib.import_module(qname) + except (AssertionError, ImportError) as e: + msg = f"Error when importing `{qname}` using importlib!" + raise ValueError(msg) from e + + module_printed = False + tests_found = 0 + + for _name, obj in inspect.getmembers(qname_module): + # Only retrieves the subclasses of AntaTest + if not inspect.isclass(obj) or not issubclass(obj, AntaTest) or obj == AntaTest: + continue + if test_name and not obj.name.startswith(test_name): + continue + if not module_printed: + if not count: + console.print(f"{qname}:") + module_printed = True + tests_found += 1 + if count: + continue + print_test(obj, short=short) + + return tests_found + + +def print_test(test: type[AntaTest], *, short: bool = False) -> None: + """Print a single test. + + Parameters + ---------- + test + the representation of the AntaTest as returned by inspect.getmembers + short + If True, only print test names without their inputs. + """ + if not test.__doc__ or (example := extract_examples(test.__doc__)) is None: + msg = f"Test {test.name} in module {test.__module__} is missing an Example" + raise LookupError(msg) + # Picking up only the inputs in the examples + # Need to handle the fact that we nest the routing modules in Examples. + # This is a bit fragile. + inputs = example.split("\n") + try: + test_name_line = next((i for i, input_entry in enumerate(inputs) if test.name in input_entry)) + except StopIteration as e: + msg = f"Could not find the name of the test '{test.name}' in the Example section in the docstring." 
+ raise ValueError(msg) from e + # TODO: handle not found + console.print(f" {inputs[test_name_line].strip()}") + # Injecting the description + console.print(f" # {test.description}", soft_wrap=True) + if not short and len(inputs) > test_name_line + 2: # There are params + console.print(textwrap.indent(textwrap.dedent("\n".join(inputs[test_name_line + 1 : -1])), " " * 6)) + + +def extract_examples(docstring: str) -> str | None: + """Extract the content of the Example section in a Numpy docstring. + + Returns + ------- + str | None + The content of the section if present, None if the section is absent or empty. + """ + pattern = r"Examples\s*--------\s*(.*)(?:\n\s*\n|\Z)" + match = re.search(pattern, docstring, flags=re.DOTALL) + return match[1].strip() if match and match[1].strip() != "" else None diff --git a/anta/cli/nrfu/utils.py b/anta/cli/nrfu/utils.py index 947c089..375e6e1 100644 --- a/anta/cli/nrfu/utils.py +++ b/anta/cli/nrfu/utils.py @@ -116,8 +116,12 @@ def print_text(ctx: click.Context) -> None: """Print results as simple text.""" console.print() for test in _get_result_manager(ctx).results: - message = f" ({test.messages[0]!s})" if len(test.messages) > 0 else "" - console.print(f"{test.name} :: {test.test} :: [{test.result}]{test.result.upper()}[/{test.result}]{message}", highlight=False) + if len(test.messages) <= 1: + message = test.messages[0] if len(test.messages) == 1 else "" + console.print(f"{test.name} :: {test.test} :: [{test.result}]{test.result.upper()}[/{test.result}]({message})", highlight=False) + else: # len(test.messages) > 1 + console.print(f"{test.name} :: {test.test} :: [{test.result}]{test.result.upper()}[/{test.result}]", highlight=False) + console.print("\n".join(f" {message}" for message in test.messages), highlight=False) def print_jinja(results: ResultManager, template: pathlib.Path, output: pathlib.Path | None = None) -> None: diff --git a/anta/constants.py b/anta/constants.py index 175a4ad..4dcef30 100644 --- a/anta/constants.py +++ b/anta/constants.py @@ -17,3 +17,12 @@ MD_REPORT_TOC = """**Table of Contents:** - [Summary Totals Per Category](#summary-totals-per-category) - [Test Results](#test-results)""" """Table of Contents for the Markdown report.""" + +KNOWN_EOS_ERRORS = [ + r"BGP inactive", + r"VRF '.*' is not active", + r".* does not support IP", + r"IS-IS (.*) is disabled because: .*", + r"No source interface .*", +] +"""List of known EOS errors that should set a test status to 'failure' with the error message.""" diff --git a/anta/custom_types.py b/anta/custom_types.py index c298118..297f1f5 100644 --- a/anta/custom_types.py +++ b/anta/custom_types.py @@ -208,3 +208,33 @@ SnmpPdu = Literal["inGetPdus", "inGetNextPdus", "inSetPdus", "outGetResponsePdus SnmpErrorCounter = Literal[ "inVersionErrs", "inBadCommunityNames", "inBadCommunityUses", "inParseErrs", "outTooBigErrs", "outNoSuchNameErrs", "outBadValueErrs", "outGeneralErrs" ] + +IPv4RouteType = Literal[ + "connected", + "static", + "kernel", + "OSPF", + "OSPF inter area", + "OSPF external type 1", + "OSPF external type 2", + "OSPF NSSA external type 1", + "OSPF NSSA external type2", + "Other BGP Routes", + "iBGP", + "eBGP", + "RIP", + "IS-IS level 1", + "IS-IS level 2", + "OSPFv3", + "BGP Aggregate", + "OSPF Summary", + "Nexthop Group Static Route", + "VXLAN Control Service", + "Martian", + "DHCP client installed default route", + "Dynamic Policy Route", + "VRF Leaked", + "gRIBI", + "Route Cache Route", + "CBF Leaked Route", +] diff --git a/anta/decorators.py b/anta/decorators.py index 
f5608ef..0431623 100644 --- a/anta/decorators.py +++ b/anta/decorators.py @@ -17,7 +17,8 @@ if TYPE_CHECKING: F = TypeVar("F", bound=Callable[..., Any]) -def deprecated_test(new_tests: list[str] | None = None) -> Callable[[F], F]: +# TODO: Remove this decorator in ANTA v2.0.0 in favor of deprecated_test_class +def deprecated_test(new_tests: list[str] | None = None) -> Callable[[F], F]: # pragma: no cover """Return a decorator to log a message of WARNING severity when a test is deprecated. Parameters @@ -62,6 +63,57 @@ def deprecated_test(new_tests: list[str] | None = None) -> Callable[[F], F]: return decorator +def deprecated_test_class(new_tests: list[str] | None = None, removal_in_version: str | None = None) -> Callable[[type[AntaTest]], type[AntaTest]]: + """Return a decorator to log a message of WARNING severity when a test is deprecated. + + Parameters + ---------- + new_tests + A list of new test classes that should replace the deprecated test. + removal_in_version + A string indicating the version in which the test will be removed. + + Returns + ------- + Callable[[type], type] + A decorator that can be used to wrap test functions. + + """ + + def decorator(cls: type[AntaTest]) -> type[AntaTest]: + """Actual decorator that logs the message. + + Parameters + ---------- + cls + The cls to be decorated. + + Returns + ------- + cls + The decorated cls. + """ + orig_init = cls.__init__ + + def new_init(*args: Any, **kwargs: Any) -> None: + """Overload __init__ to generate a warning message for deprecation.""" + if new_tests: + new_test_names = ", ".join(new_tests) + logger.warning("%s test is deprecated. Consider using the following new tests: %s.", cls.name, new_test_names) + else: + logger.warning("%s test is deprecated.", cls.name) + orig_init(*args, **kwargs) + + if removal_in_version is not None: + cls.__removal_in_version = removal_in_version + + # NOTE: we are ignoring mypy warning as we want to assign to a method here + cls.__init__ = new_init # type: ignore[method-assign] + return cls + + return decorator + + def skip_on_platforms(platforms: list[str]) -> Callable[[F], F]: """Return a decorator to skip a test based on the device's hardware model. diff --git a/anta/device.py b/anta/device.py index d7d2b0d..561323f 100644 --- a/anta/device.py +++ b/anta/device.py @@ -255,7 +255,7 @@ class AsyncEOSDevice(AntaDevice): """ - def __init__( + def __init__( # noqa: PLR0913 self, host: str, username: str, @@ -372,7 +372,7 @@ class AsyncEOSDevice(AntaDevice): """ return (self._session.host, self._session.port) - async def _collect(self, command: AntaCommand, *, collection_id: str | None = None) -> None: # noqa: C901 function is too complex - because of many required except blocks + async def _collect(self, command: AntaCommand, *, collection_id: str | None = None) -> None: """Collect device command output from EOS using aio-eapi. Supports outformat `json` and `text` as output structure. @@ -409,15 +409,7 @@ class AsyncEOSDevice(AntaDevice): command.output = response[-1] except asynceapi.EapiCommandError as e: # This block catches exceptions related to EOS issuing an error. - command.errors = e.errors - if command.requires_privileges: - logger.error( - "Command '%s' requires privileged mode on %s. 
Verify user permissions and if the `enable` option is required.", command.command, self.name - ) - if command.supported: - logger.error("Command '%s' failed on %s: %s", command.command, self.name, e.errors[0] if len(e.errors) == 1 else e.errors) - else: - logger.debug("Command '%s' is not supported on '%s' (%s)", command.command, self.name, self.hw_model) + self._log_eapi_command_error(command, e) except TimeoutException as e: # This block catches Timeout exceptions. command.errors = [exc_to_str(e)] @@ -446,6 +438,18 @@ class AsyncEOSDevice(AntaDevice): anta_log_exception(e, f"An error occurred while issuing an eAPI request to {self.name}", logger) logger.debug("%s: %s", self.name, command) + def _log_eapi_command_error(self, command: AntaCommand, e: asynceapi.EapiCommandError) -> None: + """Appropriately log the eapi command error.""" + command.errors = e.errors + if command.requires_privileges: + logger.error("Command '%s' requires privileged mode on %s. Verify user permissions and if the `enable` option is required.", command.command, self.name) + if not command.supported: + logger.debug("Command '%s' is not supported on '%s' (%s)", command.command, self.name, self.hw_model) + elif command.returned_known_eos_error: + logger.debug("Command '%s' returned a known error '%s': %s", command.command, self.name, command.errors) + else: + logger.error("Command '%s' failed on %s: %s", command.command, self.name, e.errors[0] if len(e.errors) == 1 else e.errors) + async def refresh(self) -> None: """Update attributes of an AsyncEOSDevice instance. diff --git a/anta/input_models/__init__.py b/anta/input_models/__init__.py new file mode 100644 index 0000000..5b8974c --- /dev/null +++ b/anta/input_models/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Package related to all ANTA tests input models.""" diff --git a/anta/input_models/avt.py b/anta/input_models/avt.py new file mode 100644 index 0000000..9219c2f --- /dev/null +++ b/anta/input_models/avt.py @@ -0,0 +1,36 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Module containing input models for AVT tests.""" + +from __future__ import annotations + +from ipaddress import IPv4Address + +from pydantic import BaseModel, ConfigDict + + +class AVTPath(BaseModel): + """AVT (Adaptive Virtual Topology) model representing path details and associated information.""" + + model_config = ConfigDict(extra="forbid") + vrf: str = "default" + """VRF context. Defaults to `default`.""" + avt_name: str + """The name of the Adaptive Virtual Topology (AVT).""" + destination: IPv4Address + """The IPv4 address of the destination peer in the AVT.""" + next_hop: IPv4Address + """The IPv4 address of the next hop used to reach the AVT peer.""" + path_type: str | None = None + """Specifies the type of path for the AVT. If not specified, both types 'direct' and 'multihop' are considered.""" + + def __str__(self) -> str: + """Return a human-readable string representation of the AVTPath for reporting. 
+ + Examples + -------- + AVT CONTROL-PLANE-PROFILE VRF: default (Destination: 10.101.255.2, Next-hop: 10.101.255.1) + + """ + return f"AVT {self.avt_name} VRF: {self.vrf} (Destination: {self.destination}, Next-hop: {self.next_hop})" diff --git a/anta/input_models/bfd.py b/anta/input_models/bfd.py new file mode 100644 index 0000000..9ccc625 --- /dev/null +++ b/anta/input_models/bfd.py @@ -0,0 +1,37 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Module containing input models for BFD tests.""" + +from __future__ import annotations + +from ipaddress import IPv4Address + +from pydantic import BaseModel, ConfigDict + +from anta.custom_types import BfdInterval, BfdMultiplier, BfdProtocol + + +class BFDPeer(BaseModel): + """BFD (Bidirectional Forwarding Detection) model representing the peer details. + + Only IPv4 peers are supported for now. + """ + + model_config = ConfigDict(extra="forbid") + peer_address: IPv4Address + """IPv4 address of a BFD peer.""" + vrf: str = "default" + """Optional VRF for the BFD peer. Defaults to `default`.""" + tx_interval: BfdInterval | None = None + """Tx interval of BFD peer in milliseconds. Required field in the `VerifyBFDPeersIntervals` test.""" + rx_interval: BfdInterval | None = None + """Rx interval of BFD peer in milliseconds. Required field in the `VerifyBFDPeersIntervals` test.""" + multiplier: BfdMultiplier | None = None + """Multiplier of BFD peer. Required field in the `VerifyBFDPeersIntervals` test.""" + protocols: list[BfdProtocol] | None = None + """List of protocols to be verified. Required field in the `VerifyBFDPeersRegProtocols` test.""" + + def __str__(self) -> str: + """Return a human-readable string representation of the BFDPeer for reporting.""" + return f"Peer: {self.peer_address} VRF: {self.vrf}" diff --git a/anta/input_models/connectivity.py b/anta/input_models/connectivity.py new file mode 100644 index 0000000..e8f5553 --- /dev/null +++ b/anta/input_models/connectivity.py @@ -0,0 +1,83 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Module containing input models for connectivity tests.""" + +from __future__ import annotations + +from ipaddress import IPv4Address +from typing import Any +from warnings import warn + +from pydantic import BaseModel, ConfigDict + +from anta.custom_types import Interface + + +class Host(BaseModel): + """Model for a remote host to ping.""" + + model_config = ConfigDict(extra="forbid") + destination: IPv4Address + """IPv4 address to ping.""" + source: IPv4Address | Interface + """IPv4 address source IP or egress interface to use.""" + vrf: str = "default" + """VRF context. Defaults to `default`.""" + repeat: int = 2 + """Number of ping repetition. Defaults to 2.""" + size: int = 100 + """Specify datagram size. Defaults to 100.""" + df_bit: bool = False + """Enable do not fragment bit in IP header. Defaults to False.""" + + def __str__(self) -> str: + """Return a human-readable string representation of the Host for reporting. 
+ + Examples + -------- + Host 10.1.1.1 (src: 10.2.2.2, vrf: mgmt, size: 100B, repeat: 2) + + """ + df_status = ", df-bit: enabled" if self.df_bit else "" + return f"Host {self.destination} (src: {self.source}, vrf: {self.vrf}, size: {self.size}B, repeat: {self.repeat}{df_status})" + + +class LLDPNeighbor(BaseModel): + """LLDP (Link Layer Discovery Protocol) model representing the port details and neighbor information.""" + + model_config = ConfigDict(extra="forbid") + port: Interface + """The LLDP port for the local device.""" + neighbor_device: str + """The system name of the LLDP neighbor device.""" + neighbor_port: Interface + """The LLDP port on the neighboring device.""" + + def __str__(self) -> str: + """Return a human-readable string representation of the LLDPNeighbor for reporting. + + Examples + -------- + Port Ethernet1 (Neighbor: DC1-SPINE2, Neighbor Port: Ethernet2) + + """ + return f"Port {self.port} (Neighbor: {self.neighbor_device}, Neighbor Port: {self.neighbor_port})" + + +class Neighbor(LLDPNeighbor): # pragma: no cover + """Alias for the LLDPNeighbor model to maintain backward compatibility. + + When initialized, it will emit a deprecation warning and call the LLDPNeighbor model. + + TODO: Remove this class in ANTA v2.0.0. + """ + + def __init__(self, **data: Any) -> None: # noqa: ANN401 + """Initialize the LLDPNeighbor class, emitting a depreciation warning.""" + warn( + message="Neighbor model is deprecated and will be removed in ANTA v2.0.0. Use the LLDPNeighbor model instead.", + category=DeprecationWarning, + stacklevel=2, + ) + super().__init__(**data) diff --git a/anta/input_models/cvx.py b/anta/input_models/cvx.py new file mode 100644 index 0000000..4f93749 --- /dev/null +++ b/anta/input_models/cvx.py @@ -0,0 +1,19 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Module containing input models for CVX tests.""" + +from __future__ import annotations + +from typing import Literal + +from pydantic import BaseModel + +from anta.custom_types import Hostname + + +class CVXPeers(BaseModel): + """Model for a CVX Cluster Peer.""" + + peer_name: Hostname + registration_state: Literal["Connecting", "Connected", "Registration error", "Registration complete", "Unexpected peer state"] = "Registration complete" diff --git a/anta/input_models/interfaces.py b/anta/input_models/interfaces.py new file mode 100644 index 0000000..9e33a2c --- /dev/null +++ b/anta/input_models/interfaces.py @@ -0,0 +1,48 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Module containing input models for interface tests.""" + +from __future__ import annotations + +from typing import Literal + +from pydantic import BaseModel, ConfigDict + +from anta.custom_types import Interface, PortChannelInterface + + +class InterfaceState(BaseModel): + """Model for an interface state.""" + + model_config = ConfigDict(extra="forbid") + name: Interface + """Interface to validate.""" + status: Literal["up", "down", "adminDown"] | None = None + """Expected status of the interface. Required field in the `VerifyInterfacesStatus` test.""" + line_protocol_status: Literal["up", "down", "testing", "unknown", "dormant", "notPresent", "lowerLayerDown"] | None = None + """Expected line protocol status of the interface. 
Optional field in the `VerifyInterfacesStatus` test.""" + portchannel: PortChannelInterface | None = None + """Port-Channel in which the interface is bundled. Required field in the `VerifyLACPInterfacesStatus` test.""" + lacp_rate_fast: bool = False + """Specifies the LACP timeout mode for the link aggregation group. + + Options: + - True: Also referred to as fast mode. + - False: The default mode, also known as slow mode. + + Can be enabled in the `VerifyLACPInterfacesStatus` tests. + """ + + def __str__(self) -> str: + """Return a human-readable string representation of the InterfaceState for reporting. + + Examples + -------- + - Interface: Ethernet1 Port-Channel: Port-Channel100 + - Interface: Ethernet1 + """ + base_string = f"Interface: {self.name}" + if self.portchannel is not None: + base_string += f" Port-Channel: {self.portchannel}" + return base_string diff --git a/anta/input_models/routing/__init__.py b/anta/input_models/routing/__init__.py new file mode 100644 index 0000000..e1188cc --- /dev/null +++ b/anta/input_models/routing/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Package related to routing tests input models.""" diff --git a/anta/input_models/routing/bgp.py b/anta/input_models/routing/bgp.py new file mode 100644 index 0000000..57c8217 --- /dev/null +++ b/anta/input_models/routing/bgp.py @@ -0,0 +1,209 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Module containing input models for routing BGP tests.""" + +from __future__ import annotations + +from ipaddress import IPv4Address, IPv4Network, IPv6Address +from typing import TYPE_CHECKING, Any +from warnings import warn + +from pydantic import BaseModel, ConfigDict, Field, PositiveInt, model_validator +from pydantic_extra_types.mac_address import MacAddress + +from anta.custom_types import Afi, BgpDropStats, BgpUpdateError, MultiProtocolCaps, Safi, Vni + +if TYPE_CHECKING: + import sys + + if sys.version_info >= (3, 11): + from typing import Self + else: + from typing_extensions import Self + +AFI_SAFI_EOS_KEY = { + ("ipv4", "unicast"): "ipv4Unicast", + ("ipv4", "multicast"): "ipv4Multicast", + ("ipv4", "labeled-unicast"): "ipv4MplsLabels", + ("ipv4", "sr-te"): "ipv4SrTe", + ("ipv6", "unicast"): "ipv6Unicast", + ("ipv6", "multicast"): "ipv6Multicast", + ("ipv6", "labeled-unicast"): "ipv6MplsLabels", + ("ipv6", "sr-te"): "ipv6SrTe", + ("vpn-ipv4", None): "ipv4MplsVpn", + ("vpn-ipv6", None): "ipv6MplsVpn", + ("evpn", None): "l2VpnEvpn", + ("rt-membership", None): "rtMembership", + ("path-selection", None): "dps", + ("link-state", None): "linkState", +} +"""Dictionary mapping AFI/SAFI to EOS key representation.""" + + +class BgpAddressFamily(BaseModel): + """Model for a BGP address family.""" + + model_config = ConfigDict(extra="forbid") + afi: Afi + """BGP Address Family Identifier (AFI).""" + safi: Safi | None = None + """BGP Subsequent Address Family Identifier (SAFI). Required when `afi` is `ipv4` or `ipv6`.""" + vrf: str = "default" + """Optional VRF when `afi` is `ipv4` or `ipv6`. Defaults to `default`. + + If the input `afi` is NOT `ipv4` or `ipv6` (e.g. `evpn`, `vpn-ipv4`, etc.), the `vrf` must be `default`. + + These AFIs operate at a global level and do not use the VRF concept in the same way as IPv4/IPv6. 
+ """ + num_peers: PositiveInt | None = None + """Number of expected established BGP peers with negotiated AFI/SAFI. Required field in the `VerifyBGPPeerCount` test.""" + peers: list[IPv4Address | IPv6Address] | None = None + """List of expected IPv4/IPv6 BGP peers supporting the AFI/SAFI. Required field in the `VerifyBGPSpecificPeers` test.""" + check_tcp_queues: bool = True + """Flag to check if the TCP session queues are empty for a BGP peer. Defaults to `True`. + + Can be disabled in the `VerifyBGPPeersHealth` and `VerifyBGPSpecificPeers` tests. + """ + check_peer_state: bool = False + """Flag to check if the peers are established with negotiated AFI/SAFI. Defaults to `False`. + + Can be enabled in the `VerifyBGPPeerCount` tests. + """ + + @model_validator(mode="after") + def validate_inputs(self) -> Self: + """Validate the inputs provided to the BgpAddressFamily class. + + If `afi` is either `ipv4` or `ipv6`, `safi` must be provided. + + If `afi` is not `ipv4` or `ipv6`, `safi` must NOT be provided and `vrf` must be `default`. + """ + if self.afi in ["ipv4", "ipv6"]: + if self.safi is None: + msg = "'safi' must be provided when afi is ipv4 or ipv6" + raise ValueError(msg) + elif self.safi is not None: + msg = "'safi' must not be provided when afi is not ipv4 or ipv6" + raise ValueError(msg) + elif self.vrf != "default": + msg = "'vrf' must be default when afi is not ipv4 or ipv6" + raise ValueError(msg) + return self + + @property + def eos_key(self) -> str: + """AFI/SAFI EOS key representation.""" + # Pydantic handles the validation of the AFI/SAFI combination, so we can ignore error handling here. + return AFI_SAFI_EOS_KEY[(self.afi, self.safi)] + + def __str__(self) -> str: + """Return a human-readable string representation of the BgpAddressFamily for reporting. + + Examples + -------- + - AFI:ipv4 SAFI:unicast VRF:default + - AFI:evpn + """ + base_string = f"AFI: {self.afi}" + if self.safi is not None: + base_string += f" SAFI: {self.safi}" + if self.afi in ["ipv4", "ipv6"]: + base_string += f" VRF: {self.vrf}" + return base_string + + +class BgpAfi(BgpAddressFamily): # pragma: no cover + """Alias for the BgpAddressFamily model to maintain backward compatibility. + + When initialized, it will emit a deprecation warning and call the BgpAddressFamily model. + + TODO: Remove this class in ANTA v2.0.0. + """ + + def __init__(self, **data: Any) -> None: # noqa: ANN401 + """Initialize the BgpAfi class, emitting a deprecation warning.""" + warn( + message="BgpAfi model is deprecated and will be removed in ANTA v2.0.0. Use the BgpAddressFamily model instead.", + category=DeprecationWarning, + stacklevel=2, + ) + super().__init__(**data) + + +class BgpPeer(BaseModel): + """Model for a BGP peer. + + Only IPv4 peers are supported for now. + """ + + model_config = ConfigDict(extra="forbid") + peer_address: IPv4Address + """IPv4 address of the BGP peer.""" + vrf: str = "default" + """Optional VRF for the BGP peer. Defaults to `default`.""" + advertised_routes: list[IPv4Network] | None = None + """List of advertised routes in CIDR format. Required field in the `VerifyBGPExchangedRoutes` test.""" + received_routes: list[IPv4Network] | None = None + """List of received routes in CIDR format. Required field in the `VerifyBGPExchangedRoutes` test.""" + capabilities: list[MultiProtocolCaps] | None = None + """List of BGP multiprotocol capabilities. 
Required field in the `VerifyBGPPeerMPCaps` test.""" + strict: bool = False + """If True, requires exact match of the provided BGP multiprotocol capabilities. + + Optional field in the `VerifyBGPPeerMPCaps` test. Defaults to False.""" + hold_time: int | None = Field(default=None, ge=3, le=7200) + """BGP hold time in seconds. Required field in the `VerifyBGPTimers` test.""" + keep_alive_time: int | None = Field(default=None, ge=0, le=3600) + """BGP keepalive time in seconds. Required field in the `VerifyBGPTimers` test.""" + drop_stats: list[BgpDropStats] | None = None + """List of drop statistics to be verified. + + Optional field in the `VerifyBGPPeerDropStats` test. If not provided, the test will verifies all drop statistics.""" + update_errors: list[BgpUpdateError] | None = None + """List of update error counters to be verified. + + Optional field in the `VerifyBGPPeerUpdateErrors` test. If not provided, the test will verifies all the update error counters.""" + inbound_route_map: str | None = None + """Inbound route map applied, defaults to None. Required field in the `VerifyBgpRouteMaps` test.""" + outbound_route_map: str | None = None + """Outbound route map applied, defaults to None. Required field in the `VerifyBgpRouteMaps` test.""" + maximum_routes: int | None = Field(default=None, ge=0, le=4294967294) + """The maximum allowable number of BGP routes, `0` means unlimited. Required field in the `VerifyBGPPeerRouteLimit` test""" + warning_limit: int | None = Field(default=None, ge=0, le=4294967294) + """Optional maximum routes warning limit. If not provided, it defaults to `0` meaning no warning limit.""" + + def __str__(self) -> str: + """Return a human-readable string representation of the BgpPeer for reporting.""" + return f"Peer: {self.peer_address} VRF: {self.vrf}" + + +class BgpNeighbor(BgpPeer): # pragma: no cover + """Alias for the BgpPeer model to maintain backward compatibility. + + When initialised, it will emit a deprecation warning and call the BgpPeer model. + + TODO: Remove this class in ANTA v2.0.0. + """ + + def __init__(self, **data: Any) -> None: # noqa: ANN401 + """Initialize the BgpPeer class, emitting a depreciation warning.""" + warn( + message="BgpNeighbor model is deprecated and will be removed in ANTA v2.0.0. Use the BgpPeer model instead.", + category=DeprecationWarning, + stacklevel=2, + ) + super().__init__(**data) + + +class VxlanEndpoint(BaseModel): + """Model for a VXLAN endpoint.""" + + address: IPv4Address | MacAddress + """IPv4 or MAC address of the VXLAN endpoint.""" + vni: Vni + """VNI of the VXLAN endpoint.""" + + def __str__(self) -> str: + """Return a human-readable string representation of the VxlanEndpoint for reporting.""" + return f"Address: {self.address} VNI: {self.vni}" diff --git a/anta/input_models/routing/generic.py b/anta/input_models/routing/generic.py new file mode 100644 index 0000000..41c78a1 --- /dev/null +++ b/anta/input_models/routing/generic.py @@ -0,0 +1,28 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. 
+"""Module containing input models for generic routing tests.""" + +from __future__ import annotations + +from ipaddress import IPv4Network + +from pydantic import BaseModel, ConfigDict + +from anta.custom_types import IPv4RouteType + + +class IPv4Routes(BaseModel): + """Model for a list of IPV4 route entries.""" + + model_config = ConfigDict(extra="forbid") + prefix: IPv4Network + """The IPV4 network to validate the route type.""" + vrf: str = "default" + """VRF context. Defaults to `default` VRF.""" + route_type: IPv4RouteType + """List of IPV4 Route type to validate the valid rout type.""" + + def __str__(self) -> str: + """Return a human-readable string representation of the IPv4RouteType for reporting.""" + return f"Prefix: {self.prefix} VRF: {self.vrf}" diff --git a/anta/input_models/security.py b/anta/input_models/security.py new file mode 100644 index 0000000..373d897 --- /dev/null +++ b/anta/input_models/security.py @@ -0,0 +1,61 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Module containing input models for security tests.""" + +from __future__ import annotations + +from ipaddress import IPv4Address +from typing import Any +from warnings import warn + +from pydantic import BaseModel, ConfigDict + + +class IPSecPeer(BaseModel): + """IPSec (Internet Protocol Security) model represents the details of an IPv4 security peer.""" + + model_config = ConfigDict(extra="forbid") + peer: IPv4Address + """The IPv4 address of the security peer.""" + vrf: str = "default" + """VRF context. Defaults to `default`.""" + connections: list[IPSecConn] | None = None + """A list of IPv4 security connections associated with the peer. Defaults to None.""" + + def __str__(self) -> str: + """Return a string representation of the IPSecPeer model. Used in failure messages. + + Examples + -------- + - Peer: 1.1.1.1 VRF: default + """ + return f"Peer: {self.peer} VRF: {self.vrf}" + + +class IPSecConn(BaseModel): + """Details of an IPv4 security connection for a peer.""" + + model_config = ConfigDict(extra="forbid") + source_address: IPv4Address + """The IPv4 address of the source in the security connection.""" + destination_address: IPv4Address + """The IPv4 address of the destination in the security connection.""" + + +class IPSecPeers(IPSecPeer): # pragma: no cover + """Alias for the IPSecPeers model to maintain backward compatibility. + + When initialized, it will emit a deprecation warning and call the IPSecPeer model. + + TODO: Remove this class in ANTA v2.0.0. + """ + + def __init__(self, **data: Any) -> None: # noqa: ANN401 + """Initialize the IPSecPeer class, emitting a deprecation warning.""" + warn( + message="IPSecPeers model is deprecated and will be removed in ANTA v2.0.0. Use the IPSecPeer model instead.", + category=DeprecationWarning, + stacklevel=2, + ) + super().__init__(**data) diff --git a/anta/input_models/services.py b/anta/input_models/services.py new file mode 100644 index 0000000..596a3e3 --- /dev/null +++ b/anta/input_models/services.py @@ -0,0 +1,31 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. 
+"""Module containing input models for services tests.""" + +from __future__ import annotations + +from ipaddress import IPv4Address, IPv6Address + +from pydantic import BaseModel, ConfigDict, Field + + +class DnsServer(BaseModel): + """Model for a DNS server configuration.""" + + model_config = ConfigDict(extra="forbid") + server_address: IPv4Address | IPv6Address + """The IPv4 or IPv6 address of the DNS server.""" + vrf: str = "default" + """The VRF instance in which the DNS server resides. Defaults to 'default'.""" + priority: int = Field(ge=0, le=4) + """The priority level of the DNS server, ranging from 0 to 4. Lower values indicate a higher priority, with 0 being the highest and 4 the lowest.""" + + def __str__(self) -> str: + """Return a human-readable string representation of the DnsServer for reporting. + + Examples + -------- + Server 10.0.0.1 (VRF: default, Priority: 1) + """ + return f"Server {self.server_address} (VRF: {self.vrf}, Priority: {self.priority})" diff --git a/anta/input_models/stun.py b/anta/input_models/stun.py new file mode 100644 index 0000000..d1af405 --- /dev/null +++ b/anta/input_models/stun.py @@ -0,0 +1,35 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Module containing input models for services tests.""" + +from __future__ import annotations + +from ipaddress import IPv4Address + +from pydantic import BaseModel, ConfigDict + +from anta.custom_types import Port + + +class StunClientTranslation(BaseModel): + """STUN (Session Traversal Utilities for NAT) model represents the configuration of an IPv4-based client translations.""" + + model_config = ConfigDict(extra="forbid") + source_address: IPv4Address + """The IPv4 address of the STUN client""" + source_port: Port = 4500 + """The port number used by the STUN client for communication. Defaults to 4500.""" + public_address: IPv4Address | None = None + """The public-facing IPv4 address of the STUN client, discovered via the STUN server.""" + public_port: Port | None = None + """The public-facing port number of the STUN client, discovered via the STUN server.""" + + def __str__(self) -> str: + """Return a human-readable string representation of the StunClientTranslation for reporting. + + Examples + -------- + Client 10.0.0.1 Port: 4500 + """ + return f"Client {self.source_address} Port: {self.source_port}" diff --git a/anta/input_models/system.py b/anta/input_models/system.py new file mode 100644 index 0000000..7600d28 --- /dev/null +++ b/anta/input_models/system.py @@ -0,0 +1,31 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Module containing input models for system tests.""" + +from __future__ import annotations + +from ipaddress import IPv4Address + +from pydantic import BaseModel, ConfigDict, Field + +from anta.custom_types import Hostname + + +class NTPServer(BaseModel): + """Model for a NTP server.""" + + model_config = ConfigDict(extra="forbid") + server_address: Hostname | IPv4Address + """The NTP server address as an IPv4 address or hostname. The NTP server name defined in the running configuration + of the device may change during DNS resolution, which is not handled in ANTA. Please provide the DNS-resolved server name. 
+ For example, 'ntp.example.com' in the configuration might resolve to 'ntp3.example.com' in the device output.""" + preferred: bool = False + """Optional preferred for NTP server. If not provided, it defaults to `False`.""" + stratum: int = Field(ge=0, le=16) + """NTP stratum level (0 to 15) where 0 is the reference clock and 16 indicates unsynchronized. + Values should be between 0 and 15 for valid synchronization and 16 represents an out-of-sync state.""" + + def __str__(self) -> str: + """Representation of the NTPServer model.""" + return f"{self.server_address} (Preferred: {self.preferred}, Stratum: {self.stratum})" diff --git a/anta/models.py b/anta/models.py index b103a99..c69f78e 100644 --- a/anta/models.py +++ b/anta/models.py @@ -16,6 +16,7 @@ from typing import TYPE_CHECKING, Any, Callable, ClassVar, Literal, TypeVar from pydantic import BaseModel, ConfigDict, ValidationError, create_model from anta import GITHUB_SUGGESTION +from anta.constants import KNOWN_EOS_ERRORS from anta.custom_types import REGEXP_EOS_BLACKLIST_CMDS, Revision from anta.logger import anta_log_exception, exc_to_str from anta.result_manager.models import AntaTestStatus, TestResult @@ -240,7 +241,12 @@ class AntaCommand(BaseModel): @property def supported(self) -> bool: - """Return True if the command is supported on the device hardware platform, False otherwise. + """Indicates if the command is supported on the device. + + Returns + ------- + bool + True if the command is supported on the device hardware platform, False otherwise. Raises ------ @@ -250,8 +256,22 @@ class AntaCommand(BaseModel): """ if not self.collected and not self.error: msg = f"Command '{self.command}' has not been collected and has not returned an error. Call AntaDevice.collect()." + + raise RuntimeError(msg) + return all("not supported on this hardware platform" not in e for e in self.errors) + + @property + def returned_known_eos_error(self) -> bool: + """Return True if the command returned a known_eos_error on the device, False otherwise. + + RuntimeError + If the command has not been collected and has not returned an error. + AntaDevice.collect() must be called before this property. + """ + if not self.collected and not self.error: + msg = f"Command '{self.command}' has not been collected and has not returned an error. Call AntaDevice.collect()." raise RuntimeError(msg) - return not any("not supported on this hardware platform" in e for e in self.errors) + return any(any(re.match(pattern, e) for e in self.errors) for pattern in KNOWN_EOS_ERRORS) class AntaTemplateRenderError(RuntimeError): @@ -284,8 +304,7 @@ class AntaTest(ABC): The following is an example of an AntaTest subclass implementation: ```python class VerifyReachability(AntaTest): - name = "VerifyReachability" - description = "Test the network reachability to one or many destination IP(s)." + '''Test the network reachability to one or many destination IP(s).''' categories = ["connectivity"] commands = [AntaTemplate(template="ping vrf {vrf} {dst} source {src} repeat 2")] @@ -326,12 +345,19 @@ class AntaTest(ABC): Python logger for this test instance. 
""" - # Mandatory class attributes - # TODO: find a way to tell mypy these are mandatory for child classes - maybe Protocol + # Optional class attributes name: ClassVar[str] description: ClassVar[str] + __removal_in_version: ClassVar[str] + """Internal class variable set by the `deprecated_test_class` decorator.""" + + # Mandatory class attributes + # TODO: find a way to tell mypy these are mandatory for child classes + # follow this https://discuss.python.org/t/provide-a-canonical-way-to-declare-an-abstract-class-variable/69416 + # for now only enforced at runtime with __init_subclass__ categories: ClassVar[list[str]] commands: ClassVar[list[AntaTemplate | AntaCommand]] + # Class attributes to handle the progress bar of ANTA CLI progress: Progress | None = None nrfu_task: TaskID | None = None @@ -505,12 +531,19 @@ class AntaTest(ABC): self.instance_commands[index].output = data def __init_subclass__(cls) -> None: - """Verify that the mandatory class attributes are defined.""" - mandatory_attributes = ["name", "description", "categories", "commands"] - for attr in mandatory_attributes: - if not hasattr(cls, attr): - msg = f"Class {cls.__module__}.{cls.__name__} is missing required class attribute {attr}" - raise NotImplementedError(msg) + """Verify that the mandatory class attributes are defined and set name and description if not set.""" + mandatory_attributes = ["categories", "commands"] + if missing_attrs := [attr for attr in mandatory_attributes if not hasattr(cls, attr)]: + msg = f"Class {cls.__module__}.{cls.__name__} is missing required class attribute(s): {', '.join(missing_attrs)}" + raise AttributeError(msg) + + cls.name = getattr(cls, "name", cls.__name__) + if not hasattr(cls, "description"): + if not cls.__doc__ or cls.__doc__.strip() == "": + # No doctsring or empty doctsring - raise + msg = f"Cannot set the description for class {cls.name}, either set it in the class definition or add a docstring to the class." + raise AttributeError(msg) + cls.description = cls.__doc__.split(sep="\n", maxsplit=1)[0] @property def module(self) -> str: @@ -617,14 +650,9 @@ class AntaTest(ABC): AntaTest.update_progress() return self.result - if cmds := self.failed_commands: - unsupported_commands = [f"'{c.command}' is not supported on {self.device.hw_model}" for c in cmds if not c.supported] - if unsupported_commands: - msg = f"Test {self.name} has been skipped because it is not supported on {self.device.hw_model}: {GITHUB_SUGGESTION}" - self.logger.warning(msg) - self.result.is_skipped("\n".join(unsupported_commands)) - else: - self.result.is_error(message="\n".join([f"{c.command} has failed: {', '.join(c.errors)}" for c in cmds])) + if self.failed_commands: + self._handle_failed_commands() + AntaTest.update_progress() return self.result @@ -644,6 +672,28 @@ class AntaTest(ABC): return wrapper + def _handle_failed_commands(self) -> None: + """Handle failed commands inside a test. 
+ + There can be 3 types: + * unsupported on hardware commands which set the test status to 'skipped' + * known EOS error which set the test status to 'failure' + * unknown failure which set the test status to 'error' + """ + cmds = self.failed_commands + unsupported_commands = [f"'{c.command}' is not supported on {self.device.hw_model}" for c in cmds if not c.supported] + if unsupported_commands: + msg = f"Test {self.name} has been skipped because it is not supported on {self.device.hw_model}: {GITHUB_SUGGESTION}" + self.logger.warning(msg) + self.result.is_skipped("\n".join(unsupported_commands)) + return + returned_known_eos_error = [f"'{c.command}' failed on {self.device.name}: {', '.join(c.errors)}" for c in cmds if c.returned_known_eos_error] + if returned_known_eos_error: + self.result.is_failure("\n".join(returned_known_eos_error)) + return + + self.result.is_error(message="\n".join([f"{c.command} has failed: {', '.join(c.errors)}" for c in cmds])) + @classmethod def update_progress(cls: type[AntaTest]) -> None: """Update progress bar for all AntaTest objects if it exists.""" diff --git a/anta/py.typed b/anta/py.typed new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/anta/py.typed diff --git a/anta/reporter/csv_reporter.py b/anta/reporter/csv_reporter.py index 33c50a8..3f55923 100644 --- a/anta/reporter/csv_reporter.py +++ b/anta/reporter/csv_reporter.py @@ -58,8 +58,7 @@ class ReportCsv: @classmethod def convert_to_list(cls, result: TestResult) -> list[str]: - """ - Convert a TestResult into a list of string for creating file content. + """Convert a TestResult into a list of string for creating file content. Parameters ---------- @@ -108,7 +107,7 @@ class ReportCsv: ] try: - with csv_filename.open(mode="w", encoding="utf-8") as csvfile: + with csv_filename.open(mode="w", encoding="utf-8", newline="") as csvfile: csvwriter = csv.writer( csvfile, delimiter=",", diff --git a/anta/reporter/md_reporter.py b/anta/reporter/md_reporter.py index be3e86f..94c4a86 100644 --- a/anta/reporter/md_reporter.py +++ b/anta/reporter/md_reporter.py @@ -8,7 +8,7 @@ from __future__ import annotations import logging import re from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, ClassVar +from typing import TYPE_CHECKING, ClassVar, TextIO from anta.constants import MD_REPORT_TOC from anta.logger import anta_log_exception @@ -17,7 +17,6 @@ from anta.tools import convert_categories if TYPE_CHECKING: from collections.abc import Generator - from io import TextIOWrapper from pathlib import Path from anta.result_manager import ResultManager @@ -72,7 +71,7 @@ class MDReportBase(ABC): to generate and write content to the provided markdown file. """ - def __init__(self, mdfile: TextIOWrapper, results: ResultManager) -> None: + def __init__(self, mdfile: TextIO, results: ResultManager) -> None: """Initialize the MDReportBase with an open markdown file object to write to and a ResultManager instance. 
Parameters diff --git a/anta/result_manager/__init__.py b/anta/result_manager/__init__.py index 055a3a1..b5b0f39 100644 --- a/anta/result_manager/__init__.py +++ b/anta/result_manager/__init__.py @@ -6,15 +6,20 @@ from __future__ import annotations import json +import logging from collections import defaultdict from functools import cached_property from itertools import chain +from typing import Any from anta.result_manager.models import AntaTestStatus, TestResult from .models import CategoryStats, DeviceStats, TestStats +logger = logging.getLogger(__name__) + +# pylint: disable=too-many-instance-attributes class ResultManager: """Helper to manage Test Results and generate reports. @@ -68,6 +73,15 @@ class ResultManager: ] """ + _result_entries: list[TestResult] + status: AntaTestStatus + error_status: bool + + _device_stats: defaultdict[str, DeviceStats] + _category_stats: defaultdict[str, CategoryStats] + _test_stats: defaultdict[str, TestStats] + _stats_in_sync: bool + def __init__(self) -> None: """Class constructor. @@ -89,13 +103,16 @@ class ResultManager: If the status of the added test is error, the status is untouched and the error_status is set to True. """ + self.reset() + + def reset(self) -> None: + """Create or reset the attributes of the ResultManager instance.""" self._result_entries: list[TestResult] = [] self.status: AntaTestStatus = AntaTestStatus.UNSET self.error_status = False - self.device_stats: defaultdict[str, DeviceStats] = defaultdict(DeviceStats) - self.category_stats: defaultdict[str, CategoryStats] = defaultdict(CategoryStats) - self.test_stats: defaultdict[str, TestStats] = defaultdict(TestStats) + # Initialize the statistics attributes + self._reset_stats() def __len__(self) -> int: """Implement __len__ method to count number of results.""" @@ -110,26 +127,43 @@ class ResultManager: def results(self, value: list[TestResult]) -> None: """Set the list of TestResult.""" # When setting the results, we need to reset the state of the current instance - self._result_entries = [] - self.status = AntaTestStatus.UNSET - self.error_status = False - - # Also reset the stats attributes - self.device_stats = defaultdict(DeviceStats) - self.category_stats = defaultdict(CategoryStats) - self.test_stats = defaultdict(TestStats) + self.reset() for result in value: self.add(result) @property + def dump(self) -> list[dict[str, Any]]: + """Get a list of dictionary of the results.""" + return [result.model_dump() for result in self._result_entries] + + @property def json(self) -> str: """Get a JSON representation of the results.""" - return json.dumps([result.model_dump() for result in self._result_entries], indent=4) + return json.dumps(self.dump, indent=4) + + @property + def device_stats(self) -> defaultdict[str, DeviceStats]: + """Get the device statistics.""" + self._ensure_stats_in_sync() + return self._device_stats + + @property + def category_stats(self) -> defaultdict[str, CategoryStats]: + """Get the category statistics.""" + self._ensure_stats_in_sync() + return self._category_stats + + @property + def test_stats(self) -> defaultdict[str, TestStats]: + """Get the test statistics.""" + self._ensure_stats_in_sync() + return self._test_stats @property def sorted_category_stats(self) -> dict[str, CategoryStats]: """A property that returns the category_stats dictionary sorted by key name.""" + self._ensure_stats_in_sync() return dict(sorted(self.category_stats.items())) @cached_property @@ -148,11 +182,18 @@ class ResultManager: if test_status == "error": self.error_status 
= True return - if self.status == "unset" or self.status == "skipped" and test_status in {"success", "failure"}: + if self.status == "unset" or (self.status == "skipped" and test_status in {"success", "failure"}): self.status = test_status elif self.status == "success" and test_status == "failure": self.status = AntaTestStatus.FAILURE + def _reset_stats(self) -> None: + """Create or reset the statistics attributes.""" + self._device_stats = defaultdict(DeviceStats) + self._category_stats = defaultdict(CategoryStats) + self._test_stats = defaultdict(TestStats) + self._stats_in_sync = False + def _update_stats(self, result: TestResult) -> None: """Update the statistics based on the test result. @@ -164,7 +205,7 @@ class ResultManager: count_attr = f"tests_{result.result}_count" # Update device stats - device_stats: DeviceStats = self.device_stats[result.name] + device_stats: DeviceStats = self._device_stats[result.name] setattr(device_stats, count_attr, getattr(device_stats, count_attr) + 1) if result.result in ("failure", "error"): device_stats.tests_failure.add(result.test) @@ -174,16 +215,34 @@ class ResultManager: # Update category stats for category in result.categories: - category_stats: CategoryStats = self.category_stats[category] + category_stats: CategoryStats = self._category_stats[category] setattr(category_stats, count_attr, getattr(category_stats, count_attr) + 1) # Update test stats count_attr = f"devices_{result.result}_count" - test_stats: TestStats = self.test_stats[result.test] + test_stats: TestStats = self._test_stats[result.test] setattr(test_stats, count_attr, getattr(test_stats, count_attr) + 1) if result.result in ("failure", "error"): test_stats.devices_failure.add(result.name) + def _compute_stats(self) -> None: + """Compute all statistics from the current results.""" + logger.info("Computing statistics for all results.") + + # Reset all stats + self._reset_stats() + + # Recompute stats for all results + for result in self._result_entries: + self._update_stats(result) + + self._stats_in_sync = True + + def _ensure_stats_in_sync(self) -> None: + """Ensure statistics are in sync with current results.""" + if not self._stats_in_sync: + self._compute_stats() + def add(self, result: TestResult) -> None: """Add a result to the ResultManager instance. @@ -197,7 +256,7 @@ class ResultManager: """ self._result_entries.append(result) self._update_status(result.result) - self._update_stats(result) + self._stats_in_sync = False # Every time a new result is added, we need to clear the cached property self.__dict__.pop("results_by_status", None) diff --git a/anta/runner.py b/anta/runner.py index 0147c3c..4c6da92 100644 --- a/anta/runner.py +++ b/anta/runner.py @@ -8,7 +8,7 @@ from __future__ import annotations import asyncio import logging import os -import resource +import sys from collections import defaultdict from typing import TYPE_CHECKING, Any @@ -26,35 +26,38 @@ if TYPE_CHECKING: from anta.result_manager import ResultManager from anta.result_manager.models import TestResult -logger = logging.getLogger(__name__) +if os.name == "posix": + import resource -DEFAULT_NOFILE = 16384 + DEFAULT_NOFILE = 16384 + def adjust_rlimit_nofile() -> tuple[int, int]: + """Adjust the maximum number of open file descriptors for the ANTA process. -def adjust_rlimit_nofile() -> tuple[int, int]: - """Adjust the maximum number of open file descriptors for the ANTA process. + The limit is set to the lower of the current hard limit and the value of the ANTA_NOFILE environment variable. 
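Context for the ResultManager hunk above: statistics are no longer updated eagerly on every `add()`; instead a `_stats_in_sync` flag marks them stale, and they are recomputed lazily the first time a stats property is read. A minimal, standalone sketch of that dirty-flag pattern follows; `LazyStatsManager` and `CategoryStats` are illustrative stand-ins, not ANTA's real classes.

```python
from collections import defaultdict
from dataclasses import dataclass


@dataclass
class CategoryStats:
    """Simplified per-category counters (illustrative only)."""

    success: int = 0
    failure: int = 0


class LazyStatsManager:
    """Sketch of the dirty-flag pattern adopted by ResultManager."""

    def __init__(self) -> None:
        self._results: list[tuple[str, str]] = []  # (category, status) pairs
        self._category_stats: defaultdict[str, CategoryStats] = defaultdict(CategoryStats)
        self._stats_in_sync = False

    def add(self, category: str, status: str) -> None:
        # Adding a result only marks the cached stats as stale.
        self._results.append((category, status))
        self._stats_in_sync = False

    @property
    def category_stats(self) -> "defaultdict[str, CategoryStats]":
        # Stats are recomputed at most once per batch of additions.
        if not self._stats_in_sync:
            self._compute_stats()
        return self._category_stats

    def _compute_stats(self) -> None:
        self._category_stats = defaultdict(CategoryStats)
        for category, status in self._results:
            stats = self._category_stats[category]
            setattr(stats, status, getattr(stats, status) + 1)
        self._stats_in_sync = True


manager = LazyStatsManager()
manager.add("bgp", "success")
manager.add("bgp", "failure")
print(manager.category_stats["bgp"])  # CategoryStats(success=1, failure=1)
```

The real class additionally pops the cached `results_by_status` property on every `add()`, so both caches stay consistent with the result list.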
- The limit is set to the lower of the current hard limit and the value of the ANTA_NOFILE environment variable. + If the `ANTA_NOFILE` environment variable is not set or is invalid, `DEFAULT_NOFILE` is used. - If the `ANTA_NOFILE` environment variable is not set or is invalid, `DEFAULT_NOFILE` is used. + Returns + ------- + tuple[int, int] + The new soft and hard limits for open file descriptors. + """ + try: + nofile = int(os.environ.get("ANTA_NOFILE", DEFAULT_NOFILE)) + except ValueError as exception: + logger.warning("The ANTA_NOFILE environment variable value is invalid: %s\nDefault to %s.", exc_to_str(exception), DEFAULT_NOFILE) + nofile = DEFAULT_NOFILE + + limits = resource.getrlimit(resource.RLIMIT_NOFILE) + logger.debug("Initial limit numbers for open file descriptors for the current ANTA process: Soft Limit: %s | Hard Limit: %s", limits[0], limits[1]) + nofile = min(limits[1], nofile) + logger.debug("Setting soft limit for open file descriptors for the current ANTA process to %s", nofile) + resource.setrlimit(resource.RLIMIT_NOFILE, (nofile, limits[1])) + return resource.getrlimit(resource.RLIMIT_NOFILE) - Returns - ------- - tuple[int, int] - The new soft and hard limits for open file descriptors. - """ - try: - nofile = int(os.environ.get("ANTA_NOFILE", DEFAULT_NOFILE)) - except ValueError as exception: - logger.warning("The ANTA_NOFILE environment variable value is invalid: %s\nDefault to %s.", exc_to_str(exception), DEFAULT_NOFILE) - nofile = DEFAULT_NOFILE - limits = resource.getrlimit(resource.RLIMIT_NOFILE) - logger.debug("Initial limit numbers for open file descriptors for the current ANTA process: Soft Limit: %s | Hard Limit: %s", limits[0], limits[1]) - nofile = min(limits[1], nofile) - logger.debug("Setting soft limit for open file descriptors for the current ANTA process to %s", nofile) - resource.setrlimit(resource.RLIMIT_NOFILE, (nofile, limits[1])) - return resource.getrlimit(resource.RLIMIT_NOFILE) +logger = logging.getLogger(__name__) def log_cache_statistics(devices: list[AntaDevice]) -> None: @@ -146,22 +149,29 @@ def prepare_tests( # Using a set to avoid inserting duplicate tests device_to_tests: defaultdict[AntaDevice, set[AntaTestDefinition]] = defaultdict(set) + total_test_count = 0 + # Create the device to tests mapping from the tags for device in inventory.devices: if tags: - if not any(tag in device.tags for tag in tags): + # If there are CLI tags, execute tests with matching tags for this device + if not (matching_tags := tags.intersection(device.tags)): # The device does not have any selected tag, skipping continue + device_to_tests[device].update(catalog.get_tests_by_tags(matching_tags)) else: # If there is no CLI tags, execute all tests that do not have any tags device_to_tests[device].update(catalog.tag_to_tests[None]) - # Add the tests with matching tags from device tags - device_to_tests[device].update(catalog.get_tests_by_tags(device.tags)) + # Then add the tests with matching tags from device tags + device_to_tests[device].update(catalog.get_tests_by_tags(device.tags)) + + total_test_count += len(device_to_tests[device]) - if len(device_to_tests.values()) == 0: + if total_test_count == 0: msg = ( - f"There are no tests{f' matching the tags {tags} ' if tags else ' '}to run in the current test catalog and device inventory, please verify your inputs." + f"There are no tests{f' matching the tags {tags} ' if tags else ' '}to run in the current " + "test catalog and device inventory, please verify your inputs." 
) logger.warning(msg) return None @@ -169,7 +179,7 @@ def prepare_tests( return device_to_tests -def get_coroutines(selected_tests: defaultdict[AntaDevice, set[AntaTestDefinition]], manager: ResultManager) -> list[Coroutine[Any, Any, TestResult]]: +def get_coroutines(selected_tests: defaultdict[AntaDevice, set[AntaTestDefinition]], manager: ResultManager | None = None) -> list[Coroutine[Any, Any, TestResult]]: """Get the coroutines for the ANTA run. Parameters @@ -177,7 +187,7 @@ def get_coroutines(selected_tests: defaultdict[AntaDevice, set[AntaTestDefinitio selected_tests A mapping of devices to the tests to run. The selected tests are generated by the `prepare_tests` function. manager - A ResultManager + An optional ResultManager object to pre-populate with the test results. Used in dry-run mode. Returns ------- @@ -189,7 +199,8 @@ def get_coroutines(selected_tests: defaultdict[AntaDevice, set[AntaTestDefinitio for test in test_definitions: try: test_instance = test.test(device=device, inputs=test.inputs) - manager.add(test_instance.result) + if manager is not None: + manager.add(test_instance.result) coros.append(test_instance.test()) except Exception as e: # noqa: PERF203, BLE001 # An AntaTest instance is potentially user-defined code. @@ -205,7 +216,7 @@ def get_coroutines(selected_tests: defaultdict[AntaDevice, set[AntaTestDefinitio @cprofile() -async def main( # noqa: PLR0913 +async def main( manager: ResultManager, inventory: AntaInventory, catalog: AntaCatalog, @@ -240,9 +251,6 @@ async def main( # noqa: PLR0913 dry_run Build the list of coroutine to run and stop before test execution. """ - # Adjust the maximum number of open file descriptors for the ANTA process - limits = adjust_rlimit_nofile() - if not catalog.tests: logger.info("The list of tests is empty, exiting") return @@ -263,10 +271,19 @@ async def main( # noqa: PLR0913 "--- ANTA NRFU Run Information ---\n" f"Number of devices: {len(inventory)} ({len(selected_inventory)} established)\n" f"Total number of selected tests: {final_tests_count}\n" - f"Maximum number of open file descriptors for the current ANTA process: {limits[0]}\n" - "---------------------------------" ) + if os.name == "posix": + # Adjust the maximum number of open file descriptors for the ANTA process + limits = adjust_rlimit_nofile() + run_info += f"Maximum number of open file descriptors for the current ANTA process: {limits[0]}\n" + else: + # Running on non-Posix system, cannot manage the resource. + limits = (sys.maxsize, sys.maxsize) + run_info += "Running on a non-POSIX system, cannot adjust the maximum number of file descriptors.\n" + + run_info += "---------------------------------" + logger.info(run_info) if final_tests_count > limits[0]: @@ -276,7 +293,7 @@ async def main( # noqa: PLR0913 "Please consult the ANTA FAQ." 
) - coroutines = get_coroutines(selected_tests, manager) + coroutines = get_coroutines(selected_tests, manager if dry_run else None) if dry_run: logger.info("Dry-run mode, exiting before running the tests.") @@ -288,6 +305,8 @@ async def main( # noqa: PLR0913 AntaTest.nrfu_task = AntaTest.progress.add_task("Running NRFU Tests...", total=len(coroutines)) with Catchtime(logger=logger, message="Running ANTA tests"): - await asyncio.gather(*coroutines) + results = await asyncio.gather(*coroutines) + for result in results: + manager.add(result) log_cache_statistics(selected_inventory.devices) diff --git a/anta/tests/aaa.py b/anta/tests/aaa.py index d6d0689..019bf1a 100644 --- a/anta/tests/aaa.py +++ b/anta/tests/aaa.py @@ -35,8 +35,6 @@ class VerifyTacacsSourceIntf(AntaTest): ``` """ - name = "VerifyTacacsSourceIntf" - description = "Verifies TACACS source-interface for a specified VRF." categories: ClassVar[list[str]] = ["aaa"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show tacacs", revision=1)] @@ -81,8 +79,6 @@ class VerifyTacacsServers(AntaTest): ``` """ - name = "VerifyTacacsServers" - description = "Verifies TACACS servers are configured for a specified VRF." categories: ClassVar[list[str]] = ["aaa"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show tacacs", revision=1)] @@ -134,8 +130,6 @@ class VerifyTacacsServerGroups(AntaTest): ``` """ - name = "VerifyTacacsServerGroups" - description = "Verifies if the provided TACACS server group(s) are configured." categories: ClassVar[list[str]] = ["aaa"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show tacacs", revision=1)] @@ -173,19 +167,17 @@ class VerifyAuthenMethods(AntaTest): ```yaml anta.tests.aaa: - VerifyAuthenMethods: - methods: - - local - - none - - logging - types: - - login - - enable - - dot1x + methods: + - local + - none + - logging + types: + - login + - enable + - dot1x ``` """ - name = "VerifyAuthenMethods" - description = "Verifies the AAA authentication method lists for different authentication types (login, enable, dot1x)." categories: ClassVar[list[str]] = ["aaa"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show aaa methods authentication", revision=1)] @@ -245,8 +237,6 @@ class VerifyAuthzMethods(AntaTest): ``` """ - name = "VerifyAuthzMethods" - description = "Verifies the AAA authorization method lists for different authorization types (commands, exec)." categories: ClassVar[list[str]] = ["aaa"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show aaa methods authorization", revision=1)] @@ -301,8 +291,6 @@ class VerifyAcctDefaultMethods(AntaTest): ``` """ - name = "VerifyAcctDefaultMethods" - description = "Verifies the AAA accounting default method lists for different accounting types (system, exec, commands, dot1x)." categories: ClassVar[list[str]] = ["aaa"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show aaa methods accounting", revision=1)] @@ -364,8 +352,6 @@ class VerifyAcctConsoleMethods(AntaTest): ``` """ - name = "VerifyAcctConsoleMethods" - description = "Verifies the AAA accounting console method lists for different accounting types (system, exec, commands, dot1x)." 
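A note on the runner.py rework above: `asyncio.gather` now returns the test results, which are added to the `ResultManager` only after the run completes, and `get_coroutines` pre-populates a manager only when one is passed (the dry-run path). A small, self-contained asyncio sketch of that flow, using toy coroutines rather than ANTA's API:

```python
from __future__ import annotations

import asyncio


async def fake_test(name: str) -> str:
    """Stand-in for an AntaTest coroutine returning its result."""
    await asyncio.sleep(0)
    return f"{name}: success"


def get_coroutines(names: list[str], manager: list[str] | None = None) -> list:
    """Build the coroutines; pre-populate a manager only when one is given (dry-run style)."""
    coros = []
    for name in names:
        if manager is not None:
            manager.append(f"{name}: unset")  # placeholder result, dry-run only
        coros.append(fake_test(name))
    return coros


async def main(dry_run: bool = False) -> list[str]:
    manager: list[str] = []
    coros = get_coroutines(["VerifyA", "VerifyB"], manager if dry_run else None)
    if dry_run:
        for coro in coros:
            coro.close()  # never awaited in dry-run; close to avoid warnings
        return manager
    results = await asyncio.gather(*coros)
    manager.extend(results)  # results are added once the whole run completes
    return manager


print(asyncio.run(main()))              # ['VerifyA: success', 'VerifyB: success']
print(asyncio.run(main(dry_run=True)))  # ['VerifyA: unset', 'VerifyB: unset']
```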
categories: ClassVar[list[str]] = ["aaa"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show aaa methods accounting", revision=1)] diff --git a/anta/tests/avt.py b/anta/tests/avt.py index d72296a..b0f1a46 100644 --- a/anta/tests/avt.py +++ b/anta/tests/avt.py @@ -7,19 +7,16 @@ # mypy: disable-error-code=attr-defined from __future__ import annotations -from ipaddress import IPv4Address from typing import ClassVar -from pydantic import BaseModel - from anta.decorators import skip_on_platforms +from anta.input_models.avt import AVTPath from anta.models import AntaCommand, AntaTemplate, AntaTest from anta.tools import get_value class VerifyAVTPathHealth(AntaTest): - """ - Verifies the status of all Adaptive Virtual Topology (AVT) paths for all VRFs. + """Verifies the status of all Adaptive Virtual Topology (AVT) paths for all VRFs. Expected Results ---------------- @@ -34,7 +31,6 @@ class VerifyAVTPathHealth(AntaTest): ``` """ - name = "VerifyAVTPathHealth" description = "Verifies the status of all AVT paths for all VRFs." categories: ClassVar[list[str]] = ["avt"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show adaptive-virtual-topology path")] @@ -73,15 +69,22 @@ class VerifyAVTPathHealth(AntaTest): class VerifyAVTSpecificPath(AntaTest): - """ - Verifies the status and type of an Adaptive Virtual Topology (AVT) path for a specified VRF. + """Verifies the Adaptive Virtual Topology (AVT) path. + + This test performs the following checks for each specified LLDP neighbor: + + 1. Confirming that the AVT paths are associated with the specified VRF. + 2. Verifying that each AVT path is active and valid. + 3. Ensuring that the AVT path matches the specified type (direct/multihop) if provided. Expected Results ---------------- - * Success: The test will pass if all AVT paths for the specified VRF are active, valid, and match the specified type (direct/multihop) if provided. - If multiple paths are configured, the test will pass only if all the paths are valid and active. - * Failure: The test will fail if no AVT paths are configured for the specified VRF, or if any configured path is not active, valid, - or does not match the specified type. + * Success: The test will pass if all of the following conditions are met: + - All AVT paths for the specified VRF are active, valid, and match the specified path type (direct/multihop), if provided. + - If multiple paths are configured, the test will pass only if all paths meet these criteria. + * Failure: The test will fail if any of the following conditions are met: + - No AVT paths are configured for the specified VRF. + - Any configured path is inactive, invalid, or does not match the specified type. Examples -------- @@ -97,36 +100,16 @@ class VerifyAVTSpecificPath(AntaTest): ``` """ - name = "VerifyAVTSpecificPath" - description = "Verifies the status and type of an AVT path for a specified VRF." 
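The `- name = ...` / `- description = ...` removals above, repeated across the rest of this diff, rely on the `__init_subclass__` change in `anta/models.py` earlier in this patch: `categories` and `commands` are enforced at class-creation time, `name` falls back to the class name, and `description` falls back to the first line of the docstring. A standalone sketch of that mechanism with a simplified base class (not the real `AntaTest`):

```python
class TestBase:
    """Minimal stand-in for the AntaTest base class."""

    def __init_subclass__(cls) -> None:
        # Enforce the truly mandatory attributes when the subclass is created.
        missing = [attr for attr in ("categories", "commands") if not hasattr(cls, attr)]
        if missing:
            msg = f"{cls.__name__} is missing required class attribute(s): {', '.join(missing)}"
            raise AttributeError(msg)
        # Derive name and description when the subclass does not set them.
        cls.name = getattr(cls, "name", cls.__name__)
        if not hasattr(cls, "description"):
            if not cls.__doc__ or not cls.__doc__.strip():
                msg = f"Cannot set the description for class {cls.name}: add a docstring or set it explicitly."
                raise AttributeError(msg)
            cls.description = cls.__doc__.split("\n", maxsplit=1)[0]


class VerifySomething(TestBase):
    """Verifies something on the device."""

    categories = ["demo"]
    commands = []


print(VerifySomething.name)         # VerifySomething
print(VerifySomething.description)  # Verifies something on the device.
```

Removing `categories` from the subclass above would raise `AttributeError` at import time, which is the runtime enforcement the new TODO comment in models.py refers to.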
categories: ClassVar[list[str]] = ["avt"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ - AntaTemplate(template="show adaptive-virtual-topology path vrf {vrf} avt {avt_name} destination {destination}") - ] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show adaptive-virtual-topology path", revision=1)] class Input(AntaTest.Input): """Input model for the VerifyAVTSpecificPath test.""" - avt_paths: list[AVTPaths] + avt_paths: list[AVTPath] """List of AVT paths to verify.""" - - class AVTPaths(BaseModel): - """Model for the details of AVT paths.""" - - vrf: str = "default" - """The VRF for the AVT path. Defaults to 'default' if not provided.""" - avt_name: str - """Name of the adaptive virtual topology.""" - destination: IPv4Address - """The IPv4 address of the AVT peer.""" - next_hop: IPv4Address - """The IPv4 address of the next hop for the AVT peer.""" - path_type: str | None = None - """The type of the AVT path. If not provided, both 'direct' and 'multihop' paths are considered.""" - - def render(self, template: AntaTemplate) -> list[AntaCommand]: - """Render the template for each input AVT path/peer.""" - return [template.render(vrf=path.vrf, avt_name=path.avt_name, destination=path.destination) for path in self.inputs.avt_paths] + AVTPaths: ClassVar[type[AVTPath]] = AVTPath + """To maintain backward compatibility.""" @skip_on_platforms(["cEOSLab", "vEOS-lab"]) @AntaTest.anta_test @@ -135,64 +118,43 @@ class VerifyAVTSpecificPath(AntaTest): # Assume the test is successful until a failure is detected self.result.is_success() - # Process each command in the instance - for command, input_avt in zip(self.instance_commands, self.inputs.avt_paths): - # Extract the command output and parameters - vrf = command.params.vrf - avt_name = command.params.avt_name - peer = str(command.params.destination) - - command_output = command.json_output.get("vrfs", {}) - - # If no AVT is configured, mark the test as failed and skip to the next command - if not command_output: - self.result.is_failure(f"AVT configuration for peer '{peer}' under topology '{avt_name}' in VRF '{vrf}' is not found.") - continue - - # Extract the AVT paths - avt_paths = get_value(command_output, f"{vrf}.avts.{avt_name}.avtPaths") - next_hop, input_path_type = str(input_avt.next_hop), input_avt.path_type + command_output = self.instance_commands[0].json_output + for avt_path in self.inputs.avt_paths: + if (path_output := get_value(command_output, f"vrfs.{avt_path.vrf}.avts.{avt_path.avt_name}.avtPaths")) is None: + self.result.is_failure(f"{avt_path} - No AVT path configured") + return - nexthop_path_found = path_type_found = False + path_found = path_type_found = False # Check each AVT path - for path, path_data in avt_paths.items(): - # If the path does not match the expected next hop, skip to the next path - if path_data.get("nexthopAddr") != next_hop: - continue - - nexthop_path_found = True + for path, path_data in path_output.items(): + dest = path_data.get("destination") + nexthop = path_data.get("nexthopAddr") path_type = "direct" if get_value(path_data, "flags.directPath") else "multihop" - # If the path type does not match the expected path type, skip to the next path - if input_path_type and path_type != input_path_type: - continue - - path_type_found = True - valid = get_value(path_data, "flags.valid") - active = get_value(path_data, "flags.active") + if not avt_path.path_type: + path_found = all([dest == str(avt_path.destination), nexthop == str(avt_path.next_hop)]) - # Check 
the path status and type against the expected values - if not all([valid, active]): - failure_reasons = [] - if not get_value(path_data, "flags.active"): - failure_reasons.append("inactive") - if not get_value(path_data, "flags.valid"): - failure_reasons.append("invalid") - # Construct the failure message prefix - failed_log = f"AVT path '{path}' for topology '{avt_name}' in VRF '{vrf}'" - self.result.is_failure(f"{failed_log} is {', '.join(failure_reasons)}.") + else: + path_type_found = all([dest == str(avt_path.destination), nexthop == str(avt_path.next_hop), path_type == avt_path.path_type]) + if path_type_found: + path_found = True + # Check the path status and type against the expected values + valid = get_value(path_data, "flags.valid") + active = get_value(path_data, "flags.active") + if not all([valid, active]): + self.result.is_failure(f"{avt_path} - Incorrect path {path} - Valid: {valid}, Active: {active}") - # If no matching next hop or path type was found, mark the test as failed - if not nexthop_path_found or not path_type_found: - self.result.is_failure( - f"No '{input_path_type}' path found with next-hop address '{next_hop}' for AVT peer '{peer}' under topology '{avt_name}' in VRF '{vrf}'." - ) + # If no matching path found, mark the test as failed + if not path_found: + if avt_path.path_type and not path_type_found: + self.result.is_failure(f"{avt_path} Path Type: {avt_path.path_type} - Path not found") + else: + self.result.is_failure(f"{avt_path} - Path not found") class VerifyAVTRole(AntaTest): - """ - Verifies the Adaptive Virtual Topology (AVT) role of a device. + """Verifies the Adaptive Virtual Topology (AVT) role of a device. Expected Results ---------------- @@ -208,7 +170,6 @@ class VerifyAVTRole(AntaTest): ``` """ - name = "VerifyAVTRole" description = "Verifies the AVT role of a device." categories: ClassVar[list[str]] = ["avt"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show adaptive-virtual-topology path")] diff --git a/anta/tests/bfd.py b/anta/tests/bfd.py index f42d80d..ba27f94 100644 --- a/anta/tests/bfd.py +++ b/anta/tests/bfd.py @@ -8,12 +8,11 @@ from __future__ import annotations from datetime import datetime, timezone -from ipaddress import IPv4Address -from typing import TYPE_CHECKING, Any, ClassVar +from typing import TYPE_CHECKING, ClassVar -from pydantic import BaseModel, Field +from pydantic import Field -from anta.custom_types import BfdInterval, BfdMultiplier, BfdProtocol +from anta.input_models.bfd import BFDPeer from anta.models import AntaCommand, AntaTest from anta.tools import get_value @@ -22,12 +21,24 @@ if TYPE_CHECKING: class VerifyBFDSpecificPeers(AntaTest): - """Verifies if the IPv4 BFD peer's sessions are UP and remote disc is non-zero in the specified VRF. + """Verifies the state of IPv4 BFD peer sessions. + + This test performs the following checks for each specified peer: + + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BFD configuration. + 3. For each specified BFD peer: + - Validates that the state is `up` + - Confirms that the remote discriminator identifier (disc) is non-zero. Expected Results ---------------- - * Success: The test will pass if IPv4 BFD peers are up and remote disc is non-zero in the specified VRF. - * Failure: The test will fail if IPv4 BFD peers are not found, the status is not UP or remote disc is zero in the specified VRF. 
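The VerifyAVTSpecificPath rewrite above also illustrates a pattern used throughout this release: the nested per-test Pydantic models move into `anta.input_models.*`, and a `ClassVar` alias inside `Input` keeps the old nested name importable. A hedged sketch of that alias pattern with made-up names (`PeerModel`, `VerifyPeers`), assuming Pydantic v2 is installed:

```python
from ipaddress import IPv4Address
from typing import ClassVar

from pydantic import BaseModel


class PeerModel(BaseModel):
    """Shared input model, analogous to the new anta.input_models.* classes."""

    vrf: str = "default"
    destination: IPv4Address

    def __str__(self) -> str:
        # A readable identifier that tests can reuse in failure messages.
        return f"Destination: {self.destination} VRF: {self.vrf}"


class VerifyPeers:
    """Illustrative test class keeping the old nested-model name importable."""

    class Input(BaseModel):
        peers: list[PeerModel]
        # Old references to VerifyPeers.Input.PeerModels keep resolving.
        PeerModels: ClassVar[type[PeerModel]] = PeerModel


inputs = VerifyPeers.Input(peers=[{"destination": "10.0.0.1"}])
print(inputs.peers[0])                            # Destination: 10.0.0.1 VRF: default
print(VerifyPeers.Input.PeerModels is PeerModel)  # True
```

Because the alias is annotated with `ClassVar`, Pydantic does not treat it as a model field; it is only a reference kept for backward compatibility, as the "To maintain backward compatibility" comments in the hunks above indicate.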
+ * Success: If all of the following conditions are met: + - All specified peers are found in the BFD configuration within the specified VRF. + - All BFD peers are `up` and remote disc is non-zero. + * Failure: If any of the following occur: + - A specified peer is not found in the BFD configuration within the specified VRF. + - Any BFD peer session is not `up` or the remote discriminator identifier is zero. Examples -------- @@ -42,8 +53,6 @@ class VerifyBFDSpecificPeers(AntaTest): ``` """ - name = "VerifyBFDSpecificPeers" - description = "Verifies the IPv4 BFD peer's sessions and remote disc in the specified VRF." categories: ClassVar[list[str]] = ["bfd"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bfd peers", revision=1)] @@ -51,20 +60,14 @@ class VerifyBFDSpecificPeers(AntaTest): """Input model for the VerifyBFDSpecificPeers test.""" bfd_peers: list[BFDPeer] - """List of IPv4 BFD peers.""" - - class BFDPeer(BaseModel): - """Model for an IPv4 BFD peer.""" - - peer_address: IPv4Address - """IPv4 address of a BFD peer.""" - vrf: str = "default" - """Optional VRF for BFD peer. If not provided, it defaults to `default`.""" + """List of IPv4 BFD""" + BFDPeer: ClassVar[type[BFDPeer]] = BFDPeer + """To maintain backward compatibility.""" @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBFDSpecificPeers.""" - failures: dict[Any, Any] = {} + self.result.is_success() # Iterating over BFD peers for bfd_peer in self.inputs.bfd_peers: @@ -78,31 +81,33 @@ class VerifyBFDSpecificPeers(AntaTest): # Check if BFD peer configured if not bfd_output: - failures[peer] = {vrf: "Not Configured"} + self.result.is_failure(f"{bfd_peer} - Not found") continue # Check BFD peer status and remote disc - if not (bfd_output.get("status") == "up" and bfd_output.get("remoteDisc") != 0): - failures[peer] = { - vrf: { - "status": bfd_output.get("status"), - "remote_disc": bfd_output.get("remoteDisc"), - } - } - - if not failures: - self.result.is_success() - else: - self.result.is_failure(f"Following BFD peers are not configured, status is not up or remote disc is zero:\n{failures}") + state = bfd_output.get("status") + remote_disc = bfd_output.get("remoteDisc") + if not (state == "up" and remote_disc != 0): + self.result.is_failure(f"{bfd_peer} - Session not properly established - State: {state} Remote Discriminator: {remote_disc}") class VerifyBFDPeersIntervals(AntaTest): - """Verifies the timers of the IPv4 BFD peers in the specified VRF. + """Verifies the timers of IPv4 BFD peer sessions. + + This test performs the following checks for each specified peer: + + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BFD configuration. + 3. Confirms that BFD peer is correctly configured with the `Transmit interval, Receive interval and Multiplier`. Expected Results ---------------- - * Success: The test will pass if the timers of the IPv4 BFD peers are correct in the specified VRF. - * Failure: The test will fail if the IPv4 BFD peers are not found or their timers are incorrect in the specified VRF. + * Success: If all of the following conditions are met: + - All specified peers are found in the BFD configuration within the specified VRF. + - All BFD peers are correctly configured with the `Transmit interval, Receive interval and Multiplier`. + * Failure: If any of the following occur: + - A specified peer is not found in the BFD configuration within the specified VRF. 
+ - Any BFD peer not correctly configured with the `Transmit interval, Receive interval and Multiplier`. Examples -------- @@ -123,8 +128,6 @@ class VerifyBFDPeersIntervals(AntaTest): ``` """ - name = "VerifyBFDPeersIntervals" - description = "Verifies the timers of the IPv4 BFD peers in the specified VRF." categories: ClassVar[list[str]] = ["bfd"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bfd peers detail", revision=1)] @@ -132,34 +135,22 @@ class VerifyBFDPeersIntervals(AntaTest): """Input model for the VerifyBFDPeersIntervals test.""" bfd_peers: list[BFDPeer] - """List of BFD peers.""" - - class BFDPeer(BaseModel): - """Model for an IPv4 BFD peer.""" - - peer_address: IPv4Address - """IPv4 address of a BFD peer.""" - vrf: str = "default" - """Optional VRF for BFD peer. If not provided, it defaults to `default`.""" - tx_interval: BfdInterval - """Tx interval of BFD peer in milliseconds.""" - rx_interval: BfdInterval - """Rx interval of BFD peer in milliseconds.""" - multiplier: BfdMultiplier - """Multiplier of BFD peer.""" + """List of IPv4 BFD""" + BFDPeer: ClassVar[type[BFDPeer]] = BFDPeer + """To maintain backward compatibility""" @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBFDPeersIntervals.""" - failures: dict[Any, Any] = {} + self.result.is_success() # Iterating over BFD peers - for bfd_peers in self.inputs.bfd_peers: - peer = str(bfd_peers.peer_address) - vrf = bfd_peers.vrf - tx_interval = bfd_peers.tx_interval - rx_interval = bfd_peers.rx_interval - multiplier = bfd_peers.multiplier + for bfd_peer in self.inputs.bfd_peers: + peer = str(bfd_peer.peer_address) + vrf = bfd_peer.vrf + tx_interval = bfd_peer.tx_interval + rx_interval = bfd_peer.rx_interval + multiplier = bfd_peer.multiplier # Check if BFD peer configured bfd_output = get_value( @@ -168,7 +159,7 @@ class VerifyBFDPeersIntervals(AntaTest): separator="..", ) if not bfd_output: - failures[peer] = {vrf: "Not Configured"} + self.result.is_failure(f"{bfd_peer} - Not found") continue # Convert interval timer(s) into milliseconds to be consistent with the inputs. @@ -176,38 +167,34 @@ class VerifyBFDPeersIntervals(AntaTest): op_tx_interval = bfd_details.get("operTxInterval") // 1000 op_rx_interval = bfd_details.get("operRxInterval") // 1000 detect_multiplier = bfd_details.get("detectMult") - intervals_ok = op_tx_interval == tx_interval and op_rx_interval == rx_interval and detect_multiplier == multiplier - # Check timers of BFD peer - if not intervals_ok: - failures[peer] = { - vrf: { - "tx_interval": op_tx_interval, - "rx_interval": op_rx_interval, - "multiplier": detect_multiplier, - } - } + if op_tx_interval != tx_interval: + self.result.is_failure(f"{bfd_peer} - Incorrect Transmit interval - Expected: {tx_interval} Actual: {op_tx_interval}") - # Check if any failures - if not failures: - self.result.is_success() - else: - self.result.is_failure(f"Following BFD peers are not configured or timers are not correct:\n{failures}") + if op_rx_interval != rx_interval: + self.result.is_failure(f"{bfd_peer} - Incorrect Receive interval - Expected: {rx_interval} Actual: {op_rx_interval}") + + if detect_multiplier != multiplier: + self.result.is_failure(f"{bfd_peer} - Incorrect Multiplier - Expected: {multiplier} Actual: {detect_multiplier}") class VerifyBFDPeersHealth(AntaTest): """Verifies the health of IPv4 BFD peers across all VRFs. - It checks that no BFD peer is in the down state and that the discriminator value of the remote system is not zero. 
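Related background for the reworked failure reporting in these BFD tests: `_handle_failed_commands`, added to `anta/models.py` earlier in this diff, triages failed commands into three outcomes: unsupported on the platform (test skipped), known EOS error (test failure), anything else (test error). A toy sketch of that precedence with plain dataclasses, not the real `AntaCommand`/`TestResult` objects:

```python
from dataclasses import dataclass, field


@dataclass
class FailedCommand:
    command: str
    supported: bool = True
    returned_known_eos_error: bool = False
    errors: list[str] = field(default_factory=list)


def triage(failed: list[FailedCommand]) -> tuple[str, str]:
    """Return a (status, message) pair following the new precedence."""
    unsupported = [c.command for c in failed if not c.supported]
    if unsupported:
        return "skipped", f"Unsupported command(s): {', '.join(unsupported)}"
    known = [c.command for c in failed if c.returned_known_eos_error]
    if known:
        return "failure", f"Known EOS error on: {', '.join(known)}"
    return "error", "; ".join(f"{c.command} has failed: {', '.join(c.errors)}" for c in failed)


print(triage([FailedCommand("show foo", supported=False)]))
# ('skipped', 'Unsupported command(s): show foo')
print(triage([FailedCommand("show bar", returned_known_eos_error=True)]))
# ('failure', 'Known EOS error on: show bar')
print(triage([FailedCommand("show baz", errors=["Invalid input"])]))
# ('error', 'show baz has failed: Invalid input')
```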
+ This test performs the following checks for BFD peers across all VRFs: - Optionally, it can also verify that BFD peers have not been down before a specified threshold of hours. + 1. Validates that the state is `up`. + 2. Confirms that the remote discriminator identifier (disc) is non-zero. + 3. Optionally verifies that the peer have not been down before a specified threshold of hours. Expected Results ---------------- - * Success: The test will pass if all IPv4 BFD peers are up, the discriminator value of each remote system is non-zero, - and the last downtime of each peer is above the defined threshold. - * Failure: The test will fail if any IPv4 BFD peer is down, the discriminator value of any remote system is zero, - or the last downtime of any peer is below the defined threshold. + * Success: If all of the following conditions are met: + - All BFD peers across the VRFs are up and remote disc is non-zero. + - Last downtime of each peer is above the defined threshold, if specified. + * Failure: If any of the following occur: + - Any BFD peer session is not up or the remote discriminator identifier is zero. + - Last downtime of any peer is below the defined threshold, if specified. Examples -------- @@ -218,8 +205,6 @@ class VerifyBFDPeersHealth(AntaTest): ``` """ - name = "VerifyBFDPeersHealth" - description = "Verifies the health of all IPv4 BFD peers." categories: ClassVar[list[str]] = ["bfd"] # revision 1 as later revision introduces additional nesting for type commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ @@ -236,18 +221,13 @@ class VerifyBFDPeersHealth(AntaTest): @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBFDPeersHealth.""" - # Initialize failure strings - down_failures = [] - up_failures = [] + self.result.is_success() # Extract the current timestamp and command output clock_output = self.instance_commands[1].json_output current_timestamp = clock_output["utcTime"] bfd_output = self.instance_commands[0].json_output - # set the initial result - self.result.is_success() - # Check if any IPv4 BFD peer is configured ipv4_neighbors_exist = any(vrf_data["ipv4Neighbors"] for vrf_data in bfd_output["vrfs"].values()) if not ipv4_neighbors_exist: @@ -260,40 +240,40 @@ class VerifyBFDPeersHealth(AntaTest): for peer_data in neighbor_data["peerStats"].values(): peer_status = peer_data["status"] remote_disc = peer_data["remoteDisc"] - remote_disc_info = f" with remote disc {remote_disc}" if remote_disc == 0 else "" last_down = peer_data["lastDown"] hours_difference = ( datetime.fromtimestamp(current_timestamp, tz=timezone.utc) - datetime.fromtimestamp(last_down, tz=timezone.utc) ).total_seconds() / 3600 - # Check if peer status is not up - if peer_status != "up": - down_failures.append(f"{peer} is {peer_status} in {vrf} VRF{remote_disc_info}.") + if not (peer_status == "up" and remote_disc != 0): + self.result.is_failure( + f"Peer: {peer} VRF: {vrf} - Session not properly established - State: {peer_status} Remote Discriminator: {remote_disc}" + ) # Check if the last down is within the threshold - elif self.inputs.down_threshold and hours_difference < self.inputs.down_threshold: - up_failures.append(f"{peer} in {vrf} VRF was down {round(hours_difference)} hours ago{remote_disc_info}.") + if self.inputs.down_threshold and hours_difference < self.inputs.down_threshold: + self.result.is_failure( + f"Peer: {peer} VRF: {vrf} - Session failure detected within the expected uptime threshold ({round(hours_difference)} hours ago)" + ) - # Check if remote disc is 
0 - elif remote_disc == 0: - up_failures.append(f"{peer} in {vrf} VRF has remote disc {remote_disc}.") - # Check if there are any failures - if down_failures: - down_failures_str = "\n".join(down_failures) - self.result.is_failure(f"Following BFD peers are not up:\n{down_failures_str}") - if up_failures: - up_failures_str = "\n".join(up_failures) - self.result.is_failure(f"\nFollowing BFD peers were down:\n{up_failures_str}") +class VerifyBFDPeersRegProtocols(AntaTest): + """Verifies the registered routing protocol of IPv4 BFD peer sessions. + This test performs the following checks for each specified peer: -class VerifyBFDPeersRegProtocols(AntaTest): - """Verifies that IPv4 BFD peer(s) have the specified protocol(s) registered. + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BFD configuration. + 3. Confirms that BFD peer is correctly configured with the `routing protocol`. Expected Results ---------------- - * Success: The test will pass if IPv4 BFD peers are registered with the specified protocol(s). - * Failure: The test will fail if IPv4 BFD peers are not found or the specified protocol(s) are not registered for the BFD peer(s). + * Success: If all of the following conditions are met: + - All specified peers are found in the BFD configuration within the specified VRF. + - All BFD peers are correctly configured with the `routing protocol`. + * Failure: If any of the following occur: + - A specified peer is not found in the BFD configuration within the specified VRF. + - Any BFD peer not correctly configured with the `routing protocol`. Examples -------- @@ -308,8 +288,6 @@ class VerifyBFDPeersRegProtocols(AntaTest): ``` """ - name = "VerifyBFDPeersRegProtocols" - description = "Verifies that IPv4 BFD peer(s) have the specified protocol(s) registered." categories: ClassVar[list[str]] = ["bfd"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bfd peers detail", revision=1)] @@ -317,23 +295,14 @@ class VerifyBFDPeersRegProtocols(AntaTest): """Input model for the VerifyBFDPeersRegProtocols test.""" bfd_peers: list[BFDPeer] - """List of IPv4 BFD peers.""" - - class BFDPeer(BaseModel): - """Model for an IPv4 BFD peer.""" - - peer_address: IPv4Address - """IPv4 address of a BFD peer.""" - vrf: str = "default" - """Optional VRF for BFD peer. 
If not provided, it defaults to `default`.""" - protocols: list[BfdProtocol] - """List of protocols to be verified.""" + """List of IPv4 BFD""" + BFDPeer: ClassVar[type[BFDPeer]] = BFDPeer + """To maintain backward compatibility""" @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBFDPeersRegProtocols.""" - # Initialize failure messages - failures: dict[Any, Any] = {} + self.result.is_success() # Iterating over BFD peers, extract the parameters and command output for bfd_peer in self.inputs.bfd_peers: @@ -348,16 +317,11 @@ class VerifyBFDPeersRegProtocols(AntaTest): # Check if BFD peer configured if not bfd_output: - failures[peer] = {vrf: "Not Configured"} + self.result.is_failure(f"{bfd_peer} - Not found") continue # Check registered protocols - difference = set(protocols) - set(get_value(bfd_output, "peerStatsDetail.apps")) - + difference = sorted(set(protocols) - set(get_value(bfd_output, "peerStatsDetail.apps"))) if difference: - failures[peer] = {vrf: sorted(difference)} - - if not failures: - self.result.is_success() - else: - self.result.is_failure(f"The following BFD peers are not configured or have non-registered protocol(s):\n{failures}") + failures = " ".join(f"`{item}`" for item in difference) + self.result.is_failure(f"{bfd_peer} - {failures} routing protocol(s) not configured") diff --git a/anta/tests/configuration.py b/anta/tests/configuration.py index 4a1bd31..cff7ec6 100644 --- a/anta/tests/configuration.py +++ b/anta/tests/configuration.py @@ -33,8 +33,6 @@ class VerifyZeroTouch(AntaTest): ``` """ - name = "VerifyZeroTouch" - description = "Verifies ZeroTouch is disabled" categories: ClassVar[list[str]] = ["configuration"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show zerotouch", revision=1)] @@ -64,8 +62,6 @@ class VerifyRunningConfigDiffs(AntaTest): ``` """ - name = "VerifyRunningConfigDiffs" - description = "Verifies there is no difference between the running-config and the startup-config" categories: ClassVar[list[str]] = ["configuration"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show running-config diffs", ofmt="text")] @@ -98,13 +94,12 @@ class VerifyRunningConfigLines(AntaTest): ```yaml anta.tests.configuration: - VerifyRunningConfigLines: - regex_patterns: - - "^enable password.*$" - - "bla bla" + regex_patterns: + - "^enable password.*$" + - "bla bla" ``` """ - name = "VerifyRunningConfigLines" description = "Search the Running-Config for the given RegEx patterns." categories: ClassVar[list[str]] = ["configuration"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show running-config", ofmt="text")] diff --git a/anta/tests/connectivity.py b/anta/tests/connectivity.py index c0c6f73..afcfa11 100644 --- a/anta/tests/connectivity.py +++ b/anta/tests/connectivity.py @@ -7,12 +7,9 @@ # mypy: disable-error-code=attr-defined from __future__ import annotations -from ipaddress import IPv4Address from typing import ClassVar -from pydantic import BaseModel - -from anta.custom_types import Interface +from anta.input_models.connectivity import Host, LLDPNeighbor, Neighbor from anta.models import AntaCommand, AntaTemplate, AntaTest @@ -43,11 +40,8 @@ class VerifyReachability(AntaTest): ``` """ - name = "VerifyReachability" - description = "Test the network reachability to one or many destination IP(s)." 
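The bfd.py hunks above replace the aggregated failure dictionaries with one `is_failure()` call per peer and per mismatched value, formatted as `Expected: X Actual: Y`; operational intervals are divided by 1000 to convert the EOS microsecond values to the millisecond inputs. A simplified sketch of that per-field comparison over a fake detail payload (not real eAPI output):

```python
def check_bfd_timers(peer: str, expected: dict[str, int], detail: dict[str, int]) -> list[str]:
    """Compare expected timers (ms) against operational values reported in microseconds."""
    failures = []
    op_tx = detail["operTxInterval"] // 1000  # microseconds -> milliseconds
    op_rx = detail["operRxInterval"] // 1000
    multiplier = detail["detectMult"]
    if op_tx != expected["tx_interval"]:
        failures.append(f"{peer} - Incorrect Transmit interval - Expected: {expected['tx_interval']} Actual: {op_tx}")
    if op_rx != expected["rx_interval"]:
        failures.append(f"{peer} - Incorrect Receive interval - Expected: {expected['rx_interval']} Actual: {op_rx}")
    if multiplier != expected["multiplier"]:
        failures.append(f"{peer} - Incorrect Multiplier - Expected: {expected['multiplier']} Actual: {multiplier}")
    return failures


detail = {"operTxInterval": 1200000, "operRxInterval": 1200000, "detectMult": 3}
expected = {"tx_interval": 1200, "rx_interval": 1200, "multiplier": 5}
print(check_bfd_timers("Peer: 192.0.2.1 VRF: default", expected, detail))
# ['Peer: 192.0.2.1 VRF: default - Incorrect Multiplier - Expected: 5 Actual: 3']
```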
categories: ClassVar[list[str]] = ["connectivity"] - # Removing the <space> between '{size}' and '{df_bit}' to compensate the df-bit set default value - # i.e if df-bit kept disable then it will add redundant space in between the command + # Template uses '{size}{df_bit}' without space since df_bit includes leading space when enabled commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ AntaTemplate(template="ping vrf {vrf} {destination} source {source} size {size}{df_bit} repeat {repeat}", revision=1) ] @@ -57,62 +51,43 @@ class VerifyReachability(AntaTest): hosts: list[Host] """List of host to ping.""" - - class Host(BaseModel): - """Model for a remote host to ping.""" - - destination: IPv4Address - """IPv4 address to ping.""" - source: IPv4Address | Interface - """IPv4 address source IP or egress interface to use.""" - vrf: str = "default" - """VRF context. Defaults to `default`.""" - repeat: int = 2 - """Number of ping repetition. Defaults to 2.""" - size: int = 100 - """Specify datagram size. Defaults to 100.""" - df_bit: bool = False - """Enable do not fragment bit in IP header. Defaults to False.""" + Host: ClassVar[type[Host]] = Host + """To maintain backward compatibility.""" def render(self, template: AntaTemplate) -> list[AntaCommand]: """Render the template for each host in the input list.""" - commands = [] - for host in self.inputs.hosts: - # Enables do not fragment bit in IP header if needed else keeping disable. - # Adding the <space> at start to compensate change in AntaTemplate - df_bit = " df-bit" if host.df_bit else "" - command = template.render(destination=host.destination, source=host.source, vrf=host.vrf, repeat=host.repeat, size=host.size, df_bit=df_bit) - commands.append(command) - return commands + return [ + template.render( + destination=host.destination, source=host.source, vrf=host.vrf, repeat=host.repeat, size=host.size, df_bit=" df-bit" if host.df_bit else "" + ) + for host in self.inputs.hosts + ] @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyReachability.""" - failures = [] + self.result.is_success() - for command in self.instance_commands: - src = command.params.source - dst = command.params.destination - repeat = command.params.repeat + for command, host in zip(self.instance_commands, self.inputs.hosts): + if f"{host.repeat} received" not in command.json_output["messages"][0]: + self.result.is_failure(f"{host} - Unreachable") - if f"{repeat} received" not in command.json_output["messages"][0]: - failures.append((str(src), str(dst))) - if not failures: - self.result.is_success() - else: - self.result.is_failure(f"Connectivity test failed for the following source-destination pairs: {failures}") +class VerifyLLDPNeighbors(AntaTest): + """Verifies the connection status of the specified LLDP (Link Layer Discovery Protocol) neighbors. + This test performs the following checks for each specified LLDP neighbor: -class VerifyLLDPNeighbors(AntaTest): - """Verifies that the provided LLDP neighbors are present and connected with the correct configuration. + 1. Confirming matching ports on both local and neighboring devices. + 2. Ensuring compatibility of device names and interface identifiers. + 3. Verifying neighbor configurations match expected values per interface; extra neighbors are ignored. Expected Results ---------------- - * Success: The test will pass if each of the provided LLDP neighbors is present and connected to the specified port and device. 
+ * Success: The test will pass if all the provided LLDP neighbors are present and correctly connected to the specified port and device. * Failure: The test will fail if any of the following conditions are met: - - The provided LLDP neighbor is not found. - - The system name or port of the LLDP neighbor does not match the provided information. + - The provided LLDP neighbor is not found in the LLDP table. + - The system name or port of the LLDP neighbor does not match the expected information. Examples -------- @@ -129,60 +104,37 @@ class VerifyLLDPNeighbors(AntaTest): ``` """ - name = "VerifyLLDPNeighbors" - description = "Verifies that the provided LLDP neighbors are connected properly." categories: ClassVar[list[str]] = ["connectivity"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show lldp neighbors detail", revision=1)] class Input(AntaTest.Input): """Input model for the VerifyLLDPNeighbors test.""" - neighbors: list[Neighbor] + neighbors: list[LLDPNeighbor] """List of LLDP neighbors.""" - - class Neighbor(BaseModel): - """Model for an LLDP neighbor.""" - - port: Interface - """LLDP port.""" - neighbor_device: str - """LLDP neighbor device.""" - neighbor_port: Interface - """LLDP neighbor port.""" + Neighbor: ClassVar[type[Neighbor]] = Neighbor + """To maintain backward compatibility.""" @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyLLDPNeighbors.""" - failures: dict[str, list[str]] = {} + self.result.is_success() output = self.instance_commands[0].json_output["lldpNeighbors"] - for neighbor in self.inputs.neighbors: if neighbor.port not in output: - failures.setdefault("Port(s) not configured", []).append(neighbor.port) + self.result.is_failure(f"{neighbor} - Port not found") continue if len(lldp_neighbor_info := output[neighbor.port]["lldpNeighborInfo"]) == 0: - failures.setdefault("No LLDP neighbor(s) on port(s)", []).append(neighbor.port) + self.result.is_failure(f"{neighbor} - No LLDP neighbors") continue - if not any( + # Check if the system name and neighbor port matches + match_found = any( info["systemName"] == neighbor.neighbor_device and info["neighborInterfaceInfo"]["interfaceId_v2"] == neighbor.neighbor_port for info in lldp_neighbor_info - ): - neighbors = "\n ".join( - [ - f"{neighbor[0]}_{neighbor[1]}" - for neighbor in [(info["systemName"], info["neighborInterfaceInfo"]["interfaceId_v2"]) for info in lldp_neighbor_info] - ] - ) - failures.setdefault("Wrong LLDP neighbor(s) on port(s)", []).append(f"{neighbor.port}\n {neighbors}") - - if not failures: - self.result.is_success() - else: - failure_messages = [] - for failure_type, ports in failures.items(): - ports_str = "\n ".join(ports) - failure_messages.append(f"{failure_type}:\n {ports_str}") - self.result.is_failure("\n".join(failure_messages)) + ) + if not match_found: + failure_msg = [f"{info['systemName']}/{info['neighborInterfaceInfo']['interfaceId_v2']}" for info in lldp_neighbor_info] + self.result.is_failure(f"{neighbor} - Wrong LLDP neighbors: {', '.join(failure_msg)}") diff --git a/anta/tests/cvx.py b/anta/tests/cvx.py new file mode 100644 index 0000000..6160082 --- /dev/null +++ b/anta/tests/cvx.py @@ -0,0 +1,283 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. 
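Before the new cvx.py module continues below, note the shape of the VerifyReachability rewrite above: one ping command is rendered per host, and the test walks `zip(self.instance_commands, self.inputs.hosts)` so each failure message can reuse the host's string representation. A minimal sketch of that index-aligned pairing with toy structures (not `AntaCommand`/`AntaTemplate`):

```python
from dataclasses import dataclass


@dataclass
class Host:
    destination: str
    repeat: int = 2

    def __str__(self) -> str:
        return f"Host: {self.destination}"


def render(host: Host) -> str:
    # Stand-in for AntaTemplate.render()
    return f"ping {host.destination} repeat {host.repeat}"


def check(outputs: dict[str, str], hosts: list[Host]) -> list[str]:
    commands = [render(host) for host in hosts]
    failures = []
    # Commands and hosts are built in the same order, so zip keeps them paired.
    for command, host in zip(commands, hosts):
        if f"{host.repeat} received" not in outputs[command]:
            failures.append(f"{host} - Unreachable")
    return failures


outputs = {
    "ping 10.0.0.1 repeat 2": "2 packets transmitted, 2 received",
    "ping 10.0.0.2 repeat 2": "2 packets transmitted, 0 received",
}
print(check(outputs, [Host("10.0.0.1"), Host("10.0.0.2")]))  # ['Host: 10.0.0.2 - Unreachable']
```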
+"""Module related to the CVX tests.""" + +# Mypy does not understand AntaTest.Input typing +# mypy: disable-error-code=attr-defined +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, ClassVar, Literal + +from anta.custom_types import PositiveInteger +from anta.models import AntaCommand, AntaTest +from anta.tools import get_value + +if TYPE_CHECKING: + from anta.models import AntaTemplate +from anta.input_models.cvx import CVXPeers + + +class VerifyMcsClientMounts(AntaTest): + """Verify if all MCS client mounts are in mountStateMountComplete. + + Expected Results + ---------------- + * Success: The test will pass if the MCS mount status on MCS Clients are mountStateMountComplete. + * Failure: The test will fail even if one switch's MCS client mount status is not mountStateMountComplete. + + Examples + -------- + ```yaml + anta.tests.cvx: + - VerifyMcsClientMounts: + ``` + """ + + categories: ClassVar[list[str]] = ["cvx"] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show management cvx mounts", revision=1)] + + @AntaTest.anta_test + def test(self) -> None: + """Main test function for VerifyMcsClientMounts.""" + command_output = self.instance_commands[0].json_output + self.result.is_success() + mount_states = command_output["mountStates"] + mcs_mount_state_detected = False + for mount_state in mount_states: + if not mount_state["type"].startswith("Mcs"): + continue + mcs_mount_state_detected = True + if (state := mount_state["state"]) != "mountStateMountComplete": + self.result.is_failure(f"MCS Client mount states are not valid: {state}") + + if not mcs_mount_state_detected: + self.result.is_failure("MCS Client mount states are not present") + + +class VerifyManagementCVX(AntaTest): + """Verifies the management CVX global status. + + Expected Results + ---------------- + * Success: The test will pass if the management CVX global status matches the expected status. + * Failure: The test will fail if the management CVX global status does not match the expected status. + + + Examples + -------- + ```yaml + anta.tests.cvx: + - VerifyManagementCVX: + enabled: true + ``` + """ + + categories: ClassVar[list[str]] = ["cvx"] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show management cvx", revision=3)] + + class Input(AntaTest.Input): + """Input model for the VerifyManagementCVX test.""" + + enabled: bool + """Whether management CVX must be enabled (True) or disabled (False).""" + + @AntaTest.anta_test + def test(self) -> None: + """Main test function for VerifyManagementCVX.""" + command_output = self.instance_commands[0].json_output + self.result.is_success() + if (cluster_state := get_value(command_output, "clusterStatus.enabled")) != self.inputs.enabled: + self.result.is_failure(f"Management CVX status is not valid: {cluster_state}") + + +class VerifyMcsServerMounts(AntaTest): + """Verify if all MCS server mounts are in a MountComplete state. + + Expected Results + ---------------- + * Success: The test will pass if all the MCS mount status on MCS server are mountStateMountComplete. + * Failure: The test will fail even if any MCS server mount status is not mountStateMountComplete. 
+ + Examples + -------- + ```yaml + anta.tests.cvx: + + - VerifyMcsServerMounts: + connections_count: 100 + ``` + """ + + categories: ClassVar[list[str]] = ["cvx"] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show cvx mounts", revision=1)] + + mcs_path_types: ClassVar[list[str]] = ["Mcs::ApiConfigRedundancyStatus", "Mcs::ActiveFlows", "Mcs::Client::Status"] + """The list of expected MCS path types to verify.""" + + class Input(AntaTest.Input): + """Input model for the VerifyMcsServerMounts test.""" + + connections_count: int + """The expected number of active CVX Connections with mountStateMountComplete""" + + def validate_mount_states(self, mount: dict[str, Any], hostname: str) -> None: + """Validate the mount states of a given mount.""" + mount_states = mount["mountStates"][0] + + if (num_path_states := len(mount_states["pathStates"])) != (expected_num := len(self.mcs_path_types)): + self.result.is_failure(f"Incorrect number of mount path states for {hostname} - Expected: {expected_num}, Actual: {num_path_states}") + + for path in mount_states["pathStates"]: + if (path_type := path.get("type")) not in self.mcs_path_types: + self.result.is_failure(f"Unexpected MCS path type for {hostname}: '{path_type}'.") + if (path_state := path.get("state")) != "mountStateMountComplete": + self.result.is_failure(f"MCS server mount state for path '{path_type}' is not valid is for {hostname}: '{path_state}'.") + + @AntaTest.anta_test + def test(self) -> None: + """Main test function for VerifyMcsServerMounts.""" + command_output = self.instance_commands[0].json_output + self.result.is_success() + active_count = 0 + + if not (connections := command_output.get("connections")): + self.result.is_failure("CVX connections are not available.") + return + + for connection in connections: + mounts = connection.get("mounts", []) + hostname = connection["hostname"] + + mcs_mounts = [mount for mount in mounts if mount["service"] == "Mcs"] + + if not mounts: + self.result.is_failure(f"No mount status for {hostname}") + continue + + if not mcs_mounts: + self.result.is_failure(f"MCS mount state not detected for {hostname}") + else: + for mount in mcs_mounts: + self.validate_mount_states(mount, hostname) + active_count += 1 + + if active_count != self.inputs.connections_count: + self.result.is_failure(f"Incorrect CVX successful connections count. Expected: {self.inputs.connections_count}, Actual : {active_count}") + + +class VerifyActiveCVXConnections(AntaTest): + """Verifies the number of active CVX Connections. + + Expected Results + ---------------- + * Success: The test will pass if number of connections is equal to the expected number of connections. + * Failure: The test will fail otherwise. 
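A pattern worth calling out in the new CVX tests above: guard the payload with an assignment expression (`if not (connections := ...)`), fail early when the key is missing or empty, then count only the connections whose MCS mounts are complete. A condensed, standalone sketch of that guard-and-count flow over a fake payload; it deliberately skips the per-path-type validation done by `validate_mount_states`:

```python
def count_complete_mounts(command_output: dict) -> tuple[int, list[str]]:
    """Count connections whose MCS mounts are all complete; collect failure notes."""
    failures: list[str] = []
    # Guard with an assignment expression: a missing or empty key is an immediate failure.
    if not (connections := command_output.get("connections")):
        return 0, ["CVX connections are not available."]
    active = 0
    for connection in connections:
        hostname = connection["hostname"]
        mcs_mounts = [m for m in connection.get("mounts", []) if m["service"] == "Mcs"]
        if not mcs_mounts:
            failures.append(f"MCS mount state not detected for {hostname}")
            continue
        if all(s["state"] == "mountStateMountComplete" for m in mcs_mounts for s in m["mountStates"]):
            active += 1
    return active, failures


payload = {
    "connections": [
        {"hostname": "leaf1", "mounts": [{"service": "Mcs", "mountStates": [{"state": "mountStateMountComplete"}]}]},
        {"hostname": "leaf2", "mounts": []},
    ]
}
print(count_complete_mounts(payload))  # (1, ['MCS mount state not detected for leaf2'])
```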
+ + Examples + -------- + ```yaml + anta.tests.cvx: + - VerifyActiveCVXConnections: + connections_count: 100 + ``` + """ + + categories: ClassVar[list[str]] = ["cvx"] + # TODO: @gmuloc - cover "% Unavailable command (controller not ready)" + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show cvx connections brief", revision=1)] + + class Input(AntaTest.Input): + """Input model for the VerifyActiveCVXConnections test.""" + + connections_count: PositiveInteger + """The expected number of active CVX Connections.""" + + @AntaTest.anta_test + def test(self) -> None: + """Main test function for VerifyActiveCVXConnections.""" + command_output = self.instance_commands[0].json_output + self.result.is_success() + + if not (connections := command_output.get("connections")): + self.result.is_failure("CVX connections are not available.") + return + + active_count = len([connection for connection in connections if connection.get("oobConnectionActive")]) + + if self.inputs.connections_count != active_count: + self.result.is_failure(f"CVX active connections count. Expected: {self.inputs.connections_count}, Actual : {active_count}") + + +class VerifyCVXClusterStatus(AntaTest): + """Verifies the CVX Server Cluster status. + + Expected Results + ---------------- + * Success: The test will pass if all of the following conditions is met: + - CVX Enabled state is true + - Cluster Mode is true + - Role is either Master or Standby. + - peer_status matches defined state + * Failure: The test will fail if any of the success conditions is not met. + + Examples + -------- + ```yaml + anta.tests.cvx: + - VerifyCVXClusterStatus: + role: Master + peer_status: + - peer_name : cvx-red-2 + registration_state: Registration complete + - peer_name: cvx-red-3 + registration_state: Registration error + ``` + """ + + categories: ClassVar[list[str]] = ["cvx"] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show cvx", revision=1)] + + class Input(AntaTest.Input): + """Input model for the VerifyCVXClusterStatus test.""" + + role: Literal["Master", "Standby", "Disconnected"] = "Master" + peer_status: list[CVXPeers] + + @AntaTest.anta_test + def test(self) -> None: + """Run the main test for VerifyCVXClusterStatus.""" + command_output = self.instance_commands[0].json_output + self.result.is_success() + + # Validate Server enabled status + if not command_output.get("enabled"): + self.result.is_failure("CVX Server status is not enabled") + + # Validate cluster status and mode + if not (cluster_status := command_output.get("clusterStatus")) or not command_output.get("clusterMode"): + self.result.is_failure("CVX Server is not a cluster") + return + + # Check cluster role + if (cluster_role := cluster_status.get("role")) != self.inputs.role: + self.result.is_failure(f"CVX Role is not valid: {cluster_role}") + return + + # Validate peer status + peer_cluster = cluster_status.get("peerStatus", {}) + + # Check peer count + if (num_of_peers := len(peer_cluster)) != (expected_num_of_peers := len(self.inputs.peer_status)): + self.result.is_failure(f"Unexpected number of peers {num_of_peers} vs {expected_num_of_peers}") + + # Check each peer + for peer in self.inputs.peer_status: + # Retrieve the peer status from the peer cluster + if (eos_peer_status := get_value(peer_cluster, peer.peer_name, separator="..")) is None: + self.result.is_failure(f"{peer.peer_name} is not present") + continue + + # Validate the registration state of the peer + if (peer_reg_state := 
eos_peer_status.get("registrationState")) != peer.registration_state: + self.result.is_failure(f"{peer.peer_name} registration state is not complete: {peer_reg_state}") diff --git a/anta/tests/field_notices.py b/anta/tests/field_notices.py index 6f98a2c..41e81a8 100644 --- a/anta/tests/field_notices.py +++ b/anta/tests/field_notices.py @@ -34,7 +34,6 @@ class VerifyFieldNotice44Resolution(AntaTest): ``` """ - name = "VerifyFieldNotice44Resolution" description = "Verifies that the device is using the correct Aboot version per FN0044." categories: ClassVar[list[str]] = ["field notices"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show version detail", revision=1)] @@ -110,15 +109,11 @@ class VerifyFieldNotice44Resolution(AntaTest): self.result.is_success() incorrect_aboot_version = ( - aboot_version.startswith("4.0.") - and int(aboot_version.split(".")[2]) < 7 - or aboot_version.startswith("4.1.") - and int(aboot_version.split(".")[2]) < 1 + (aboot_version.startswith("4.0.") and int(aboot_version.split(".")[2]) < 7) + or (aboot_version.startswith("4.1.") and int(aboot_version.split(".")[2]) < 1) or ( - aboot_version.startswith("6.0.") - and int(aboot_version.split(".")[2]) < 9 - or aboot_version.startswith("6.1.") - and int(aboot_version.split(".")[2]) < 7 + (aboot_version.startswith("6.0.") and int(aboot_version.split(".")[2]) < 9) + or (aboot_version.startswith("6.1.") and int(aboot_version.split(".")[2]) < 7) ) ) if incorrect_aboot_version: @@ -143,7 +138,6 @@ class VerifyFieldNotice72Resolution(AntaTest): ``` """ - name = "VerifyFieldNotice72Resolution" description = "Verifies if the device is exposed to FN0072, and if the issue has been mitigated." categories: ClassVar[list[str]] = ["field notices"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show version detail", revision=1)] diff --git a/anta/tests/flow_tracking.py b/anta/tests/flow_tracking.py index 676bdb4..9b9acc6 100644 --- a/anta/tests/flow_tracking.py +++ b/anta/tests/flow_tracking.py @@ -17,8 +17,7 @@ from anta.tools import get_failed_logs def validate_record_export(record_export: dict[str, str], tracker_info: dict[str, str]) -> str: - """ - Validate the record export configuration against the tracker info. + """Validate the record export configuration against the tracker info. Parameters ---------- @@ -41,8 +40,7 @@ def validate_record_export(record_export: dict[str, str], tracker_info: dict[str def validate_exporters(exporters: list[dict[str, str]], tracker_info: dict[str, str]) -> str: - """ - Validate the exporter configurations against the tracker info. + """Validate the exporter configurations against the tracker info. Parameters ---------- @@ -74,8 +72,7 @@ def validate_exporters(exporters: list[dict[str, str]], tracker_info: dict[str, class VerifyHardwareFlowTrackerStatus(AntaTest): - """ - Verifies if hardware flow tracking is running and an input tracker is active. + """Verifies if hardware flow tracking is running and an input tracker is active. This test optionally verifies the tracker interval/timeout and exporter configuration. 
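The two flow-tracking helpers whose docstrings are reflowed above share the same contract: take the expected settings and the tracker info returned by EOS as `dict[str, str]` and return a failure-log string. The sketch below is a generic stand-in for that contract; `validate_settings` is not an ANTA function, the real field mapping is not visible in this hunk, and the "empty string means everything matches" convention is assumed from how such helpers are typically consumed.

```python
# Generic stand-in for the validate_record_export / validate_exporters contract:
# compare expected settings against collected tracker info and return a
# failure-log string (assumed empty when everything matches). Illustrative only.


def validate_settings(expected: dict[str, str], tracker_info: dict[str, str]) -> str:
    """Return a failure log describing mismatched or missing settings."""
    failure_log = ""
    for key, expected_value in expected.items():
        actual_value = tracker_info.get(key)
        if actual_value != expected_value:
            failure_log += f"\nExpected `{expected_value}` as the {key}, but found `{actual_value}` instead."
    return failure_log


print(validate_settings({"interval": "300000"}, {"interval": "60000"}))
# Expected `300000` as the interval, but found `60000` instead.
```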
@@ -89,7 +86,7 @@ class VerifyHardwareFlowTrackerStatus(AntaTest): -------- ```yaml anta.tests.flow_tracking: - - VerifyFlowTrackingHardware: + - VerifyHardwareFlowTrackerStatus: trackers: - name: FLOW-TRACKER record_export: @@ -102,7 +99,6 @@ class VerifyHardwareFlowTrackerStatus(AntaTest): ``` """ - name = "VerifyHardwareFlowTrackerStatus" description = ( "Verifies if hardware flow tracking is running and an input tracker is active. Optionally verifies the tracker interval/timeout and exporter configuration." ) diff --git a/anta/tests/greent.py b/anta/tests/greent.py index b763242..67bb25b 100644 --- a/anta/tests/greent.py +++ b/anta/tests/greent.py @@ -25,11 +25,11 @@ class VerifyGreenTCounters(AntaTest): -------- ```yaml anta.tests.greent: - - VerifyGreenT: + - VerifyGreenTCounters: ``` + """ - name = "VerifyGreenTCounters" description = "Verifies if the GreenT counters are incremented." categories: ClassVar[list[str]] = ["greent"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show monitor telemetry postcard counters", revision=1)] @@ -57,12 +57,12 @@ class VerifyGreenT(AntaTest): -------- ```yaml anta.tests.greent: - - VerifyGreenTCounters: + - VerifyGreenT: ``` + """ - name = "VerifyGreenT" - description = "Verifies if a GreenT policy is created." + description = "Verifies if a GreenT policy other than the default is created." categories: ClassVar[list[str]] = ["greent"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show monitor telemetry postcard policy profile", revision=1)] diff --git a/anta/tests/hardware.py b/anta/tests/hardware.py index 569c180..1c562b0 100644 --- a/anta/tests/hardware.py +++ b/anta/tests/hardware.py @@ -36,8 +36,6 @@ class VerifyTransceiversManufacturers(AntaTest): ``` """ - name = "VerifyTransceiversManufacturers" - description = "Verifies if all transceivers come from approved manufacturers." categories: ClassVar[list[str]] = ["hardware"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show inventory", revision=2)] @@ -77,8 +75,6 @@ class VerifyTemperature(AntaTest): ``` """ - name = "VerifyTemperature" - description = "Verifies the device temperature." categories: ClassVar[list[str]] = ["hardware"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show system environment temperature", revision=1)] @@ -110,8 +106,6 @@ class VerifyTransceiversTemperature(AntaTest): ``` """ - name = "VerifyTransceiversTemperature" - description = "Verifies the transceivers temperature." categories: ClassVar[list[str]] = ["hardware"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show system environment temperature transceiver", revision=1)] @@ -151,8 +145,6 @@ class VerifyEnvironmentSystemCooling(AntaTest): ``` """ - name = "VerifyEnvironmentSystemCooling" - description = "Verifies the system cooling status." categories: ClassVar[list[str]] = ["hardware"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show system environment cooling", revision=1)] @@ -232,8 +224,6 @@ class VerifyEnvironmentPower(AntaTest): ``` """ - name = "VerifyEnvironmentPower" - description = "Verifies the power supplies status." 
categories: ClassVar[list[str]] = ["hardware"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show system environment power", revision=1)] @@ -274,7 +264,6 @@ class VerifyAdverseDrops(AntaTest): ``` """ - name = "VerifyAdverseDrops" description = "Verifies there are no adverse drops on DCS-7280 and DCS-7500 family switches." categories: ClassVar[list[str]] = ["hardware"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show hardware counter drop", revision=1)] diff --git a/anta/tests/interfaces.py b/anta/tests/interfaces.py index 32b85d4..bc1acbb 100644 --- a/anta/tests/interfaces.py +++ b/anta/tests/interfaces.py @@ -8,17 +8,18 @@ from __future__ import annotations import re -from ipaddress import IPv4Network -from typing import Any, ClassVar, Literal +from ipaddress import IPv4Interface +from typing import Any, ClassVar from pydantic import BaseModel, Field from pydantic_extra_types.mac_address import MacAddress from anta import GITHUB_SUGGESTION -from anta.custom_types import EthernetInterface, Interface, Percent, PortChannelInterface, PositiveInteger +from anta.custom_types import EthernetInterface, Interface, Percent, PositiveInteger from anta.decorators import skip_on_platforms +from anta.input_models.interfaces import InterfaceState from anta.models import AntaCommand, AntaTemplate, AntaTest -from anta.tools import custom_division, get_failed_logs, get_item, get_value +from anta.tools import custom_division, format_data, get_failed_logs, get_item, get_value BPS_GBPS_CONVERSIONS = 1000000000 @@ -44,8 +45,6 @@ class VerifyInterfaceUtilization(AntaTest): ``` """ - name = "VerifyInterfaceUtilization" - description = "Verifies that the utilization of interfaces is below a certain threshold." categories: ClassVar[list[str]] = ["interfaces"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ AntaCommand(command="show interfaces counters rates", revision=1), @@ -105,8 +104,6 @@ class VerifyInterfaceErrors(AntaTest): ``` """ - name = "VerifyInterfaceErrors" - description = "Verifies there are no interface error counters." categories: ClassVar[list[str]] = ["interfaces"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show interfaces counters errors", revision=1)] @@ -140,8 +137,6 @@ class VerifyInterfaceDiscards(AntaTest): ``` """ - name = "VerifyInterfaceDiscards" - description = "Verifies there are no interface discard counters." categories: ClassVar[list[str]] = ["interfaces"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show interfaces counters discards", revision=1)] @@ -174,8 +169,6 @@ class VerifyInterfaceErrDisabled(AntaTest): ``` """ - name = "VerifyInterfaceErrDisabled" - description = "Verifies there are no interfaces in the errdisabled state." categories: ClassVar[list[str]] = ["interfaces"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show interfaces status", revision=1)] @@ -191,16 +184,20 @@ class VerifyInterfaceErrDisabled(AntaTest): class VerifyInterfacesStatus(AntaTest): - """Verifies if the provided list of interfaces are all in the expected state. + """Verifies the operational states of specified interfaces to ensure they match expected configurations. 
- - If line protocol status is provided, prioritize checking against both status and line protocol status - - If line protocol status is not provided and interface status is "up", expect both status and line protocol to be "up" - - If interface status is not "up", check only the interface status without considering line protocol status + This test performs the following checks for each specified interface: + + 1. If `line_protocol_status` is defined, both `status` and `line_protocol_status` are verified for the specified interface. + 2. If `line_protocol_status` is not provided but the `status` is "up", it is assumed that both the status and line protocol should be "up". + 3. If the interface `status` is not "up", only the interface's status is validated, with no line protocol check performed. Expected Results ---------------- - * Success: The test will pass if the provided interfaces are all in the expected state. - * Failure: The test will fail if any interface is not in the expected state. + * Success: If the interface status and line protocol status matches the expected operational state for all specified interfaces. + * Failure: If any of the following occur: + - The specified interface is not configured. + - The specified interface status and line protocol status does not match the expected operational state for any interface. Examples -------- @@ -219,8 +216,6 @@ class VerifyInterfacesStatus(AntaTest): ``` """ - name = "VerifyInterfacesStatus" - description = "Verifies the status of the provided interfaces." categories: ClassVar[list[str]] = ["interfaces"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show interfaces description", revision=1)] @@ -229,30 +224,17 @@ class VerifyInterfacesStatus(AntaTest): interfaces: list[InterfaceState] """List of interfaces with their expected state.""" - - class InterfaceState(BaseModel): - """Model for an interface state.""" - - name: Interface - """Interface to validate.""" - status: Literal["up", "down", "adminDown"] - """Expected status of the interface.""" - line_protocol_status: Literal["up", "down", "testing", "unknown", "dormant", "notPresent", "lowerLayerDown"] | None = None - """Expected line protocol status of the interface.""" + InterfaceState: ClassVar[type[InterfaceState]] = InterfaceState @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyInterfacesStatus.""" - command_output = self.instance_commands[0].json_output - self.result.is_success() - intf_not_configured = [] - intf_wrong_state = [] - + command_output = self.instance_commands[0].json_output for interface in self.inputs.interfaces: if (intf_status := get_value(command_output["interfaceDescriptions"], interface.name, separator="..")) is None: - intf_not_configured.append(interface.name) + self.result.is_failure(f"{interface.name} - Not configured") continue status = "up" if intf_status["interfaceStatus"] in {"up", "connected"} else intf_status["interfaceStatus"] @@ -261,18 +243,15 @@ class VerifyInterfacesStatus(AntaTest): # If line protocol status is provided, prioritize checking against both status and line protocol status if interface.line_protocol_status: if interface.status != status or interface.line_protocol_status != proto: - intf_wrong_state.append(f"{interface.name} is {status}/{proto}") + actual_state = f"Expected: {interface.status}/{interface.line_protocol_status}, Actual: {status}/{proto}" + self.result.is_failure(f"{interface.name} - {actual_state}") # If line protocol status is not provided and interface 
status is "up", expect both status and proto to be "up" # If interface status is not "up", check only the interface status without considering line protocol status - elif (interface.status == "up" and (status != "up" or proto != "up")) or (interface.status != status): - intf_wrong_state.append(f"{interface.name} is {status}/{proto}") - - if intf_not_configured: - self.result.is_failure(f"The following interface(s) are not configured: {intf_not_configured}") - - if intf_wrong_state: - self.result.is_failure(f"The following interface(s) are not in the expected state: {intf_wrong_state}") + elif interface.status == "up" and (status != "up" or proto != "up"): + self.result.is_failure(f"{interface.name} - Expected: up/up, Actual: {status}/{proto}") + elif interface.status != status: + self.result.is_failure(f"{interface.name} - Expected: {interface.status}, Actual: {status}") class VerifyStormControlDrops(AntaTest): @@ -291,8 +270,6 @@ class VerifyStormControlDrops(AntaTest): ``` """ - name = "VerifyStormControlDrops" - description = "Verifies there are no interface storm-control drop counters." categories: ClassVar[list[str]] = ["interfaces"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show storm-control", revision=1)] @@ -329,8 +306,6 @@ class VerifyPortChannels(AntaTest): ``` """ - name = "VerifyPortChannels" - description = "Verifies there are no inactive ports in all port channels." categories: ClassVar[list[str]] = ["interfaces"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show port-channel", revision=1)] @@ -364,8 +339,6 @@ class VerifyIllegalLACP(AntaTest): ``` """ - name = "VerifyIllegalLACP" - description = "Verifies there are no illegal LACP packets in all port channels." categories: ClassVar[list[str]] = ["interfaces"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show lacp counters all-ports", revision=1)] @@ -401,7 +374,6 @@ class VerifyLoopbackCount(AntaTest): ``` """ - name = "VerifyLoopbackCount" description = "Verifies the number of loopback interfaces and their status." categories: ClassVar[list[str]] = ["interfaces"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip interface brief", revision=1)] @@ -450,8 +422,6 @@ class VerifySVI(AntaTest): ``` """ - name = "VerifySVI" - description = "Verifies the status of all SVIs." categories: ClassVar[list[str]] = ["interfaces"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip interface brief", revision=1)] @@ -495,7 +465,6 @@ class VerifyL3MTU(AntaTest): ``` """ - name = "VerifyL3MTU" description = "Verifies the global L3 MTU of all L3 interfaces." categories: ClassVar[list[str]] = ["interfaces"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show interfaces", revision=1)] @@ -553,7 +522,6 @@ class VerifyIPProxyARP(AntaTest): ``` """ - name = "VerifyIPProxyARP" description = "Verifies if Proxy ARP is enabled." categories: ClassVar[list[str]] = ["interfaces"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show ip interface {intf}", revision=2)] @@ -607,7 +575,6 @@ class VerifyL2MTU(AntaTest): ``` """ - name = "VerifyL2MTU" description = "Verifies the global L2 MTU of all L2 interfaces." 
categories: ClassVar[list[str]] = ["interfaces"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show interfaces", revision=1)] @@ -662,14 +629,13 @@ class VerifyInterfaceIPv4(AntaTest): - VerifyInterfaceIPv4: interfaces: - name: Ethernet2 - primary_ip: 172.30.11.0/31 + primary_ip: 172.30.11.1/31 secondary_ips: - - 10.10.10.0/31 + - 10.10.10.1/31 - 10.10.10.10/31 ``` """ - name = "VerifyInterfaceIPv4" description = "Verifies the interface IPv4 addresses." categories: ClassVar[list[str]] = ["interfaces"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show ip interface {interface}", revision=2)] @@ -685,9 +651,9 @@ class VerifyInterfaceIPv4(AntaTest): name: Interface """Name of the interface.""" - primary_ip: IPv4Network + primary_ip: IPv4Interface """Primary IPv4 address in CIDR notation.""" - secondary_ips: list[IPv4Network] | None = None + secondary_ips: list[IPv4Interface] | None = None """Optional list of secondary IPv4 addresses in CIDR notation.""" def render(self, template: AntaTemplate) -> list[AntaCommand]: @@ -765,8 +731,6 @@ class VerifyIpVirtualRouterMac(AntaTest): ``` """ - name = "VerifyIpVirtualRouterMac" - description = "Verifies the IP virtual router MAC address." categories: ClassVar[list[str]] = ["interfaces"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip virtual-router", revision=2)] @@ -818,8 +782,6 @@ class VerifyInterfacesSpeed(AntaTest): ``` """ - name = "VerifyInterfacesSpeed" - description = "Verifies the speed, lanes, auto-negotiation status, and mode as full duplex for interfaces." categories: ClassVar[list[str]] = ["interfaces"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show interfaces")] @@ -886,17 +848,27 @@ class VerifyInterfacesSpeed(AntaTest): class VerifyLACPInterfacesStatus(AntaTest): - """Verifies the Link Aggregation Control Protocol (LACP) status of the provided interfaces. + """Verifies the Link Aggregation Control Protocol (LACP) status of the interface. - - Verifies that the interface is a member of the LACP port channel. - - Ensures that the synchronization is established. - - Ensures the interfaces are in the correct state for collecting and distributing traffic. - - Validates that LACP settings, such as timeouts, are correctly configured. (i.e The long timeout mode, also known as "slow" mode, is the default setting.) + This test performs the following checks for each specified interface: + + 1. Verifies that the interface is a member of the LACP port channel. + 2. Verifies LACP port states and operational status: + - Activity: Active LACP mode (initiates) + - Timeout: Short (Fast Mode), Long (Slow Mode - default) + - Aggregation: Port aggregable + - Synchronization: Port in sync with partner + - Collecting: Incoming frames aggregating + - Distributing: Outgoing frames aggregating Expected Results ---------------- - * Success: The test will pass if the provided interfaces are bundled in port channel and all specified parameters are correct. - * Failure: The test will fail if any interface is not bundled in port channel or any of specified parameter is not correct. + * Success: Interface is bundled and all LACP states match expected values for both actor and partner + * Failure: If any of the following occur: + - Interface or port channel is not configured. + - Interface is not bundled in port channel. + - Actor or partner port LACP states don't match expected configuration. 
+ - LACP rate (timeout) mismatch when fast mode is configured. Examples -------- @@ -909,28 +881,15 @@ class VerifyLACPInterfacesStatus(AntaTest): ``` """ - name = "VerifyLACPInterfacesStatus" - description = "Verifies the Link Aggregation Control Protocol(LACP) status of the provided interfaces." categories: ClassVar[list[str]] = ["interfaces"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show lacp interface {interface}", revision=1)] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show lacp interface", revision=1)] class Input(AntaTest.Input): """Input model for the VerifyLACPInterfacesStatus test.""" - interfaces: list[LACPInterface] - """List of LACP member interface.""" - - class LACPInterface(BaseModel): - """Model for an LACP member interface.""" - - name: EthernetInterface - """Ethernet interface to validate.""" - portchannel: PortChannelInterface - """Port Channel in which the interface is bundled.""" - - def render(self, template: AntaTemplate) -> list[AntaCommand]: - """Render the template for each interface in the input list.""" - return [template.render(interface=interface.name) for interface in self.inputs.interfaces] + interfaces: list[InterfaceState] + """List of interfaces with their expected state.""" + InterfaceState: ClassVar[type[InterfaceState]] = InterfaceState @AntaTest.anta_test def test(self) -> None: @@ -940,21 +899,17 @@ class VerifyLACPInterfacesStatus(AntaTest): # Member port verification parameters. member_port_details = ["activity", "aggregation", "synchronization", "collecting", "distributing", "timeout"] - # Iterating over command output for different interfaces - for command, input_entry in zip(self.instance_commands, self.inputs.interfaces): - interface = input_entry.name - portchannel = input_entry.portchannel - + command_output = self.instance_commands[0].json_output + for interface in self.inputs.interfaces: # Verify if a PortChannel is configured with the provided interface - if not (interface_details := get_value(command.json_output, f"portChannels.{portchannel}.interfaces.{interface}")): - self.result.is_failure(f"Interface '{interface}' is not configured to be a member of LACP '{portchannel}'.") + if not (interface_details := get_value(command_output, f"portChannels..{interface.portchannel}..interfaces..{interface.name}", separator="..")): + self.result.is_failure(f"{interface} - Not configured") continue # Verify the interface is bundled in port channel. actor_port_status = interface_details.get("actorPortStatus") if actor_port_status != "bundled": - message = f"For Interface {interface}:\nExpected `bundled` as the local port status, but found `{actor_port_status}` instead.\n" - self.result.is_failure(message) + self.result.is_failure(f"{interface} - Not bundled - Port Status: {actor_port_status}") continue # Collecting actor and partner port details @@ -969,21 +924,12 @@ class VerifyLACPInterfacesStatus(AntaTest): # Forming expected interface details expected_details = {param: param != "timeout" for param in member_port_details} - expected_interface_output = {"actor_port_details": expected_details, "partner_port_details": expected_details} + # Updating the short LACP timeout, if expected. 
+ if interface.lacp_rate_fast: + expected_details["timeout"] = True - # Forming failure message - if actual_interface_output != expected_interface_output: - message = f"For Interface {interface}:\n" - actor_port_failed_log = get_failed_logs( - expected_interface_output.get("actor_port_details", {}), actual_interface_output.get("actor_port_details", {}) - ) - partner_port_failed_log = get_failed_logs( - expected_interface_output.get("partner_port_details", {}), actual_interface_output.get("partner_port_details", {}) - ) - - if actor_port_failed_log: - message += f"Actor port details:{actor_port_failed_log}\n" - if partner_port_failed_log: - message += f"Partner port details:{partner_port_failed_log}\n" - - self.result.is_failure(message) + if (act_port_details := actual_interface_output["actor_port_details"]) != expected_details: + self.result.is_failure(f"{interface} - Actor port details mismatch - {format_data(act_port_details)}") + + if (part_port_details := actual_interface_output["partner_port_details"]) != expected_details: + self.result.is_failure(f"{interface} - Partner port details mismatch - {format_data(part_port_details)}") diff --git a/anta/tests/lanz.py b/anta/tests/lanz.py index dcdab69..0995af7 100644 --- a/anta/tests/lanz.py +++ b/anta/tests/lanz.py @@ -30,7 +30,6 @@ class VerifyLANZ(AntaTest): ``` """ - name = "VerifyLANZ" description = "Verifies if LANZ is enabled." categories: ClassVar[list[str]] = ["lanz"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show queue-monitor length status", revision=1)] diff --git a/anta/tests/logging.py b/anta/tests/logging.py index 2972b4e..c391947 100644 --- a/anta/tests/logging.py +++ b/anta/tests/logging.py @@ -59,8 +59,6 @@ class VerifyLoggingPersistent(AntaTest): ``` """ - name = "VerifyLoggingPersistent" - description = "Verifies if logging persistent is enabled and logs are saved in flash." categories: ClassVar[list[str]] = ["logging"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ AntaCommand(command="show logging", ofmt="text"), @@ -100,8 +98,6 @@ class VerifyLoggingSourceIntf(AntaTest): ``` """ - name = "VerifyLoggingSourceIntf" - description = "Verifies logging source-interface for a specified VRF." categories: ClassVar[list[str]] = ["logging"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show logging", ofmt="text")] @@ -144,8 +140,6 @@ class VerifyLoggingHosts(AntaTest): ``` """ - name = "VerifyLoggingHosts" - description = "Verifies logging hosts (syslog servers) for a specified VRF." categories: ClassVar[list[str]] = ["logging"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show logging", ofmt="text")] @@ -176,10 +170,22 @@ class VerifyLoggingHosts(AntaTest): class VerifyLoggingLogsGeneration(AntaTest): """Verifies if logs are generated. + This test performs the following checks: + + 1. Sends a test log message at the **informational** level + 2. Retrieves the most recent logs (last 30 seconds) + 3. Verifies that the test message was successfully logged + + !!! warning + EOS logging buffer should be set to severity level `informational` or higher for this test to work. + Expected Results ---------------- - * Success: The test will pass if logs are generated. - * Failure: The test will fail if logs are NOT generated. + * Success: If logs are being generated and the test message is found in recent logs. 
+ * Failure: If any of the following occur: + - The test message is not found in recent logs + - The logging system is not capturing new messages + - No logs are being generated Examples -------- @@ -189,8 +195,6 @@ class VerifyLoggingLogsGeneration(AntaTest): ``` """ - name = "VerifyLoggingLogsGeneration" - description = "Verifies if logs are generated." categories: ClassVar[list[str]] = ["logging"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ AntaCommand(command="send log level informational message ANTA VerifyLoggingLogsGeneration validation", ofmt="text"), @@ -213,10 +217,23 @@ class VerifyLoggingLogsGeneration(AntaTest): class VerifyLoggingHostname(AntaTest): """Verifies if logs are generated with the device FQDN. + This test performs the following checks: + + 1. Retrieves the device's configured FQDN + 2. Sends a test log message at the **informational** level + 3. Retrieves the most recent logs (last 30 seconds) + 4. Verifies that the test message includes the complete FQDN of the device + + !!! warning + EOS logging buffer should be set to severity level `informational` or higher for this test to work. + Expected Results ---------------- - * Success: The test will pass if logs are generated with the device FQDN. - * Failure: The test will fail if logs are NOT generated with the device FQDN. + * Success: If logs are generated with the device's complete FQDN. + * Failure: If any of the following occur: + - The test message is not found in recent logs + - The log message does not include the device's FQDN + - The FQDN in the log message doesn't match the configured FQDN Examples -------- @@ -226,8 +243,6 @@ class VerifyLoggingHostname(AntaTest): ``` """ - name = "VerifyLoggingHostname" - description = "Verifies if logs are generated with the device FQDN." categories: ClassVar[list[str]] = ["logging"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ AntaCommand(command="show hostname", revision=1), @@ -257,10 +272,24 @@ class VerifyLoggingHostname(AntaTest): class VerifyLoggingTimestamp(AntaTest): """Verifies if logs are generated with the appropriate timestamp. + This test performs the following checks: + + 1. Sends a test log message at the **informational** level + 2. Retrieves the most recent logs (last 30 seconds) + 3. Verifies that the test message is present with a high-resolution RFC3339 timestamp format + - Example format: `2024-01-25T15:30:45.123456+00:00` + - Includes microsecond precision + - Contains timezone offset + + !!! warning + EOS logging buffer should be set to severity level `informational` or higher for this test to work. + Expected Results ---------------- - * Success: The test will pass if logs are generated with the appropriate timestamp. - * Failure: The test will fail if logs are NOT generated with the appropriate timestamp. + * Success: If logs are generated with the correct high-resolution RFC3339 timestamp format. + * Failure: If any of the following occur: + - The test message is not found in recent logs + - The timestamp format does not match the expected RFC3339 format Examples -------- @@ -270,8 +299,6 @@ class VerifyLoggingTimestamp(AntaTest): ``` """ - name = "VerifyLoggingTimestamp" - description = "Verifies if logs are generated with the appropriate timestamp." 
categories: ClassVar[list[str]] = ["logging"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ AntaCommand(command="send log level informational message ANTA VerifyLoggingTimestamp validation", ofmt="text"), @@ -312,8 +339,6 @@ class VerifyLoggingAccounting(AntaTest): ``` """ - name = "VerifyLoggingAccounting" - description = "Verifies if AAA accounting logs are generated." categories: ClassVar[list[str]] = ["logging"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show aaa accounting logs | tail", ofmt="text")] @@ -344,8 +369,6 @@ class VerifyLoggingErrors(AntaTest): ``` """ - name = "VerifyLoggingErrors" - description = "Verifies there are no syslog messages with a severity of ERRORS or higher." categories: ClassVar[list[str]] = ["logging"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show logging threshold errors", ofmt="text")] diff --git a/anta/tests/mlag.py b/anta/tests/mlag.py index c894b98..e353420 100644 --- a/anta/tests/mlag.py +++ b/anta/tests/mlag.py @@ -36,8 +36,6 @@ class VerifyMlagStatus(AntaTest): ``` """ - name = "VerifyMlagStatus" - description = "Verifies the health status of the MLAG configuration." categories: ClassVar[list[str]] = ["mlag"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show mlag", revision=2)] @@ -78,8 +76,6 @@ class VerifyMlagInterfaces(AntaTest): ``` """ - name = "VerifyMlagInterfaces" - description = "Verifies there are no inactive or active-partial MLAG ports." categories: ClassVar[list[str]] = ["mlag"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show mlag", revision=2)] @@ -114,8 +110,6 @@ class VerifyMlagConfigSanity(AntaTest): ``` """ - name = "VerifyMlagConfigSanity" - description = "Verifies there are no MLAG config-sanity inconsistencies." categories: ClassVar[list[str]] = ["mlag"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show mlag config-sanity", revision=1)] @@ -153,8 +147,6 @@ class VerifyMlagReloadDelay(AntaTest): ``` """ - name = "VerifyMlagReloadDelay" - description = "Verifies the MLAG reload-delay parameters." categories: ClassVar[list[str]] = ["mlag"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show mlag", revision=2)] @@ -203,7 +195,6 @@ class VerifyMlagDualPrimary(AntaTest): ``` """ - name = "VerifyMlagDualPrimary" description = "Verifies the MLAG dual-primary detection parameters." categories: ClassVar[list[str]] = ["mlag"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show mlag detail", revision=2)] @@ -262,7 +253,6 @@ class VerifyMlagPrimaryPriority(AntaTest): ``` """ - name = "VerifyMlagPrimaryPriority" description = "Verifies the configuration of the MLAG primary priority." categories: ClassVar[list[str]] = ["mlag"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show mlag detail", revision=2)] diff --git a/anta/tests/multicast.py b/anta/tests/multicast.py index 554bd57..f6e84ba 100644 --- a/anta/tests/multicast.py +++ b/anta/tests/multicast.py @@ -35,8 +35,6 @@ class VerifyIGMPSnoopingVlans(AntaTest): ``` """ - name = "VerifyIGMPSnoopingVlans" - description = "Verifies the IGMP snooping status for the provided VLANs." 
categories: ClassVar[list[str]] = ["multicast"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip igmp snooping", revision=1)] @@ -78,8 +76,6 @@ class VerifyIGMPSnoopingGlobal(AntaTest): ``` """ - name = "VerifyIGMPSnoopingGlobal" - description = "Verifies the IGMP snooping global configuration." categories: ClassVar[list[str]] = ["multicast"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip igmp snooping", revision=1)] diff --git a/anta/tests/path_selection.py b/anta/tests/path_selection.py index 416cb8c..15b06ae 100644 --- a/anta/tests/path_selection.py +++ b/anta/tests/path_selection.py @@ -18,8 +18,7 @@ from anta.tools import get_value class VerifyPathsHealth(AntaTest): - """ - Verifies the path and telemetry state of all paths under router path-selection. + """Verifies the path and telemetry state of all paths under router path-selection. The expected states are 'IPsec established', 'Resolved' for path and 'active' for telemetry. @@ -38,8 +37,6 @@ class VerifyPathsHealth(AntaTest): ``` """ - name = "VerifyPathsHealth" - description = "Verifies the path and telemetry state of all paths under router path-selection." categories: ClassVar[list[str]] = ["path-selection"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show path-selection paths", revision=1)] @@ -73,8 +70,7 @@ class VerifyPathsHealth(AntaTest): class VerifySpecificPath(AntaTest): - """ - Verifies the path and telemetry state of a specific path for an IPv4 peer under router path-selection. + """Verifies the path and telemetry state of a specific path for an IPv4 peer under router path-selection. The expected states are 'IPsec established', 'Resolved' for path and 'active' for telemetry. @@ -98,8 +94,6 @@ class VerifySpecificPath(AntaTest): ``` """ - name = "VerifySpecificPath" - description = "Verifies the path and telemetry state of a specific path under router path-selection." categories: ClassVar[list[str]] = ["path-selection"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ AntaTemplate(template="show path-selection paths peer {peer} path-group {group} source {source} destination {destination}", revision=1) diff --git a/anta/tests/profiles.py b/anta/tests/profiles.py index 859c886..93edacd 100644 --- a/anta/tests/profiles.py +++ b/anta/tests/profiles.py @@ -33,7 +33,6 @@ class VerifyUnifiedForwardingTableMode(AntaTest): ``` """ - name = "VerifyUnifiedForwardingTableMode" description = "Verifies the device is using the expected UFT mode." categories: ClassVar[list[str]] = ["profiles"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show platform trident forwarding-table partition", revision=1)] @@ -72,7 +71,6 @@ class VerifyTcamProfile(AntaTest): ``` """ - name = "VerifyTcamProfile" description = "Verifies the device TCAM profile." categories: ClassVar[list[str]] = ["profiles"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show hardware tcam profile", revision=1)] diff --git a/anta/tests/ptp.py b/anta/tests/ptp.py index cbb8ee3..687f175 100644 --- a/anta/tests/ptp.py +++ b/anta/tests/ptp.py @@ -33,7 +33,6 @@ class VerifyPtpModeStatus(AntaTest): ``` """ - name = "VerifyPtpModeStatus" description = "Verifies that the device is configured as a PTP Boundary Clock." 
categories: ClassVar[list[str]] = ["ptp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ptp", revision=2)] @@ -80,7 +79,6 @@ class VerifyPtpGMStatus(AntaTest): gmid: str """Identifier of the Grandmaster to which the device should be locked.""" - name = "VerifyPtpGMStatus" description = "Verifies that the device is locked to a valid PTP Grandmaster." categories: ClassVar[list[str]] = ["ptp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ptp", revision=2)] @@ -120,7 +118,6 @@ class VerifyPtpLockStatus(AntaTest): ``` """ - name = "VerifyPtpLockStatus" description = "Verifies that the device was locked to the upstream PTP GM in the last minute." categories: ClassVar[list[str]] = ["ptp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ptp", revision=2)] @@ -161,7 +158,6 @@ class VerifyPtpOffset(AntaTest): ``` """ - name = "VerifyPtpOffset" description = "Verifies that the PTP timing offset is within +/- 1000ns from the master clock." categories: ClassVar[list[str]] = ["ptp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ptp monitor", revision=1)] @@ -206,7 +202,6 @@ class VerifyPtpPortModeStatus(AntaTest): ``` """ - name = "VerifyPtpPortModeStatus" description = "Verifies the PTP interfaces state." categories: ClassVar[list[str]] = ["ptp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ptp", revision=2)] diff --git a/anta/tests/routing/bgp.py b/anta/tests/routing/bgp.py index a373286..2a140dd 100644 --- a/anta/tests/routing/bgp.py +++ b/anta/tests/routing/bgp.py @@ -7,183 +7,56 @@ # mypy: disable-error-code=attr-defined from __future__ import annotations -from ipaddress import IPv4Address, IPv4Network, IPv6Address -from typing import TYPE_CHECKING, Any, ClassVar +from typing import ClassVar, TypeVar -from pydantic import BaseModel, Field, PositiveInt, model_validator -from pydantic.v1.utils import deep_update -from pydantic_extra_types.mac_address import MacAddress +from pydantic import field_validator -from anta.custom_types import Afi, BgpDropStats, BgpUpdateError, MultiProtocolCaps, Safi, Vni +from anta.input_models.routing.bgp import BgpAddressFamily, BgpAfi, BgpNeighbor, BgpPeer, VxlanEndpoint from anta.models import AntaCommand, AntaTemplate, AntaTest -from anta.tools import get_item, get_value +from anta.tools import format_data, get_item, get_value -if TYPE_CHECKING: - import sys +# Using a TypeVar for the BgpPeer model since mypy thinks it's a ClassVar and not a valid type when used in field validators +T = TypeVar("T", bound=BgpPeer) - if sys.version_info >= (3, 11): - from typing import Self - else: - from typing_extensions import Self - -def _add_bgp_failures(failures: dict[tuple[str, str | None], dict[str, Any]], afi: Afi, safi: Safi | None, vrf: str, issue: str | dict[str, Any]) -> None: - """Add a BGP failure entry to the given `failures` dictionary. - - Note: This function modifies `failures` in-place. - - Parameters - ---------- - failures - The dictionary to which the failure will be added. - afi - The address family identifier. - vrf - The VRF name. - safi - The subsequent address family identifier. - issue - A description of the issue. Can be of any type. 
- - Example - ------- - The `failures` dictionary will have the following structure: - ``` - { - ('afi1', 'safi1'): { - 'afi': 'afi1', - 'safi': 'safi1', - 'vrfs': { - 'vrf1': issue1, - 'vrf2': issue2 - } - }, - ('afi2', None): { - 'afi': 'afi2', - 'vrfs': { - 'vrf1': issue3 - } - } - } - ``` - - """ - key = (afi, safi) - - failure_entry = failures.setdefault(key, {"afi": afi, "safi": safi, "vrfs": {}}) if safi else failures.setdefault(key, {"afi": afi, "vrfs": {}}) - - failure_entry["vrfs"][vrf] = issue - - -def _check_peer_issues(peer_data: dict[str, Any] | None) -> dict[str, Any]: - """Check for issues in BGP peer data. +def _check_bgp_neighbor_capability(capability_status: dict[str, bool]) -> bool: + """Check if a BGP neighbor capability is advertised, received, and enabled. Parameters ---------- - peer_data - The BGP peer data dictionary nested in the `show bgp <afi> <safi> summary` command. + capability_status + A dictionary containing the capability status. Returns ------- - dict - Dictionary with keys indicating issues or an empty dictionary if no issues. - - Raises - ------ - ValueError - If any of the required keys ("peerState", "inMsgQueue", "outMsgQueue") are missing in `peer_data`, i.e. invalid BGP peer data. + bool + True if the capability is advertised, received, and enabled, False otherwise. Example ------- - This can for instance return - ``` - {"peerNotFound": True} - {"peerState": "Idle", "inMsgQueue": 2, "outMsgQueue": 0} - {} - ``` - + >>> _check_bgp_neighbor_capability({"advertised": True, "received": True, "enabled": True}) + True """ - if peer_data is None: - return {"peerNotFound": True} - - if any(key not in peer_data for key in ["peerState", "inMsgQueue", "outMsgQueue"]): - msg = "Provided BGP peer data is invalid." - raise ValueError(msg) - - if peer_data["peerState"] != "Established" or peer_data["inMsgQueue"] != 0 or peer_data["outMsgQueue"] != 0: - return {"peerState": peer_data["peerState"], "inMsgQueue": peer_data["inMsgQueue"], "outMsgQueue": peer_data["outMsgQueue"]} - - return {} - - -def _add_bgp_routes_failure( - bgp_routes: list[str], bgp_output: dict[str, Any], peer: str, vrf: str, route_type: str = "advertised_routes" -) -> dict[str, dict[str, dict[str, dict[str, list[str]]]]]: - """Identify missing BGP routes and invalid or inactive route entries. - - This function checks the BGP output from the device against the expected routes. - - It identifies any missing routes as well as any routes that are invalid or inactive. The results are returned in a dictionary. - - Parameters - ---------- - bgp_routes - The list of expected routes. - bgp_output - The BGP output from the device. - peer - The IP address of the BGP peer. - vrf - The name of the VRF for which the routes need to be verified. - route_type - The type of BGP routes. Defaults to 'advertised_routes'. - - Returns - ------- - dict[str, dict[str, dict[str, dict[str, list[str]]]]] - A dictionary containing the missing routes and invalid or inactive routes. 
- - """ - # Prepare the failure routes dictionary - failure_routes: dict[str, dict[str, Any]] = {} - - # Iterate over the expected BGP routes - for route in bgp_routes: - str_route = str(route) - failure: dict[str, Any] = {"bgp_peers": {peer: {vrf: {route_type: {}}}}} - - # Check if the route is missing in the BGP output - if str_route not in bgp_output: - # If missing, add it to the failure routes dictionary - failure["bgp_peers"][peer][vrf][route_type][str_route] = "Not found" - failure_routes = deep_update(failure_routes, failure) - continue - - # Check if the route is active and valid - is_active = bgp_output[str_route]["bgpRoutePaths"][0]["routeType"]["valid"] - is_valid = bgp_output[str_route]["bgpRoutePaths"][0]["routeType"]["active"] - - # If the route is either inactive or invalid, add it to the failure routes dictionary - if not is_active or not is_valid: - failure["bgp_peers"][peer][vrf][route_type][str_route] = {"valid": is_valid, "active": is_active} - failure_routes = deep_update(failure_routes, failure) - - return failure_routes + return all(capability_status.get(state, False) for state in ("advertised", "received", "enabled")) class VerifyBGPPeerCount(AntaTest): - """Verifies the count of BGP peers for a given address family. + """Verifies the count of BGP peers for given address families. - It supports multiple types of Address Families Identifiers (AFI) and Subsequent Address Family Identifiers (SAFI). + This test performs the following checks for each specified address family: - For SR-TE SAFI, the EOS command supports sr-te first then ipv4/ipv6 (AFI) which is handled automatically in this test. - - Please refer to the Input class attributes below for details. + 1. Confirms that the specified VRF is configured. + 2. Counts the number of peers that are: + - If `check_peer_state` is set to True, Counts the number of BGP peers that are in the `Established` state and + have successfully negotiated the specified AFI/SAFI + - If `check_peer_state` is set to False, skips validation of the `Established` state and AFI/SAFI negotiation. Expected Results ---------------- - * Success: If the count of BGP peers matches the expected count for each address family and VRF. - * Failure: If the count of BGP peers does not match the expected count, or if BGP is not configured for an expected VRF or address family. + * Success: If the count of BGP peers matches the expected count with `check_peer_state` enabled/disabled. + * Failure: If any of the following occur: + - The specified VRF is not configured. + - The BGP peer count does not match expected value with `check_peer_state` enabled/disabled." Examples -------- @@ -209,130 +82,78 @@ class VerifyBGPPeerCount(AntaTest): ``` """ - name = "VerifyBGPPeerCount" - description = "Verifies the count of BGP peers." 
categories: ClassVar[list[str]] = ["bgp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ - AntaTemplate(template="show bgp {afi} {safi} summary vrf {vrf}", revision=3), - AntaTemplate(template="show bgp {afi} summary", revision=3), - ] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bgp summary vrf all", revision=1)] class Input(AntaTest.Input): """Input model for the VerifyBGPPeerCount test.""" - address_families: list[BgpAfi] - """List of BGP address families (BgpAfi).""" - - class BgpAfi(BaseModel): - """Model for a BGP address family (AFI) and subsequent address family (SAFI).""" - - afi: Afi - """BGP address family (AFI).""" - safi: Safi | None = None - """Optional BGP subsequent service family (SAFI). - - If the input `afi` is `ipv4` or `ipv6`, a valid `safi` must be provided. - """ - vrf: str = "default" - """ - Optional VRF for IPv4 and IPv6. If not provided, it defaults to `default`. - - If the input `afi` is not `ipv4` or `ipv6`, e.g. `evpn`, `vrf` must be `default`. - """ - num_peers: PositiveInt - """Number of expected BGP peer(s).""" - - @model_validator(mode="after") - def validate_inputs(self) -> Self: - """Validate the inputs provided to the BgpAfi class. - - If afi is either ipv4 or ipv6, safi must be provided. - - If afi is not ipv4 or ipv6, safi must not be provided and vrf must be default. - """ - if self.afi in ["ipv4", "ipv6"]: - if self.safi is None: - msg = "'safi' must be provided when afi is ipv4 or ipv6" - raise ValueError(msg) - elif self.safi is not None: - msg = "'safi' must not be provided when afi is not ipv4 or ipv6" + address_families: list[BgpAddressFamily] + """List of BGP address families.""" + BgpAfi: ClassVar[type[BgpAfi]] = BgpAfi + + @field_validator("address_families") + @classmethod + def validate_address_families(cls, address_families: list[BgpAddressFamily]) -> list[BgpAddressFamily]: + """Validate that 'num_peers' field is provided in each address family.""" + for af in address_families: + if af.num_peers is None: + msg = f"{af} 'num_peers' field missing in the input" raise ValueError(msg) - elif self.vrf != "default": - msg = "'vrf' must be default when afi is not ipv4 or ipv6" - raise ValueError(msg) - return self - - def render(self, template: AntaTemplate) -> list[AntaCommand]: - """Render the template for each BGP address family in the input list.""" - commands = [] - for afi in self.inputs.address_families: - if template == VerifyBGPPeerCount.commands[0] and afi.afi in ["ipv4", "ipv6"] and afi.safi != "sr-te": - commands.append(template.render(afi=afi.afi, safi=afi.safi, vrf=afi.vrf)) - - # For SR-TE SAFI, the EOS command supports sr-te first then ipv4/ipv6 - elif template == VerifyBGPPeerCount.commands[0] and afi.afi in ["ipv4", "ipv6"] and afi.safi == "sr-te": - commands.append(template.render(afi=afi.safi, safi=afi.afi, vrf=afi.vrf)) - elif template == VerifyBGPPeerCount.commands[1] and afi.afi not in ["ipv4", "ipv6"]: - commands.append(template.render(afi=afi.afi)) - return commands + return address_families @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBGPPeerCount.""" self.result.is_success() - failures: dict[tuple[str, Any], dict[str, Any]] = {} - - for command in self.instance_commands: - num_peers = None - peer_count = 0 - command_output = command.json_output - - afi = command.params.afi - safi = command.params.safi if hasattr(command.params, "safi") else None - afi_vrf = command.params.vrf if hasattr(command.params, "vrf") else "default" - - # Swapping 
AFI and SAFI in case of SR-TE - if afi == "sr-te": - afi, safi = safi, afi - - for input_entry in self.inputs.address_families: - if input_entry.afi == afi and input_entry.safi == safi and input_entry.vrf == afi_vrf: - num_peers = input_entry.num_peers - break + output = self.instance_commands[0].json_output - if not (vrfs := command_output.get("vrfs")): - _add_bgp_failures(failures=failures, afi=afi, safi=safi, vrf=afi_vrf, issue="Not Configured") + for address_family in self.inputs.address_families: + # Check if the VRF is configured + if (vrf_output := get_value(output, f"vrfs.{address_family.vrf}")) is None: + self.result.is_failure(f"{address_family} - VRF not configured") continue - if afi_vrf == "all": - for vrf_data in vrfs.values(): - peer_count += len(vrf_data["peers"]) + peers_data = vrf_output.get("peers", {}).values() + if not address_family.check_peer_state: + # Count the number of peers without considering the state and negotiated AFI/SAFI check if the count matches the expected count + peer_count = sum(1 for peer_data in peers_data if address_family.eos_key in peer_data) else: - peer_count += len(command_output["vrfs"][afi_vrf]["peers"]) + # Count the number of established peers with negotiated AFI/SAFI + peer_count = sum( + 1 + for peer_data in peers_data + if peer_data.get("peerState") == "Established" and get_value(peer_data, f"{address_family.eos_key}.afiSafiState") == "negotiated" + ) - if peer_count != num_peers: - _add_bgp_failures(failures=failures, afi=afi, safi=safi, vrf=afi_vrf, issue=f"Expected: {num_peers}, Actual: {peer_count}") - - if failures: - self.result.is_failure(f"Failures: {list(failures.values())}") + # Check if the count matches the expected count + if address_family.num_peers != peer_count: + self.result.is_failure(f"{address_family} - Expected: {address_family.num_peers}, Actual: {peer_count}") class VerifyBGPPeersHealth(AntaTest): - """Verifies the health of BGP peers. - - It will validate that all BGP sessions are established and all message queues for these BGP sessions are empty for a given address family. + """Verifies the health of BGP peers for given address families. - It supports multiple types of Address Families Identifiers (AFI) and Subsequent Address Family Identifiers (SAFI). + This test performs the following checks for each specified address family: - For SR-TE SAFI, the EOS command supports sr-te first then ipv4/ipv6 (AFI) which is handled automatically in this test. - - Please refer to the Input class attributes below for details. + 1. Validates that the VRF is configured. + 2. Checks if there are any peers for the given AFI/SAFI. + 3. For each relevant peer: + - Verifies that the BGP session is in the `Established` state. + - Confirms that the AFI/SAFI state is `negotiated`. + - Checks that both input and output TCP message queues are empty. + Can be disabled by setting `check_tcp_queues` to `False`. Expected Results ---------------- - * Success: If all BGP sessions are established and all messages queues are empty for each address family and VRF. - * Failure: If there are issues with any of the BGP sessions, or if BGP is not configured for an expected VRF or address family. + * Success: If all checks pass for all specified address families and their peers. + * Failure: If any of the following occur: + - The specified VRF is not configured. + - No peers are found for a given AFI/SAFI. + - Any BGP session is not in the `Established` state. + - The AFI/SAFI state is not 'negotiated' for any peer. 
+ - Any TCP message queue (input or output) is not empty when `check_tcp_queues` is `True` (default). Examples -------- @@ -348,130 +169,83 @@ class VerifyBGPPeersHealth(AntaTest): - afi: "ipv6" safi: "unicast" vrf: "DEV" + check_tcp_queues: false ``` """ - name = "VerifyBGPPeersHealth" - description = "Verifies the health of BGP peers" categories: ClassVar[list[str]] = ["bgp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ - AntaTemplate(template="show bgp {afi} {safi} summary vrf {vrf}", revision=3), - AntaTemplate(template="show bgp {afi} summary", revision=3), - ] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bgp neighbors vrf all", revision=3)] class Input(AntaTest.Input): """Input model for the VerifyBGPPeersHealth test.""" - address_families: list[BgpAfi] - """List of BGP address families (BgpAfi).""" - - class BgpAfi(BaseModel): - """Model for a BGP address family (AFI) and subsequent address family (SAFI).""" - - afi: Afi - """BGP address family (AFI).""" - safi: Safi | None = None - """Optional BGP subsequent service family (SAFI). - - If the input `afi` is `ipv4` or `ipv6`, a valid `safi` must be provided. - """ - vrf: str = "default" - """ - Optional VRF for IPv4 and IPv6. If not provided, it defaults to `default`. - - If the input `afi` is not `ipv4` or `ipv6`, e.g. `evpn`, `vrf` must be `default`. - """ - - @model_validator(mode="after") - def validate_inputs(self) -> Self: - """Validate the inputs provided to the BgpAfi class. - - If afi is either ipv4 or ipv6, safi must be provided. - - If afi is not ipv4 or ipv6, safi must not be provided and vrf must be default. - """ - if self.afi in ["ipv4", "ipv6"]: - if self.safi is None: - msg = "'safi' must be provided when afi is ipv4 or ipv6" - raise ValueError(msg) - elif self.safi is not None: - msg = "'safi' must not be provided when afi is not ipv4 or ipv6" - raise ValueError(msg) - elif self.vrf != "default": - msg = "'vrf' must be default when afi is not ipv4 or ipv6" - raise ValueError(msg) - return self - - def render(self, template: AntaTemplate) -> list[AntaCommand]: - """Render the template for each BGP address family in the input list.""" - commands = [] - for afi in self.inputs.address_families: - if template == VerifyBGPPeersHealth.commands[0] and afi.afi in ["ipv4", "ipv6"] and afi.safi != "sr-te": - commands.append(template.render(afi=afi.afi, safi=afi.safi, vrf=afi.vrf)) - - # For SR-TE SAFI, the EOS command supports sr-te first then ipv4/ipv6 - elif template == VerifyBGPPeersHealth.commands[0] and afi.afi in ["ipv4", "ipv6"] and afi.safi == "sr-te": - commands.append(template.render(afi=afi.safi, safi=afi.afi, vrf=afi.vrf)) - elif template == VerifyBGPPeersHealth.commands[1] and afi.afi not in ["ipv4", "ipv6"]: - commands.append(template.render(afi=afi.afi)) - return commands + address_families: list[BgpAddressFamily] + """List of BGP address families.""" + BgpAfi: ClassVar[type[BgpAfi]] = BgpAfi @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBGPPeersHealth.""" self.result.is_success() - failures: dict[tuple[str, Any], dict[str, Any]] = {} - - for command in self.instance_commands: - command_output = command.json_output + output = self.instance_commands[0].json_output - afi = command.params.afi - safi = command.params.safi if hasattr(command.params, "safi") else None - afi_vrf = command.params.vrf if hasattr(command.params, "vrf") else "default" + for address_family in self.inputs.address_families: + # Check if the VRF is configured + 
if (vrf_output := get_value(output, f"vrfs.{address_family.vrf}")) is None: + self.result.is_failure(f"{address_family} - VRF not configured") + continue - # Swapping AFI and SAFI in case of SR-TE - if afi == "sr-te": - afi, safi = safi, afi + # Check if any peers are found for this AFI/SAFI + relevant_peers = [ + peer for peer in vrf_output.get("peerList", []) if get_value(peer, f"neighborCapabilities.multiprotocolCaps.{address_family.eos_key}") is not None + ] - if not (vrfs := command_output.get("vrfs")): - _add_bgp_failures(failures=failures, afi=afi, safi=safi, vrf=afi_vrf, issue="Not Configured") + if not relevant_peers: + self.result.is_failure(f"{address_family} - No peers found") continue - for vrf, vrf_data in vrfs.items(): - if not (peers := vrf_data.get("peers")): - _add_bgp_failures(failures=failures, afi=afi, safi=safi, vrf=afi_vrf, issue="No Peers") + for peer in relevant_peers: + # Check if the BGP session is established + if peer["state"] != "Established": + self.result.is_failure(f"{address_family} Peer: {peer['peerAddress']} - Session state is not established - State: {peer['state']}") continue - peer_issues = {} - for peer, peer_data in peers.items(): - issues = _check_peer_issues(peer_data) + # Check if the AFI/SAFI state is negotiated + capability_status = get_value(peer, f"neighborCapabilities.multiprotocolCaps.{address_family.eos_key}") + if not _check_bgp_neighbor_capability(capability_status): + self.result.is_failure(f"{address_family} Peer: {peer['peerAddress']} - AFI/SAFI state is not negotiated - {format_data(capability_status)}") - if issues: - peer_issues[peer] = issues - - if peer_issues: - _add_bgp_failures(failures=failures, afi=afi, safi=safi, vrf=vrf, issue=peer_issues) - - if failures: - self.result.is_failure(f"Failures: {list(failures.values())}") + # Check the TCP session message queues + if address_family.check_tcp_queues: + inq = peer["peerTcpInfo"]["inputQueueLength"] + outq = peer["peerTcpInfo"]["outputQueueLength"] + if inq != 0 or outq != 0: + self.result.is_failure(f"{address_family} Peer: {peer['peerAddress']} - Session has non-empty message queues - InQ: {inq}, OutQ: {outq}") class VerifyBGPSpecificPeers(AntaTest): - """Verifies the health of specific BGP peer(s). - - It will validate that the BGP session is established and all message queues for this BGP session are empty for the given peer(s). - - It supports multiple types of Address Families Identifiers (AFI) and Subsequent Address Family Identifiers (SAFI). + """Verifies the health of specific BGP peer(s) for given address families. - For SR-TE SAFI, the EOS command supports sr-te first then ipv4/ipv6 (AFI) which is handled automatically in this test. + This test performs the following checks for each specified address family and peer: - Please refer to the Input class attributes below for details. + 1. Confirms that the specified VRF is configured. + 2. For each specified peer: + - Verifies that the peer is found in the BGP configuration. + - Checks that the BGP session is in the `Established` state. + - Confirms that the AFI/SAFI state is `negotiated`. + - Ensures that both input and output TCP message queues are empty. + Can be disabled by setting `check_tcp_queues` to `False`. Expected Results ---------------- - * Success: If the BGP session is established and all messages queues are empty for each given peer. - * Failure: If the BGP session has issues or is not configured, or if BGP is not configured for an expected VRF or address family. 
+ * Success: If all checks pass for all specified peers in all address families. + * Failure: If any of the following occur: + - The specified VRF is not configured. + - A specified peer is not found in the BGP configuration. + - The BGP session for a peer is not in the `Established` state. + - The AFI/SAFI state is not `negotiated` for a peer. + - Any TCP message queue (input or output) is not empty for a peer when `check_tcp_queues` is `True` (default). Examples -------- @@ -494,130 +268,85 @@ class VerifyBGPSpecificPeers(AntaTest): ``` """ - name = "VerifyBGPSpecificPeers" - description = "Verifies the health of specific BGP peer(s)." categories: ClassVar[list[str]] = ["bgp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ - AntaTemplate(template="show bgp {afi} {safi} summary vrf {vrf}", revision=3), - AntaTemplate(template="show bgp {afi} summary", revision=3), - ] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bgp neighbors vrf all", revision=3)] class Input(AntaTest.Input): """Input model for the VerifyBGPSpecificPeers test.""" - address_families: list[BgpAfi] - """List of BGP address families (BgpAfi).""" - - class BgpAfi(BaseModel): - """Model for a BGP address family (AFI) and subsequent address family (SAFI).""" - - afi: Afi - """BGP address family (AFI).""" - safi: Safi | None = None - """Optional BGP subsequent service family (SAFI). - - If the input `afi` is `ipv4` or `ipv6`, a valid `safi` must be provided. - """ - vrf: str = "default" - """ - Optional VRF for IPv4 and IPv6. If not provided, it defaults to `default`. - - `all` is NOT supported. - - If the input `afi` is not `ipv4` or `ipv6`, e.g. `evpn`, `vrf` must be `default`. - """ - peers: list[IPv4Address | IPv6Address] - """List of BGP IPv4 or IPv6 peer.""" - - @model_validator(mode="after") - def validate_inputs(self) -> Self: - """Validate the inputs provided to the BgpAfi class. - - If afi is either ipv4 or ipv6, safi must be provided and vrf must NOT be all. - - If afi is not ipv4 or ipv6, safi must not be provided and vrf must be default. - """ - if self.afi in ["ipv4", "ipv6"]: - if self.safi is None: - msg = "'safi' must be provided when afi is ipv4 or ipv6" - raise ValueError(msg) - if self.vrf == "all": - msg = "'all' is not supported in this test. Use VerifyBGPPeersHealth test instead." 
- raise ValueError(msg) - elif self.safi is not None: - msg = "'safi' must not be provided when afi is not ipv4 or ipv6" + address_families: list[BgpAddressFamily] + """List of BGP address families.""" + BgpAfi: ClassVar[type[BgpAfi]] = BgpAfi + + @field_validator("address_families") + @classmethod + def validate_address_families(cls, address_families: list[BgpAddressFamily]) -> list[BgpAddressFamily]: + """Validate that 'peers' field is provided in each address family.""" + for af in address_families: + if af.peers is None: + msg = f"{af} 'peers' field missing in the input" raise ValueError(msg) - elif self.vrf != "default": - msg = "'vrf' must be default when afi is not ipv4 or ipv6" - raise ValueError(msg) - return self - - def render(self, template: AntaTemplate) -> list[AntaCommand]: - """Render the template for each BGP address family in the input list.""" - commands = [] - - for afi in self.inputs.address_families: - if template == VerifyBGPSpecificPeers.commands[0] and afi.afi in ["ipv4", "ipv6"] and afi.safi != "sr-te": - commands.append(template.render(afi=afi.afi, safi=afi.safi, vrf=afi.vrf)) - - # For SR-TE SAFI, the EOS command supports sr-te first then ipv4/ipv6 - elif template == VerifyBGPSpecificPeers.commands[0] and afi.afi in ["ipv4", "ipv6"] and afi.safi == "sr-te": - commands.append(template.render(afi=afi.safi, safi=afi.afi, vrf=afi.vrf)) - elif template == VerifyBGPSpecificPeers.commands[1] and afi.afi not in ["ipv4", "ipv6"]: - commands.append(template.render(afi=afi.afi)) - return commands + return address_families @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBGPSpecificPeers.""" self.result.is_success() - failures: dict[tuple[str, Any], dict[str, Any]] = {} - - for command in self.instance_commands: - command_output = command.json_output + output = self.instance_commands[0].json_output - afi = command.params.afi - safi = command.params.safi if hasattr(command.params, "safi") else None - afi_vrf = command.params.vrf if hasattr(command.params, "vrf") else "default" + for address_family in self.inputs.address_families: + # Check if the VRF is configured + if (vrf_output := get_value(output, f"vrfs.{address_family.vrf}")) is None: + self.result.is_failure(f"{address_family} - VRF not configured") + continue - # Swapping AFI and SAFI in case of SR-TE - if afi == "sr-te": - afi, safi = safi, afi + for peer in address_family.peers: + peer_ip = str(peer) - for input_entry in self.inputs.address_families: - if input_entry.afi == afi and input_entry.safi == safi and input_entry.vrf == afi_vrf: - afi_peers = input_entry.peers - break + # Check if the peer is found + if (peer_data := get_item(vrf_output["peerList"], "peerAddress", peer_ip)) is None: + self.result.is_failure(f"{address_family} Peer: {peer_ip} - Not configured") + continue - if not (vrfs := command_output.get("vrfs")): - _add_bgp_failures(failures=failures, afi=afi, safi=safi, vrf=afi_vrf, issue="Not Configured") - continue + # Check if the BGP session is established + if peer_data["state"] != "Established": + self.result.is_failure(f"{address_family} Peer: {peer_ip} - Session state is not established - State: {peer_data['state']}") + continue - peer_issues = {} - for peer in afi_peers: - peer_ip = str(peer) - peer_data = get_value(dictionary=vrfs, key=f"{afi_vrf}_peers_{peer_ip}", separator="_") - issues = _check_peer_issues(peer_data) - if issues: - peer_issues[peer_ip] = issues + # Check if the AFI/SAFI state is negotiated + capability_status = get_value(peer_data, 
f"neighborCapabilities.multiprotocolCaps.{address_family.eos_key}") + if not capability_status: + self.result.is_failure(f"{address_family} Peer: {peer_ip} - AFI/SAFI state is not negotiated") - if peer_issues: - _add_bgp_failures(failures=failures, afi=afi, safi=safi, vrf=afi_vrf, issue=peer_issues) + if capability_status and not _check_bgp_neighbor_capability(capability_status): + self.result.is_failure(f"{address_family} Peer: {peer_ip} - AFI/SAFI state is not negotiated - {format_data(capability_status)}") - if failures: - self.result.is_failure(f"Failures: {list(failures.values())}") + # Check the TCP session message queues + if address_family.check_tcp_queues: + inq = peer_data["peerTcpInfo"]["inputQueueLength"] + outq = peer_data["peerTcpInfo"]["outputQueueLength"] + if inq != 0 or outq != 0: + self.result.is_failure(f"{address_family} Peer: {peer_ip} - Session has non-empty message queues - InQ: {inq}, OutQ: {outq}") class VerifyBGPExchangedRoutes(AntaTest): - """Verifies if the BGP peers have correctly advertised and received routes. + """Verifies the advertised and received routes of BGP peers. + + This test performs the following checks for each specified peer: - The route type should be 'valid' and 'active' for a specified VRF. + For each advertised and received route: + - Confirms that the route exists in the BGP route table. + - Verifies that the route is in an 'active' and 'valid' state. Expected Results ---------------- - * Success: If the BGP peers have correctly advertised and received routes of type 'valid' and 'active' for a specified VRF. - * Failure: If a BGP peer is not found, the expected advertised/received routes are not found, or the routes are not 'valid' or 'active'. + * Success: If all of the following conditions are met: + - All specified advertised/received routes are found in the BGP route table. + - All routes are in both 'active' and 'valid' states. + * Failure: If any of the following occur: + - An advertised/received route is not found in the BGP route table. + - Any route is not in an 'active' or 'valid' state. Examples -------- @@ -642,8 +371,6 @@ class VerifyBGPExchangedRoutes(AntaTest): ``` """ - name = "VerifyBGPExchangedRoutes" - description = "Verifies the advertised and received routes of BGP peers." categories: ClassVar[list[str]] = ["bgp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ AntaTemplate(template="show bgp neighbors {peer} advertised-routes vrf {vrf}", revision=3), @@ -653,71 +380,87 @@ class VerifyBGPExchangedRoutes(AntaTest): class Input(AntaTest.Input): """Input model for the VerifyBGPExchangedRoutes test.""" - bgp_peers: list[BgpNeighbor] - """List of BGP neighbors.""" - - class BgpNeighbor(BaseModel): - """Model for a BGP neighbor.""" - - peer_address: IPv4Address - """IPv4 address of a BGP peer.""" - vrf: str = "default" - """Optional VRF for BGP peer. 
If not provided, it defaults to `default`.""" - advertised_routes: list[IPv4Network] - """List of advertised routes in CIDR format.""" - received_routes: list[IPv4Network] - """List of received routes in CIDR format.""" + bgp_peers: list[BgpPeer] + """List of BGP peers.""" + BgpNeighbor: ClassVar[type[BgpNeighbor]] = BgpNeighbor + + @field_validator("bgp_peers") + @classmethod + def validate_bgp_peers(cls, bgp_peers: list[BgpPeer]) -> list[BgpPeer]: + """Validate that 'advertised_routes' or 'received_routes' field is provided in each address family.""" + for peer in bgp_peers: + if peer.advertised_routes is None or peer.received_routes is None: + msg = f"{peer} 'advertised_routes' or 'received_routes' field missing in the input" + raise ValueError(msg) + return bgp_peers def render(self, template: AntaTemplate) -> list[AntaCommand]: - """Render the template for each BGP neighbor in the input list.""" + """Render the template for each BGP peer in the input list.""" return [template.render(peer=str(bgp_peer.peer_address), vrf=bgp_peer.vrf) for bgp_peer in self.inputs.bgp_peers] @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBGPExchangedRoutes.""" - failures: dict[str, dict[str, Any]] = {"bgp_peers": {}} - - # Iterating over command output for different peers - for command in self.instance_commands: - peer = command.params.peer - vrf = command.params.vrf - for input_entry in self.inputs.bgp_peers: - if str(input_entry.peer_address) == peer and input_entry.vrf == vrf: - advertised_routes = input_entry.advertised_routes - received_routes = input_entry.received_routes - break - failure = {vrf: ""} - - # Verify if a BGP peer is configured with the provided vrf - if not (bgp_routes := get_value(command.json_output, f"vrfs.{vrf}.bgpRouteEntries")): - failure[vrf] = "Not configured" - failures["bgp_peers"][peer] = failure - continue + self.result.is_success() - # Validate advertised routes - if "advertised-routes" in command.command: - failure_routes = _add_bgp_routes_failure(advertised_routes, bgp_routes, peer, vrf) + num_peers = len(self.inputs.bgp_peers) - # Validate received routes - else: - failure_routes = _add_bgp_routes_failure(received_routes, bgp_routes, peer, vrf, route_type="received_routes") - failures = deep_update(failures, failure_routes) + # Process each peer and its corresponding command pair + for peer_idx, peer in enumerate(self.inputs.bgp_peers): + # For n peers, advertised routes are at indices 0 to n-1, and received routes are at indices n to 2n-1 + advertised_routes_cmd = self.instance_commands[peer_idx] + received_routes_cmd = self.instance_commands[peer_idx + num_peers] + + # Get the BGP route entries of each command + command_output = { + "Advertised": get_value(advertised_routes_cmd.json_output, f"vrfs.{peer.vrf}.bgpRouteEntries", default={}), + "Received": get_value(received_routes_cmd.json_output, f"vrfs.{peer.vrf}.bgpRouteEntries", default={}), + } + + # Validate both advertised and received routes + for route_type, routes in zip(["Advertised", "Received"], [peer.advertised_routes, peer.received_routes]): + entries = command_output[route_type] + for route in routes: + # Check if the route is found + if str(route) not in entries: + self.result.is_failure(f"{peer} {route_type} route: {route} - Not found") + continue - if not failures["bgp_peers"]: - self.result.is_success() - else: - self.result.is_failure(f"Following BGP peers are not found or routes are not exchanged properly:\n{failures}") + # Check if the route is active and valid + 
route_paths = entries[str(route)]["bgpRoutePaths"][0]["routeType"] + is_active = route_paths["active"] + is_valid = route_paths["valid"] + if not is_active or not is_valid: + self.result.is_failure(f"{peer} {route_type} route: {route} - Valid: {is_valid}, Active: {is_active}") class VerifyBGPPeerMPCaps(AntaTest): - """Verifies the multiprotocol capabilities of a BGP peer in a specified VRF. + """Verifies the multiprotocol capabilities of BGP peers. - Supports `strict: True` to verify that only the specified capabilities are configured, requiring an exact match. + This test performs the following checks for each specified peer: + + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. For each specified capability: + - Validates that the capability is present in the peer configuration. + - Confirms that the capability is advertised, received, and enabled. + 4. When strict mode is enabled (`strict: true`): + - Verifies that only the specified capabilities are configured. + - Ensures an exact match between configured and expected capabilities. Expected Results ---------------- - * Success: The test will pass if the BGP peer's multiprotocol capabilities are advertised, received, and enabled in the specified VRF. - * Failure: The test will fail if BGP peers are not found or multiprotocol capabilities are not advertised, received, and enabled in the specified VRF. + * Success: If all of the following conditions are met: + - The specified VRF is configured. + - All specified peers are found in the BGP configuration. + - All specified capabilities are present and properly negotiated. + - In strict mode, only the specified capabilities are configured. + * Failure: If any of the following occur: + - The specified VRF is not configured. + - A specified peer is not found in the BGP configuration. + - A specified capability is not found. + - A capability is not properly negotiated (not advertised, received, or enabled). + - In strict mode, additional or missing capabilities are detected. Examples -------- @@ -734,8 +477,6 @@ class VerifyBGPPeerMPCaps(AntaTest): ``` """ - name = "VerifyBGPPeerMPCaps" - description = "Verifies the multiprotocol capabilities of a BGP peer." categories: ClassVar[list[str]] = ["bgp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bgp neighbors vrf all", revision=3)] @@ -744,78 +485,77 @@ class VerifyBGPPeerMPCaps(AntaTest): bgp_peers: list[BgpPeer] """List of BGP peers""" - - class BgpPeer(BaseModel): - """Model for a BGP peer.""" - - peer_address: IPv4Address - """IPv4 address of a BGP peer.""" - vrf: str = "default" - """Optional VRF for BGP peer. If not provided, it defaults to `default`.""" - strict: bool = False - """If True, requires exact matching of provided capabilities. Defaults to False.""" - capabilities: list[MultiProtocolCaps] - """List of multiprotocol capabilities to be verified.""" + BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer + + @field_validator("bgp_peers") + @classmethod + def validate_bgp_peers(cls, bgp_peers: list[T]) -> list[T]: + """Validate that 'capabilities' field is provided in each address family.""" + for peer in bgp_peers: + if peer.capabilities is None: + msg = f"{peer} 'capabilities' field missing in the input" + raise ValueError(msg) + return bgp_peers @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBGPPeerMPCaps.""" - failures: dict[str, Any] = {"bgp_peers": {}} - - # Iterate over each bgp peer. 
- for bgp_peer in self.inputs.bgp_peers: - peer = str(bgp_peer.peer_address) - vrf = bgp_peer.vrf - capabilities = bgp_peer.capabilities - failure: dict[str, dict[str, dict[str, Any]]] = {"bgp_peers": {peer: {vrf: {}}}} - - # Check if BGP output exists. - if ( - not (bgp_output := get_value(self.instance_commands[0].json_output, f"vrfs.{vrf}.peerList")) - or (bgp_output := get_item(bgp_output, "peerAddress", peer)) is None - ): - failure["bgp_peers"][peer][vrf] = {"status": "Not configured"} - failures = deep_update(failures, failure) + self.result.is_success() + + output = self.instance_commands[0].json_output + + for peer in self.inputs.bgp_peers: + peer_ip = str(peer.peer_address) + + # Check if the VRF is configured + if (vrf_output := get_value(output, f"vrfs.{peer.vrf}")) is None: + self.result.is_failure(f"{peer} - VRF not configured") continue - # Fetching the capabilities output. - bgp_output = get_value(bgp_output, "neighborCapabilities.multiprotocolCaps") + # Check if the peer is found + if (peer_data := get_item(vrf_output["peerList"], "peerAddress", peer_ip)) is None: + self.result.is_failure(f"{peer} - Not found") + continue + + # Fetching the multiprotocol capabilities + act_mp_caps = get_value(peer_data, "neighborCapabilities.multiprotocolCaps") - if bgp_peer.strict and sorted(capabilities) != sorted(bgp_output): - failure["bgp_peers"][peer][vrf] = { - "status": f"Expected only `{', '.join(capabilities)}` capabilities should be listed but found `{', '.join(bgp_output)}` instead." - } - failures = deep_update(failures, failure) + # If strict is True, check if only the specified capabilities are configured + if peer.strict and sorted(peer.capabilities) != sorted(act_mp_caps): + self.result.is_failure(f"{peer} - Mismatch - Expected: {', '.join(peer.capabilities)} Actual: {', '.join(act_mp_caps)}") continue # Check each capability - for capability in capabilities: - capability_output = bgp_output.get(capability) + for capability in peer.capabilities: + # Check if the capability is found + if (capability_status := get_value(act_mp_caps, capability)) is None: + self.result.is_failure(f"{peer} - {capability} not found") - # Check if capabilities are missing - if not capability_output: - failure["bgp_peers"][peer][vrf][capability] = "not found" - failures = deep_update(failures, failure) + # Check if the capability is advertised, received, and enabled + elif not _check_bgp_neighbor_capability(capability_status): + self.result.is_failure(f"{peer} - {capability} not negotiated - {format_data(capability_status)}") - # Check if capabilities are not advertised, received, or enabled - elif not all(capability_output.get(prop, False) for prop in ["advertised", "received", "enabled"]): - failure["bgp_peers"][peer][vrf][capability] = capability_output - failures = deep_update(failures, failure) - # Check if there are any failures - if not failures["bgp_peers"]: - self.result.is_success() - else: - self.result.is_failure(f"Following BGP peer multiprotocol capabilities are not found or not ok:\n{failures}") +class VerifyBGPPeerASNCap(AntaTest): + """Verifies the four octet ASN capability of BGP peers. + This test performs the following checks for each specified peer: -class VerifyBGPPeerASNCap(AntaTest): - """Verifies the four octet asn capabilities of a BGP peer in a specified VRF. + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. Validates that the capability is present in the peer configuration. + 4. 
Confirms that the capability is advertised, received, and enabled. Expected Results ---------------- - * Success: The test will pass if BGP peer's four octet asn capabilities are advertised, received, and enabled in the specified VRF. - * Failure: The test will fail if BGP peers are not found or four octet asn capabilities are not advertised, received, and enabled in the specified VRF. + * Success: If all of the following conditions are met: + - All specified peers are found in the BGP configuration. + - The four octet ASN capability is present in each peer configuration. + - The capability is properly negotiated (advertised, received, and enabled) for all peers. + * Failure: If any of the following occur: + - A specified peer is not found in the BGP configuration. + - The four octet ASN capability is not present for a peer. + - The capability is not properly negotiated (not advertised, received, or enabled) for any peer. Examples -------- @@ -829,8 +569,6 @@ class VerifyBGPPeerASNCap(AntaTest): ``` """ - name = "VerifyBGPPeerASNCap" - description = "Verifies the four octet asn capabilities of a BGP peer." categories: ClassVar[list[str]] = ["bgp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bgp neighbors vrf all", revision=3)] @@ -839,61 +577,54 @@ class VerifyBGPPeerASNCap(AntaTest): bgp_peers: list[BgpPeer] """List of BGP peers.""" - - class BgpPeer(BaseModel): - """Model for a BGP peer.""" - - peer_address: IPv4Address - """IPv4 address of a BGP peer.""" - vrf: str = "default" - """Optional VRF for BGP peer. If not provided, it defaults to `default`.""" + BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBGPPeerASNCap.""" - failures: dict[str, Any] = {"bgp_peers": {}} - - # Iterate over each bgp peer - for bgp_peer in self.inputs.bgp_peers: - peer = str(bgp_peer.peer_address) - vrf = bgp_peer.vrf - failure: dict[str, dict[str, dict[str, Any]]] = {"bgp_peers": {peer: {vrf: {}}}} - - # Check if BGP output exists - if ( - not (bgp_output := get_value(self.instance_commands[0].json_output, f"vrfs.{vrf}.peerList")) - or (bgp_output := get_item(bgp_output, "peerAddress", peer)) is None - ): - failure["bgp_peers"][peer][vrf] = {"status": "Not configured"} - failures = deep_update(failures, failure) - continue + self.result.is_success() + + output = self.instance_commands[0].json_output - bgp_output = get_value(bgp_output, "neighborCapabilities.fourOctetAsnCap") + for peer in self.inputs.bgp_peers: + peer_ip = str(peer.peer_address) + peer_list = get_value(output, f"vrfs.{peer.vrf}.peerList", default=[]) - # Check if four octet asn capabilities are found - if not bgp_output: - failure["bgp_peers"][peer][vrf] = {"fourOctetAsnCap": "not found"} - failures = deep_update(failures, failure) + # Check if the peer is found + if (peer_data := get_item(peer_list, "peerAddress", peer_ip)) is None: + self.result.is_failure(f"{peer} - Not found") + continue - # Check if capabilities are not advertised, received, or enabled - elif not all(bgp_output.get(prop, False) for prop in ["advertised", "received", "enabled"]): - failure["bgp_peers"][peer][vrf] = {"fourOctetAsnCap": bgp_output} - failures = deep_update(failures, failure) + # Check if the 4-octet ASN capability is found + if (capablity_status := get_value(peer_data, "neighborCapabilities.fourOctetAsnCap")) is None: + self.result.is_failure(f"{peer} - 4-octet ASN capability not found") + continue - # Check if there are any failures - if not 
failures["bgp_peers"]: - self.result.is_success() - else: - self.result.is_failure(f"Following BGP peer four octet asn capabilities are not found or not ok:\n{failures}") + # Check if the 4-octet ASN capability is advertised, received, and enabled + if not _check_bgp_neighbor_capability(capablity_status): + self.result.is_failure(f"{peer} - 4-octet ASN capability not negotiated - {format_data(capablity_status)}") class VerifyBGPPeerRouteRefreshCap(AntaTest): """Verifies the route refresh capabilities of a BGP peer in a specified VRF. + This test performs the following checks for each specified peer: + + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. Validates that the route refresh capability is present in the peer configuration. + 4. Confirms that the capability is advertised, received, and enabled. + Expected Results ---------------- - * Success: The test will pass if the BGP peer's route refresh capabilities are advertised, received, and enabled in the specified VRF. - * Failure: The test will fail if BGP peers are not found or route refresh capabilities are not advertised, received, and enabled in the specified VRF. + * Success: If all of the following conditions are met: + - All specified peers are found in the BGP configuration. + - The route refresh capability is present in each peer configuration. + - The capability is properly negotiated (advertised, received, and enabled) for all peers. + * Failure: If any of the following occur: + - A specified peer is not found in the BGP configuration. + - The route refresh capability is not present for a peer. + - The capability is not properly negotiated (not advertised, received, or enabled) for any peer. Examples -------- @@ -907,8 +638,6 @@ class VerifyBGPPeerRouteRefreshCap(AntaTest): ``` """ - name = "VerifyBGPPeerRouteRefreshCap" - description = "Verifies the route refresh capabilities of a BGP peer." categories: ClassVar[list[str]] = ["bgp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bgp neighbors vrf all", revision=3)] @@ -917,61 +646,54 @@ class VerifyBGPPeerRouteRefreshCap(AntaTest): bgp_peers: list[BgpPeer] """List of BGP peers""" - - class BgpPeer(BaseModel): - """Model for a BGP peer.""" - - peer_address: IPv4Address - """IPv4 address of a BGP peer.""" - vrf: str = "default" - """Optional VRF for BGP peer. 
If not provided, it defaults to `default`.""" + BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBGPPeerRouteRefreshCap.""" - failures: dict[str, Any] = {"bgp_peers": {}} - - # Iterate over each bgp peer - for bgp_peer in self.inputs.bgp_peers: - peer = str(bgp_peer.peer_address) - vrf = bgp_peer.vrf - failure: dict[str, dict[str, dict[str, Any]]] = {"bgp_peers": {peer: {vrf: {}}}} - - # Check if BGP output exists - if ( - not (bgp_output := get_value(self.instance_commands[0].json_output, f"vrfs.{vrf}.peerList")) - or (bgp_output := get_item(bgp_output, "peerAddress", peer)) is None - ): - failure["bgp_peers"][peer][vrf] = {"status": "Not configured"} - failures = deep_update(failures, failure) - continue + self.result.is_success() - bgp_output = get_value(bgp_output, "neighborCapabilities.routeRefreshCap") + output = self.instance_commands[0].json_output - # Check if route refresh capabilities are found - if not bgp_output: - failure["bgp_peers"][peer][vrf] = {"routeRefreshCap": "not found"} - failures = deep_update(failures, failure) + for peer in self.inputs.bgp_peers: + peer_ip = str(peer.peer_address) + peer_list = get_value(output, f"vrfs.{peer.vrf}.peerList", default=[]) - # Check if capabilities are not advertised, received, or enabled - elif not all(bgp_output.get(prop, False) for prop in ["advertised", "received", "enabled"]): - failure["bgp_peers"][peer][vrf] = {"routeRefreshCap": bgp_output} - failures = deep_update(failures, failure) + # Check if the peer is found + if (peer_data := get_item(peer_list, "peerAddress", peer_ip)) is None: + self.result.is_failure(f"{peer} - Not found") + continue - # Check if there are any failures - if not failures["bgp_peers"]: - self.result.is_success() - else: - self.result.is_failure(f"Following BGP peer route refresh capabilities are not found or not ok:\n{failures}") + # Check if the route refresh capability is found + if (capablity_status := get_value(peer_data, "neighborCapabilities.routeRefreshCap")) is None: + self.result.is_failure(f"{peer} - Route refresh capability not found") + continue + + # Check if the route refresh capability is advertised, received, and enabled + if not _check_bgp_neighbor_capability(capablity_status): + self.result.is_failure(f"{peer} - Route refresh capability not negotiated - {format_data(capablity_status)}") class VerifyBGPPeerMD5Auth(AntaTest): """Verifies the MD5 authentication and state of IPv4 BGP peers in a specified VRF. + This test performs the following checks for each specified peer: + + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. Validates that the BGP session is in `Established` state. + 4. Confirms that MD5 authentication is enabled for the peer. + Expected Results ---------------- - * Success: The test will pass if IPv4 BGP peers are configured with MD5 authentication and state as established in the specified VRF. - * Failure: The test will fail if IPv4 BGP peers are not found, state is not as established or MD5 authentication is not enabled in the specified VRF. + * Success: If all of the following conditions are met: + - All specified peers are found in the BGP configuration. + - All peers are in `Established` state. + - MD5 authentication is enabled for all peers. + * Failure: If any of the following occur: + - A specified peer is not found in the BGP configuration. + - A peer's session state is not `Established`. 
+ - MD5 authentication is not enabled for a peer. Examples -------- @@ -987,8 +709,6 @@ class VerifyBGPPeerMD5Auth(AntaTest): ``` """ - name = "VerifyBGPPeerMD5Auth" - description = "Verifies the MD5 authentication and state of a BGP peer." categories: ClassVar[list[str]] = ["bgp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bgp neighbors vrf all", revision=3)] @@ -997,56 +717,49 @@ class VerifyBGPPeerMD5Auth(AntaTest): bgp_peers: list[BgpPeer] """List of IPv4 BGP peers.""" - - class BgpPeer(BaseModel): - """Model for a BGP peer.""" - - peer_address: IPv4Address - """IPv4 address of BGP peer.""" - vrf: str = "default" - """Optional VRF for BGP peer. If not provided, it defaults to `default`.""" + BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBGPPeerMD5Auth.""" - failures: dict[str, Any] = {"bgp_peers": {}} - - # Iterate over each command - for bgp_peer in self.inputs.bgp_peers: - peer = str(bgp_peer.peer_address) - vrf = bgp_peer.vrf - failure: dict[str, dict[str, dict[str, Any]]] = {"bgp_peers": {peer: {vrf: {}}}} - - # Check if BGP output exists - if ( - not (bgp_output := get_value(self.instance_commands[0].json_output, f"vrfs.{vrf}.peerList")) - or (bgp_output := get_item(bgp_output, "peerAddress", peer)) is None - ): - failure["bgp_peers"][peer][vrf] = {"status": "Not configured"} - failures = deep_update(failures, failure) - continue + self.result.is_success() - # Check if BGP peer state and authentication - state = bgp_output.get("state") - md5_auth_enabled = bgp_output.get("md5AuthEnabled") - if state != "Established" or not md5_auth_enabled: - failure["bgp_peers"][peer][vrf] = {"state": state, "md5_auth_enabled": md5_auth_enabled} - failures = deep_update(failures, failure) + output = self.instance_commands[0].json_output - # Check if there are any failures - if not failures["bgp_peers"]: - self.result.is_success() - else: - self.result.is_failure(f"Following BGP peers are not configured, not established or MD5 authentication is not enabled:\n{failures}") + for peer in self.inputs.bgp_peers: + peer_ip = str(peer.peer_address) + peer_list = get_value(output, f"vrfs.{peer.vrf}.peerList", default=[]) + + # Check if the peer is found + if (peer_data := get_item(peer_list, "peerAddress", peer_ip)) is None: + self.result.is_failure(f"{peer} - Not found") + continue + + # Check BGP peer state and MD5 authentication + state = peer_data.get("state") + md5_auth_enabled = peer_data.get("md5AuthEnabled") + if state != "Established": + self.result.is_failure(f"{peer} - Session state is not established - State: {state}") + if not md5_auth_enabled: + self.result.is_failure(f"{peer} - Session does not have MD5 authentication enabled") class VerifyEVPNType2Route(AntaTest): """Verifies the EVPN Type-2 routes for a given IPv4 or MAC address and VNI. + This test performs the following checks for each specified VXLAN endpoint: + + 1. Verifies that the endpoint exists in the BGP EVPN table. + 2. Confirms that at least one EVPN Type-2 route with a valid and active path exists. + Expected Results ---------------- - * Success: If all provided VXLAN endpoints have at least one valid and active path to their EVPN Type-2 routes. - * Failure: If any of the provided VXLAN endpoints do not have at least one valid and active path to their EVPN Type-2 routes. + * Success: If all of the following conditions are met: + - All specified VXLAN endpoints are found in the BGP EVPN table. 
+ - Each endpoint has at least one EVPN Type-2 route with a valid and active path. + * Failure: If any of the following occur: + - A VXLAN endpoint is not found in the BGP EVPN table. + - No EVPN Type-2 route with a valid and active path exists for an endpoint. Examples -------- @@ -1062,8 +775,6 @@ class VerifyEVPNType2Route(AntaTest): ``` """ - name = "VerifyEVPNType2Route" - description = "Verifies the EVPN Type-2 routes for a given IPv4 or MAC address and VNI." categories: ClassVar[list[str]] = ["bgp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show bgp evpn route-type mac-ip {address} vni {vni}", revision=2)] @@ -1072,14 +783,7 @@ class VerifyEVPNType2Route(AntaTest): vxlan_endpoints: list[VxlanEndpoint] """List of VXLAN endpoints to verify.""" - - class VxlanEndpoint(BaseModel): - """Model for a VXLAN endpoint.""" - - address: IPv4Address | MacAddress - """IPv4 or MAC address of the VXLAN endpoint.""" - vni: Vni - """VNI of the VXLAN endpoint.""" + VxlanEndpoint: ClassVar[type[VxlanEndpoint]] = VxlanEndpoint def render(self, template: AntaTemplate) -> list[AntaCommand]: """Render the template for each VXLAN endpoint in the input list.""" @@ -1089,41 +793,46 @@ class VerifyEVPNType2Route(AntaTest): def test(self) -> None: """Main test function for VerifyEVPNType2Route.""" self.result.is_success() - no_evpn_routes = [] - bad_evpn_routes = [] - for command in self.instance_commands: - address = command.params.address - vni = command.params.vni + for command, endpoint in zip(self.instance_commands, self.inputs.vxlan_endpoints): # Verify that the VXLAN endpoint is in the BGP EVPN table evpn_routes = command.json_output["evpnRoutes"] if not evpn_routes: - no_evpn_routes.append((address, vni)) + self.result.is_failure(f"{endpoint} - No EVPN Type-2 route") continue - # Verify that each EVPN route has at least one valid and active path - for route, route_data in evpn_routes.items(): - has_active_path = False - for path in route_data["evpnRoutePaths"]: - if path["routeType"]["valid"] is True and path["routeType"]["active"] is True: - # At least one path is valid and active, no need to check the other paths + + # Verify that at least one EVPN Type-2 route has at least one active and valid path across all learned routes from all RDs combined + has_active_path = False + for route_data in evpn_routes.values(): + for path in route_data.get("evpnRoutePaths", []): + route_type = path.get("routeType", {}) + if route_type.get("active") and route_type.get("valid"): has_active_path = True break - if not has_active_path: - bad_evpn_routes.append(route) - - if no_evpn_routes: - self.result.is_failure(f"The following VXLAN endpoint do not have any EVPN Type-2 route: {no_evpn_routes}") - if bad_evpn_routes: - self.result.is_failure(f"The following EVPN Type-2 routes do not have at least one valid and active path: {bad_evpn_routes}") + if not has_active_path: + self.result.is_failure(f"{endpoint} - No valid and active path") class VerifyBGPAdvCommunities(AntaTest): - """Verifies if the advertised communities of BGP peers are standard, extended, and large in the specified VRF. + """Verifies that advertised communities are standard, extended and large for BGP peers. + + This test performs the following checks for each specified peer: + + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. 
Validates that all required community types are advertised: + - Standard communities + - Extended communities + - Large communities Expected Results ---------------- - * Success: The test will pass if the advertised communities of BGP peers are standard, extended, and large in the specified VRF. - * Failure: The test will fail if the advertised communities of BGP peers are not standard, extended, and large in the specified VRF. + * Success: If all of the following conditions are met: + - All specified peers are found in the BGP configuration. + - Each peer advertises standard, extended and large communities. + * Failure: If any of the following occur: + - A specified peer is not found in the BGP configuration. + - A peer does not advertise standard, extended or large communities. Examples -------- @@ -1139,8 +848,6 @@ class VerifyBGPAdvCommunities(AntaTest): ``` """ - name = "VerifyBGPAdvCommunities" - description = "Verifies the advertised communities of a BGP peer." categories: ClassVar[list[str]] = ["bgp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bgp neighbors vrf all", revision=3)] @@ -1149,54 +856,46 @@ class VerifyBGPAdvCommunities(AntaTest): bgp_peers: list[BgpPeer] """List of BGP peers.""" - - class BgpPeer(BaseModel): - """Model for a BGP peer.""" - - peer_address: IPv4Address - """IPv4 address of a BGP peer.""" - vrf: str = "default" - """Optional VRF for BGP peer. If not provided, it defaults to `default`.""" + BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBGPAdvCommunities.""" - failures: dict[str, Any] = {"bgp_peers": {}} - - # Iterate over each bgp peer - for bgp_peer in self.inputs.bgp_peers: - peer = str(bgp_peer.peer_address) - vrf = bgp_peer.vrf - failure: dict[str, dict[str, dict[str, Any]]] = {"bgp_peers": {peer: {vrf: {}}}} - - # Verify BGP peer - if ( - not (bgp_output := get_value(self.instance_commands[0].json_output, f"vrfs.{vrf}.peerList")) - or (bgp_output := get_item(bgp_output, "peerAddress", peer)) is None - ): - failure["bgp_peers"][peer][vrf] = {"status": "Not configured"} - failures = deep_update(failures, failure) - continue + self.result.is_success() + + output = self.instance_commands[0].json_output - # Verify BGP peer's advertised communities - bgp_output = bgp_output.get("advertisedCommunities") - if not bgp_output["standard"] or not bgp_output["extended"] or not bgp_output["large"]: - failure["bgp_peers"][peer][vrf] = {"advertised_communities": bgp_output} - failures = deep_update(failures, failure) + for peer in self.inputs.bgp_peers: + peer_ip = str(peer.peer_address) + peer_list = get_value(output, f"vrfs.{peer.vrf}.peerList", default=[]) + + # Check if the peer is found + if (peer_data := get_item(peer_list, "peerAddress", peer_ip)) is None: + self.result.is_failure(f"{peer} - Not found") + continue - if not failures["bgp_peers"]: - self.result.is_success() - else: - self.result.is_failure(f"Following BGP peers are not configured or advertised communities are not standard, extended, and large:\n{failures}") + # Check BGP peer advertised communities + if not all(get_value(peer_data, f"advertisedCommunities.{community}") is True for community in ["standard", "extended", "large"]): + self.result.is_failure(f"{peer} - {format_data(peer_data['advertisedCommunities'])}") class VerifyBGPTimers(AntaTest): - """Verifies if the BGP peers are configured with the correct hold and keep-alive timers in the specified VRF. 
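Every single-command peer test in this patch repeats the same lookup against the `show bgp neighbors vrf all` output: resolve the VRF's `peerList` with `get_value`, then select the matching entry with `get_item`. A condensed sketch of that pattern, with the helper semantics inferred from how they are called in these hunks (assumption, not the library code):

```python
from anta.tools import get_item, get_value

def find_peer(output: dict, vrf: str, peer_ip: str) -> dict | None:
    """Return the peer entry for peer_ip in the given VRF, or None when the VRF or peer is absent."""
    peer_list = get_value(output, f"vrfs.{vrf}.peerList", default=[])
    return get_item(peer_list, "peerAddress", peer_ip)
```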
+ """Verifies the timers of BGP peers. + + This test performs the following checks for each specified peer: + + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. Confirms the BGP session hold time/keepalive timers match the expected value. Expected Results ---------------- - * Success: The test will pass if the hold and keep-alive timers are correct for BGP peers in the specified VRF. - * Failure: The test will fail if BGP peers are not found or hold and keep-alive timers are not correct in the specified VRF. + * Success: If all of the following conditions are met: + - All specified peers are found in the BGP configuration. + - The hold time/keepalive timers match the expected value for each peer. + * Failure: If any of the following occur: + - A specified peer is not found in the BGP configuration. + - The hold time/keepalive timers do not match the expected value for a peer. Examples -------- @@ -1216,8 +915,6 @@ class VerifyBGPTimers(AntaTest): ``` """ - name = "VerifyBGPTimers" - description = "Verifies the timers of a BGP peer." categories: ClassVar[list[str]] = ["bgp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bgp neighbors vrf all", revision=3)] @@ -1226,59 +923,62 @@ class VerifyBGPTimers(AntaTest): bgp_peers: list[BgpPeer] """List of BGP peers""" - - class BgpPeer(BaseModel): - """Model for a BGP peer.""" - - peer_address: IPv4Address - """IPv4 address of a BGP peer.""" - vrf: str = "default" - """Optional VRF for BGP peer. If not provided, it defaults to `default`.""" - hold_time: int = Field(ge=3, le=7200) - """BGP hold time in seconds.""" - keep_alive_time: int = Field(ge=0, le=3600) - """BGP keep-alive time in seconds.""" + BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer + + @field_validator("bgp_peers") + @classmethod + def validate_bgp_peers(cls, bgp_peers: list[T]) -> list[T]: + """Validate that 'hold_time' or 'keep_alive_time' field is provided in each address family.""" + for peer in bgp_peers: + if peer.hold_time is None or peer.keep_alive_time is None: + msg = f"{peer} 'hold_time' or 'keep_alive_time' field missing in the input" + raise ValueError(msg) + return bgp_peers @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBGPTimers.""" - failures: dict[str, Any] = {} - - # Iterate over each bgp peer - for bgp_peer in self.inputs.bgp_peers: - peer_address = str(bgp_peer.peer_address) - vrf = bgp_peer.vrf - hold_time = bgp_peer.hold_time - keep_alive_time = bgp_peer.keep_alive_time - - # Verify BGP peer - if ( - not (bgp_output := get_value(self.instance_commands[0].json_output, f"vrfs.{vrf}.peerList")) - or (bgp_output := get_item(bgp_output, "peerAddress", peer_address)) is None - ): - failures[peer_address] = {vrf: "Not configured"} - continue + self.result.is_success() - # Verify BGP peer's hold and keep alive timers - if bgp_output.get("holdTime") != hold_time or bgp_output.get("keepaliveTime") != keep_alive_time: - failures[peer_address] = {vrf: {"hold_time": bgp_output.get("holdTime"), "keep_alive_time": bgp_output.get("keepaliveTime")}} + output = self.instance_commands[0].json_output - if not failures: - self.result.is_success() - else: - self.result.is_failure(f"Following BGP peers are not configured or hold and keep-alive timers are not correct:\n{failures}") + for peer in self.inputs.bgp_peers: + peer_ip = str(peer.peer_address) + peer_list = get_value(output, f"vrfs.{peer.vrf}.peerList", default=[]) + + # Check if the peer is found + if 
(peer_data := get_item(peer_list, "peerAddress", peer_ip)) is None: + self.result.is_failure(f"{peer} - Not found") + continue + + # Check BGP peer timers + if peer_data["holdTime"] != peer.hold_time: + self.result.is_failure(f"{peer} - Hold time mismatch - Expected: {peer.hold_time}, Actual: {peer_data['holdTime']}") + if peer_data["keepaliveTime"] != peer.keep_alive_time: + self.result.is_failure(f"{peer} - Keepalive time mismatch - Expected: {peer.keep_alive_time}, Actual: {peer_data['keepaliveTime']}") class VerifyBGPPeerDropStats(AntaTest): """Verifies BGP NLRI drop statistics for the provided BGP IPv4 peer(s). - By default, all drop statistics counters will be checked for any non-zero values. - An optional list of specific drop statistics can be provided for granular testing. + This test performs the following checks for each specified peer: + + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. Validates the BGP drop statistics: + - If specific drop statistics are provided, checks only those counters. + - If no specific drop statistics are provided, checks all available counters. + - Confirms that all checked counters have a value of zero. Expected Results ---------------- - * Success: The test will pass if the BGP peer's drop statistic(s) are zero. - * Failure: The test will fail if the BGP peer's drop statistic(s) are non-zero/Not Found or peer is not configured. + * Success: If all of the following conditions are met: + - All specified peers are found in the BGP configuration. + - All specified drop statistics counters (or all counters if none specified) are zero. + * Failure: If any of the following occur: + - A specified peer is not found in the BGP configuration. + - Any checked drop statistics counter has a non-zero value. + - A specified drop statistics counter does not exist. Examples -------- @@ -1295,80 +995,69 @@ class VerifyBGPPeerDropStats(AntaTest): ``` """ - name = "VerifyBGPPeerDropStats" - description = "Verifies the NLRI drop statistics of a BGP IPv4 peer(s)." categories: ClassVar[list[str]] = ["bgp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show bgp neighbors {peer} vrf {vrf}", revision=3)] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bgp neighbors vrf all", revision=3)] class Input(AntaTest.Input): """Input model for the VerifyBGPPeerDropStats test.""" bgp_peers: list[BgpPeer] """List of BGP peers""" - - class BgpPeer(BaseModel): - """Model for a BGP peer.""" - - peer_address: IPv4Address - """IPv4 address of a BGP peer.""" - vrf: str = "default" - """Optional VRF for BGP peer. If not provided, it defaults to `default`.""" - drop_stats: list[BgpDropStats] | None = None - """Optional list of drop statistics to be verified. 
If not provided, test will verifies all the drop statistics.""" - - def render(self, template: AntaTemplate) -> list[AntaCommand]: - """Render the template for each BGP peer in the input list.""" - return [template.render(peer=str(bgp_peer.peer_address), vrf=bgp_peer.vrf) for bgp_peer in self.inputs.bgp_peers] + BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBGPPeerDropStats.""" - failures: dict[Any, Any] = {} + self.result.is_success() + + output = self.instance_commands[0].json_output - for command, input_entry in zip(self.instance_commands, self.inputs.bgp_peers): - peer = command.params.peer - vrf = command.params.vrf - drop_statistics = input_entry.drop_stats + for peer in self.inputs.bgp_peers: + peer_ip = str(peer.peer_address) + drop_stats_input = peer.drop_stats + peer_list = get_value(output, f"vrfs.{peer.vrf}.peerList", default=[]) - # Verify BGP peer - if not (peer_list := get_value(command.json_output, f"vrfs.{vrf}.peerList")) or (peer_detail := get_item(peer_list, "peerAddress", peer)) is None: - failures[peer] = {vrf: "Not configured"} + # Check if the peer is found + if (peer_data := get_item(peer_list, "peerAddress", peer_ip)) is None: + self.result.is_failure(f"{peer} - Not found") continue - # Verify BGP peer's drop stats - drop_stats_output = peer_detail.get("dropStats", {}) + # Verify BGP peers' drop stats + drop_stats_output = peer_data["dropStats"] # In case drop stats not provided, It will check all drop statistics - if not drop_statistics: - drop_statistics = drop_stats_output + if not drop_stats_input: + drop_stats_input = drop_stats_output # Verify BGP peer's drop stats - drop_stats_not_ok = { - drop_stat: drop_stats_output.get(drop_stat, "Not Found") for drop_stat in drop_statistics if drop_stats_output.get(drop_stat, "Not Found") - } - if any(drop_stats_not_ok): - failures[peer] = {vrf: drop_stats_not_ok} - - # Check if any failures - if not failures: - self.result.is_success() - else: - self.result.is_failure(f"The following BGP peers are not configured or have non-zero NLRI drop statistics counters:\n{failures}") + for drop_stat in drop_stats_input: + if (stat_value := drop_stats_output.get(drop_stat, 0)) != 0: + self.result.is_failure(f"{peer} - Non-zero NLRI drop statistics counter - {drop_stat}: {stat_value}") class VerifyBGPPeerUpdateErrors(AntaTest): """Verifies BGP update error counters for the provided BGP IPv4 peer(s). - By default, all update error counters will be checked for any non-zero values. - An optional list of specific update error counters can be provided for granular testing. + This test performs the following checks for each specified peer: + + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. Validates the BGP update error counters: + - If specific update error counters are provided, checks only those counters. + - If no update error counters are provided, checks all available counters. + - Confirms that all checked counters have a value of zero. Note: For "disabledAfiSafi" error counter field, checking that it's not "None" versus 0. Expected Results ---------------- - * Success: The test will pass if the BGP peer's update error counter(s) are zero/None. - * Failure: The test will fail if the BGP peer's update error counter(s) are non-zero/not None/Not Found or - peer is not configured. + * Success: If all of the following conditions are met: + - All specified peers are found in the BGP configuration. 
+ - All specified update error counters (or all counters if none specified) are zero. + * Failure: If any of the following occur: + - A specified peer is not found in the BGP configuration. + - Any checked update error counter has a non-zero value. + - A specified update error counter does not exist. Examples -------- @@ -1379,81 +1068,68 @@ class VerifyBGPPeerUpdateErrors(AntaTest): bgp_peers: - peer_address: 172.30.11.1 vrf: default - update_error_filter: + update_errors: - inUpdErrWithdraw ``` """ - name = "VerifyBGPPeerUpdateErrors" - description = "Verifies the update error counters of a BGP IPv4 peer." categories: ClassVar[list[str]] = ["bgp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show bgp neighbors {peer} vrf {vrf}", revision=3)] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bgp neighbors vrf all", revision=3)] class Input(AntaTest.Input): """Input model for the VerifyBGPPeerUpdateErrors test.""" bgp_peers: list[BgpPeer] """List of BGP peers""" - - class BgpPeer(BaseModel): - """Model for a BGP peer.""" - - peer_address: IPv4Address - """IPv4 address of a BGP peer.""" - vrf: str = "default" - """Optional VRF for BGP peer. If not provided, it defaults to `default`.""" - update_errors: list[BgpUpdateError] | None = None - """Optional list of update error counters to be verified. If not provided, test will verifies all the update error counters.""" - - def render(self, template: AntaTemplate) -> list[AntaCommand]: - """Render the template for each BGP peer in the input list.""" - return [template.render(peer=str(bgp_peer.peer_address), vrf=bgp_peer.vrf) for bgp_peer in self.inputs.bgp_peers] + BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBGPPeerUpdateErrors.""" - failures: dict[Any, Any] = {} + self.result.is_success() - for command, input_entry in zip(self.instance_commands, self.inputs.bgp_peers): - peer = command.params.peer - vrf = command.params.vrf - update_error_counters = input_entry.update_errors + output = self.instance_commands[0].json_output - # Verify BGP peer. - if not (peer_list := get_value(command.json_output, f"vrfs.{vrf}.peerList")) or (peer_detail := get_item(peer_list, "peerAddress", peer)) is None: - failures[peer] = {vrf: "Not configured"} + for peer in self.inputs.bgp_peers: + peer_ip = str(peer.peer_address) + update_errors_input = peer.update_errors + peer_list = get_value(output, f"vrfs.{peer.vrf}.peerList", default=[]) + + # Check if the peer is found + if (peer_data := get_item(peer_list, "peerAddress", peer_ip)) is None: + self.result.is_failure(f"{peer} - Not found") continue # Getting the BGP peer's error counters output. - error_counters_output = peer_detail.get("peerInUpdateErrors", {}) + error_counters_output = peer_data.get("peerInUpdateErrors", {}) # In case update error counters not provided, It will check all the update error counters. - if not update_error_counters: - update_error_counters = error_counters_output - - # verifying the error counters.
- error_counters_not_ok = { - ("disabledAfiSafi" if error_counter == "disabledAfiSafi" else error_counter): value - for error_counter in update_error_counters - if (value := error_counters_output.get(error_counter, "Not Found")) != "None" and value != 0 - } - if error_counters_not_ok: - failures[peer] = {vrf: error_counters_not_ok} + if not update_errors_input: + update_errors_input = error_counters_output - # Check if any failures - if not failures: - self.result.is_success() - else: - self.result.is_failure(f"The following BGP peers are not configured or have non-zero update error counters:\n{failures}") + # Verify BGP peer's update error counters + for error_counter in update_errors_input: + if (stat_value := error_counters_output.get(error_counter, "Not Found")) != 0 and stat_value != "None": + self.result.is_failure(f"{peer} - Non-zero update error counter - {error_counter}: {stat_value}") class VerifyBgpRouteMaps(AntaTest): """Verifies BGP inbound and outbound route-maps of BGP IPv4 peer(s). + This test performs the following checks for each specified peer: + + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3. Validates that the correct BGP route maps are applied in the correct direction (inbound or outbound). + Expected Results ---------------- - * Success: The test will pass if the correct route maps are applied in the correct direction (inbound or outbound) for IPv4 BGP peers in the specified VRF. - * Failure: The test will fail if BGP peers are not configured or any neighbor has an incorrect or missing route map in either the inbound or outbound direction. + * Success: If all of the following conditions are met: + - All specified peers are found in the BGP configuration. + - All specified peers have the correct BGP route maps applied in the correct direction (inbound or outbound). + * Failure: If any of the following occur: + - A specified peer is not found in the BGP configuration. + - An incorrect or missing route map in either the inbound or outbound direction. Examples -------- @@ -1469,89 +1145,73 @@ class VerifyBgpRouteMaps(AntaTest): ``` """ - name = "VerifyBgpRouteMaps" - description = "Verifies BGP inbound and outbound route-maps of BGP IPv4 peer(s)." categories: ClassVar[list[str]] = ["bgp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show bgp neighbors {peer} vrf {vrf}", revision=3)] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bgp neighbors vrf all", revision=3)] class Input(AntaTest.Input): """Input model for the VerifyBgpRouteMaps test.""" bgp_peers: list[BgpPeer] """List of BGP peers""" + BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer - class BgpPeer(BaseModel): - """Model for a BGP peer.""" - - peer_address: IPv4Address - """IPv4 address of a BGP peer.""" - vrf: str = "default" - """Optional VRF for BGP peer. If not provided, it defaults to `default`.""" - inbound_route_map: str | None = None - """Inbound route map applied, defaults to None.""" - outbound_route_map: str | None = None - """Outbound route map applied, defaults to None.""" - - @model_validator(mode="after") - def validate_inputs(self) -> Self: - """Validate the inputs provided to the BgpPeer class. - - At least one of 'inbound' or 'outbound' route-map must be provided. - """ - if not (self.inbound_route_map or self.outbound_route_map): - msg = "At least one of 'inbound_route_map' or 'outbound_route_map' must be provided."
- raise ValueError(msg) - return self + @field_validator("bgp_peers") + @classmethod + def validate_bgp_peers(cls, bgp_peers: list[T]) -> list[T]: + """Validate the route-map inputs provided for each BGP peer. - def render(self, template: AntaTemplate) -> list[AntaCommand]: - """Render the template for each BGP peer in the input list.""" - return [template.render(peer=str(bgp_peer.peer_address), vrf=bgp_peer.vrf) for bgp_peer in self.inputs.bgp_peers] + At least one of 'inbound' or 'outbound' route-map must be provided. + """ + for peer in bgp_peers: + if not (peer.inbound_route_map or peer.outbound_route_map): + msg = f"{peer}; At least one of 'inbound_route_map' or 'outbound_route_map' must be provided." + raise ValueError(msg) + return bgp_peers @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBgpRouteMaps.""" - failures: dict[Any, Any] = {} - - for command, input_entry in zip(self.instance_commands, self.inputs.bgp_peers): - peer = str(input_entry.peer_address) - vrf = input_entry.vrf - inbound_route_map = input_entry.inbound_route_map - outbound_route_map = input_entry.outbound_route_map - failure: dict[Any, Any] = {vrf: {}} - - # Verify BGP peer. - if not (peer_list := get_value(command.json_output, f"vrfs.{vrf}.peerList")) or (peer_detail := get_item(peer_list, "peerAddress", peer)) is None: - failures[peer] = {vrf: "Not configured"} + self.result.is_success() + + output = self.instance_commands[0].json_output + + for peer in self.inputs.bgp_peers: + peer_ip = str(peer.peer_address) + inbound_route_map = peer.inbound_route_map + outbound_route_map = peer.outbound_route_map + peer_list = get_value(output, f"vrfs.{peer.vrf}.peerList", default=[]) + + # Check if the peer is found + if (peer_data := get_item(peer_list, "peerAddress", peer_ip)) is None: + self.result.is_failure(f"{peer} - Not found") continue # Verify Inbound route-map - if inbound_route_map and (inbound_map := peer_detail.get("routeMapInbound", "Not Configured")) != inbound_route_map: - failure[vrf].update({"Inbound route-map": inbound_map}) + if inbound_route_map and (inbound_map := peer_data.get("routeMapInbound", "Not Configured")) != inbound_route_map: + self.result.is_failure(f"{peer} - Inbound route-map mismatch - Expected: {inbound_route_map}, Actual: {inbound_map}") # Verify Outbound route-map - if outbound_route_map and (outbound_map := peer_detail.get("routeMapOutbound", "Not Configured")) != outbound_route_map: - failure[vrf].update({"Outbound route-map": outbound_map}) + if outbound_route_map and (outbound_map := peer_data.get("routeMapOutbound", "Not Configured")) != outbound_route_map: + self.result.is_failure(f"{peer} - Outbound route-map mismatch - Expected: {outbound_route_map}, Actual: {outbound_map}") - if failure[vrf]: - failures[peer] = failure - # Check if any failures - if not failures: - self.result.is_success() - else: - self.result.is_failure( - f"The following BGP peers are not configured or has an incorrect or missing route map in either the inbound or outbound direction:\n{failures}" - ) +class VerifyBGPPeerRouteLimit(AntaTest): + """Verifies maximum routes and maximum routes warning limit of BGP IPv4 peer(s). + This test performs the following checks for each specified peer: -class VerifyBGPPeerRouteLimit(AntaTest): - """Verifies the maximum routes and optionally verifies the maximum routes warning limit for the provided BGP IPv4 peer(s). + 1. Confirms that the specified VRF is configured. + 2. Verifies that the peer exists in the BGP configuration. + 3.
Confirms that the maximum routes and, if provided, the maximum routes warning limit match the expected values. Expected Results ---------------- - * Success: The test will pass if the BGP peer's maximum routes and, if provided, the maximum routes warning limit are equal to the given limits. - * Failure: The test will fail if the BGP peer's maximum routes do not match the given limit, or if the maximum routes warning limit is provided - and does not match the given limit, or if the peer is not configured. + * Success: If all of the following conditions are met: + - All specified peers are found in the BGP configuration. + - The maximum routes/maximum routes warning limit match the expected value for a peer. + * Failure: If any of the following occur: + - A specified peer is not found in the BGP configuration. + - The maximum routes/maximum routes warning limit do not match the expected value for a peer. Examples -------- @@ -1567,64 +1227,48 @@ class VerifyBGPPeerRouteLimit(AntaTest): ``` """ - name = "VerifyBGPPeerRouteLimit" - description = "Verifies maximum routes and maximum routes warning limit for the provided BGP IPv4 peer(s)." categories: ClassVar[list[str]] = ["bgp"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show bgp neighbors {peer} vrf {vrf}", revision=3)] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show bgp neighbors vrf all", revision=3)] class Input(AntaTest.Input): """Input model for the VerifyBGPPeerRouteLimit test.""" bgp_peers: list[BgpPeer] """List of BGP peers""" - - class BgpPeer(BaseModel): - """Model for a BGP peer.""" - - peer_address: IPv4Address - """IPv4 address of a BGP peer.""" - vrf: str = "default" - """Optional VRF for BGP peer. If not provided, it defaults to `default`.""" - maximum_routes: int = Field(ge=0, le=4294967294) - """The maximum allowable number of BGP routes, `0` means unlimited.""" - warning_limit: int = Field(default=0, ge=0, le=4294967294) - """Optional maximum routes warning limit. If not provided, it defaults to `0` meaning no warning limit.""" - - def render(self, template: AntaTemplate) -> list[AntaCommand]: - """Render the template for each BGP peer in the input list.""" - return [template.render(peer=str(bgp_peer.peer_address), vrf=bgp_peer.vrf) for bgp_peer in self.inputs.bgp_peers] + BgpPeer: ClassVar[type[BgpPeer]] = BgpPeer + + @field_validator("bgp_peers") + @classmethod + def validate_bgp_peers(cls, bgp_peers: list[T]) -> list[T]: + """Validate that 'peers' field is provided in each address family.""" + for peer in bgp_peers: + if peer.maximum_routes is None: + msg = f"{peer}; 'maximum_routes' field missing in the input" + raise ValueError(msg) + return bgp_peers @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyBGPPeerRouteLimit.""" - failures: dict[Any, Any] = {} - - for command, input_entry in zip(self.instance_commands, self.inputs.bgp_peers): - peer = str(input_entry.peer_address) - vrf = input_entry.vrf - maximum_routes = input_entry.maximum_routes - warning_limit = input_entry.warning_limit - failure: dict[Any, Any] = {} - - # Verify BGP peer.
- if not (peer_list := get_value(command.json_output, f"vrfs.{vrf}.peerList")) or (peer_detail := get_item(peer_list, "peerAddress", peer)) is None: - failures[peer] = {vrf: "Not configured"} + self.result.is_success() + + output = self.instance_commands[0].json_output + + for peer in self.inputs.bgp_peers: + peer_ip = str(peer.peer_address) + maximum_routes = peer.maximum_routes + warning_limit = peer.warning_limit + peer_list = get_value(output, f"vrfs.{peer.vrf}.peerList", default=[]) + + # Check if the peer is found + if (peer_data := get_item(peer_list, "peerAddress", peer_ip)) is None: + self.result.is_failure(f"{peer} - Not found") continue # Verify maximum routes configured. - if (actual_routes := peer_detail.get("maxTotalRoutes", "Not Found")) != maximum_routes: - failure["Maximum total routes"] = actual_routes + if (actual_routes := peer_data.get("maxTotalRoutes", "Not Found")) != maximum_routes: + self.result.is_failure(f"{peer} - Maximum routes mismatch - Expected: {maximum_routes}, Actual: {actual_routes}") # Verify warning limit if given. - if warning_limit and (actual_warning_limit := peer_detail.get("totalRoutesWarnLimit", "Not Found")) != warning_limit: - failure["Warning limit"] = actual_warning_limit - - # Updated failures if any. - if failure: - failures[peer] = {vrf: failure} - - # Check if any failures - if not failures: - self.result.is_success() - else: - self.result.is_failure(f"The following BGP peer(s) are not configured or maximum routes and maximum routes warning limit is not correct:\n{failures}") + if warning_limit and (actual_warning_limit := peer_data.get("totalRoutesWarnLimit", "Not Found")) != warning_limit: + self.result.is_failure(f"{peer} - Maximum route warning limit mismatch - Expected: {warning_limit}, Actual: {actual_warning_limit}") diff --git a/anta/tests/routing/generic.py b/anta/tests/routing/generic.py index d1322a5..7b916a3 100644 --- a/anta/tests/routing/generic.py +++ b/anta/tests/routing/generic.py @@ -14,7 +14,9 @@ from typing import TYPE_CHECKING, ClassVar, Literal from pydantic import model_validator from anta.custom_types import PositiveInteger +from anta.input_models.routing.generic import IPv4Routes from anta.models import AntaCommand, AntaTemplate, AntaTest +from anta.tools import get_value if TYPE_CHECKING: import sys @@ -26,7 +28,7 @@ if TYPE_CHECKING: class VerifyRoutingProtocolModel(AntaTest): - """Verifies the configured routing protocol model is the one we expect. + """Verifies the configured routing protocol model. Expected Results ---------------- @@ -43,8 +45,6 @@ class VerifyRoutingProtocolModel(AntaTest): ``` """ - name = "VerifyRoutingProtocolModel" - description = "Verifies the configured routing protocol model." categories: ClassVar[list[str]] = ["routing"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip route summary", revision=3)] @@ -85,8 +85,6 @@ class VerifyRoutingTableSize(AntaTest): ``` """ - name = "VerifyRoutingTableSize" - description = "Verifies the size of the IP routing table of the default VRF." categories: ClassVar[list[str]] = ["routing"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip route summary", revision=3)] @@ -138,8 +136,6 @@ class VerifyRoutingTableEntry(AntaTest): ``` """ - name = "VerifyRoutingTableEntry" - description = "Verifies that the provided routes are present in the routing table of a specified VRF." 
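The refactored BGP peer tests above issue a single `show bgp neighbors vrf all` call and dig into the JSON reply with the `get_value` and `get_item` helpers from `anta.tools`. The sketch below illustrates that lookup pattern; the helper bodies are simplified stand-ins rather than the actual `anta.tools` implementations, and the payload is a hypothetical, trimmed-down example.

```python
from typing import Any


def get_value(obj: dict[str, Any], path: str, default: Any = None, separator: str = ".") -> Any:
    """Walk a nested dict with a dotted path (simplified stand-in for anta.tools.get_value)."""
    current: Any = obj
    for key in path.split(separator):
        if not isinstance(current, dict) or key not in current:
            return default
        current = current[key]
    return current


def get_item(items: list[dict[str, Any]], key: str, value: Any) -> dict[str, Any] | None:
    """Return the first dict whose `key` equals `value` (simplified stand-in for anta.tools.get_item)."""
    return next((item for item in items if item.get(key) == value), None)


# Hypothetical, trimmed-down 'show bgp neighbors vrf all' payload.
output = {"vrfs": {"default": {"peerList": [{"peerAddress": "10.1.0.1", "maxTotalRoutes": 12000, "totalRoutesWarnLimit": 10000}]}}}

peer_list = get_value(output, "vrfs.default.peerList", default=[])
peer_data = get_item(peer_list, "peerAddress", "10.1.0.1")
assert peer_data is not None and peer_data["maxTotalRoutes"] == 12000
```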
categories: ClassVar[list[str]] = ["routing"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ AntaTemplate(template="show ip route vrf {vrf} {route}", revision=4), @@ -187,3 +183,76 @@ class VerifyRoutingTableEntry(AntaTest): self.result.is_success() else: self.result.is_failure(f"The following route(s) are missing from the routing table of VRF {self.inputs.vrf}: {missing_routes}") + + +class VerifyIPv4RouteType(AntaTest): + """Verifies the route-type of the IPv4 prefixes. + + This test performs the following checks for each IPv4 route: + 1. Verifies that the specified VRF is configured. + 2. Verifies that the specified IPv4 route exists in the configuration. + 3. Verifies that the specified IPv4 route is of the expected type. + + Expected Results + ---------------- + * Success: If all of the following conditions are met: + - All the specified VRFs are configured. + - All the specified IPv4 routes are found. + - All the specified IPv4 routes are of the expected type. + * Failure: If any of the following occur: + - A specified VRF is not configured. + - A specified IPv4 route is not found. + - Any specified IPv4 route is not of the expected type. + + Examples + -------- + ```yaml + anta.tests.routing: + generic: + - VerifyIPv4RouteType: + routes_entries: + - prefix: 10.10.0.1/32 + vrf: default + route_type: eBGP + - prefix: 10.100.0.12/31 + vrf: default + route_type: connected + - prefix: 10.100.1.5/32 + vrf: default + route_type: iBGP + ``` + """ + + categories: ClassVar[list[str]] = ["routing"] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip route vrf all", revision=4)] + + class Input(AntaTest.Input): + """Input model for the VerifyIPv4RouteType test.""" + + routes_entries: list[IPv4Routes] + + @AntaTest.anta_test + def test(self) -> None: + """Main test function for VerifyIPv4RouteType.""" + self.result.is_success() + output = self.instance_commands[0].json_output + + # Iterating over all the routes entries mentioned in the inputs. + for entry in self.inputs.routes_entries: + prefix = str(entry.prefix) + vrf = entry.vrf + expected_route_type = entry.route_type + + # Verifying that on device, expected VRF is configured. + if (routes_details := get_value(output, f"vrfs.{vrf}.routes")) is None: + self.result.is_failure(f"{entry} - VRF not configured") + continue + + # Verifying that the expected IPv4 route is present or not on the device + if (route_data := routes_details.get(prefix)) is None: + self.result.is_failure(f"{entry} - Route not found") + continue + + # Verifying that the specified IPv4 routes are of the expected type. + if expected_route_type != (actual_route_type := route_data.get("routeType")): + self.result.is_failure(f"{entry} - Incorrect route type - Expected: {expected_route_type} Actual: {actual_route_type}") diff --git a/anta/tests/routing/isis.py b/anta/tests/routing/isis.py index 684578c..54a4f14 100644 --- a/anta/tests/routing/isis.py +++ b/anta/tests/routing/isis.py @@ -158,8 +158,6 @@ class VerifyISISNeighborState(AntaTest): ``` """ - name = "VerifyISISNeighborState" - description = "Verifies all IS-IS neighbors are in UP state."
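The new `VerifyIPv4RouteType` test above reads the `show ip route vrf all` reply at `vrfs.<vrf>.routes.<prefix>.routeType`. The following standalone sketch mirrors that check against a hypothetical, trimmed-down payload.

```python
# Hypothetical, trimmed-down 'show ip route vrf all' payload.
output = {
    "vrfs": {
        "default": {
            "routes": {
                "10.10.0.1/32": {"routeType": "eBGP"},
                "10.100.0.12/31": {"routeType": "connected"},
            }
        }
    }
}

# (vrf, prefix, expected route type) triples to validate.
expected = [
    ("default", "10.10.0.1/32", "eBGP"),
    ("default", "10.100.0.12/31", "connected"),
]

for vrf, prefix, route_type in expected:
    routes = output.get("vrfs", {}).get(vrf, {}).get("routes")
    if routes is None:
        print(f"{prefix} (VRF {vrf}) - VRF not configured")
    elif (route := routes.get(prefix)) is None:
        print(f"{prefix} (VRF {vrf}) - Route not found")
    elif route.get("routeType") != route_type:
        print(f"{prefix} (VRF {vrf}) - Incorrect route type - Expected: {route_type} Actual: {route.get('routeType')}")
```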
categories: ClassVar[list[str]] = ["isis"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show isis neighbors", revision=1)] @@ -204,8 +202,6 @@ class VerifyISISNeighborCount(AntaTest): ``` """ - name = "VerifyISISNeighborCount" - description = "Verifies count of IS-IS interface per level" categories: ClassVar[list[str]] = ["isis"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show isis interface brief", revision=1)] @@ -277,7 +273,6 @@ class VerifyISISInterfaceMode(AntaTest): ``` """ - name = "VerifyISISInterfaceMode" description = "Verifies interface mode for IS-IS" categories: ClassVar[list[str]] = ["isis"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show isis interface brief", revision=1)] @@ -333,9 +328,7 @@ class VerifyISISInterfaceMode(AntaTest): class VerifyISISSegmentRoutingAdjacencySegments(AntaTest): - """Verifies ISIS Segment Routing Adjacency Segments. - - Verify that all expected Adjacency segments are correctly visible for each interface. + """Verify that all expected Adjacency segments are correctly visible for each interface. Expected Results ---------------- @@ -356,12 +349,9 @@ class VerifyISISSegmentRoutingAdjacencySegments(AntaTest): - interface: Ethernet2 address: 10.0.1.3 sid_origin: dynamic - ``` """ - name = "VerifyISISSegmentRoutingAdjacencySegments" - description = "Verify expected Adjacency segments are correctly visible for each interface." categories: ClassVar[list[str]] = ["isis", "segment-routing"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show isis segment-routing adjacency-segments", ofmt="json")] @@ -446,8 +436,7 @@ class VerifyISISSegmentRoutingAdjacencySegments(AntaTest): class VerifyISISSegmentRoutingDataplane(AntaTest): - """ - Verify dataplane of a list of ISIS-SR instances. + """Verify dataplane of a list of ISIS-SR instances. Expected Results ---------------- @@ -468,8 +457,6 @@ class VerifyISISSegmentRoutingDataplane(AntaTest): ``` """ - name = "VerifyISISSegmentRoutingDataplane" - description = "Verify dataplane of a list of ISIS-SR instances" categories: ClassVar[list[str]] = ["isis", "segment-routing"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show isis segment-routing", ofmt="json")] @@ -530,8 +517,7 @@ class VerifyISISSegmentRoutingDataplane(AntaTest): class VerifyISISSegmentRoutingTunnels(AntaTest): - """ - Verify ISIS-SR tunnels computed by device. + """Verify ISIS-SR tunnels computed by device. 
Expected Results ---------------- @@ -543,26 +529,24 @@ class VerifyISISSegmentRoutingTunnels(AntaTest): -------- ```yaml anta.tests.routing: - isis: + isis: - VerifyISISSegmentRoutingTunnels: entries: - # Check only endpoint - - endpoint: 1.0.0.122/32 - # Check endpoint and via TI-LFA - - endpoint: 1.0.0.13/32 - vias: - - type: tunnel - tunnel_id: ti-lfa - # Check endpoint and via IP routers - - endpoint: 1.0.0.14/32 - vias: - - type: ip - nexthop: 1.1.1.1 + # Check only endpoint + - endpoint: 1.0.0.122/32 + # Check endpoint and via TI-LFA + - endpoint: 1.0.0.13/32 + vias: + - type: tunnel + tunnel_id: ti-lfa + # Check endpoint and via IP routers + - endpoint: 1.0.0.14/32 + vias: + - type: ip + nexthop: 1.1.1.1 ``` """ - name = "VerifyISISSegmentRoutingTunnels" - description = "Verify ISIS-SR tunnels computed by device" categories: ClassVar[list[str]] = ["isis", "segment-routing"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show isis segment-routing tunnel", ofmt="json")] @@ -638,8 +622,7 @@ class VerifyISISSegmentRoutingTunnels(AntaTest): self.result.is_failure("\n".join(failure_message)) def _check_tunnel_type(self, via_input: VerifyISISSegmentRoutingTunnels.Input.Entry.Vias, eos_entry: dict[str, Any]) -> bool: - """ - Check if the tunnel type specified in `via_input` matches any of the tunnel types in `eos_entry`. + """Check if the tunnel type specified in `via_input` matches any of the tunnel types in `eos_entry`. Parameters ---------- @@ -666,8 +649,7 @@ class VerifyISISSegmentRoutingTunnels(AntaTest): return True def _check_tunnel_nexthop(self, via_input: VerifyISISSegmentRoutingTunnels.Input.Entry.Vias, eos_entry: dict[str, Any]) -> bool: - """ - Check if the tunnel nexthop matches the given input. + """Check if the tunnel nexthop matches the given input. Parameters ---------- @@ -694,8 +676,7 @@ class VerifyISISSegmentRoutingTunnels(AntaTest): return True def _check_tunnel_interface(self, via_input: VerifyISISSegmentRoutingTunnels.Input.Entry.Vias, eos_entry: dict[str, Any]) -> bool: - """ - Check if the tunnel interface exists in the given EOS entry. + """Check if the tunnel interface exists in the given EOS entry. Parameters ---------- @@ -722,8 +703,7 @@ class VerifyISISSegmentRoutingTunnels(AntaTest): return True def _check_tunnel_id(self, via_input: VerifyISISSegmentRoutingTunnels.Input.Entry.Vias, eos_entry: dict[str, Any]) -> bool: - """ - Check if the tunnel ID matches any of the tunnel IDs in the EOS entry's vias. + """Check if the tunnel ID matches any of the tunnel IDs in the EOS entry's vias. Parameters ---------- diff --git a/anta/tests/routing/ospf.py b/anta/tests/routing/ospf.py index 3ffd81d..d5d12e2 100644 --- a/anta/tests/routing/ospf.py +++ b/anta/tests/routing/ospf.py @@ -109,8 +109,6 @@ class VerifyOSPFNeighborState(AntaTest): ``` """ - name = "VerifyOSPFNeighborState" - description = "Verifies all OSPF neighbors are in FULL state." categories: ClassVar[list[str]] = ["ospf"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip ospf neighbor", revision=1)] @@ -146,8 +144,6 @@ class VerifyOSPFNeighborCount(AntaTest): ``` """ - name = "VerifyOSPFNeighborCount" - description = "Verifies the number of OSPF neighbors in FULL state is the one we expect." 
categories: ClassVar[list[str]] = ["ospf"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip ospf neighbor", revision=1)] @@ -190,7 +186,6 @@ class VerifyOSPFMaxLSA(AntaTest): ``` """ - name = "VerifyOSPFMaxLSA" description = "Verifies all OSPF instances did not cross the maximum LSA threshold." categories: ClassVar[list[str]] = ["ospf"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip ospf", revision=1)] diff --git a/anta/tests/security.py b/anta/tests/security.py index 71c9f12..38bf240 100644 --- a/anta/tests/security.py +++ b/anta/tests/security.py @@ -8,12 +8,12 @@ from __future__ import annotations # Mypy does not understand AntaTest.Input typing # mypy: disable-error-code=attr-defined from datetime import datetime, timezone -from ipaddress import IPv4Address from typing import TYPE_CHECKING, ClassVar, get_args from pydantic import BaseModel, Field, model_validator from anta.custom_types import EcdsaKeySize, EncryptionAlgorithm, PositiveInteger, RsaKeySize +from anta.input_models.security import IPSecPeer, IPSecPeers from anta.models import AntaCommand, AntaTemplate, AntaTest from anta.tools import get_failed_logs, get_item, get_value @@ -42,8 +42,6 @@ class VerifySSHStatus(AntaTest): ``` """ - name = "VerifySSHStatus" - description = "Verifies if the SSHD agent is disabled in the default VRF." categories: ClassVar[list[str]] = ["security"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show management ssh", ofmt="text")] @@ -83,7 +81,6 @@ class VerifySSHIPv4Acl(AntaTest): ``` """ - name = "VerifySSHIPv4Acl" description = "Verifies if the SSHD agent has IPv4 ACL(s) configured." categories: ClassVar[list[str]] = ["security"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show management ssh ip access-list summary", revision=1)] @@ -132,7 +129,6 @@ class VerifySSHIPv6Acl(AntaTest): ``` """ - name = "VerifySSHIPv6Acl" description = "Verifies if the SSHD agent has IPv6 ACL(s) configured." categories: ClassVar[list[str]] = ["security"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show management ssh ipv6 access-list summary", revision=1)] @@ -179,8 +175,6 @@ class VerifyTelnetStatus(AntaTest): ``` """ - name = "VerifyTelnetStatus" - description = "Verifies if Telnet is disabled in the default VRF." categories: ClassVar[list[str]] = ["security"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show management telnet", revision=1)] @@ -210,8 +204,6 @@ class VerifyAPIHttpStatus(AntaTest): ``` """ - name = "VerifyAPIHttpStatus" - description = "Verifies if eAPI HTTP server is disabled globally." categories: ClassVar[list[str]] = ["security"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show management api http-commands", revision=1)] @@ -242,7 +234,6 @@ class VerifyAPIHttpsSSL(AntaTest): ``` """ - name = "VerifyAPIHttpsSSL" description = "Verifies if the eAPI has a valid SSL profile." categories: ClassVar[list[str]] = ["security"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show management api http-commands", revision=1)] @@ -285,8 +276,6 @@ class VerifyAPIIPv4Acl(AntaTest): ``` """ - name = "VerifyAPIIPv4Acl" - description = "Verifies if eAPI has the right number IPv4 ACL(s) configured for a specified VRF." 
categories: ClassVar[list[str]] = ["security"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show management api http-commands ip access-list summary", revision=1)] @@ -335,8 +324,6 @@ class VerifyAPIIPv6Acl(AntaTest): ``` """ - name = "VerifyAPIIPv6Acl" - description = "Verifies if eAPI has the right number IPv6 ACL(s) configured for a specified VRF." categories: ClassVar[list[str]] = ["security"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show management api http-commands ipv6 access-list summary", revision=1)] @@ -395,8 +382,6 @@ class VerifyAPISSLCertificate(AntaTest): ``` """ - name = "VerifyAPISSLCertificate" - description = "Verifies the eAPI SSL certificate expiry, common subject name, encryption algorithm and key size." categories: ClassVar[list[str]] = ["security"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ AntaCommand(command="show management security ssl certificate", revision=1), @@ -498,15 +483,13 @@ class VerifyBannerLogin(AntaTest): ```yaml anta.tests.security: - VerifyBannerLogin: - login_banner: | - # Copyright (c) 2023-2024 Arista Networks, Inc. - # Use of this source code is governed by the Apache License 2.0 - # that can be found in the LICENSE file. + login_banner: | + # Copyright (c) 2023-2024 Arista Networks, Inc. + # Use of this source code is governed by the Apache License 2.0 + # that can be found in the LICENSE file. ``` """ - name = "VerifyBannerLogin" - description = "Verifies the login banner of a device." categories: ClassVar[list[str]] = ["security"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show banner login", revision=1)] @@ -542,15 +525,13 @@ class VerifyBannerMotd(AntaTest): ```yaml anta.tests.security: - VerifyBannerMotd: - motd_banner: | - # Copyright (c) 2023-2024 Arista Networks, Inc. - # Use of this source code is governed by the Apache License 2.0 - # that can be found in the LICENSE file. + motd_banner: | + # Copyright (c) 2023-2024 Arista Networks, Inc. + # Use of this source code is governed by the Apache License 2.0 + # that can be found in the LICENSE file. ``` """ - name = "VerifyBannerMotd" - description = "Verifies the motd banner of a device." categories: ClassVar[list[str]] = ["security"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show banner motd", revision=1)] @@ -604,8 +585,6 @@ class VerifyIPv4ACL(AntaTest): ``` """ - name = "VerifyIPv4ACL" - description = "Verifies the configuration of IPv4 ACLs." categories: ClassVar[list[str]] = ["security"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show ip access-lists {acl}", revision=1)] @@ -669,8 +648,7 @@ class VerifyIPv4ACL(AntaTest): class VerifyIPSecConnHealth(AntaTest): - """ - Verifies all IPv4 security connections. + """Verifies all IPv4 security connections. Expected Results ---------------- @@ -685,8 +663,6 @@ class VerifyIPSecConnHealth(AntaTest): ``` """ - name = "VerifyIPSecConnHealth" - description = "Verifies all IPv4 security connections." categories: ClassVar[list[str]] = ["security"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip security connection vrf all")] @@ -716,16 +692,22 @@ class VerifyIPSecConnHealth(AntaTest): class VerifySpecificIPSecConn(AntaTest): - """ - Verifies the state of IPv4 security connections for a specified peer. + """Verifies the IPv4 security connections. 
- It optionally allows for the verification of a specific path for a peer by providing source and destination addresses. - If these addresses are not provided, it will verify all paths for the specified peer. + This test performs the following checks for each peer: + + 1. Validates that the VRF is configured. + 2. Checks for the presence of IPv4 security connections for the specified peer. + 3. For each relevant peer: + - If source and destination addresses are provided, verifies the security connection for the specific path exists and is `Established`. + - If no addresses are provided, verifies that all security connections associated with the peer are `Established`. Expected Results ---------------- - * Success: The test passes if the IPv4 security connection for a peer is established in the specified VRF. - * Failure: The test fails if IPv4 security is not configured, a connection is not found for a peer, or the connection is not established in the specified VRF. + * Success: If all checks pass for all specified IPv4 security connections. + * Failure: If any of the following occur: + - No IPv4 security connections are found for the peer + - The security connection is not established for the specified path or any of the peer connections is not established when no path is specified. Examples -------- @@ -744,36 +726,16 @@ class VerifySpecificIPSecConn(AntaTest): ``` """ - name = "VerifySpecificIPSecConn" - description = "Verifies IPv4 security connections for a peer." categories: ClassVar[list[str]] = ["security"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show ip security connection vrf {vrf} path peer {peer}")] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show ip security connection vrf {vrf} path peer {peer}", revision=2)] class Input(AntaTest.Input): """Input model for the VerifySpecificIPSecConn test.""" - ip_security_connections: list[IPSecPeers] + ip_security_connections: list[IPSecPeer] """List of IP4v security peers.""" - - class IPSecPeers(BaseModel): - """Details of IPv4 security peers.""" - - peer: IPv4Address - """IPv4 address of the peer.""" - - vrf: str = "default" - """Optional VRF for the IP security peer.""" - - connections: list[IPSecConn] | None = None - """Optional list of IPv4 security connections of a peer.""" - - class IPSecConn(BaseModel): - """Details of IPv4 security connections for a peer.""" - - source_address: IPv4Address - """Source IPv4 address of the connection.""" - destination_address: IPv4Address - """Destination IPv4 address of the connection.""" + IPSecPeers: ClassVar[type[IPSecPeers]] = IPSecPeers + """To maintain backward compatibility.""" def render(self, template: AntaTemplate) -> list[AntaCommand]: """Render the template for each input IP Sec connection.""" @@ -783,15 +745,15 @@ class VerifySpecificIPSecConn(AntaTest): def test(self) -> None: """Main test function for VerifySpecificIPSecConn.""" self.result.is_success() + for command_output, input_peer in zip(self.instance_commands, self.inputs.ip_security_connections): conn_output = command_output.json_output["connections"] - peer = command_output.params.peer - vrf = command_output.params.vrf conn_input = input_peer.connections + vrf = input_peer.vrf # Check if IPv4 security connection is configured if not conn_output: - self.result.is_failure(f"No IPv4 security connection configured for peer `{peer}`.") + self.result.is_failure(f"{input_peer} - Not configured") continue # If connection details are not provided then check 
all connections of a peer @@ -801,10 +763,8 @@ class VerifySpecificIPSecConn(AntaTest): if state != "Established": source = conn_data.get("saddr") destination = conn_data.get("daddr") - vrf = conn_data.get("tunnelNs") self.result.is_failure( - f"Expected state of IPv4 security connection `source:{source} destination:{destination} vrf:{vrf}` for peer `{peer}` is `Established` " - f"but found `{state}` instead." + f"{input_peer} Source: {source} Destination: {destination} - Connection down - Expected: Established, Actual: {state}" ) continue @@ -820,19 +780,14 @@ class VerifySpecificIPSecConn(AntaTest): if (source_input, destination_input, vrf) in existing_connections: existing_state = existing_connections[(source_input, destination_input, vrf)] if existing_state != "Established": - self.result.is_failure( - f"Expected state of IPv4 security connection `source:{source_input} destination:{destination_input} vrf:{vrf}` " - f"for peer `{peer}` is `Established` but found `{existing_state}` instead." - ) + failure = f"Expected: Established, Actual: {existing_state}" + self.result.is_failure(f"{input_peer} Source: {source_input} Destination: {destination_input} - Connection down - {failure}") else: - self.result.is_failure( - f"IPv4 security connection `source:{source_input} destination:{destination_input} vrf:{vrf}` for peer `{peer}` is not found." - ) + self.result.is_failure(f"{input_peer} Source: {source_input} Destination: {destination_input} - Connection not found.") class VerifyHardwareEntropy(AntaTest): - """ - Verifies hardware entropy generation is enabled on device. + """Verifies hardware entropy generation is enabled on device. Expected Results ---------------- @@ -847,8 +802,6 @@ class VerifyHardwareEntropy(AntaTest): ``` """ - name = "VerifyHardwareEntropy" - description = "Verifies hardware entropy generation is enabled on device." categories: ClassVar[list[str]] = ["security"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show management security")] diff --git a/anta/tests/services.py b/anta/tests/services.py index 6184263..dab1b3a 100644 --- a/anta/tests/services.py +++ b/anta/tests/services.py @@ -7,14 +7,14 @@ from __future__ import annotations # Mypy does not understand AntaTest.Input typing # mypy: disable-error-code=attr-defined -from ipaddress import IPv4Address, IPv6Address from typing import ClassVar -from pydantic import BaseModel, Field +from pydantic import BaseModel from anta.custom_types import ErrDisableInterval, ErrDisableReasons +from anta.input_models.services import DnsServer from anta.models import AntaCommand, AntaTemplate, AntaTest -from anta.tools import get_dict_superset, get_failed_logs, get_item +from anta.tools import get_dict_superset, get_failed_logs class VerifyHostname(AntaTest): @@ -34,8 +34,6 @@ class VerifyHostname(AntaTest): ``` """ - name = "VerifyHostname" - description = "Verifies the hostname of a device." categories: ClassVar[list[str]] = ["services"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show hostname", revision=1)] @@ -77,7 +75,6 @@ class VerifyDNSLookup(AntaTest): ``` """ - name = "VerifyDNSLookup" description = "Verifies the DNS name to IP address resolution." 
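`VerifySpecificIPSecConn` above tracks each peer's security connections keyed by `(source, destination, vrf)` and expects every checked path to be `Established`. The sketch below reproduces only that comparison step; the flat connection table is an assumed stand-in, not the real shape of the `show ip security connection` output.

```python
# Hypothetical connection table: (source, destination, vrf) -> state.
# The real eAPI payload shape differs; this only illustrates the check.
existing_connections = {
    ("172.18.3.2", "172.18.2.2", "default"): "Established",
    ("100.64.2.2", "100.64.1.2", "MGMT"): "Idle",
}

# Paths the catalog asks to verify.
expected_paths = [
    ("172.18.3.2", "172.18.2.2", "default"),
    ("100.64.2.2", "100.64.1.2", "MGMT"),
]

for source, destination, vrf in expected_paths:
    state = existing_connections.get((source, destination, vrf))
    if state is None:
        print(f"Source: {source} Destination: {destination} - Connection not found.")
    elif state != "Established":
        print(f"Source: {source} Destination: {destination} - Connection down - Expected: Established, Actual: {state}")
```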
categories: ClassVar[list[str]] = ["services"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="bash timeout 10 nslookup {domain}", revision=1)] @@ -109,10 +106,17 @@ class VerifyDNSLookup(AntaTest): class VerifyDNSServers(AntaTest): """Verifies if the DNS (Domain Name Service) servers are correctly configured. + This test performs the following checks for each specified DNS Server: + + 1. Confirming correctly registered with a valid IPv4 or IPv6 address with the designated VRF. + 2. Ensuring an appropriate priority level. + Expected Results ---------------- * Success: The test will pass if the DNS server specified in the input is configured with the correct VRF and priority. - * Failure: The test will fail if the DNS server is not configured or if the VRF and priority of the DNS server do not match the input. + * Failure: The test will fail if any of the following conditions are met: + - The provided DNS server is not configured. + - The provided DNS server with designated VRF and priority does not match the expected information. Examples -------- @@ -129,8 +133,6 @@ class VerifyDNSServers(AntaTest): ``` """ - name = "VerifyDNSServers" - description = "Verifies if the DNS servers are correctly configured." categories: ClassVar[list[str]] = ["services"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ip name-server", revision=1)] @@ -139,38 +141,28 @@ class VerifyDNSServers(AntaTest): dns_servers: list[DnsServer] """List of DNS servers to verify.""" - - class DnsServer(BaseModel): - """Model for a DNS server.""" - - server_address: IPv4Address | IPv6Address - """The IPv4/IPv6 address of the DNS server.""" - vrf: str = "default" - """The VRF for the DNS server. Defaults to 'default' if not provided.""" - priority: int = Field(ge=0, le=4) - """The priority of the DNS server from 0 to 4, lower is first.""" + DnsServer: ClassVar[type[DnsServer]] = DnsServer @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyDNSServers.""" - command_output = self.instance_commands[0].json_output["nameServerConfigs"] self.result.is_success() + + command_output = self.instance_commands[0].json_output["nameServerConfigs"] for server in self.inputs.dns_servers: address = str(server.server_address) vrf = server.vrf priority = server.priority input_dict = {"ipAddr": address, "vrf": vrf} - if get_item(command_output, "ipAddr", address) is None: - self.result.is_failure(f"DNS server `{address}` is not configured with any VRF.") - continue - + # Check if the DNS server is configured with specified VRF. if (output := get_dict_superset(command_output, input_dict)) is None: - self.result.is_failure(f"DNS server `{address}` is not configured with VRF `{vrf}`.") + self.result.is_failure(f"{server} - Not configured") continue + # Check if the DNS server priority matches with expected. if output["priority"] != priority: - self.result.is_failure(f"For DNS server `{address}`, the expected priority is `{priority}`, but `{output['priority']}` was found instead.") + self.result.is_failure(f"{server} - Incorrect priority - Priority: {output['priority']}") class VerifyErrdisableRecovery(AntaTest): @@ -194,8 +186,6 @@ class VerifyErrdisableRecovery(AntaTest): ``` """ - name = "VerifyErrdisableRecovery" - description = "Verifies the errdisable recovery reason, status, and interval." 
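`VerifyDNSServers` above now locates the configured name server with `get_dict_superset`, the first dictionary in `nameServerConfigs` containing all of the requested key/value pairs, and only then compares the priority. Below is a simplified stand-in for the helper together with a hypothetical payload; it is not the actual `anta.tools` code.

```python
from typing import Any


def get_dict_superset(list_of_dicts: list[dict[str, Any]], input_dict: dict[str, Any], default: Any = None) -> Any:
    """Return the first dict that is a superset of input_dict (simplified stand-in for anta.tools.get_dict_superset)."""
    return next((d for d in list_of_dicts if input_dict.items() <= d.items()), default)


# Hypothetical 'show ip name-server' nameServerConfigs payload.
name_server_configs = [
    {"ipAddr": "10.14.0.1", "vrf": "default", "priority": 0},
    {"ipAddr": "10.14.0.11", "vrf": "MGMT", "priority": 1},
]

server = {"ipAddr": "10.14.0.11", "vrf": "MGMT"}
entry = get_dict_superset(name_server_configs, server)
if entry is None:
    print(f"{server} - Not configured")
elif entry["priority"] != 1:
    print(f"{server} - Incorrect priority - Priority: {entry['priority']}")
```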
categories: ClassVar[list[str]] = ["services"] # NOTE: Only `text` output format is supported for this command commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show errdisable recovery", ofmt="text")] diff --git a/anta/tests/snmp.py b/anta/tests/snmp.py index 217e320..b8bd73d 100644 --- a/anta/tests/snmp.py +++ b/anta/tests/snmp.py @@ -34,7 +34,6 @@ class VerifySnmpStatus(AntaTest): ``` """ - name = "VerifySnmpStatus" description = "Verifies if the SNMP agent is enabled." categories: ClassVar[list[str]] = ["snmp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show snmp", revision=1)] @@ -73,7 +72,6 @@ class VerifySnmpIPv4Acl(AntaTest): ``` """ - name = "VerifySnmpIPv4Acl" description = "Verifies if the SNMP agent has IPv4 ACL(s) configured." categories: ClassVar[list[str]] = ["snmp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show snmp ipv4 access-list summary", revision=1)] @@ -122,7 +120,6 @@ class VerifySnmpIPv6Acl(AntaTest): ``` """ - name = "VerifySnmpIPv6Acl" description = "Verifies if the SNMP agent has IPv6 ACL(s) configured." categories: ClassVar[list[str]] = ["snmp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show snmp ipv6 access-list summary", revision=1)] @@ -170,8 +167,6 @@ class VerifySnmpLocation(AntaTest): ``` """ - name = "VerifySnmpLocation" - description = "Verifies the SNMP location of a device." categories: ClassVar[list[str]] = ["snmp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show snmp", revision=1)] @@ -213,8 +208,6 @@ class VerifySnmpContact(AntaTest): ``` """ - name = "VerifySnmpContact" - description = "Verifies the SNMP contact of a device." categories: ClassVar[list[str]] = ["snmp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show snmp", revision=1)] @@ -261,8 +254,6 @@ class VerifySnmpPDUCounters(AntaTest): ``` """ - name = "VerifySnmpPDUCounters" - description = "Verifies the SNMP PDU counters." categories: ClassVar[list[str]] = ["snmp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show snmp", revision=1)] @@ -317,8 +308,6 @@ class VerifySnmpErrorCounters(AntaTest): - inBadCommunityNames """ - name = "VerifySnmpErrorCounters" - description = "Verifies the SNMP error counters." categories: ClassVar[list[str]] = ["snmp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show snmp", revision=1)] diff --git a/anta/tests/software.py b/anta/tests/software.py index 4028dd9..9a41881 100644 --- a/anta/tests/software.py +++ b/anta/tests/software.py @@ -34,7 +34,6 @@ class VerifyEOSVersion(AntaTest): ``` """ - name = "VerifyEOSVersion" description = "Verifies the EOS version of the device." categories: ClassVar[list[str]] = ["software"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show version", revision=1)] @@ -74,7 +73,6 @@ class VerifyTerminAttrVersion(AntaTest): ``` """ - name = "VerifyTerminAttrVersion" description = "Verifies the TerminAttr version of the device." categories: ClassVar[list[str]] = ["software"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show version detail", revision=1)] @@ -112,8 +110,6 @@ class VerifyEOSExtensions(AntaTest): ``` """ - name = "VerifyEOSExtensions" - description = "Verifies that all EOS extensions installed on the device are enabled for boot persistence." 
categories: ClassVar[list[str]] = ["software"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ AntaCommand(command="show extensions", revision=2), diff --git a/anta/tests/stp.py b/anta/tests/stp.py index 3208f0c..93a0d2e 100644 --- a/anta/tests/stp.py +++ b/anta/tests/stp.py @@ -36,8 +36,6 @@ class VerifySTPMode(AntaTest): ``` """ - name = "VerifySTPMode" - description = "Verifies the configured STP mode for a provided list of VLAN(s)." categories: ClassVar[list[str]] = ["stp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show spanning-tree vlan {vlan}", revision=1)] @@ -93,8 +91,6 @@ class VerifySTPBlockedPorts(AntaTest): ``` """ - name = "VerifySTPBlockedPorts" - description = "Verifies there is no STP blocked ports." categories: ClassVar[list[str]] = ["stp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show spanning-tree blockedports", revision=1)] @@ -126,8 +122,6 @@ class VerifySTPCounters(AntaTest): ``` """ - name = "VerifySTPCounters" - description = "Verifies there is no errors in STP BPDU packets." categories: ClassVar[list[str]] = ["stp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show spanning-tree counters", revision=1)] @@ -163,7 +157,6 @@ class VerifySTPForwardingPorts(AntaTest): ``` """ - name = "VerifySTPForwardingPorts" description = "Verifies that all interfaces are forwarding for a provided list of VLAN(s)." categories: ClassVar[list[str]] = ["stp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show spanning-tree topology vlan {vlan} status", revision=1)] @@ -222,8 +215,6 @@ class VerifySTPRootPriority(AntaTest): ``` """ - name = "VerifySTPRootPriority" - description = "Verifies the STP root priority for a provided list of VLAN or MST instance ID(s)." categories: ClassVar[list[str]] = ["stp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show spanning-tree root detail", revision=1)] @@ -279,8 +270,6 @@ class VerifyStpTopologyChanges(AntaTest): ``` """ - name = "VerifyStpTopologyChanges" - description = "Verifies the number of changes across all interfaces in the Spanning Tree Protocol (STP) topology is below a threshold." categories: ClassVar[list[str]] = ["stp"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show spanning-tree topology status detail", revision=1)] diff --git a/anta/tests/stun.py b/anta/tests/stun.py index f06b5a0..2be13c4 100644 --- a/anta/tests/stun.py +++ b/anta/tests/stun.py @@ -7,32 +7,36 @@ # mypy: disable-error-code=attr-defined from __future__ import annotations -from ipaddress import IPv4Address from typing import ClassVar -from pydantic import BaseModel - -from anta.custom_types import Port +from anta.decorators import deprecated_test_class +from anta.input_models.stun import StunClientTranslation from anta.models import AntaCommand, AntaTemplate, AntaTest -from anta.tools import get_failed_logs, get_value +from anta.tools import get_value -class VerifyStunClient(AntaTest): - """ - Verifies the configuration of the STUN client, specifically the IPv4 source address and port. +class VerifyStunClientTranslation(AntaTest): + """Verifies the translation for a source address on a STUN client. - Optionally, it can also verify the public address and port. + This test performs the following checks for each specified address family: + + 1. Validates that there is a translation for the source address on the STUN client. + 2. 
If public IP and port details are provided, validates their correctness against the configuration. Expected Results ---------------- - * Success: The test will pass if the STUN client is correctly configured with the specified IPv4 source address/port and public address/port. - * Failure: The test will fail if the STUN client is not configured or if the IPv4 source address, public address, or port details are incorrect. + * Success: If all of the following conditions are met: + - The test will pass if the source address translation is present. + - If public IP and port details are provided, they must also match the translation information. + * Failure: If any of the following occur: + - There is no translation for the source address on the STUN client. + - The public IP or port details, if specified, are incorrect. Examples -------- ```yaml anta.tests.stun: - - VerifyStunClient: + - VerifyStunClientTranslation: stun_clients: - source_address: 172.18.3.2 public_address: 172.18.3.21 @@ -45,27 +49,15 @@ class VerifyStunClient(AntaTest): ``` """ - name = "VerifyStunClient" - description = "Verifies the STUN client is configured with the specified IPv4 source address and port. Validate the public IP and port if provided." categories: ClassVar[list[str]] = ["stun"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show stun client translations {source_address} {source_port}")] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show stun client translations {source_address} {source_port}", revision=1)] class Input(AntaTest.Input): - """Input model for the VerifyStunClient test.""" - - stun_clients: list[ClientAddress] - - class ClientAddress(BaseModel): - """Source and public address/port details of STUN client.""" + """Input model for the VerifyStunClientTranslation test.""" - source_address: IPv4Address - """IPv4 source address of STUN client.""" - source_port: Port = 4500 - """Source port number for STUN client.""" - public_address: IPv4Address | None = None - """Optional IPv4 public address of STUN client.""" - public_port: Port | None = None - """Optional public port number for STUN client.""" + stun_clients: list[StunClientTranslation] + """List of STUN clients.""" + StunClientTranslation: ClassVar[type[StunClientTranslation]] = StunClientTranslation def render(self, template: AntaTemplate) -> list[AntaCommand]: """Render the template for each STUN translation.""" @@ -73,53 +65,61 @@ class VerifyStunClient(AntaTest): @AntaTest.anta_test def test(self) -> None: - """Main test function for VerifyStunClient.""" + """Main test function for VerifyStunClientTranslation.""" self.result.is_success() # Iterate over each command output and corresponding client input for command, client_input in zip(self.instance_commands, self.inputs.stun_clients): bindings = command.json_output["bindings"] - source_address = str(command.params.source_address) - source_port = command.params.source_port + input_public_address = client_input.public_address + input_public_port = client_input.public_port # If no bindings are found for the STUN client, mark the test as a failure and continue with the next client if not bindings: - self.result.is_failure(f"STUN client transaction for source `{source_address}:{source_port}` is not found.") + self.result.is_failure(f"{client_input} - STUN client translation not found.") continue - # Extract the public address and port from the client input - public_address = client_input.public_address - public_port = 
client_input.public_port - # Extract the transaction ID from the bindings transaction_id = next(iter(bindings.keys())) - # Prepare the actual and expected STUN data for comparison - actual_stun_data = { - "source ip": get_value(bindings, f"{transaction_id}.sourceAddress.ip"), - "source port": get_value(bindings, f"{transaction_id}.sourceAddress.port"), - } - expected_stun_data = {"source ip": source_address, "source port": source_port} + # Verifying the public address if provided + if input_public_address and str(input_public_address) != (actual_public_address := get_value(bindings, f"{transaction_id}.publicAddress.ip")): + self.result.is_failure(f"{client_input} - Incorrect public-facing address - Expected: {input_public_address} Actual: {actual_public_address}") - # If public address is provided, add it to the actual and expected STUN data - if public_address is not None: - actual_stun_data["public ip"] = get_value(bindings, f"{transaction_id}.publicAddress.ip") - expected_stun_data["public ip"] = str(public_address) + # Verifying the public port if provided + if input_public_port and input_public_port != (actual_public_port := get_value(bindings, f"{transaction_id}.publicAddress.port")): + self.result.is_failure(f"{client_input} - Incorrect public-facing port - Expected: {input_public_port} Actual: {actual_public_port}") - # If public port is provided, add it to the actual and expected STUN data - if public_port is not None: - actual_stun_data["public port"] = get_value(bindings, f"{transaction_id}.publicAddress.port") - expected_stun_data["public port"] = public_port - # If the actual STUN data does not match the expected STUN data, mark the test as failure - if actual_stun_data != expected_stun_data: - failed_log = get_failed_logs(expected_stun_data, actual_stun_data) - self.result.is_failure(f"For STUN source `{source_address}:{source_port}`:{failed_log}") +@deprecated_test_class(new_tests=["VerifyStunClientTranslation"], removal_in_version="v2.0.0") +class VerifyStunClient(VerifyStunClientTranslation): + """(Deprecated) Verifies the translation for a source address on a STUN client. + Alias for the VerifyStunClientTranslation test to maintain backward compatibility. + When initialized, it will emit a deprecation warning and call the VerifyStunClientTranslation test. -class VerifyStunServer(AntaTest): + Examples + -------- + ```yaml + anta.tests.stun: + - VerifyStunClient: + stun_clients: + - source_address: 172.18.3.2 + public_address: 172.18.3.21 + source_port: 4500 + public_port: 6006 + ``` """ - Verifies the STUN server status is enabled and running. + + # TODO: Remove this class in ANTA v2.0.0. + + # required to redefine name an description to overwrite parent class. + name = "VerifyStunClient" + description = "(Deprecated) Verifies the translation for a source address on a STUN client." + + +class VerifyStunServer(AntaTest): + """Verifies the STUN server status is enabled and running. Expected Results ---------------- @@ -134,8 +134,6 @@ class VerifyStunServer(AntaTest): ``` """ - name = "VerifyStunServer" - description = "Verifies the STUN server status is enabled and running." 
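`VerifyStunClientTranslation` above reads the `bindings` object returned by `show stun client translations <source> <port>` and, when the inputs provide them, compares `publicAddress.ip` and `publicAddress.port` against the expected values. A sketch of that check against a hypothetical bindings payload:

```python
# Hypothetical 'show stun client translations 172.18.3.2 4500' bindings payload.
bindings = {
    "000000010a64ff0100000000": {
        "sourceAddress": {"ip": "172.18.3.2", "port": 4500},
        "publicAddress": {"ip": "172.18.3.21", "port": 6006},
    }
}

expected_public_ip, expected_public_port = "172.18.3.21", 6006

if not bindings:
    print("STUN client translation not found.")
else:
    # Only one transaction is expected per source address/port pair.
    transaction_id = next(iter(bindings))
    public = bindings[transaction_id]["publicAddress"]
    if public["ip"] != expected_public_ip:
        print(f"Incorrect public-facing address - Expected: {expected_public_ip} Actual: {public['ip']}")
    if public["port"] != expected_public_port:
        print(f"Incorrect public-facing port - Expected: {expected_public_port} Actual: {public['port']}")
```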
categories: ClassVar[list[str]] = ["stun"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show stun server status", revision=1)] diff --git a/anta/tests/system.py b/anta/tests/system.py index d620d53..cceced6 100644 --- a/anta/tests/system.py +++ b/anta/tests/system.py @@ -8,14 +8,12 @@ from __future__ import annotations import re -from ipaddress import IPv4Address from typing import TYPE_CHECKING, ClassVar -from pydantic import BaseModel, Field - -from anta.custom_types import Hostname, PositiveInteger +from anta.custom_types import PositiveInteger +from anta.input_models.system import NTPServer from anta.models import AntaCommand, AntaTest -from anta.tools import get_failed_logs, get_value +from anta.tools import get_value if TYPE_CHECKING: from anta.models import AntaTemplate @@ -42,7 +40,6 @@ class VerifyUptime(AntaTest): ``` """ - name = "VerifyUptime" description = "Verifies the device uptime." categories: ClassVar[list[str]] = ["system"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show uptime", revision=1)] @@ -80,8 +77,6 @@ class VerifyReloadCause(AntaTest): ``` """ - name = "VerifyReloadCause" - description = "Verifies the last reload cause of the device." categories: ClassVar[list[str]] = ["system"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show reload cause", revision=1)] @@ -112,19 +107,18 @@ class VerifyCoredump(AntaTest): * Success: The test will pass if there are NO core dump(s) in /var/core. * Failure: The test will fail if there are core dump(s) in /var/core. - Info - ---- + Notes + ----- * This test will NOT check for minidump(s) generated by certain agents in /var/core/minidump. Examples -------- ```yaml anta.tests.system: - - VerifyCoreDump: + - VerifyCoredump: ``` """ - name = "VerifyCoredump" description = "Verifies there are no core dump files." categories: ClassVar[list[str]] = ["system"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show system coredump", revision=1)] @@ -143,7 +137,7 @@ class VerifyCoredump(AntaTest): class VerifyAgentLogs(AntaTest): - """Verifies that no agent crash reports are present on the device. + """Verifies there are no agent crash reports. Expected Results ---------------- @@ -158,8 +152,6 @@ class VerifyAgentLogs(AntaTest): ``` """ - name = "VerifyAgentLogs" - description = "Verifies there are no agent crash reports." categories: ClassVar[list[str]] = ["system"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show agent logs crash", ofmt="text")] @@ -191,8 +183,6 @@ class VerifyCPUUtilization(AntaTest): ``` """ - name = "VerifyCPUUtilization" - description = "Verifies whether the CPU utilization is below 75%." categories: ClassVar[list[str]] = ["system"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show processes top once", revision=1)] @@ -223,8 +213,6 @@ class VerifyMemoryUtilization(AntaTest): ``` """ - name = "VerifyMemoryUtilization" - description = "Verifies whether the memory utilization is below 75%." categories: ClassVar[list[str]] = ["system"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show version", revision=1)] @@ -255,8 +243,6 @@ class VerifyFileSystemUtilization(AntaTest): ``` """ - name = "VerifyFileSystemUtilization" - description = "Verifies that no partition is utilizing more than 75% of its disk space." 
categories: ClassVar[list[str]] = ["system"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="bash timeout 10 df -h", ofmt="text")] @@ -286,7 +272,6 @@ class VerifyNTP(AntaTest): ``` """ - name = "VerifyNTP" description = "Verifies if NTP is synchronised." categories: ClassVar[list[str]] = ["system"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ntp status", ofmt="text")] @@ -328,8 +313,6 @@ class VerifyNTPAssociations(AntaTest): ``` """ - name = "VerifyNTPAssociations" - description = "Verifies the Network Time Protocol (NTP) associations." categories: ClassVar[list[str]] = ["system"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show ntp associations")] @@ -338,55 +321,33 @@ class VerifyNTPAssociations(AntaTest): ntp_servers: list[NTPServer] """List of NTP servers.""" - - class NTPServer(BaseModel): - """Model for a NTP server.""" - - server_address: Hostname | IPv4Address - """The NTP server address as an IPv4 address or hostname. The NTP server name defined in the running configuration - of the device may change during DNS resolution, which is not handled in ANTA. Please provide the DNS-resolved server name. - For example, 'ntp.example.com' in the configuration might resolve to 'ntp3.example.com' in the device output.""" - preferred: bool = False - """Optional preferred for NTP server. If not provided, it defaults to `False`.""" - stratum: int = Field(ge=0, le=16) - """NTP stratum level (0 to 15) where 0 is the reference clock and 16 indicates unsynchronized. - Values should be between 0 and 15 for valid synchronization and 16 represents an out-of-sync state.""" + NTPServer: ClassVar[type[NTPServer]] = NTPServer @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyNTPAssociations.""" - failures: str = "" + self.result.is_success() - if not (peer_details := get_value(self.instance_commands[0].json_output, "peers")): - self.result.is_failure("None of NTP peers are not configured.") + if not (peers := get_value(self.instance_commands[0].json_output, "peers")): + self.result.is_failure("No NTP peers configured") return # Iterate over each NTP server. for ntp_server in self.inputs.ntp_servers: server_address = str(ntp_server.server_address) - preferred = ntp_server.preferred - stratum = ntp_server.stratum - # Check if NTP server details exists. - if (peer_detail := get_value(peer_details, server_address, separator="..")) is None: - failures += f"NTP peer {server_address} is not configured.\n" - continue + # We check `peerIpAddr` in the peer details - covering IPv4Address input, or the peer key - covering Hostname input. + matching_peer = next((peer for peer, peer_details in peers.items() if (server_address in {peer_details["peerIpAddr"], peer})), None) - # Collecting the expected NTP peer details. - expected_peer_details = {"condition": "candidate", "stratum": stratum} - if preferred: - expected_peer_details["condition"] = "sys.peer" - - # Collecting the actual NTP peer details. - actual_peer_details = {"condition": get_value(peer_detail, "condition"), "stratum": get_value(peer_detail, "stratumLevel")} + if not matching_peer: + self.result.is_failure(f"{ntp_server} - Not configured") + continue - # Collecting failures logs if any. - failure_logs = get_failed_logs(expected_peer_details, actual_peer_details) - if failure_logs: - failures += f"For NTP peer {server_address}:{failure_logs}\n" + # Collecting the expected/actual NTP peer details. 
+ exp_condition = "sys.peer" if ntp_server.preferred else "candidate" + exp_stratum = ntp_server.stratum + act_condition = get_value(peers[matching_peer], "condition") + act_stratum = get_value(peers[matching_peer], "stratumLevel") - # Check if there are any failures. - if not failures: - self.result.is_success() - else: - self.result.is_failure(failures) + if act_condition != exp_condition or act_stratum != exp_stratum: + self.result.is_failure(f"{ntp_server} - Bad association - Condition: {act_condition}, Stratum: {act_stratum}") diff --git a/anta/tests/vlan.py b/anta/tests/vlan.py index fdf91d8..b7b1bd4 100644 --- a/anta/tests/vlan.py +++ b/anta/tests/vlan.py @@ -38,7 +38,6 @@ class VerifyVlanInternalPolicy(AntaTest): ``` """ - name = "VerifyVlanInternalPolicy" description = "Verifies the VLAN internal allocation policy and the range of VLANs." categories: ClassVar[list[str]] = ["vlan"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show vlan internal allocation policy", revision=1)] diff --git a/anta/tests/vxlan.py b/anta/tests/vxlan.py index fe53816..e5f0a54 100644 --- a/anta/tests/vxlan.py +++ b/anta/tests/vxlan.py @@ -23,8 +23,8 @@ if TYPE_CHECKING: class VerifyVxlan1Interface(AntaTest): """Verifies if the Vxlan1 interface is configured and 'up/up'. - Warning - ------- + Warnings + -------- The name of this test has been updated from 'VerifyVxlan' for better representation. Expected Results @@ -41,7 +41,6 @@ class VerifyVxlan1Interface(AntaTest): ``` """ - name = "VerifyVxlan1Interface" description = "Verifies the Vxlan1 interface status." categories: ClassVar[list[str]] = ["vxlan"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show interfaces description", revision=1)] @@ -65,7 +64,7 @@ class VerifyVxlan1Interface(AntaTest): class VerifyVxlanConfigSanity(AntaTest): - """Verifies that no issues are detected with the VXLAN configuration. + """Verifies there are no VXLAN config-sanity inconsistencies. Expected Results ---------------- @@ -81,8 +80,6 @@ class VerifyVxlanConfigSanity(AntaTest): ``` """ - name = "VerifyVxlanConfigSanity" - description = "Verifies there are no VXLAN config-sanity inconsistencies." categories: ClassVar[list[str]] = ["vxlan"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show vxlan config-sanity", revision=1)] @@ -124,8 +121,6 @@ class VerifyVxlanVniBinding(AntaTest): ``` """ - name = "VerifyVxlanVniBinding" - description = "Verifies the VNI-VLAN bindings of the Vxlan1 interface." categories: ClassVar[list[str]] = ["vxlan"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show vxlan vni", revision=1)] @@ -187,8 +182,6 @@ class VerifyVxlanVtep(AntaTest): ``` """ - name = "VerifyVxlanVtep" - description = "Verifies the VTEP peers of the Vxlan1 interface" categories: ClassVar[list[str]] = ["vxlan"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show vxlan vtep", revision=1)] @@ -238,8 +231,6 @@ class VerifyVxlan1ConnSettings(AntaTest): ``` """ - name = "VerifyVxlan1ConnSettings" - description = "Verifies the interface vxlan1 source interface and UDP port." 
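The reworked `VerifyNTPAssociations` above matches each configured server either on the peer key (hostname inputs) or on `peerIpAddr` (IPv4 inputs), then expects `condition` to be `sys.peer` for a preferred server (`candidate` otherwise) and `stratumLevel` to match. The same matching logic, run against a hypothetical `show ntp associations` payload:

```python
# Hypothetical 'show ntp associations' peers payload.
peers = {
    "ntp1.example.com": {"peerIpAddr": "10.10.10.1", "condition": "sys.peer", "stratumLevel": 1},
    "ntp2.example.com": {"peerIpAddr": "10.10.10.2", "condition": "candidate", "stratumLevel": 2},
}

# (server_address, preferred, stratum) triples to validate.
ntp_servers = [("10.10.10.1", True, 1), ("ntp2.example.com", False, 2)]

for server_address, preferred, stratum in ntp_servers:
    matching_peer = next((peer for peer, details in peers.items() if server_address in {details["peerIpAddr"], peer}), None)
    if matching_peer is None:
        print(f"{server_address} - Not configured")
        continue
    exp_condition = "sys.peer" if preferred else "candidate"
    act_condition = peers[matching_peer]["condition"]
    act_stratum = peers[matching_peer]["stratumLevel"]
    if act_condition != exp_condition or act_stratum != stratum:
        print(f"{server_address} - Bad association - Condition: {act_condition}, Stratum: {act_stratum}")
```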
categories: ClassVar[list[str]] = ["vxlan"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show interfaces", revision=1)] diff --git a/anta/tools.py b/anta/tools.py index 4f73db9..8b116a0 100644 --- a/anta/tools.py +++ b/anta/tools.py @@ -94,8 +94,7 @@ def get_dict_superset( *, required: bool = False, ) -> Any: - """ - Get the first dictionary from a list of dictionaries that is a superset of the input dict. + """Get the first dictionary from a list of dictionaries that is a superset of the input dict. Returns the supplied default value or None if there is no match and "required" is False. @@ -378,7 +377,7 @@ def safe_command(command: str) -> str: def convert_categories(categories: list[str]) -> list[str]: """Convert categories for reports. - if the category is part of the defined acronym, transform it to upper case + If the category is part of the defined acronym, transform it to upper case otherwise capitalize the first letter. Parameters @@ -395,3 +394,24 @@ def convert_categories(categories: list[str]) -> list[str]: return [" ".join(word.upper() if word.lower() in ACRONYM_CATEGORIES else word.title() for word in category.split()) for category in categories] msg = f"Wrong input type '{type(categories)}' for convert_categories." raise TypeError(msg) + + +def format_data(data: dict[str, bool]) -> str: + """Format a data dictionary for logging purposes. + + Parameters + ---------- + data + A dictionary containing the data to format. + + Returns + ------- + str + The formatted data. + + Example + ------- + >>> format_data({"advertised": True, "received": True, "enabled": True}) + "Advertised: True, Received: True, Enabled: True" + """ + return ", ".join(f"{k.capitalize()}: {v}" for k, v in data.items()) diff --git a/asynceapi/__init__.py b/asynceapi/__init__.py index d6586cf..6d5a23b 100644 --- a/asynceapi/__init__.py +++ b/asynceapi/__init__.py @@ -9,4 +9,4 @@ from .config_session import SessionConfig from .device import Device from .errors import EapiCommandError -__all__ = ["Device", "SessionConfig", "EapiCommandError"] +__all__ = ["Device", "EapiCommandError", "SessionConfig"] diff --git a/asynceapi/aio_portcheck.py b/asynceapi/aio_portcheck.py index 0cab94c..deac043 100644 --- a/asynceapi/aio_portcheck.py +++ b/asynceapi/aio_portcheck.py @@ -34,8 +34,7 @@ __all__ = ["port_check_url"] async def port_check_url(url: URL, timeout: int = 5) -> bool: - """ - Open the port designated by the URL given the timeout in seconds. + """Open the port designated by the URL given the timeout in seconds. Parameters ---------- diff --git a/asynceapi/config_session.py b/asynceapi/config_session.py index df26d7d..7f83da4 100644 --- a/asynceapi/config_session.py +++ b/asynceapi/config_session.py @@ -29,8 +29,7 @@ __all__ = ["SessionConfig"] class SessionConfig: - """ - Send configuration to a device using the EOS session mechanism. + """Send configuration to a device using the EOS session mechanism. This is the preferred way of managing configuration changes. @@ -44,8 +43,7 @@ class SessionConfig: CLI_CFG_FACTORY_RESET = "rollback clean-config" def __init__(self, device: Device, name: str) -> None: - """ - Create a new instance of SessionConfig. + """Create a new instance of SessionConfig. The session config instance bound to the given device instance, and using the session `name`. 
@@ -81,8 +79,7 @@ class SessionConfig: # ------------------------------------------------------------------------- async def status_all(self) -> dict[str, Any]: - """ - Get the status of all the session config on the device. + """Get the status of all the session config on the device. Run the following command on the device: # show configuration sessions detail @@ -122,8 +119,7 @@ class SessionConfig: return await self._cli("show configuration sessions detail") # type: ignore[return-value] # json outformat returns dict[str, Any] async def status(self) -> dict[str, Any] | None: - """ - Get the status of a session config on the device. + """Get the status of a session config on the device. Run the following command on the device: # show configuration sessions detail @@ -179,8 +175,7 @@ class SessionConfig: return res["sessions"].get(self.name) async def push(self, content: list[str] | str, *, replace: bool = False) -> None: - """ - Send the configuration content to the device. + """Send the configuration content to the device. If `replace` is true, then the command "rollback clean-config" is issued before sending the configuration content. @@ -218,8 +213,7 @@ class SessionConfig: await self._cli(commands=commands) async def commit(self, timer: str | None = None) -> None: - """ - Commit the session config. + """Commit the session config. Run the following command on the device: # configure session <name> @@ -241,8 +235,7 @@ class SessionConfig: await self._cli(command) async def abort(self) -> None: - """ - Abort the configuration session. + """Abort the configuration session. Run the following command on the device: # configure session <name> abort @@ -250,8 +243,7 @@ class SessionConfig: await self._cli(f"{self._cli_config_session} abort") async def diff(self) -> str: - """ - Return the "diff" of the session config relative to the running config. + """Return the "diff" of the session config relative to the running config. Run the following command on the device: # show session-config named <name> diffs @@ -268,8 +260,7 @@ class SessionConfig: return await self._cli(f"show session-config named {self.name} diffs", ofmt="text") # type: ignore[return-value] # text outformat returns str async def load_file(self, filename: str, *, replace: bool = False) -> None: - """ - Load the configuration from <filename> into the session configuration. + """Load the configuration from <filename> into the session configuration. If the replace parameter is True then the file contents will replace the existing session config (load-replace). diff --git a/asynceapi/device.py b/asynceapi/device.py index 933ae64..c423c36 100644 --- a/asynceapi/device.py +++ b/asynceapi/device.py @@ -43,8 +43,7 @@ __all__ = ["Device"] class Device(httpx.AsyncClient): - """ - Represent the async JSON-RPC client that communicates with an Arista EOS device. + """Represent the async JSON-RPC client that communicates with an Arista EOS device. This class inherits directly from the httpx.AsyncClient, so any initialization options can be passed directly. @@ -63,8 +62,7 @@ class Device(httpx.AsyncClient): port: str | int | None = None, **kwargs: Any, # noqa: ANN401 ) -> None: - """ - Initialize the Device class. + """Initialize the Device class. As a subclass to httpx.AsyncClient, the caller can provide any of those initializers. Specific parameters for Device class are all optional and described below. 
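In the same spirit, here is a hedged sketch of driving the eAPI client directly. `check_connection()`, `cli()` and `EapiCommandError` are covered by the hunks that follow, and the credentials below are placeholders.

```python
# Sketch only: the Device constructor arguments are assumed, and the command
# list is arbitrary. cli(commands=[...]) returns one result per command.
import asyncio

from asynceapi import Device, EapiCommandError


async def main() -> None:
    device = Device(host="10.0.0.1", username="admin", password="admin")

    # Optional readiness check before sending commands.
    if not await device.check_connection():
        print("eAPI port is not reachable")
        return

    try:
        outputs = await device.cli(commands=["show version", "show hostname"])
        for output in outputs:
            print(output)
    except EapiCommandError as exc:
        # Raised when the device reports a failed command in the eAPI response.
        print(f"eAPI command failed: {exc}")


asyncio.run(main())
```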
@@ -111,8 +109,7 @@ class Device(httpx.AsyncClient): self.headers["Content-Type"] = "application/json-rpc" async def check_connection(self) -> bool: - """ - Check the target device to ensure that the eAPI port is open and accepting connections. + """Check the target device to ensure that the eAPI port is open and accepting connections. It is recommended that a Caller checks the connection before involving cli commands, but this step is not required. @@ -124,7 +121,7 @@ class Device(httpx.AsyncClient): """ return await port_check_url(self.base_url) - async def cli( # noqa: PLR0913 + async def cli( self, command: str | dict[str, Any] | None = None, commands: Sequence[str | dict[str, Any]] | None = None, @@ -136,8 +133,7 @@ class Device(httpx.AsyncClient): expand_aliases: bool = False, req_id: int | str | None = None, ) -> list[dict[str, Any] | str] | dict[str, Any] | str | None: - """ - Execute one or more CLI commands. + """Execute one or more CLI commands. Parameters ---------- @@ -199,7 +195,7 @@ class Device(httpx.AsyncClient): return None raise - def _jsonrpc_command( # noqa: PLR0913 + def _jsonrpc_command( self, commands: Sequence[str | dict[str, Any]] | None = None, ofmt: str | None = None, @@ -264,8 +260,7 @@ class Device(httpx.AsyncClient): return cmd async def jsonrpc_exec(self, jsonrpc: dict[str, Any]) -> list[dict[str, Any] | str]: - """ - Execute the JSON-RPC dictionary object. + """Execute the JSON-RPC dictionary object. Parameters ---------- diff --git a/asynceapi/errors.py b/asynceapi/errors.py index e6794b7..5fce9db 100644 --- a/asynceapi/errors.py +++ b/asynceapi/errors.py @@ -12,8 +12,7 @@ import httpx class EapiCommandError(RuntimeError): - """ - Exception class for EAPI command errors. + """Exception class for EAPI command errors. Attributes ---------- diff --git a/docs/advanced_usages/as-python-lib.md b/docs/advanced_usages/as-python-lib.md index 49c010f..fce5e7e 100644 --- a/docs/advanced_usages/as-python-lib.md +++ b/docs/advanced_usages/as-python-lib.md @@ -6,8 +6,8 @@ ANTA is a Python library that can be used in user applications. This section describes how you can leverage ANTA Python modules to help you create your own NRFU solution. -!!! tip - If you are unfamiliar with asyncio, refer to the Python documentation relevant to your Python version - https://docs.python.org/3/library/asyncio.html +> [!TIP] +> If you are unfamiliar with asyncio, refer to the Python documentation relevant to your Python version - https://docs.python.org/3/library/asyncio.html ## [AntaDevice](../api/device.md#anta.device.AntaDevice) Abstract Class @@ -47,8 +47,10 @@ The [AntaInventory](../api/inventory.md#anta.inventory.AntaInventory) class is a --8<-- "parse_anta_inventory_file.py" ``` -!!! note "How to create your inventory file" - Please visit this [dedicated section](../usage-inventory-catalog.md) for how to use inventory and catalog files. +> [!NOTE] +> **How to create your inventory file** +> +> Please visit this [dedicated section](../usage-inventory-catalog.md) for how to use inventory and catalog files. ### Run EOS commands diff --git a/docs/advanced_usages/custom-tests.md b/docs/advanced_usages/custom-tests.md index d79fe50..2fc61cc 100644 --- a/docs/advanced_usages/custom-tests.md +++ b/docs/advanced_usages/custom-tests.md @@ -4,8 +4,8 @@ ~ that can be found in the LICENSE file. --> -!!! info - This documentation applies for both creating tests in ANTA or creating your own test package. 
+> [!INFO] +> This documentation applies for both creating tests in ANTA or creating your own test package. ANTA is not only a Python library with a CLI and a collection of built-in tests, it is also a framework you can extend by building your own tests. @@ -15,7 +15,7 @@ A test is a Python class where a test function is defined and will be run by the ANTA provides an abstract class [AntaTest](../api/models.md#anta.models.AntaTest). This class does the heavy lifting and provide the logic to define, collect and test data. The code below is an example of a simple test in ANTA, which is an [AntaTest](../api/models.md#anta.models.AntaTest) subclass: -```python +````python from anta.models import AntaTest, AntaCommand from anta.decorators import skip_on_platforms @@ -36,8 +36,6 @@ class VerifyTemperature(AntaTest): ``` """ - name = "VerifyTemperature" - description = "Verifies the device temperature." categories: ClassVar[list[str]] = ["hardware"] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show system environment temperature", revision=1)] @@ -51,7 +49,7 @@ class VerifyTemperature(AntaTest): self.result.is_success() else: self.result.is_failure(f"Device temperature exceeds acceptable limits. Current system status: '{temperature_status}'") -``` +```` [AntaTest](../api/models.md#anta.models.AntaTest) also provide more advanced capabilities like [AntaCommand](../api/models.md#anta.models.AntaCommand) templating using the [AntaTemplate](../api/models.md#anta.models.AntaTemplate) class or test inputs definition and validation using [AntaTest.Input](../api/models.md#anta.models.AntaTest.Input) [pydantic](https://docs.pydantic.dev/latest/) model. This will be discussed in the sections below. @@ -61,13 +59,13 @@ Full AntaTest API documentation is available in the [API documentation section]( ### Class Attributes -- `name` (`str`): Name of the test. Used during reporting. -- `description` (`str`): A human readable description of your test. +- `name` (`str`, `optional`): Name of the test. Used during reporting. By default set to the Class name. +- `description` (`str`, `optional`): A human readable description of your test. By default set to the first line of the docstring. - `categories` (`list[str]`): A list of categories in which the test belongs. - `commands` (`[list[AntaCommand | AntaTemplate]]`): A list of command to collect from devices. This list **must** be a list of [AntaCommand](../api/models.md#anta.models.AntaCommand) or [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instances. Rendering [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instances will be discussed later. -!!! info - All these class attributes are mandatory. If any attribute is missing, a `NotImplementedError` exception will be raised during class instantiation. +> [!INFO] +> All these class attributes are mandatory. If any attribute is missing, a `NotImplementedError` exception will be raised during class instantiation. ### Instance Attributes @@ -84,11 +82,15 @@ Full AntaTest API documentation is available in the [API documentation section]( show_root_toc_entry: false heading_level: 10 -!!! note "Logger object" - ANTA already provides comprehensive logging at every steps of a test execution. The [AntaTest](../api/models.md#anta.models.AntaTest) class also provides a `logger` attribute that is a Python logger specific to the test instance. See [Python documentation](https://docs.python.org/3/library/logging.html) for more information. - -!!! 
note "AntaDevice object" - Even if `device` is not a private attribute, you should not need to access this object in your code. +> [!NOTE] +> +> - **Logger object** +> +> ANTA already provides comprehensive logging at every steps of a test execution. The [AntaTest](../api/models.md#anta.models.AntaTest) class also provides a `logger` attribute that is a Python logger specific to the test instance. See [Python documentation](https://docs.python.org/3/library/logging.html) for more information. +> +> - **AntaDevice object** +> +> Even if `device` is not a private attribute, you should not need to access this object in your code. ### Test Inputs @@ -131,8 +133,8 @@ Full `ResultOverwrite` model documentation is available in [API documentation se show_root_toc_entry: false heading_level: 10 -!!! note - The pydantic model is configured using the [`extra=forbid`](https://docs.pydantic.dev/latest/usage/model_config/#extra-attributes) that will fail input validation if extra fields are provided. +> [!NOTE] +> The pydantic model is configured using the [`extra=forbid`](https://docs.pydantic.dev/latest/usage/model_config/#extra-attributes) that will fail input validation if extra fields are provided. ### Methods @@ -162,8 +164,8 @@ In this section, we will go into all the details of writing an [AntaTest](../api Import [anta.models.AntaTest](../api/models.md#anta.models.AntaTest) and define your own class. Define the mandatory class attributes using [anta.models.AntaCommand](../api/models.md#anta.models.AntaCommand), [anta.models.AntaTemplate](../api/models.md#anta.models.AntaTemplate) or both. -!!! info - Caching can be disabled per `AntaCommand` or `AntaTemplate` by setting the `use_cache` argument to `False`. For more details about how caching is implemented in ANTA, please refer to [Caching in ANTA](../advanced_usages/caching.md). +> [!NOTE] +> Caching can be disabled per `AntaCommand` or `AntaTemplate` by setting the `use_cache` argument to `False`. For more details about how caching is implemented in ANTA, please refer to [Caching in ANTA](../advanced_usages/caching.md). ```python from anta.models import AntaTest, AntaCommand, AntaTemplate @@ -171,11 +173,11 @@ from anta.models import AntaTest, AntaCommand, AntaTemplate class <YourTestName>(AntaTest): """ - <a docstring description of your test> + <a docstring description of your test, the first line is used as description of the test by default> """ - name = "YourTestName" # should be your class name - description = "<test description in human reading format>" + # name = <override> # uncomment to override default behavior of name=Class Name + # description = <override> # uncomment to override default behavior of description=first line of docstring categories = ["<arbitrary category>", "<another arbitrary category>"] commands = [ AntaCommand( @@ -195,21 +197,23 @@ class <YourTestName>(AntaTest): ] ``` -!!! tip "Command revision and version" - * Most of EOS commands return a JSON structure according to a model (some commands may not be modeled hence the necessity to use `text` outformat sometimes. - * The model can change across time (adding feature, ... ) and when the model is changed in a non backward-compatible way, the **revision** number is bumped. The initial model starts with **revision** 1. - * A **revision** applies to a particular CLI command whereas a **version** is global to an eAPI call. The **version** is internally translated to a specific **revision** for each CLI command in the RPC call. 
The currently supported **version** values are `1` and `latest`. - * A **revision takes precedence over a version** (e.g. if a command is run with version="latest" and revision=1, the first revision of the model is returned) - * By default, eAPI returns the first revision of each model to ensure that when upgrading, integrations with existing tools are not broken. This is done by using by default `version=1` in eAPI calls. - - By default, ANTA uses `version="latest"` in AntaCommand, but when developing tests, the revision MUST be provided when the outformat of the command is `json`. As explained earlier, this is to ensure that the eAPI always returns the same output model and that the test remains always valid from the day it was created. For some commands, you may also want to run them with a different revision or version. - - For instance, the `VerifyBFDPeersHealth` test leverages the first revision of `show bfd peers`: - - ``` - # revision 1 as later revision introduce additional nesting for type - commands = [AntaCommand(command="show bfd peers", revision=1)] - ``` +> [!TIP] +> **Command revision and version** +> +> - Most of EOS commands return a JSON structure according to a model (some commands may not be modeled hence the necessity to use `text` outformat sometimes. +> - The model can change across time (adding feature, ... ) and when the model is changed in a non backward-compatible way, the **revision** number is bumped. The initial model starts with **revision** 1. +> - A **revision** applies to a particular CLI command whereas a **version** is global to an eAPI call. The **version** is internally translated to a specific **revision** for each CLI command in the RPC call. The currently supported **version** values are `1` and `latest`. +> - A **revision takes precedence over a version** (e.g. if a command is run with version="latest" and revision=1, the first revision of the model is returned) +> - By default, eAPI returns the first revision of each model to ensure that when upgrading, integrations with existing tools are not broken. This is done by using by default `version=1` in eAPI calls. +> +> By default, ANTA uses `version="latest"` in AntaCommand, but when developing tests, the revision MUST be provided when the outformat of the command is `json`. As explained earlier, this is to ensure that the eAPI always returns the same output model and that the test remains always valid from the day it was created. For some commands, you may also want to run them with a different revision or version. +> +> For instance, the `VerifyBFDPeersHealth` test leverages the first revision of `show bfd peers`: +> +> ```python +> # revision 1 as later revision introduce additional nesting for type +> commands = [AntaCommand(command="show bfd peers", revision=1)] +> ``` ### Inputs definition @@ -244,8 +248,8 @@ You can also leverage [anta.custom_types](../api/types.md) that provides reusabl Regarding required, optional and nullable fields, refer to this [documentation](https://docs.pydantic.dev/latest/migration/#required-optional-and-nullable-fields) on how to define them. -!!! note - All the `pydantic` features are supported. For instance you can define [validators](https://docs.pydantic.dev/latest/usage/validators/) for complex input validation. +> [!NOTE] +> All the `pydantic` features are supported. For instance you can define [validators](https://docs.pydantic.dev/latest/usage/validators/) for complex input validation. 
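The note above says any pydantic feature, including validators, can be used for input validation. The following sketch (not an ANTA built-in) illustrates that with a hypothetical `max_ambient_temperature` input guarded by a `field_validator`; the `ambientTemperature` key read in `test()` is likewise an assumption about the command output.

```python
# Illustrative only: VerifyTemperatureBelow, max_ambient_temperature and the
# ambientTemperature output key are assumptions, not part of anta.tests.
from __future__ import annotations

from typing import ClassVar

from pydantic import field_validator

from anta.models import AntaCommand, AntaTemplate, AntaTest


class VerifyTemperatureBelow(AntaTest):
    """Verifies the ambient temperature is below a user-provided threshold."""

    categories: ClassVar[list[str]] = ["hardware"]
    commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show system environment temperature", revision=1)]

    class Input(AntaTest.Input):
        """Input model for the VerifyTemperatureBelow test."""

        max_ambient_temperature: int
        """Maximum acceptable ambient temperature, in degrees Celsius."""

        @field_validator("max_ambient_temperature")
        @classmethod
        def validate_threshold(cls, value: int) -> int:
            """Reject thresholds that cannot be a realistic ambient temperature."""
            if not 0 < value < 100:
                msg = "max_ambient_temperature must be between 1 and 99 degrees Celsius"
                raise ValueError(msg)
            return value

    @AntaTest.anta_test
    def test(self) -> None:
        """Compare the reported ambient temperature against the input threshold."""
        command_output = self.instance_commands[0].json_output
        ambient = command_output.get("ambientTemperature", 0)
        if ambient < self.inputs.max_ambient_temperature:
            self.result.is_success()
        else:
            self.result.is_failure(f"Ambient temperature {ambient}C is above the allowed {self.inputs.max_ambient_temperature}C")
```

Because the validator runs at input validation time, a catalog entry with an out-of-range value is reported as an invalid test input before any command is sent to the device.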
### Template rendering @@ -340,10 +344,10 @@ class VerifyTemperature(AntaTest): ## Access your custom tests in the test catalog -!!! warning "" - This section is required only if you are not merging your development into ANTA. Otherwise, just follow [contribution guide](../contribution.md). +> [!WARNING] +> This section is required only if you are not merging your development into ANTA. Otherwise, just follow [contribution guide](../contribution.md). -For that, you need to create your own Python package as described in this [hitchhiker's guide](https://the-hitchhikers-guide-to-packaging.readthedocs.io/en/latest/) to package Python code. We assume it is well known and we won't focus on this aspect. Thus, your package must be impartable by ANTA hence available in the module search path `sys.path` (you can use `PYTHONPATH` for example). +For that, you need to create your own Python package as described in this [hitchhiker's guide](https://the-hitchhikers-guide-to-packaging.readthedocs.io/en/latest/) to package Python code. We assume it is well known and we won't focus on this aspect. Thus, your package must be importable by ANTA hence available in the module search path `sys.path` (you can use `PYTHONPATH` for example). It is very similar to what is documented in [catalog section](../usage-inventory-catalog.md) but you have to use your own package name.2 diff --git a/docs/api/tests.avt.md b/docs/api/tests.avt.md index f9e1acf..a55fcce 100644 --- a/docs/api/tests.avt.md +++ b/docs/api/tests.avt.md @@ -7,7 +7,10 @@ anta_title: ANTA catalog for Adaptive Virtual Topology (AVT) tests ~ that can be found in the LICENSE file. --> +# Tests + ::: anta.tests.avt + options: show_root_heading: false show_root_toc_entry: false @@ -18,3 +21,18 @@ anta_title: ANTA catalog for Adaptive Virtual Topology (AVT) tests filters: - "!test" - "!render" + +# Input models + +::: anta.input_models.avt + + options: + show_root_heading: false + show_root_toc_entry: false + show_bases: false + anta_hide_test_module_description: true + merge_init_into_class: false + show_labels: true + filters: + - "!^__init__" + - "!^__str__" diff --git a/docs/api/tests.bfd.md b/docs/api/tests.bfd.md index 719466e..ee95087 100644 --- a/docs/api/tests.bfd.md +++ b/docs/api/tests.bfd.md @@ -7,7 +7,10 @@ anta_title: ANTA catalog for BFD tests ~ that can be found in the LICENSE file. --> +# Tests + ::: anta.tests.bfd + options: show_root_heading: false show_root_toc_entry: false @@ -18,3 +21,16 @@ anta_title: ANTA catalog for BFD tests filters: - "!test" - "!render" + +# Input models + +::: anta.input_models.bfd + + options: + show_root_heading: false + show_root_toc_entry: false + show_bases: false + merge_init_into_class: false + anta_hide_test_module_description: true + show_labels: true + filters: ["!^__str__"] diff --git a/docs/api/tests.connectivity.md b/docs/api/tests.connectivity.md index 0dd5d44..439cec8 100644 --- a/docs/api/tests.connectivity.md +++ b/docs/api/tests.connectivity.md @@ -7,7 +7,10 @@ anta_title: ANTA catalog for connectivity tests ~ that can be found in the LICENSE file. 
--> +# Tests + ::: anta.tests.connectivity + options: show_root_heading: false show_root_toc_entry: false @@ -18,3 +21,16 @@ anta_title: ANTA catalog for connectivity tests filters: - "!test" - "!render" + +# Input models + +::: anta.input_models.connectivity + + options: + show_root_heading: false + show_root_toc_entry: false + show_bases: false + merge_init_into_class: false + anta_hide_test_module_description: true + show_labels: true + filters: ["!^__str__"] diff --git a/docs/api/tests.cvx.md b/docs/api/tests.cvx.md new file mode 100644 index 0000000..c9ff53d --- /dev/null +++ b/docs/api/tests.cvx.md @@ -0,0 +1,20 @@ +--- +anta_title: ANTA catalog for CVX tests +--- +<!-- + ~ Copyright (c) 2023-2024 Arista Networks, Inc. + ~ Use of this source code is governed by the Apache License 2.0 + ~ that can be found in the LICENSE file. + --> + +::: anta.tests.cvx + options: + show_root_heading: false + show_root_toc_entry: false + show_bases: false + merge_init_into_class: false + anta_hide_test_module_description: true + show_labels: true + filters: + - "!test" + - "!render" diff --git a/docs/api/tests.interfaces.md b/docs/api/tests.interfaces.md index 95630f5..3d863ee 100644 --- a/docs/api/tests.interfaces.md +++ b/docs/api/tests.interfaces.md @@ -7,7 +7,10 @@ anta_title: ANTA catalog for interfaces tests ~ that can be found in the LICENSE file. --> +# Tests + ::: anta.tests.interfaces + options: show_root_heading: false show_root_toc_entry: false @@ -18,3 +21,16 @@ anta_title: ANTA catalog for interfaces tests filters: - "!test" - "!render" + +# Input models + +::: anta.input_models.interfaces + + options: + show_root_heading: false + show_root_toc_entry: false + show_bases: false + merge_init_into_class: false + anta_hide_test_module_description: true + show_labels: true + filters: ["!^__str__"] diff --git a/docs/api/tests.md b/docs/api/tests.md index 7dd74c1..a36c9eb 100644 --- a/docs/api/tests.md +++ b/docs/api/tests.md @@ -18,6 +18,7 @@ Here are the tests that we currently provide: - [BFD](tests.bfd.md) - [Configuration](tests.configuration.md) - [Connectivity](tests.connectivity.md) +- [CVX](tests.cvx.md) - [Field Notices](tests.field_notices.md) - [Flow Tracking](tests.flow_tracking.md) - [GreenT](tests.greent.md) @@ -44,6 +45,10 @@ Here are the tests that we currently provide: - [VLAN](tests.vlan.md) - [VXLAN](tests.vxlan.md) +!!! tip + + You can use `anta get tests` from the CLI to list all the tests available with an example. Refer to [documentation](../cli/get-tests.md) for more options. + ## Using the Tests All these tests can be imported in a [catalog](../usage-inventory-catalog.md) to be used by [the ANTA CLI](../cli/nrfu.md) or in your [own framework](../advanced_usages/as-python-lib.md). diff --git a/docs/api/tests.routing.bgp.md b/docs/api/tests.routing.bgp.md index 4537ec2..b40ff7b 100644 --- a/docs/api/tests.routing.bgp.md +++ b/docs/api/tests.routing.bgp.md @@ -7,7 +7,13 @@ anta_title: ANTA catalog for BGP tests ~ that can be found in the LICENSE file. --> +!!! info "`multi-agent` Service Routing Protocols Model Requirements" + The BGP tests in this section are only supported on switches running the `multi-agent` routing protocols model. Starting from EOS version 4.30.1F, `service routing protocols model` is set to `multi-agent` by default. These BGP commands may **not** be compatible with switches running the legacy `ribd` routing protocols model and may fail if attempted. 
+ +# Tests + ::: anta.tests.routing.bgp + options: show_root_heading: false show_root_toc_entry: false @@ -19,3 +25,21 @@ anta_title: ANTA catalog for BGP tests - "!test" - "!render" - "!^_[^_]" + +# Input models + +::: anta.input_models.routing.bgp + + options: + show_root_heading: false + show_root_toc_entry: false + show_bases: false + anta_hide_test_module_description: true + merge_init_into_class: false + show_labels: true + filters: + - "!^__init__" + - "!^__str__" + - "!AFI_SAFI_EOS_KEY" + - "!eos_key" + - "!BgpAfi" diff --git a/docs/api/tests.routing.generic.md b/docs/api/tests.routing.generic.md index 1c4c39d..bbc8904 100644 --- a/docs/api/tests.routing.generic.md +++ b/docs/api/tests.routing.generic.md @@ -7,7 +7,10 @@ anta_title: ANTA catalog for generic routing tests ~ that can be found in the LICENSE file. --> +# Tests + ::: anta.tests.routing.generic + options: show_root_heading: false show_root_toc_entry: false @@ -18,3 +21,16 @@ anta_title: ANTA catalog for generic routing tests filters: - "!test" - "!render" + +# Input models + +::: anta.input_models.routing.generic + + options: + show_root_heading: false + show_root_toc_entry: false + show_bases: false + merge_init_into_class: false + anta_hide_test_module_description: true + show_labels: true + filters: ["!^__str__"] diff --git a/docs/api/tests.routing.isis.md b/docs/api/tests.routing.isis.md index bf50c72..16ca7ff 100644 --- a/docs/api/tests.routing.isis.md +++ b/docs/api/tests.routing.isis.md @@ -8,6 +8,7 @@ anta_title: ANTA catalog for IS-IS tests --> ::: anta.tests.routing.isis + options: show_root_heading: false show_root_toc_entry: false diff --git a/docs/api/tests.routing.ospf.md b/docs/api/tests.routing.ospf.md index 2fd0cd4..12bb3ec 100644 --- a/docs/api/tests.routing.ospf.md +++ b/docs/api/tests.routing.ospf.md @@ -8,6 +8,7 @@ anta_title: ANTA catalog for OSPF tests --> ::: anta.tests.routing.ospf + options: show_root_heading: false show_root_toc_entry: false diff --git a/docs/api/tests.security.md b/docs/api/tests.security.md index fe008ba..5997832 100644 --- a/docs/api/tests.security.md +++ b/docs/api/tests.security.md @@ -7,7 +7,10 @@ anta_title: ANTA catalog for security tests ~ that can be found in the LICENSE file. --> +# Tests + ::: anta.tests.security + options: show_root_heading: false show_root_toc_entry: false @@ -18,3 +21,18 @@ anta_title: ANTA catalog for security tests filters: - "!test" - "!render" + +# Input models + +::: anta.input_models.security + + options: + show_root_heading: false + show_root_toc_entry: false + show_bases: false + merge_init_into_class: false + anta_hide_test_module_description: true + show_labels: true + filters: + - "!^__init__" + - "!^__str__" diff --git a/docs/api/tests.services.md b/docs/api/tests.services.md index 63d9234..cd37148 100644 --- a/docs/api/tests.services.md +++ b/docs/api/tests.services.md @@ -7,7 +7,10 @@ anta_title: ANTA catalog for services tests ~ that can be found in the LICENSE file. 
--> +# Tests + ::: anta.tests.services + options: show_root_heading: false show_root_toc_entry: false @@ -18,3 +21,16 @@ anta_title: ANTA catalog for services tests filters: - "!test" - "!render" + +# Input models + +::: anta.input_models.services + + options: + show_root_heading: false + show_root_toc_entry: false + show_bases: false + merge_init_into_class: false + anta_hide_test_module_description: true + show_labels: true + filters: ["!^__str__"] diff --git a/docs/api/tests.stun.md b/docs/api/tests.stun.md index b4274e9..6a73b88 100644 --- a/docs/api/tests.stun.md +++ b/docs/api/tests.stun.md @@ -7,6 +7,8 @@ anta_title: ANTA catalog for STUN tests ~ that can be found in the LICENSE file. --> +# Tests + ::: anta.tests.stun options: show_root_heading: false @@ -18,3 +20,18 @@ anta_title: ANTA catalog for STUN tests filters: - "!test" - "!render" + +# Input models + +::: anta.input_models.stun + + options: + show_root_heading: false + show_root_toc_entry: false + show_bases: false + merge_init_into_class: false + anta_hide_test_module_description: true + show_labels: true + filters: + - "!^__init__" + - "!^__str__" diff --git a/docs/api/tests.system.md b/docs/api/tests.system.md index 5dcfbc0..26568e2 100644 --- a/docs/api/tests.system.md +++ b/docs/api/tests.system.md @@ -7,7 +7,10 @@ anta_title: ANTA catalog for System tests ~ that can be found in the LICENSE file. --> +# Tests + ::: anta.tests.system + options: show_root_heading: false show_root_toc_entry: false @@ -18,3 +21,16 @@ anta_title: ANTA catalog for System tests filters: - "!test" - "!render" + +# Input models + +::: anta.input_models.system + + options: + show_root_heading: false + show_root_toc_entry: false + show_bases: false + merge_init_into_class: false + anta_hide_test_module_description: true + show_labels: true + filters: ["!^__str__"] diff --git a/docs/cli/debug.md b/docs/cli/debug.md index 4c864db..45ad791 100644 --- a/docs/cli/debug.md +++ b/docs/cli/debug.md @@ -61,6 +61,7 @@ Options: --help Show this message and exit. ``` +> [!TIP] > `username`, `password`, `enable-password`, `enable`, `timeout` and `insecure` values are the same for all devices ### Example @@ -162,8 +163,8 @@ Run templated command 'show vlan {vlan_id}' with {'vlan_id': '10'} on DC1-LEAF1A ### Example of multiple arguments -!!! warning - If multiple arguments of the same key are provided, only the last argument value will be kept in the template parameters. +> [!WARNING] +> If multiple arguments of the same key are provided, only the last argument value will be kept in the template parameters. ```bash anta -log DEBUG debug run-template --template "ping {dst} source {src}" dst "8.8.8.8" src Loopback0 --device DC1-SPINE1 diff --git a/docs/cli/exec.md b/docs/cli/exec.md index 2eb12ee..a7a0fe3 100644 --- a/docs/cli/exec.md +++ b/docs/cli/exec.md @@ -64,6 +64,7 @@ Options: --help Show this message and exit. ``` +> [!TIP] > `username`, `password`, `enable-password`, `enable`, `timeout` and `insecure` values are the same for all devices ### Example @@ -235,12 +236,14 @@ Options: tag1,tag2,tag3. [env var: ANTA_TAGS] -o, --output PATH Path for test catalog [default: ./tech-support] --latest INTEGER Number of scheduled show-tech to retrieve - --configure Ensure devices have 'aaa authorization exec default - local' configured (required for SCP on EOS). THIS - WILL CHANGE THE CONFIGURATION OF YOUR NETWORK. + --configure [DEPRECATED] Ensure devices have 'aaa authorization + exec default local' configured (required for SCP on + EOS). 
THIS WILL CHANGE THE CONFIGURATION OF YOUR + NETWORK. --help Show this message and exit. ``` +> [!TIP] > `username`, `password`, `enable-password`, `enable`, `timeout` and `insecure` values are the same for all devices When executed, this command fetches tech-support files and downloads them locally into a device-specific subfolder within the designated folder. You can specify the output folder with the `--output` option. @@ -248,13 +251,18 @@ When executed, this command fetches tech-support files and downloads them locall ANTA uses SCP to download files from devices and will not trust unknown SSH hosts by default. Add the SSH public keys of your devices to your `known_hosts` file or use the `anta --insecure` option to ignore SSH host keys validation. The configuration `aaa authorization exec default` must be present on devices to be able to use SCP. -ANTA can automatically configure `aaa authorization exec default local` using the `anta exec collect-tech-support --configure` option. + +> [!CAUTION] +> **Deprecation** +> +> ANTA can automatically configure `aaa authorization exec default local` using the `anta exec collect-tech-support --configure` option but this option is deprecated and will be removed in ANTA 2.0.0. + If you require specific AAA configuration for `aaa authorization exec default`, like `aaa authorization exec default none` or `aaa authorization exec default group tacacs+`, you will need to configure it manually. The `--latest` option allows retrieval of a specific number of the most recent tech-support files. -!!! warning - By default **all** the tech-support files present on the devices are retrieved. +> [!WARNING] +> By default **all** the tech-support files present on the devices are retrieved. ### Example diff --git a/docs/cli/get-inventory-information.md b/docs/cli/get-inventory-information.md index ab1bebc..d45cb6a 100644 --- a/docs/cli/get-inventory-information.md +++ b/docs/cli/get-inventory-information.md @@ -52,8 +52,8 @@ Options: --help Show this message and exit. ``` -!!! tip - By default, `anta get inventory` only provides information that doesn't rely on a device connection. If you are interested in obtaining connection-dependent details, like the hardware model, use the `--connected` option. +> [!TIP] +> By default, `anta get inventory` only provides information that doesn't rely on a device connection. If you are interested in obtaining connection-dependent details, like the hardware model, use the `--connected` option. ### Example diff --git a/docs/cli/get-tests.md b/docs/cli/get-tests.md new file mode 100644 index 0000000..3c2b369 --- /dev/null +++ b/docs/cli/get-tests.md @@ -0,0 +1,120 @@ +--- +anta_title: Retrieving Tests information +--- +<!-- + ~ Copyright (c) 2023-2024 Arista Networks, Inc. + ~ Use of this source code is governed by the Apache License 2.0 + ~ that can be found in the LICENSE file. + --> + +`anta get tests` commands help you discover available tests in ANTA. + +### Command overview + +```bash +Usage: anta get tests [OPTIONS] + + Show all builtin ANTA tests with an example output retrieved from each test + documentation. + +Options: + --module TEXT Filter tests by module name. [default: anta.tests] + --test TEXT Filter by specific test name. If module is specified, + searches only within that module. + --short Display test names without their inputs. + --count Print only the number of tests found. + --help Show this message and exit. +``` + +> [!TIP] +> By default, `anta get tests` will retrieve all tests available in ANTA. 
+ +### Examples + +#### Default usage + +``` yaml title="anta get tests" +anta.tests.aaa: + - VerifyAcctConsoleMethods: + # Verifies the AAA accounting console method lists for different accounting types (system, exec, commands, dot1x). + methods: + - local + - none + - logging + types: + - system + - exec + - commands + - dot1x + - VerifyAcctDefaultMethods: + # Verifies the AAA accounting default method lists for different accounting types (system, exec, commands, dot1x). + methods: + - local + - none + - logging + types: + - system + - exec + - commands + - dot1x +[...] +``` + +#### Module usage + +To retrieve all the tests from `anta.tests.stun`. + +``` yaml title="anta get tests --module anta.tests.stun" +anta.tests.stun: + - VerifyStunClient: + # Verifies STUN client settings, including local IP/port and optionally public IP/port. + stun_clients: + - source_address: 172.18.3.2 + public_address: 172.18.3.21 + source_port: 4500 + public_port: 6006 + - source_address: 100.64.3.2 + public_address: 100.64.3.21 + source_port: 4500 + public_port: 6006 + - VerifyStunServer: + # Verifies the STUN server status is enabled and running. +``` + +#### Test usage + +``` yaml title="anta get tests --test VerifyTacacsSourceIntf" +anta.tests.aaa: + - VerifyTacacsSourceIntf: + # Verifies TACACS source-interface for a specified VRF. + intf: Management0 + vrf: MGMT +``` + +> [!TIP] +> You can filter tests by providing a prefix - ANTA will return all tests that start with your specified string. + +```yaml title="anta get tests --test VerifyTacacs" +anta.tests.aaa: + - VerifyTacacsServerGroups: + # Verifies if the provided TACACS server group(s) are configured. + groups: + - TACACS-GROUP1 + - TACACS-GROUP2 + - VerifyTacacsServers: + # Verifies TACACS servers are configured for a specified VRF. + servers: + - 10.10.10.21 + - 10.10.10.22 + vrf: MGMT + - VerifyTacacsSourceIntf: + # Verifies TACACS source-interface for a specified VRF. + intf: Management0 + vrf: MGMT +``` + +#### Count the tests + +```bash title="anta get tests --count" +There are 155 tests available in `anta.tests`. +``` diff --git a/docs/cli/inv-from-ansible.md b/docs/cli/inv-from-ansible.md index 6bbaca9..c891693 100644 --- a/docs/cli/inv-from-ansible.md +++ b/docs/cli/inv-from-ansible.md @@ -31,26 +31,13 @@ Options: --help Show this message and exit. ``` -!!! warning - - `anta get from-ansible` does not support inline vaulted variables, comment them out to generate your inventory. - If the vaulted variable is necessary to build the inventory (e.g. `ansible_host`), it needs to be unvaulted for `from-ansible` command to work." - -The output is an inventory where the name of the container is added as a tag for each host: - -```yaml -anta_inventory: - hosts: - - host: 10.73.252.41 - name: srv-pod01 - - host: 10.73.252.42 - name: srv-pod02 - - host: 10.73.252.43 - name: srv-pod03 -``` - -!!! warning - The current implementation only considers devices directly attached to a specific Ansible group and does not support inheritance when using the `--ansible-group` option. +> [!WARNING] +> +> - `anta get from-ansible` does not support inline vaulted variables, comment them out to generate your inventory. +> +> - If the vaulted variable is necessary to build the inventory (e.g. `ansible_host`), it needs to be unvaulted for `from-ansible` command to work." +> +> - The current implementation only considers devices directly attached to a specific Ansible group and does not support inheritance when using the `--ansible-group` option. 
By default, if user does not provide `--output` file, anta will save output to configured anta inventory (`anta --inventory`). If the output file has content, anta will ask user to overwrite when running in interactive console. This mechanism can be controlled by triggers in case of CI usage: `--overwrite` to force anta to overwrite file. If not set, anta will exit @@ -60,7 +47,7 @@ By default, if user does not provide `--output` file, anta will save output to c ```yaml --- -tooling: +all: children: endpoints: hosts: @@ -80,3 +67,16 @@ tooling: ansible_host: 10.73.252.43 type: endpoint ``` + +The output is an inventory where the name of the container is added as a tag for each host: + +```yaml +anta_inventory: + hosts: + - host: 10.73.252.41 + name: srv-pod01 + - host: 10.73.252.42 + name: srv-pod02 + - host: 10.73.252.43 + name: srv-pod03 +``` diff --git a/docs/cli/inv-from-cvp.md b/docs/cli/inv-from-cvp.md index 9717870..e08ffd6 100644 --- a/docs/cli/inv-from-cvp.md +++ b/docs/cli/inv-from-cvp.md @@ -52,8 +52,8 @@ anta_inventory: - pod2 ``` -!!! warning - The current implementation only considers devices directly attached to a specific container when using the `--cvp-container` option. +> [!WARNING] +> The current implementation only considers devices directly attached to a specific container when using the `--cvp-container` option. ## Creating an inventory from multiple containers diff --git a/docs/cli/nrfu.md b/docs/cli/nrfu.md index 0f2b425..667eb5f 100644 --- a/docs/cli/nrfu.md +++ b/docs/cli/nrfu.md @@ -26,8 +26,8 @@ ANTA provides a set of commands for performing NRFU tests on devices. These comm All commands under the `anta nrfu` namespace require a catalog yaml file specified with the `--catalog` option and a device inventory file specified with the `--inventory` option. -!!! info - Issuing the command `anta nrfu` will run `anta nrfu table` without any option. +> [!TIP] +> Issuing the command `anta nrfu` will run `anta nrfu table` without any option. ### Tag management diff --git a/docs/cli/overview.md b/docs/cli/overview.md index f1247b7..be6b1f4 100644 --- a/docs/cli/overview.md +++ b/docs/cli/overview.md @@ -45,9 +45,10 @@ Then, run the CLI without options: anta nrfu ``` -!!! note - All environment variables may not be needed for every commands. - Refer to `<command> --help` for the comprehensive environment variables names. +> [!NOTE] +> All environment variables may not be needed for every commands. +> +> Refer to `<command> --help` for the comprehensive environment variables names. Below are the environment variables usable with the `anta nrfu` command: @@ -63,8 +64,8 @@ Below are the environment variables usable with the `anta nrfu` command: | ANTA_ENABLE | Whether it is necessary to go to enable mode on devices. | No | | ANTA_ENABLE_PASSWORD | The optional enable password, when this variable is set, ANTA_ENABLE or `--enable` is required. | No | -!!! info - Caching can be disabled with the global parameter `--disable-cache`. For more details about how caching is implemented in ANTA, please refer to [Caching in ANTA](../advanced_usages/caching.md). +> [!NOTE] +> Caching can be disabled with the global parameter `--disable-cache`. For more details about how caching is implemented in ANTA, please refer to [Caching in ANTA](../advanced_usages/caching.md). 
## ANTA Exit Codes diff --git a/docs/cli/tag-management.md b/docs/cli/tag-management.md index ad5ccf3..b07e0c9 100644 --- a/docs/cli/tag-management.md +++ b/docs/cli/tag-management.md @@ -4,9 +4,7 @@ ~ that can be found in the LICENSE file. --> -ANTA commands can be used with a `--tags` option. This option **filters the inventory** with the specified tag(s) when running the command. - -Tags can also be used to **restrict a specific test** to a set of devices when using `anta nrfu`. +ANTA uses tags to define test-to-device mappings (tests run on devices with matching tags) and the `--tags` CLI option acts as a filter to execute specific test/device combinations. ## Defining tags @@ -94,10 +92,11 @@ anta.tests.interfaces: tags: ['spine'] ``` -> A tag used to filter a test can also be a device name - -!!! tip "Use different input values for a specific test" - Leverage tags to define different input values for a specific test. See the `VerifyUptime` example above. +> [!TIP] +> +> - A tag used to filter a test can also be a device name +> +> - **Use different input values for a specific test**: Leverage tags to define different input values for a specific test. See the `VerifyUptime` example above. ## Using tags diff --git a/docs/contribution.md b/docs/contribution.md index 88f09c1..50aed44 100644 --- a/docs/contribution.md +++ b/docs/contribution.md @@ -29,7 +29,7 @@ $ pip install -e .[dev,cli] $ pip list -e Package Version Editable project location ------- ------- ------------------------- -anta 1.1.0 /mnt/lab/projects/anta +anta 1.2.0 /mnt/lab/projects/anta ``` Then, [`tox`](https://tox.wiki/) is configured with few environments to run CI locally: @@ -86,9 +86,9 @@ Success: no issues found in 82 source files > NOTE: Typing is configured quite strictly, do not hesitate to reach out if you have any questions, struggles, nightmares. -## Unit tests +## Unit tests with Pytest -To keep high quality code, we require to provide a Pytest for every tests implemented in ANTA. +To keep high quality code, we require to provide a **Pytest** for every tests implemented in ANTA. All submodule should have its own pytest section under `tests/units/anta_tests/<submodule-name>.py`. diff --git a/docs/faq.md b/docs/faq.md index 7a58663..ee823b4 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -110,6 +110,17 @@ anta_title: Frequently Asked Questions (FAQ) pip install -U pyopenssl>22.0 ``` +## Caveat running on non-POSIX platforms (e.g. Windows) + +???+ faq "Caveat running on non-POSIX platforms (e.g. Windows)" + + While ANTA should in general work on non-POSIX platforms (e.g. Windows), + there are some known limitations: + + - On non-Posix platforms, ANTA is not able to check and/or adjust the system limit of file descriptors. + + ANTA test suite is being run in the CI on a Windows runner. + ## `__NSCFConstantString initialize` error on OSX ???+ faq "`__NSCFConstantString initialize` error on OSX" @@ -124,6 +135,40 @@ anta_title: Frequently Asked Questions (FAQ) export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES ``` +## EOS AAA configuration for an ANTA-only user + +???+ faq "EOS AAA configuration for an ANTA-only user" + + Here is a starting guide to configure an ANTA-only user to run ANTA tests on a device. + + !!! warning + + This example is not using TACACS / RADIUS but only local AAA + + 1. Configure the following role. 
+ + ```bash + role anta-users + 10 permit command show + 20 deny command .* + ``` + + You can then add other commands if they are required for your test catalog (`ping` for example) and then tighten down the show commands to only those required for your tests. + + 2. Configure the following authorization (You may need to adapt depending on your AAA setup). + + ```bash + aaa authorization commands all default local + ``` + + 3. Configure a user for the role. + + ```bash + user anta role anta-users secret <secret> + ``` + + 4. You can then use the credentials `anta` / `<secret>` to run ANTA against the device and adjust the role as required. + # Still facing issues? If you've tried the above solutions and continue to experience problems, please follow the [troubleshooting](troubleshooting.md) instructions and report the issue in our [GitHub repository](https://github.com/aristanetworks/anta). diff --git a/docs/getting-started.md b/docs/getting-started.md index aac88c6..bcd5a2c 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -48,26 +48,7 @@ management api http-commands ANTA uses an inventory to list the target devices for the tests. You can create a file manually with this format: ```yaml -anta_inventory: - hosts: - - host: 192.168.0.10 - name: spine01 - tags: ['fabric', 'spine'] - - host: 192.168.0.11 - name: spine02 - tags: ['fabric', 'spine'] - - host: 192.168.0.12 - name: leaf01 - tags: ['fabric', 'leaf'] - - host: 192.168.0.13 - name: leaf02 - tags: ['fabric', 'leaf'] - - host: 192.168.0.14 - name: leaf03 - tags: ['fabric', 'leaf'] - - host: 192.168.0.15 - name: leaf04 - tags: ['fabric', 'leaf'] +--8<-- "getting-started/inventory.yml" ``` > You can read more details about how to build your inventory [here](usage-inventory-catalog.md#device-inventory) @@ -90,31 +71,7 @@ The structure to follow is like: Here is an example for basic tests: ```yaml -# Load anta.tests.software -anta.tests.software: - - VerifyEOSVersion: # Verifies the device is running one of the allowed EOS version. - versions: # List of allowed EOS versions. - - 4.25.4M - - 4.26.1F - - '4.28.3M-28837868.4283M (engineering build)' - - VerifyTerminAttrVersion: - versions: - - v1.22.1 - -anta.tests.system: - - VerifyUptime: # Verifies the device uptime is higher than a value. - minimum: 1 - - VerifyNTP: - - VerifySyslog: - -anta.tests.mlag: - - VerifyMlagStatus: - - VerifyMlagInterfaces: - - VerifyMlagConfigSanity: - -anta.tests.configuration: - - VerifyZeroTouch: # Verifies ZeroTouch is disabled. - - VerifyRunningConfigDiffs: +--8<-- "getting-started/catalog.yml" ``` ## Test your network @@ -135,128 +92,32 @@ This entrypoint has multiple options to manage test coverage and reporting. To run the NRFU, you need to select an output format amongst ["json", "table", "text", "tpl-report"]. For a first usage, `table` is recommended. By default all test results for all devices are rendered but it can be changed to a report per test case or per host +!!! Note + The following examples shows how to pass all the CLI options. 
+ + See how to use environment variables instead in the [CLI overview](cli/overview.md#anta-environment-variables) + #### Default report using table ```bash -anta nrfu \ - --username tom \ - --password arista123 \ - --enable \ - --enable-password t \ - --inventory .personal/inventory_atd.yml \ - --catalog .personal/tests-bases.yml \ - table --tags leaf - - -╭────────────────────── Settings ──────────────────────╮ -│ Running ANTA tests: │ -│ - ANTA Inventory contains 6 devices (AsyncEOSDevice) │ -│ - Tests catalog contains 10 tests │ -╰──────────────────────────────────────────────────────╯ -[10:17:24] INFO Running ANTA tests... runner.py:75 - • Running NRFU Tests...100% ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 40/40 • 0:00:02 • 0:00:00 - - All tests results -┏━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ -┃ Device IP ┃ Test Name ┃ Test Status ┃ Message(s) ┃ Test description ┃ Test category ┃ -┡━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ -│ leaf01 │ VerifyEOSVersion │ success │ │ Verifies the device is running one of the allowed EOS version. │ software │ -│ leaf01 │ VerifyTerminAttrVersion │ success │ │ Verifies the device is running one of the allowed TerminAttr │ software │ -│ │ │ │ │ version. │ │ -│ leaf01 │ VerifyUptime │ success │ │ Verifies the device uptime is higher than a value. │ system │ -│ leaf01 │ VerifyNTP │ success │ │ Verifies NTP is synchronised. │ system │ -│ leaf01 │ VerifySyslog │ success │ │ Verifies the device had no syslog message with a severity of warning │ system │ -│ │ │ │ │ (or a more severe message) during the last 7 days. │ │ -│ leaf01 │ VerifyMlagStatus │ skipped │ MLAG is disabled │ This test verifies the health status of the MLAG configuration. │ mlag │ -│ leaf01 │ VerifyMlagInterfaces │ skipped │ MLAG is disabled │ This test verifies there are no inactive or active-partial MLAG │ mlag │ -[...] -│ leaf04 │ VerifyMlagConfigSanity │ skipped │ MLAG is disabled │ This test verifies there are no MLAG config-sanity inconsistencies. │ mlag │ -│ leaf04 │ VerifyZeroTouch │ success │ │ Verifies ZeroTouch is disabled. │ configuration │ -│ leaf04 │ VerifyRunningConfigDiffs │ success │ │ │ configuration │ -└───────────┴──────────────────────────┴─────────────┴──────────────────┴──────────────────────────────────────────────────────────────────────┴───────────────┘ +--8<-- "getting-started/anta_nrfu_table.sh" +--8<-- "getting-started/anta_nrfu_table.output" ``` #### Report in text mode ```bash -$ anta nrfu \ - --username tom \ - --password arista123 \ - --enable \ - --enable-password t \ - --inventory .personal/inventory_atd.yml \ - --catalog .personal/tests-bases.yml \ - text --tags leaf - -╭────────────────────── Settings ──────────────────────╮ -│ Running ANTA tests: │ -│ - ANTA Inventory contains 6 devices (AsyncEOSDevice) │ -│ - Tests catalog contains 10 tests │ -╰──────────────────────────────────────────────────────╯ -[10:20:47] INFO Running ANTA tests... 
runner.py:75 - • Running NRFU Tests...100% ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 40/40 • 0:00:01 • 0:00:00 -leaf01 :: VerifyEOSVersion :: SUCCESS -leaf01 :: VerifyTerminAttrVersion :: SUCCESS -leaf01 :: VerifyUptime :: SUCCESS -leaf01 :: VerifyNTP :: SUCCESS -leaf01 :: VerifySyslog :: SUCCESS -leaf01 :: VerifyMlagStatus :: SKIPPED (MLAG is disabled) -leaf01 :: VerifyMlagInterfaces :: SKIPPED (MLAG is disabled) -leaf01 :: VerifyMlagConfigSanity :: SKIPPED (MLAG is disabled) -[...] +--8<-- "getting-started/anta_nrfu_text.sh" +--8<-- "getting-started/anta_nrfu_text.output" ``` #### Report in JSON format ```bash -$ anta nrfu \ - --username tom \ - --password arista123 \ - --enable \ - --enable-password t \ - --inventory .personal/inventory_atd.yml \ - --catalog .personal/tests-bases.yml \ - json --tags leaf - -╭────────────────────── Settings ──────────────────────╮ -│ Running ANTA tests: │ -│ - ANTA Inventory contains 6 devices (AsyncEOSDevice) │ -│ - Tests catalog contains 10 tests │ -╰──────────────────────────────────────────────────────╯ -[10:21:51] INFO Running ANTA tests... runner.py:75 - • Running NRFU Tests...100% ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 40/40 • 0:00:02 • 0:00:00 -╭──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮ -│ JSON results of all tests │ -╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ -[ - { - "name": "leaf01", - "test": "VerifyEOSVersion", - "categories": [ - "software" - ], - "description": "Verifies the device is running one of the allowed EOS version.", - "result": "success", - "messages": [], - "custom_field": "None", - }, - { - "name": "leaf01", - "test": "VerifyTerminAttrVersion", - "categories": [ - "software" - ], - "description": "Verifies the device is running one of the allowed TerminAttr version.", - "result": "success", - "messages": [], - "custom_field": "None", - }, -[...] -] +--8<-- "getting-started/anta_nrfu_json.sh" +--8<-- "getting-started/anta_nrfu_json.output" ``` -You can find more information under the **usage** section of the website - ### Basic usage in a Python script ```python diff --git a/docs/requirements-and-installation.md b/docs/requirements-and-installation.md index 1b35758..e9fbbc7 100644 --- a/docs/requirements-and-installation.md +++ b/docs/requirements-and-installation.md @@ -25,9 +25,8 @@ The ANTA package and the cli require some packages that are not part of the Pyth pip install anta ``` -!!! Warning - - * This command alone **will not** install the ANTA CLI requirements. +> [!WARNING] +> This command alone **will not** install the ANTA CLI requirements. ### Install ANTA CLI as an application with `pipx` @@ -37,9 +36,8 @@ pip install anta pipx install anta[cli] ``` -!!! Info - - Please take the time to read through the installation instructions of `pipx` before getting started. +> [!INFO] +> Please take the time to read through the installation instructions of `pipx` before getting started. ### Install CLI from Pypi server @@ -80,13 +78,13 @@ which anta /home/tom/.pyenv/shims/anta ``` -!!! warning - Before running the `anta --version` command, please be aware that some users have reported issues related to the `urllib3` package. 
If you encounter an error at this step, please refer to our [FAQ](faq.md) page for guidance on resolving it. +> [!WARNING] +> Before running the `anta --version` command, please be aware that some users have reported issues related to the `urllib3` package. If you encounter an error at this step, please refer to our [FAQ](faq.md) page for guidance on resolving it. ```bash # Check ANTA version anta --version -anta, version v1.1.0 +anta, version v1.2.0 ``` ## EOS Requirements diff --git a/docs/scripts/generate_examples_tests.py b/docs/scripts/generate_examples_tests.py new file mode 100755 index 0000000..a88d9d6 --- /dev/null +++ b/docs/scripts/generate_examples_tests.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python +# Copyright (c) 2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Generates examples/tests.py.""" + +import os +from contextlib import redirect_stdout +from pathlib import Path +from sys import path + +# Override global path to load anta from pwd instead of any installed version. +path.insert(0, str(Path(__file__).parents[2])) + +examples_tests_path = Path(__file__).parents[2] / "examples" / "tests.yaml" + + +prev = os.environ.get("TERM", "") +os.environ["TERM"] = "dumb" +# imported after TERM is set to act upon rich console. +from anta.cli.get.commands import tests # noqa: E402 + +with examples_tests_path.open("w") as f: + f.write("---\n") + with redirect_stdout(f): + # removing the style + tests() + +os.environ["TERM"] = prev diff --git a/docs/scripts/generate_svg.py b/docs/scripts/generate_svg.py index f017b24..2eca6ac 100644 --- a/docs/scripts/generate_svg.py +++ b/docs/scripts/generate_svg.py @@ -94,7 +94,7 @@ if __name__ == "__main__": # Redirect stdout of the program towards another StringIO to capture help # that is not part or anta rich console # redirect potential progress bar output to console by patching - with patch("anta.cli.nrfu.anta_progress_bar", custom_progress_bar), suppress(SystemExit): + with patch("anta.cli.nrfu.utils.anta_progress_bar", custom_progress_bar), suppress(SystemExit): function() if "--help" in args: diff --git a/docs/snippets/getting-started/anta_nrfu_json.output b/docs/snippets/getting-started/anta_nrfu_json.output new file mode 100644 index 0000000..c6db49d --- /dev/null +++ b/docs/snippets/getting-started/anta_nrfu_json.output @@ -0,0 +1,54 @@ +╭────────────────────── Settings ──────────────────────╮ +│ - ANTA Inventory contains 5 devices (AsyncEOSDevice) │ +│ - Tests catalog contains 9 tests │ +╰──────────────────────────────────────────────────────╯ + +[10:53:11] INFO Preparing ANTA NRFU Run ... tools.py:294 + INFO Connecting to devices ... tools.py:294 + INFO Connecting to devices completed in: 0:00:00.053. tools.py:302 + INFO Preparing the tests ... tools.py:294 + INFO Preparing the tests completed in: 0:00:00.001. tools.py:302 + INFO --- ANTA NRFU Run Information --- runner.py:276 + Number of devices: 5 (5 established) + Total number of selected tests: 45 + Maximum number of open file descriptors for the current ANTA process: 16384 + --------------------------------- + INFO Preparing ANTA NRFU Run completed in: 0:00:00.065. tools.py:302 + INFO Running ANTA tests ... tools.py:294 +[10:53:12] INFO Running ANTA tests completed in: 0:00:00.857. 
tools.py:302 + INFO Cache statistics for 's1-spine1': 1 hits / 9 command(s) (11.11%) runner.py:75 + INFO Cache statistics for 's1-spine2': 1 hits / 9 command(s) (11.11%) runner.py:75 + INFO Cache statistics for 's1-leaf1': 1 hits / 9 command(s) (11.11%) runner.py:75 + INFO Cache statistics for 's1-leaf2': 1 hits / 9 command(s) (11.11%) runner.py:75 + INFO Cache statistics for 's1-leaf3': 1 hits / 9 command(s) (11.11%) runner.py:75 + • Running NRFU Tests...100% ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 45/45 • 0:00:00 • 0:00:00 + +╭──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮ +│ JSON results │ +╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ +[ + { + "name": "s1-spine1", + "test": "VerifyNTP", + "categories": [ + "system" + ], + "description": "Verifies if NTP is synchronised.", + "result": "success", + "messages": [], + "custom_field": null + }, + { + "name": "s1-spine1", + "test": "VerifyMlagConfigSanity", + "categories": [ + "mlag" + ], + "description": "Verifies there are no MLAG config-sanity inconsistencies.", + "result": "skipped", + "messages": [ + "MLAG is disabled" + ], + "custom_field": null + }, + [...] diff --git a/docs/snippets/getting-started/anta_nrfu_json.sh b/docs/snippets/getting-started/anta_nrfu_json.sh new file mode 100644 index 0000000..932aeb3 --- /dev/null +++ b/docs/snippets/getting-started/anta_nrfu_json.sh @@ -0,0 +1,9 @@ +anta nrfu \ + --username arista \ + --password arista \ + --inventory ./inventory.yml \ + `# uncomment the next two lines if you have an enable password `\ + `# --enable `\ + `# --enable-password <password> `\ + --catalog ./catalog.yml \ + json diff --git a/docs/snippets/getting-started/anta_nrfu_table.output b/docs/snippets/getting-started/anta_nrfu_table.output new file mode 100644 index 0000000..a34b5bd --- /dev/null +++ b/docs/snippets/getting-started/anta_nrfu_table.output @@ -0,0 +1,47 @@ +╭────────────────────── Settings ──────────────────────╮ +│ - ANTA Inventory contains 5 devices (AsyncEOSDevice) │ +│ - Tests catalog contains 9 tests │ +╰──────────────────────────────────────────────────────╯ + +[10:53:01] INFO Preparing ANTA NRFU Run ... tools.py:294 + INFO Connecting to devices ... tools.py:294 + INFO Connecting to devices completed in: 0:00:00.058. tools.py:302 + INFO Preparing the tests ... tools.py:294 + INFO Preparing the tests completed in: 0:00:00.001. tools.py:302 + INFO --- ANTA NRFU Run Information --- runner.py:276 + Number of devices: 5 (5 established) + Total number of selected tests: 45 + Maximum number of open file descriptors for the current ANTA process: 16384 + --------------------------------- + INFO Preparing ANTA NRFU Run completed in: 0:00:00.069. tools.py:302 + INFO Running ANTA tests ... tools.py:294 +[10:53:02] INFO Running ANTA tests completed in: 0:00:00.969. 
tools.py:302 + INFO Cache statistics for 's1-spine1': 1 hits / 9 command(s) (11.11%) runner.py:75 + INFO Cache statistics for 's1-spine2': 1 hits / 9 command(s) (11.11%) runner.py:75 + INFO Cache statistics for 's1-leaf1': 1 hits / 9 command(s) (11.11%) runner.py:75 + INFO Cache statistics for 's1-leaf2': 1 hits / 9 command(s) (11.11%) runner.py:75 + INFO Cache statistics for 's1-leaf3': 1 hits / 9 command(s) (11.11%) runner.py:75 + • Running NRFU Tests...100% ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 45/45 • 0:00:00 • 0:00:00 + + All tests results +┏━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ +┃ Device ┃ Test Name ┃ Test Status ┃ Message(s) ┃ Test description ┃ Test category ┃ +┡━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ +│ s1-spine1 │ VerifyMlagConfigSanity │ skipped │ MLAG is disabled │ Verifies there are no MLAG config-sanity │ MLAG │ +│ │ │ │ │ inconsistencies. │ │ +├───────────┼──────────────────────────┼─────────────┼────────────────────────────────────────────┼────────────────────────────────────────────┼───────────────┤ +│ s1-spine1 │ VerifyEOSVersion │ failure │ device is running version │ Verifies the EOS version of the device. │ Software │ +│ │ │ │ "4.32.2F-38195967.4322F (engineering │ │ │ +│ │ │ │ build)" not in expected versions: │ │ │ +│ │ │ │ ['4.25.4M', '4.26.1F', │ │ │ +│ │ │ │ '4.28.3M-28837868.4283M (engineering │ │ │ +│ │ │ │ build)'] │ │ │ +├───────────┼──────────────────────────┼─────────────┼────────────────────────────────────────────┼────────────────────────────────────────────┼───────────────┤ +[...] +├───────────┼──────────────────────────┼─────────────┼────────────────────────────────────────────┼────────────────────────────────────────────┼───────────────┤ +│ s1-leaf3 │ VerifyTerminAttrVersion │ failure │ device is running TerminAttr version │ Verifies the TerminAttr version of the │ Software │ +│ │ │ │ v1.34.0 and is not in the allowed list: │ device. 
│ │ +│ │ │ │ ['v1.22.1'] │ │ │ +├───────────┼──────────────────────────┼─────────────┼────────────────────────────────────────────┼────────────────────────────────────────────┼───────────────┤ +│ s1-leaf3 │ VerifyZeroTouch │ success │ │ Verifies ZeroTouch is disabled │ Configuration │ +└───────────┴──────────────────────────┴─────────────┴────────────────────────────────────────────┴────────────────────────────────────────────┴───────────────┘ diff --git a/docs/snippets/getting-started/anta_nrfu_table.sh b/docs/snippets/getting-started/anta_nrfu_table.sh new file mode 100644 index 0000000..785c418 --- /dev/null +++ b/docs/snippets/getting-started/anta_nrfu_table.sh @@ -0,0 +1,10 @@ +anta nrfu \ + --username arista \ + --password arista \ + --inventory ./inventory.yml \ + `# uncomment the next two lines if you have an enable password `\ + `# --enable` \ + `# --enable-password <password>` \ + --catalog ./catalog.yml \ + `# table is default if not provided` \ + table diff --git a/docs/snippets/getting-started/anta_nrfu_text.output b/docs/snippets/getting-started/anta_nrfu_text.output new file mode 100644 index 0000000..872f608 --- /dev/null +++ b/docs/snippets/getting-started/anta_nrfu_text.output @@ -0,0 +1,30 @@ +╭────────────────────── Settings ──────────────────────╮ +│ - ANTA Inventory contains 5 devices (AsyncEOSDevice) │ +│ - Tests catalog contains 9 tests │ +╰──────────────────────────────────────────────────────╯ + +[10:52:39] INFO Preparing ANTA NRFU Run ... tools.py:294 + INFO Connecting to devices ... tools.py:294 + INFO Connecting to devices completed in: 0:00:00.057. tools.py:302 + INFO Preparing the tests ... tools.py:294 + INFO Preparing the tests completed in: 0:00:00.001. tools.py:302 + INFO --- ANTA NRFU Run Information --- runner.py:276 + Number of devices: 5 (5 established) + Total number of selected tests: 45 + Maximum number of open file descriptors for the current ANTA process: 16384 + --------------------------------- + INFO Preparing ANTA NRFU Run completed in: 0:00:00.068. tools.py:302 + INFO Running ANTA tests ... tools.py:294 +[10:52:40] INFO Running ANTA tests completed in: 0:00:00.863. 
tools.py:302 + INFO Cache statistics for 's1-spine1': 1 hits / 9 command(s) (11.11%) runner.py:75 + INFO Cache statistics for 's1-spine2': 1 hits / 9 command(s) (11.11%) runner.py:75 + INFO Cache statistics for 's1-leaf1': 1 hits / 9 command(s) (11.11%) runner.py:75 + INFO Cache statistics for 's1-leaf2': 1 hits / 9 command(s) (11.11%) runner.py:75 + INFO Cache statistics for 's1-leaf3': 1 hits / 9 command(s) (11.11%) runner.py:75 + • Running NRFU Tests...100% ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 45/45 • 0:00:00 • 0:00:00 + +s1-spine1 :: VerifyEOSVersion :: FAILURE(device is running version "4.32.2F-38195967.4322F (engineering build)" not in expected versions: ['4.25.4M', '4.26.1F', +'4.28.3M-28837868.4283M (engineering build)']) +s1-spine1 :: VerifyTerminAttrVersion :: FAILURE(device is running TerminAttr version v1.34.0 and is not in the allowed list: ['v1.22.1']) +s1-spine1 :: VerifyZeroTouch :: SUCCESS() +s1-spine1 :: VerifyMlagConfigSanity :: SKIPPED(MLAG is disabled) diff --git a/docs/snippets/getting-started/anta_nrfu_text.sh b/docs/snippets/getting-started/anta_nrfu_text.sh new file mode 100644 index 0000000..3835b51 --- /dev/null +++ b/docs/snippets/getting-started/anta_nrfu_text.sh @@ -0,0 +1,9 @@ +anta nrfu \ + --username arista \ + --password arista \ + --inventory ./inventory.yml \ + `# uncomment the next two lines if you have an enable password `\ + `# --enable` \ + `# --enable-password <password>` \ + --catalog ./catalog.yml \ + text diff --git a/docs/snippets/getting-started/catalog.yml b/docs/snippets/getting-started/catalog.yml new file mode 100644 index 0000000..cc7e781 --- /dev/null +++ b/docs/snippets/getting-started/catalog.yml @@ -0,0 +1,24 @@ +--- +anta.tests.software: + - VerifyEOSVersion: # Verifies the device is running one of the allowed EOS version. + versions: # List of allowed EOS versions. + - 4.25.4M + - 4.26.1F + - '4.28.3M-28837868.4283M (engineering build)' + - VerifyTerminAttrVersion: + versions: + - v1.22.1 + +anta.tests.system: + - VerifyUptime: # Verifies the device uptime is higher than a value. + minimum: 1 + - VerifyNTP: + +anta.tests.mlag: + - VerifyMlagStatus: + - VerifyMlagInterfaces: + - VerifyMlagConfigSanity: + +anta.tests.configuration: + - VerifyZeroTouch: # Verifies ZeroTouch is disabled. 
+ - VerifyRunningConfigDiffs: diff --git a/docs/snippets/getting-started/inventory.yml b/docs/snippets/getting-started/inventory.yml new file mode 100644 index 0000000..2f3d512 --- /dev/null +++ b/docs/snippets/getting-started/inventory.yml @@ -0,0 +1,20 @@ +anta_inventory: + hosts: + - host: 192.168.0.10 + name: s1-spine1 + tags: ['fabric', 'spine'] + - host: 192.168.0.11 + name: s1-spine2 + tags: ['fabric', 'spine'] + - host: 192.168.0.12 + name: s1-leaf1 + tags: ['fabric', 'leaf'] + - host: 192.168.0.13 + name: s1-leaf2 + tags: ['fabric', 'leaf'] + - host: 192.168.0.14 + name: s1-leaf3 + tags: ['fabric', 'leaf'] + - host: 192.168.0.15 + name: s1-leaf3 + tags: ['fabric', 'leaf'] diff --git a/docs/templates/python/material/anta_test_input_model.html.jinja b/docs/templates/python/material/anta_test_input_model.html.jinja new file mode 100644 index 0000000..f867ad0 --- /dev/null +++ b/docs/templates/python/material/anta_test_input_model.html.jinja @@ -0,0 +1,154 @@ +{% if obj.members %} + {{ log.debug("Rendering children of " + obj.path) }} + + <div class="doc doc-children"> + + {% if root_members %} + {% set members_list = config.members %} + {% else %} + {% set members_list = none %} + {% endif %} + + {% if config.group_by_category %} + + {% with %} + + {% if config.show_category_heading %} + {% set extra_level = 1 %} + {% else %} + {% set extra_level = 0 %} + {% endif %} + + {% with attributes = obj.attributes|filter_objects( + filters=config.filters, + members_list=members_list, + inherited_members=config.inherited_members, + keep_no_docstrings=config.show_if_no_docstring, + ) %} + {% if attributes %} + {% if config.show_category_heading %} + {% filter heading(heading_level, id=html_id ~ "-attributes") %}Attributes{% endfilter %} + {% endif %} + {% with heading_level = heading_level + extra_level %} + {% set root = False %} + {% set heading_level = heading_level + 1 %} + {% set old_obj = obj %} + {% set obj = class %} + {% include "attributes_table.html" with context %} + {% set obj = old_obj %} + {% endwith %} + {% endif %} + {% endwith %} + + {% with classes = obj.classes|filter_objects( + filters=config.filters, + members_list=members_list, + inherited_members=config.inherited_members, + keep_no_docstrings=config.show_if_no_docstring, + ) %} + {% if classes %} + {% if config.show_category_heading %} + {% filter heading(heading_level, id=html_id ~ "-classes") %}Classes{% endfilter %} + {% endif %} + {% with heading_level = heading_level + extra_level %} + {% for class in classes|order_members(config.members_order, members_list) %} + {% if members_list is not none or class.is_public %} + {% include class|get_template with context %} + {% endif %} + {% endfor %} + {% endwith %} + {% endif %} + {% endwith %} + + {% with functions = obj.functions|filter_objects( + filters=config.filters, + members_list=members_list, + inherited_members=config.inherited_members, + keep_no_docstrings=config.show_if_no_docstring, + ) %} + {% if functions %} + {% if config.show_category_heading %} + {% filter heading(heading_level, id=html_id ~ "-functions") %}Functions{% endfilter %} + {% endif %} + {% with heading_level = heading_level + extra_level %} + {% for function in functions|order_members(config.members_order, members_list) %} + {% if not (obj.kind.value == "class" and function.name == "__init__" and config.merge_init_into_class) %} + {% if members_list is not none or function.is_public %} + {% include function|get_template with context %} + {% endif %} + {% endif %} + {% endfor %} + {% endwith %} + 
{% endif %} + {% endwith %} + + {% if config.show_submodules %} + {% with modules = obj.modules|filter_objects( + filters=config.filters, + members_list=members_list, + inherited_members=config.inherited_members, + keep_no_docstrings=config.show_if_no_docstring, + ) %} + {% if modules %} + {% if config.show_category_heading %} + {% filter heading(heading_level, id=html_id ~ "-modules") %}Modules{% endfilter %} + {% endif %} + {% with heading_level = heading_level + extra_level %} + {% for module in modules|order_members(config.members_order.alphabetical, members_list) %} + {% if members_list is not none or module.is_public %} + {% include module|get_template with context %} + {% endif %} + {% endfor %} + {% endwith %} + {% endif %} + {% endwith %} + {% endif %} + + {% endwith %} + + {% else %} + + {% for child in obj.all_members + |filter_objects( + filters=config.filters, + members_list=members_list, + inherited_members=config.inherited_members, + keep_no_docstrings=config.show_if_no_docstring, + ) + |order_members(config.members_order, members_list) + %} + + {% if not (obj.is_class and child.name == "__init__" and config.merge_init_into_class) %} + + {% if members_list is not none or child.is_public %} + {% if child.is_attribute %} + {% with attribute = child %} + {% include attribute|get_template with context %} + {% endwith %} + + {% elif child.is_class %} + {% with class = child %} + {% include class|get_template with context %} + {% endwith %} + + {% elif child.is_function %} + {% with function = child %} + {% include function|get_template with context %} + {% endwith %} + + {% elif child.is_module and config.show_submodules %} + {% with module = child %} + {% include module|get_template with context %} + {% endwith %} + + {% endif %} + {% endif %} + + {% endif %} + + {% endfor %} + + {% endif %} + + </div> +{% endif %} diff --git a/docs/templates/python/material/class.html.jinja b/docs/templates/python/material/class.html.jinja index 1c1173c..cbf9fac 100644 --- a/docs/templates/python/material/class.html.jinja +++ b/docs/templates/python/material/class.html.jinja @@ -1,26 +1,46 @@ {% extends "_base/class.html.jinja" %} {% set anta_test = namespace(found=false) %} +{% set anta_test_input_model = namespace(found=false) %} {% for base in class.bases %} {% set basestr = base | string %} {% if "AntaTest" == basestr %} {% set anta_test.found = True %} {% endif %} {% endfor %} +{# TODO make this nicer #} +{% if class.parent.parent.name == "input_models" or class.parent.parent.parent.name == "input_models" %} +{% set anta_test_input_model.found = True %} +{% endif %} {% block children %} {% if anta_test.found %} {% set root = False %} {% set heading_level = heading_level + 1 %} {% include "anta_test.html.jinja" with context %} {# render source after children - TODO make add flag to respect disabling it.. 
though do we want to disable?#} - <details class="quote"> - <summary>Source code in <code> - {%- if class.relative_filepath.is_absolute() -%} - {{ class.relative_package_filepath }} - {%- else -%} - {{ class.relative_filepath }} - {%- endif -%} - </code></summary> - {{ class.source|highlight(language="python", linestart=class.lineno, linenums=True) }} + <details class="quote"> + <summary>Source code in <code> + {%- if class.relative_filepath.is_absolute() -%} + {{ class.relative_package_filepath }} + {%- else -%} + {{ class.relative_filepath }} + {%- endif -%} + </code></summary> + {{ class.source|highlight(language="python", linestart=class.lineno, linenums=True) }} + </details> +{% elif anta_test_input_model.found %} + {% set root = False %} + {% set heading_level = heading_level + 1 %} + {% include "anta_test_input_model.html.jinja" with context %} + {# render source after children - TODO make add flag to respect disabling it.. though do we want to disable?#} + <details class="quote"> + <summary>Source code in <code> + {%- if class.relative_filepath.is_absolute() -%} + {{ class.relative_package_filepath }} + {%- else -%} + {{ class.relative_filepath }} + {%- endif -%} + </code></summary> + {{ class.source|highlight(language="python", linestart=class.lineno, linenums=True) }} </details> {% else %} {{ super() }} @@ -29,7 +49,25 @@ {# Do not render source before children for AntaTest #} {% block source %} -{% if not anta_test.found %} +{% if not anta_test.found and not anta_test_input_model%} {{ super() }} {% endif %} {% endblock source %} + +{# overwrite block base to render some stuff on deprecation for anta_test #} +{% block bases %} +{{ super() }} + +{% for dec in class.decorators %} +{% if dec.value.function.name == "deprecated_test_class" %} +<img alt="Static Badge" src="https://img.shields.io/badge/DEPRECATED-yellow?style=flat&logoSize=auto"> +{% for arg in dec.value.arguments | selectattr("name", "equalto", "removal_in_version") | list %} +<img alt="Static Badge" src="https://img.shields.io/badge/REMOVAL-{{ arg.value[1:-1] }}-grey?style=flat&logoSize=auto&labelColor=red"> +{% endfor %} +<br/> +{% for arg in dec.value.arguments | selectattr("name", "equalto", "new_tests") | list %} +<strong>Replaced with:</strong> {{ arg.value.elements | map("replace", "'", "<code>", 1) | map("replace", "'", "</code>", 1) | join(", ") | safe }} +{% endfor %} +{% endif %} +{% endfor %} +{% endblock bases %} diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 25b061c..a422f7c 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -33,9 +33,8 @@ To help document the issue in Github, it is important to capture some logs so th ANTA provides very verbose logs when using the `DEBUG` level. When using DEBUG log level with a log file, the DEBUG logging level is not sent to stdout, but only to the file. -!!! danger - - On real deployments, do not use DEBUG logging level without setting a log file at the same time. +> [!CAUTION] +> On real deployments, do not use DEBUG logging level without setting a log file at the same time. To save the logs to a file called `anta.log`, use the following flags: @@ -46,11 +45,10 @@ anta -l DEBUG –log-file anta.log <ANTA_COMMAND> See `anta --help` for more information. These have to precede the `nrfu` cmd. -!!! tip - - Remember that in ANTA, each level of command has its own options and they can only be set at this level. - so the `-l` and `--log-file` MUST be between `anta` and the `ANTA_COMMAND`. 
- similarly, all the `nrfu` options MUST be set between the `nrfu` and the `ANTA_NRFU_SUBCOMMAND` (`json`, `text`, `table` or `tpl-report`). +> [!TIP] +> Remember that in ANTA, each level of command has its own options and they can only be set at this level. +> so the `-l` and `--log-file` MUST be between `anta` and the `ANTA_COMMAND`. +> similarly, all the `nrfu` options MUST be set between the `nrfu` and the `ANTA_NRFU_SUBCOMMAND` (`json`, `text`, `table` or `tpl-report`). As an example, for the `nrfu` command, it would look like: @@ -60,9 +58,8 @@ anta -l DEBUG --log-file anta.log nrfu --enable --username username --password a ### `ANTA_DEBUG` environment variable -!!! warning - - Do not use this if you do not know why. This produces a lot of logs and can create confusion if you do not know what to look for. +> [!WARNING] +> Do not use this if you do not know why. This produces a lot of logs and can create confusion if you do not know what to look for. The environment variable `ANTA_DEBUG=true` enable ANTA Debug Mode. diff --git a/docs/usage-inventory-catalog.md b/docs/usage-inventory-catalog.md index e41321a..7baebfb 100644 --- a/docs/usage-inventory-catalog.md +++ b/docs/usage-inventory-catalog.md @@ -47,8 +47,8 @@ The inventory file must start with the `anta_inventory` key then define one or m A full description of the inventory model is available in [API documentation](api/inventory.models.input.md) -!!! info - Caching can be disabled per device, network or range by setting the `disable_cache` key to `True` in the inventory file. For more details about how caching is implemented in ANTA, please refer to [Caching in ANTA](advanced_usages/caching.md). +> [!INFO] +> Caching can be disabled per device, network or range by setting the `disable_cache` key to `True` in the inventory file. For more details about how caching is implemented in ANTA, please refer to [Caching in ANTA](advanced_usages/caching.md). ### Example @@ -199,8 +199,8 @@ anta.tests.system: tags: ['leaf'] ``` -!!! info - When using the CLI, you can filter the NRFU execution using tags. Refer to [this section](cli/tag-management.md) of the CLI documentation. +> [!INFO] +> When using the CLI, you can filter the NRFU execution using tags. Refer to [this section](cli/tag-management.md) of the CLI documentation. ### Tests available in ANTA @@ -277,8 +277,10 @@ custom.tests.system: type: ['cEOS-LAB'] ``` -!!! tip "How to create custom tests" - To create your custom tests, you should refer to this [documentation](advanced_usages/custom-tests.md) +> [!TIP] +> **How to create custom tests** +> +> To create your custom tests, you should refer to this [documentation](advanced_usages/custom-tests.md) ### Customize test description and categories @@ -317,5 +319,5 @@ The following script reads all the files in `intended/test_catalogs/` with names --8<-- "merge_catalogs.py" ``` -!!! warning - The `AntaCatalog.merge()` method is deprecated and will be removed in ANTA v2.0. Please use the `AntaCatalog.merge_catalogs()` class method instead. +> [!WARNING] +> The `AntaCatalog.merge()` method is deprecated and will be removed in ANTA v2.0. Please use the `AntaCatalog.merge_catalogs()` class method instead. 
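As a point of reference for the deprecation note above, here is a minimal sketch of merging per-device catalogs with the `AntaCatalog.merge_catalogs()` class method rather than the deprecated `AntaCatalog.merge()`. It assumes `AntaCatalog.parse()` accepts a catalog file path and `merge_catalogs()` accepts a list of catalogs, as the warning implies; the `intended/test_catalogs/` layout is the one described in the preceding paragraph and is otherwise illustrative only.

```python
# Minimal sketch: merge per-device catalogs without the deprecated AntaCatalog.merge().
# Assumes AntaCatalog.parse() takes a file path and AntaCatalog.merge_catalogs()
# takes a list of AntaCatalog instances, per the deprecation note above.
from pathlib import Path

from anta.catalog import AntaCatalog

CATALOG_DIR = Path("intended/test_catalogs/")  # illustrative directory layout

# Parse every <device_name>-catalog.yml file found in the directory.
catalogs = [AntaCatalog.parse(path) for path in sorted(CATALOG_DIR.glob("*-catalog.yml"))]

# Merge them into a single catalog to run against the whole fabric.
merged_catalog = AntaCatalog.merge_catalogs(catalogs)

print(f"Merged catalog contains {len(merged_catalog.tests)} test definitions")
```

The same result could previously be obtained by chaining `AntaCatalog.merge()` calls, which is the method slated for removal in ANTA v2.0.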
diff --git a/examples/tests.yaml b/examples/tests.yaml index d8f3332..e22acf4 100644 --- a/examples/tests.yaml +++ b/examples/tests.yaml @@ -1,75 +1,81 @@ --- anta.tests.aaa: - - VerifyTacacsSourceIntf: - intf: Management1 - vrf: default - - VerifyTacacsServers: - servers: - - 1.1.1.1 - - 2.2.2.2 - vrf: default - - VerifyTacacsServerGroups: - groups: - - admin - - user - - VerifyAuthenMethods: + - VerifyAcctConsoleMethods: + # Verifies the AAA accounting console method lists for different accounting types (system, exec, commands, dot1x). methods: - local - none - logging types: - - login - - enable + - system + - exec + - commands - dot1x - - VerifyAuthzMethods: + - VerifyAcctDefaultMethods: + # Verifies the AAA accounting default method lists for different accounting types (system, exec, commands, dot1x). methods: - local - none - logging types: - - commands + - system - exec - - VerifyAcctDefaultMethods: + - commands + - dot1x + - VerifyAuthenMethods: + # Verifies the AAA authentication method lists for different authentication types (login, enable, dot1x). methods: - local - none - logging types: - - system - - exec - - commands + - login + - enable - dot1x - - VerifyAcctConsoleMethods: + - VerifyAuthzMethods: + # Verifies the AAA authorization method lists for different authorization types (commands, exec). methods: - local - none - logging types: - - system - - exec - commands - - dot1x - + - exec + - VerifyTacacsServerGroups: + # Verifies if the provided TACACS server group(s) are configured. + groups: + - TACACS-GROUP1 + - TACACS-GROUP2 + - VerifyTacacsServers: + # Verifies TACACS servers are configured for a specified VRF. + servers: + - 10.10.10.21 + - 10.10.10.22 + vrf: MGMT + - VerifyTacacsSourceIntf: + # Verifies TACACS source-interface for a specified VRF. + intf: Management0 + vrf: MGMT anta.tests.avt: - VerifyAVTPathHealth: + # Verifies the status of all AVT paths for all VRFs. + - VerifyAVTRole: + # Verifies the AVT role of a device. + role: edge - VerifyAVTSpecificPath: + # Verifies the Adaptive Virtual Topology (AVT) path. avt_paths: - avt_name: CONTROL-PLANE-PROFILE vrf: default destination: 10.101.255.2 next_hop: 10.101.255.1 path_type: direct - - VerifyAVTRole: - role: edge - anta.tests.bfd: - - VerifyBFDSpecificPeers: - bfd_peers: - - peer_address: 192.0.255.8 - vrf: default - - peer_address: 192.0.255.7 - vrf: default + - VerifyBFDPeersHealth: + # Verifies the health of IPv4 BFD peers across all VRFs. + down_threshold: 2 - VerifyBFDPeersIntervals: + # Verifies the timers of IPv4 BFD peer sessions. bfd_peers: - peer_address: 192.0.255.8 vrf: default @@ -81,93 +87,158 @@ anta.tests.bfd: tx_interval: 1200 rx_interval: 1200 multiplier: 3 - - VerifyBFDPeersHealth: - down_threshold: 2 - VerifyBFDPeersRegProtocols: + # Verifies the registered routing protocol of IPv4 BFD peer sessions. bfd_peers: - - peer_address: 192.0.255.8 + - peer_address: 192.0.255.7 vrf: default protocols: - bgp - - isis - + - VerifyBFDSpecificPeers: + # Verifies the state of IPv4 BFD peer sessions. + bfd_peers: + - peer_address: 192.0.255.8 + vrf: default + - peer_address: 192.0.255.7 + vrf: default anta.tests.configuration: - - VerifyZeroTouch: - VerifyRunningConfigDiffs: + # Verifies there is no difference between the running-config and the startup-config. - VerifyRunningConfigLines: + # Search the Running-Config for the given RegEx patterns. regex_patterns: - "^enable password.*$" - "bla bla" - + - VerifyZeroTouch: + # Verifies ZeroTouch is disabled. 
anta.tests.connectivity: + - VerifyLLDPNeighbors: + # Verifies the connection status of the specified LLDP (Link Layer Discovery Protocol) neighbors. + neighbors: + - port: Ethernet1 + neighbor_device: DC1-SPINE1 + neighbor_port: Ethernet1 + - port: Ethernet2 + neighbor_device: DC1-SPINE2 + neighbor_port: Ethernet1 - VerifyReachability: + # Test network reachability to one or many destination IP(s). hosts: - - source: Management1 + - source: Management0 destination: 1.1.1.1 vrf: MGMT df_bit: True size: 100 - - source: Management1 + - source: Management0 destination: 8.8.8.8 vrf: MGMT df_bit: True size: 100 - - VerifyLLDPNeighbors: - neighbors: - - port: Ethernet1 - neighbor_device: DC1-SPINE1 - neighbor_port: Ethernet1 - - port: Ethernet2 - neighbor_device: DC1-SPINE2 - neighbor_port: Ethernet1 - +anta.tests.cvx: + - VerifyActiveCVXConnections: + # Verifies the number of active CVX Connections. + connections_count: 100 + - VerifyCVXClusterStatus: + # Verifies the CVX Server Cluster status. + role: Master + peer_status: + - peer_name : cvx-red-2 + registration_state: Registration complete + - peer_name: cvx-red-3 + registration_state: Registration error + - VerifyManagementCVX: + # Verifies the management CVX global status. + enabled: true + - VerifyMcsClientMounts: + # Verify if all MCS client mounts are in mountStateMountComplete. + - VerifyMcsServerMounts: + # Verify if all MCS server mounts are in a MountComplete state. + connections_count: 100 anta.tests.field_notices: - VerifyFieldNotice44Resolution: + # Verifies that the device is using the correct Aboot version per FN0044. - VerifyFieldNotice72Resolution: - + # Verifies if the device is exposed to FN0072, and if the issue has been mitigated. anta.tests.flow_tracking: - VerifyHardwareFlowTrackerStatus: + # Verifies if hardware flow tracking is running and an input tracker is active. Optionally verifies the tracker interval/timeout and exporter configuration. trackers: - name: FLOW-TRACKER record_export: - on_inactive_timeout: 700000 - on_interval: 3000000 + on_inactive_timeout: 70000 + on_interval: 300000 exporters: - name: CV-TELEMETRY - local_interface: Loopback11 - template_interval: 3600 - - name: CVP-TELEMETRY - local_interface: Loopback01 - template_interval: 36000000 - + local_interface: Loopback0 + template_interval: 3600000 anta.tests.greent: - VerifyGreenT: + # Verifies if a GreenT policy other than the default is created. - VerifyGreenTCounters: - + # Verifies if the GreenT counters are incremented. anta.tests.hardware: - - VerifyTransceiversManufacturers: - manufacturers: - - Not Present - - Arista Networks - - Arastra, Inc. - - VerifyTemperature: - - VerifyTransceiversTemperature: - - VerifyEnvironmentSystemCooling: + - VerifyAdverseDrops: + # Verifies there are no adverse drops on DCS-7280 and DCS-7500 family switches. - VerifyEnvironmentCooling: + # Verifies the status of power supply fans and all fan trays. states: - ok - VerifyEnvironmentPower: + # Verifies the power supplies status. states: - ok - - VerifyAdverseDrops: - + - VerifyEnvironmentSystemCooling: + # Verifies the device's system cooling status. + - VerifyTemperature: + # Verifies if the device temperature is within acceptable limits. + - VerifyTransceiversManufacturers: + # Verifies if all the transceivers come from approved manufacturers. + manufacturers: + - Not Present + - Arista Networks + - Arastra, Inc. + - VerifyTransceiversTemperature: + # Verifies if all the transceivers are operating at an acceptable temperature. 
anta.tests.interfaces: - - VerifyInterfaceUtilization: - threshold: 70.0 - - VerifyInterfaceErrors: + - VerifyIPProxyARP: + # Verifies if Proxy ARP is enabled. + interfaces: + - Ethernet1 + - Ethernet2 + - VerifyIllegalLACP: + # Verifies there are no illegal LACP packets in all port channels. - VerifyInterfaceDiscards: + # Verifies that the interfaces packet discard counters are equal to zero. - VerifyInterfaceErrDisabled: + # Verifies there are no interfaces in the errdisabled state. + - VerifyInterfaceErrors: + # Verifies that the interfaces error counters are equal to zero. + - VerifyInterfaceIPv4: + # Verifies the interface IPv4 addresses. + interfaces: + - name: Ethernet2 + primary_ip: 172.30.11.1/31 + secondary_ips: + - 10.10.10.1/31 + - 10.10.10.10/31 + - VerifyInterfaceUtilization: + # Verifies that the utilization of interfaces is below a certain threshold. + threshold: 70.0 + - VerifyInterfacesSpeed: + # Verifies the speed, lanes, auto-negotiation status, and mode as full duplex for interfaces. + interfaces: + - name: Ethernet2 + auto: False + speed: 10 + - name: Eth3 + auto: True + speed: 100 + lanes: 1 + - name: Eth2 + auto: False + speed: 2.5 - VerifyInterfacesStatus: + # Verifies the operational states of specified interfaces to ensure they match expected configurations. interfaces: - name: Ethernet1 status: up @@ -177,141 +248,369 @@ anta.tests.interfaces: - name: Ethernet49/1 status: adminDown line_protocol_status: notPresent - - VerifyStormControlDrops: - - VerifyPortChannels: - - VerifyIllegalLACP: - - VerifyLoopbackCount: - number: 3 - - VerifySVI: - - VerifyL3MTU: - mtu: 1500 - ignored_interfaces: - - Vxlan1 - specific_mtu: - - Ethernet1: 2500 - - VerifyIPProxyARP: - interfaces: - - Ethernet1/1 - - Ethernet2/1 + - VerifyIpVirtualRouterMac: + # Verifies the IP virtual router MAC address. + mac_address: 00:1c:73:00:dc:01 - VerifyL2MTU: + # Verifies the global L2 MTU of all L2 interfaces. mtu: 1500 ignored_interfaces: - Management1 - Vxlan1 specific_mtu: - Ethernet1/1: 1500 - - VerifyInterfaceIPv4: - interfaces: - - name: Ethernet2/1 - primary_ip: 172.30.11.0/31 - secondary_ips: - - 10.10.10.0/31 - - 10.10.10.10/31 - - VerifyIpVirtualRouterMac: - mac_address: 00:1c:73:00:dc:01 - - VerifyInterfacesSpeed: - interfaces: - - name: Ethernet2 - auto: False - speed: 10 - - name: Eth3 - auto: True - speed: 100 - lanes: 1 - - name: Eth2 - auto: False - speed: 2.5 + - VerifyL3MTU: + # Verifies the global L3 MTU of all L3 interfaces. + mtu: 1500 + ignored_interfaces: + - Vxlan1 + specific_mtu: + - Ethernet1: 2500 - VerifyLACPInterfacesStatus: + # Verifies the Link Aggregation Control Protocol (LACP) status of the interface. interfaces: - - name: Ethernet5 - portchannel: Port-Channel5 - - name: Ethernet6 - portchannel: Port-Channel5 - + - name: Ethernet1 + portchannel: Port-Channel100 + - VerifyLoopbackCount: + # Verifies the number of loopback interfaces and their status. + number: 3 + - VerifyPortChannels: + # Verifies there are no inactive ports in all port channels. + - VerifySVI: + # Verifies the status of all SVIs. + - VerifyStormControlDrops: + # Verifies there are no interface storm-control drop counters. anta.tests.lanz: - VerifyLANZ: - + # Verifies if LANZ is enabled. anta.tests.logging: - - VerifyLoggingPersistent: - - VerifyLoggingSourceIntf: - interface: Management1 - vrf: default + - VerifyLoggingAccounting: + # Verifies if AAA accounting logs are generated. + - VerifyLoggingErrors: + # Verifies there are no syslog messages with a severity of ERRORS or higher. 
+ - VerifyLoggingHostname: + # Verifies if logs are generated with the device FQDN. - VerifyLoggingHosts: + # Verifies logging hosts (syslog servers) for a specified VRF. hosts: - 1.1.1.1 - 2.2.2.2 vrf: default - VerifyLoggingLogsGeneration: - - VerifyLoggingHostname: + # Verifies if logs are generated. + - VerifyLoggingPersistent: + # Verifies if logging persistent is enabled and logs are saved in flash. + - VerifyLoggingSourceIntf: + # Verifies logging source-interface for a specified VRF. + interface: Management0 + vrf: default - VerifyLoggingTimestamp: - - VerifyLoggingAccounting: - - VerifyLoggingErrors: - + # Verifies if logs are generated with the appropriate timestamp. anta.tests.mlag: - - VerifyMlagStatus: - - VerifyMlagInterfaces: - VerifyMlagConfigSanity: - - VerifyMlagReloadDelay: - reload_delay: 300 - reload_delay_non_mlag: 330 + # Verifies there are no MLAG config-sanity inconsistencies. - VerifyMlagDualPrimary: + # Verifies the MLAG dual-primary detection parameters. detection_delay: 200 errdisabled: True recovery_delay: 60 recovery_delay_non_mlag: 0 + - VerifyMlagInterfaces: + # Verifies there are no inactive or active-partial MLAG ports. - VerifyMlagPrimaryPriority: + # Verifies the configuration of the MLAG primary priority. primary_priority: 3276 - + - VerifyMlagReloadDelay: + # Verifies the reload-delay parameters of the MLAG configuration. + reload_delay: 300 + reload_delay_non_mlag: 330 + - VerifyMlagStatus: + # Verifies the health status of the MLAG configuration. anta.tests.multicast: + - VerifyIGMPSnoopingGlobal: + # Verifies the IGMP snooping global status. + enabled: True - VerifyIGMPSnoopingVlans: + # Verifies the IGMP snooping status for the provided VLANs. vlans: 10: False 12: False - - VerifyIGMPSnoopingGlobal: - enabled: True - anta.tests.path_selection: - VerifyPathsHealth: + # Verifies the path and telemetry state of all paths under router path-selection. - VerifySpecificPath: + # Verifies the path and telemetry state of a specific path for an IPv4 peer under router path-selection. paths: - peer: 10.255.0.1 path_group: internet source_address: 100.64.3.2 destination_address: 100.64.1.2 - anta.tests.profiles: - - VerifyUnifiedForwardingTableMode: - mode: 3 - VerifyTcamProfile: + # Verifies the device TCAM profile. profile: vxlan-routing - + - VerifyUnifiedForwardingTableMode: + # Verifies the device is using the expected UFT mode. + mode: 3 anta.tests.ptp: - - VerifyPtpModeStatus: - VerifyPtpGMStatus: + # Verifies that the device is locked to a valid PTP Grandmaster. gmid: 0xec:46:70:ff:fe:00:ff:a9 - VerifyPtpLockStatus: + # Verifies that the device was locked to the upstream PTP GM in the last minute. + - VerifyPtpModeStatus: + # Verifies that the device is configured as a PTP Boundary Clock. - VerifyPtpOffset: + # Verifies that the PTP timing offset is within +/- 1000ns from the master clock. - VerifyPtpPortModeStatus: - -anta.tests.security: - - VerifySSHStatus: - - VerifySSHIPv4Acl: - number: 3 + # Verifies the PTP interfaces state. +anta.tests.routing.bgp: + - VerifyBGPAdvCommunities: + # Verifies that advertised communities are standard, extended and large for BGP peers. + bgp_peers: + - peer_address: 172.30.11.17 + vrf: default + - peer_address: 172.30.11.21 + vrf: default + - VerifyBGPExchangedRoutes: + # Verifies the advertised and received routes of BGP peers. 
+ bgp_peers: + - peer_address: 172.30.255.5 + vrf: default + advertised_routes: + - 192.0.254.5/32 + received_routes: + - 192.0.255.4/32 + - peer_address: 172.30.255.1 + vrf: default + advertised_routes: + - 192.0.255.1/32 + - 192.0.254.5/32 + received_routes: + - 192.0.254.3/32 + - VerifyBGPPeerASNCap: + # Verifies the four octet ASN capability of BGP peers. + bgp_peers: + - peer_address: 172.30.11.1 + vrf: default + - VerifyBGPPeerCount: + # Verifies the count of BGP peers for given address families. + address_families: + - afi: "evpn" + num_peers: 2 + - afi: "ipv4" + safi: "unicast" + vrf: "PROD" + num_peers: 2 + - afi: "ipv4" + safi: "unicast" + vrf: "default" + num_peers: 3 + - afi: "ipv4" + safi: "multicast" + vrf: "DEV" + num_peers: 3 + - VerifyBGPPeerDropStats: + # Verifies BGP NLRI drop statistics for the provided BGP IPv4 peer(s). + bgp_peers: + - peer_address: 172.30.11.1 + vrf: default + drop_stats: + - inDropAsloop + - prefixEvpnDroppedUnsupportedRouteType + - VerifyBGPPeerMD5Auth: + # Verifies the MD5 authentication and state of IPv4 BGP peers in a specified VRF. + bgp_peers: + - peer_address: 172.30.11.1 + vrf: default + - peer_address: 172.30.11.5 + vrf: default + - VerifyBGPPeerMPCaps: + # Verifies the multiprotocol capabilities of BGP peers. + bgp_peers: + - peer_address: 172.30.11.1 + vrf: default + strict: False + capabilities: + - ipv4Unicast + - VerifyBGPPeerRouteLimit: + # Verifies maximum routes and outbound route-maps of BGP IPv4 peer(s). + bgp_peers: + - peer_address: 172.30.11.1 + vrf: default + maximum_routes: 12000 + warning_limit: 10000 + - VerifyBGPPeerRouteRefreshCap: + # Verifies the route refresh capabilities of a BGP peer in a specified VRF. + bgp_peers: + - peer_address: 172.30.11.1 + vrf: default + - VerifyBGPPeerUpdateErrors: + # Verifies BGP update error counters for the provided BGP IPv4 peer(s). + bgp_peers: + - peer_address: 172.30.11.1 + vrf: default + update_errors: + - inUpdErrWithdraw + - VerifyBGPPeersHealth: + # Verifies the health of BGP peers for given address families. + address_families: + - afi: "evpn" + - afi: "ipv4" + safi: "unicast" + vrf: "default" + - afi: "ipv6" + safi: "unicast" + vrf: "DEV" + check_tcp_queues: false + - VerifyBGPSpecificPeers: + # Verifies the health of specific BGP peer(s) for given address families. + address_families: + - afi: "evpn" + peers: + - 10.1.0.1 + - 10.1.0.2 + - afi: "ipv4" + safi: "unicast" + peers: + - 10.1.254.1 + - 10.1.255.0 + - 10.1.255.2 + - 10.1.255.4 + - VerifyBGPTimers: + # Verifies the timers of BGP peers. + bgp_peers: + - peer_address: 172.30.11.1 + vrf: default + hold_time: 180 + keep_alive_time: 60 + - peer_address: 172.30.11.5 + vrf: default + hold_time: 180 + keep_alive_time: 60 + - VerifyBgpRouteMaps: + # Verifies BGP inbound and outbound route-maps of BGP IPv4 peer(s). + bgp_peers: + - peer_address: 172.30.11.1 + vrf: default + inbound_route_map: RM-MLAG-PEER-IN + outbound_route_map: RM-MLAG-PEER-OUT + - VerifyEVPNType2Route: + # Verifies the EVPN Type-2 routes for a given IPv4 or MAC address and VNI. + vxlan_endpoints: + - address: 192.168.20.102 + vni: 10020 + - address: aac1.ab5d.b41e + vni: 10010 +anta.tests.routing.generic: + - VerifyIPv4RouteType: + # Verifies the route-type of the IPv4 prefixes. 
+ routes_entries: + - prefix: 10.10.0.1/32 + vrf: default + route_type: eBGP + - prefix: 10.100.0.12/31 + vrf: default + route_type: connected + - prefix: 10.100.1.5/32 + vrf: default + route_type: iBGP + - VerifyRoutingProtocolModel: + # Verifies the configured routing protocol model. + model: multi-agent + - VerifyRoutingTableEntry: + # Verifies that the provided routes are present in the routing table of a specified VRF. vrf: default - - VerifySSHIPv6Acl: + routes: + - 10.1.0.1 + - 10.1.0.2 + - VerifyRoutingTableSize: + # Verifies the size of the IP routing table of the default VRF. + minimum: 2 + maximum: 20 +anta.tests.routing.isis: + - VerifyISISInterfaceMode: + # Verifies interface mode for IS-IS + interfaces: + - name: Loopback0 + mode: passive + # vrf is set to default by default + - name: Ethernet2 + mode: passive + level: 2 + # vrf is set to default by default + - name: Ethernet1 + mode: point-to-point + vrf: default + # level is set to 2 by default + - VerifyISISNeighborCount: + # Verifies number of IS-IS neighbors per level and per interface. + interfaces: + - name: Ethernet1 + level: 1 + count: 2 + - name: Ethernet2 + level: 2 + count: 1 + - name: Ethernet3 + count: 2 + # level is set to 2 by default + - VerifyISISNeighborState: + # Verifies all IS-IS neighbors are in UP state. + - VerifyISISSegmentRoutingAdjacencySegments: + # Verify that all expected Adjacency segments are correctly visible for each interface. + instances: + - name: CORE-ISIS + vrf: default + segments: + - interface: Ethernet2 + address: 10.0.1.3 + sid_origin: dynamic + - VerifyISISSegmentRoutingDataplane: + # Verify dataplane of a list of ISIS-SR instances. + instances: + - name: CORE-ISIS + vrf: default + dataplane: MPLS + - VerifyISISSegmentRoutingTunnels: + # Verify ISIS-SR tunnels computed by device. + entries: + # Check only endpoint + - endpoint: 1.0.0.122/32 + # Check endpoint and via TI-LFA + - endpoint: 1.0.0.13/32 + vias: + - type: tunnel + tunnel_id: ti-lfa + # Check endpoint and via IP routers + - endpoint: 1.0.0.14/32 + vias: + - type: ip + nexthop: 1.1.1.1 +anta.tests.routing.ospf: + - VerifyOSPFMaxLSA: + # Verifies all OSPF instances did not cross the maximum LSA threshold. + - VerifyOSPFNeighborCount: + # Verifies the number of OSPF neighbors in FULL state is the one we expect. number: 3 - vrf: default - - VerifyTelnetStatus: + - VerifyOSPFNeighborState: + # Verifies all OSPF neighbors are in FULL state. +anta.tests.security: - VerifyAPIHttpStatus: + # Verifies if eAPI HTTP server is disabled globally. - VerifyAPIHttpsSSL: + # Verifies if the eAPI has a valid SSL profile. profile: default - VerifyAPIIPv4Acl: + # Verifies if eAPI has the right number IPv4 ACL(s) configured for a specified VRF. number: 3 vrf: default - VerifyAPIIPv6Acl: + # Verifies if eAPI has the right number IPv6 ACL(s) configured for a specified VRF. number: 3 vrf: default - VerifyAPISSLCertificate: + # Verifies the eAPI SSL certificate expiry, common subject name, encryption algorithm and key size. certificates: - certificate_name: ARISTA_SIGNING_CA.crt expiry_threshold: 30 @@ -324,16 +623,23 @@ anta.tests.security: encryption_algorithm: RSA key_size: 4096 - VerifyBannerLogin: - login_banner: | - # Copyright (c) 2023-2024 Arista Networks, Inc. - # Use of this source code is governed by the Apache License 2.0 - # that can be found in the LICENSE file. + # Verifies the login banner of a device. + login_banner: | + # Copyright (c) 2023-2024 Arista Networks, Inc. 
+ # Use of this source code is governed by the Apache License 2.0 + # that can be found in the LICENSE file. - VerifyBannerMotd: - motd_banner: | - # Copyright (c) 2023-2024 Arista Networks, Inc. - # Use of this source code is governed by the Apache License 2.0 - # that can be found in the LICENSE file. + # Verifies the motd banner of a device. + motd_banner: | + # Copyright (c) 2023-2024 Arista Networks, Inc. + # Use of this source code is governed by the Apache License 2.0 + # that can be found in the LICENSE file. + - VerifyHardwareEntropy: + # Verifies hardware entropy generation is enabled on device. + - VerifyIPSecConnHealth: + # Verifies all IPv4 security connections. - VerifyIPv4ACL: + # Verifies the configuration of IPv4 ACLs. ipv4_access_lists: - name: default-control-plane-acl entries: @@ -349,8 +655,18 @@ anta.tests.security: action: permit icmp any any - sequence: 20 action: permit tcp any any range 5900 5910 - - VerifyIPSecConnHealth: + - VerifySSHIPv4Acl: + # Verifies if the SSHD agent has IPv4 ACL(s) configured. + number: 3 + vrf: default + - VerifySSHIPv6Acl: + # Verifies if the SSHD agent has IPv6 ACL(s) configured. + number: 3 + vrf: default + - VerifySSHStatus: + # Verifies if the SSHD agent is disabled in the default VRF. - VerifySpecificIPSecConn: + # Verifies the IPv4 security connections. ip_security_connections: - peer: 10.255.0.1 - peer: 10.255.0.2 @@ -360,17 +676,17 @@ anta.tests.security: destination_address: 100.64.2.2 - source_address: 172.18.3.2 destination_address: 172.18.2.2 - - VerifyHardwareEntropy: - + - VerifyTelnetStatus: + # Verifies if Telnet is disabled in the default VRF. anta.tests.services: - - VerifyHostname: - hostname: s1-spine1 - VerifyDNSLookup: + # Verifies the DNS name to IP address resolution. domain_names: - arista.com - www.google.com - arista.ca - VerifyDNSServers: + # Verifies if the DNS (Domain Name Service) servers are correctly configured. dns_servers: - server_address: 10.14.0.1 vrf: default @@ -379,66 +695,90 @@ anta.tests.services: vrf: MGMT priority: 0 - VerifyErrdisableRecovery: + # Verifies the errdisable recovery reason, status, and interval. reasons: - reason: acl interval: 30 - reason: bpduguard interval: 30 - + - VerifyHostname: + # Verifies the hostname of a device. + hostname: s1-spine1 anta.tests.snmp: - - VerifySnmpStatus: - vrf: default + - VerifySnmpContact: + # Verifies the SNMP contact of a device. + contact: Jon@example.com + - VerifySnmpErrorCounters: + # Verifies the SNMP error counters. + error_counters: + - inVersionErrs - VerifySnmpIPv4Acl: + # Verifies if the SNMP agent has IPv4 ACL(s) configured. number: 3 vrf: default - VerifySnmpIPv6Acl: + # Verifies if the SNMP agent has IPv6 ACL(s) configured. number: 3 vrf: default - VerifySnmpLocation: + # Verifies the SNMP location of a device. location: New York - - VerifySnmpContact: - contact: Jon@example.com - VerifySnmpPDUCounters: + # Verifies the SNMP PDU counters. pdus: - outTrapPdus - - VerifySnmpErrorCounters: - error_counters: - - inVersionErrs - - inBadCommunityNames - + - inGetNextPdus + - VerifySnmpStatus: + # Verifies if the SNMP agent is enabled. + vrf: default anta.tests.software: + - VerifyEOSExtensions: + # Verifies that all EOS extensions installed on the device are enabled for boot persistence. - VerifyEOSVersion: + # Verifies the EOS version of the device. versions: - 4.25.4M - 4.26.1F - VerifyTerminAttrVersion: + # Verifies the TerminAttr version of the device. 
versions: - v1.13.6 - v1.8.0 - - VerifyEOSExtensions: - anta.tests.stp: - - VerifySTPMode: - mode: rapidPvst - vlans: - - 10 - - 20 - VerifySTPBlockedPorts: + # Verifies there is no STP blocked ports. - VerifySTPCounters: + # Verifies there is no errors in STP BPDU packets. - VerifySTPForwardingPorts: + # Verifies that all interfaces are forwarding for a provided list of VLAN(s). + vlans: + - 10 + - 20 + - VerifySTPMode: + # Verifies the configured STP mode for a provided list of VLAN(s). + mode: rapidPvst vlans: - 10 - 20 - VerifySTPRootPriority: + # Verifies the STP root priority for a provided list of VLAN or MST instance ID(s). priority: 32768 instances: - 10 - 20 - VerifyStpTopologyChanges: + # Verifies the number of changes across all interfaces in the Spanning Tree Protocol (STP) topology is below a threshold. threshold: 10 - anta.tests.stun: - VerifyStunClient: + # (Deprecated) Verifies the translation for a source address on a STUN client. + stun_clients: + - source_address: 172.18.3.2 + public_address: 172.18.3.21 + source_port: 4500 + public_port: 6006 + - VerifyStunClientTranslation: + # Verifies the translation for a source address on a STUN client. stun_clients: - source_address: 172.18.3.2 public_address: 172.18.3.21 @@ -449,242 +789,57 @@ anta.tests.stun: source_port: 4500 public_port: 6006 - VerifyStunServer: - + # Verifies the STUN server status is enabled and running. anta.tests.system: - - VerifyUptime: - minimum: 86400 - - VerifyReloadCause: - - VerifyCoredump: - VerifyAgentLogs: + # Verifies there are no agent crash reports. - VerifyCPUUtilization: - - VerifyMemoryUtilization: + # Verifies whether the CPU utilization is below 75%. + - VerifyCoredump: + # Verifies there are no core dump files. - VerifyFileSystemUtilization: + # Verifies that no partition is utilizing more than 75% of its disk space. + - VerifyMemoryUtilization: + # Verifies whether the memory utilization is below 75%. - VerifyNTP: + # Verifies if NTP is synchronised. - VerifyNTPAssociations: + # Verifies the Network Time Protocol (NTP) associations. ntp_servers: - server_address: 1.1.1.1 preferred: True stratum: 1 - server_address: 2.2.2.2 - stratum: 1 + stratum: 2 - server_address: 3.3.3.3 - stratum: 1 - + stratum: 2 + - VerifyReloadCause: + # Verifies the last reload cause of the device. + - VerifyUptime: + # Verifies the device uptime. + minimum: 86400 anta.tests.vlan: - VerifyVlanInternalPolicy: + # Verifies the VLAN internal allocation policy and the range of VLANs. policy: ascending start_vlan_id: 1006 end_vlan_id: 4094 - anta.tests.vxlan: + - VerifyVxlan1ConnSettings: + # Verifies the interface vxlan1 source interface and UDP port. + source_interface: Loopback1 + udp_port: 4789 - VerifyVxlan1Interface: + # Verifies the Vxlan1 interface status. - VerifyVxlanConfigSanity: + # Verifies there are no VXLAN config-sanity inconsistencies. - VerifyVxlanVniBinding: + # Verifies the VNI-VLAN bindings of the Vxlan1 interface. bindings: 10010: 10 10020: 20 - VerifyVxlanVtep: + # Verifies the VTEP peers of the Vxlan1 interface. 
vteps: - 10.1.1.5 - 10.1.1.6 - - VerifyVxlan1ConnSettings: - source_interface: Loopback1 - udp_port: 4789 - -anta.tests.routing: - generic: - - VerifyRoutingProtocolModel: - model: multi-agent - - VerifyRoutingTableSize: - minimum: 2 - maximum: 20 - - VerifyRoutingTableEntry: - vrf: default - routes: - - 10.1.0.1 - - 10.1.0.2 - bgp: - - VerifyBGPPeerCount: - address_families: - - afi: "evpn" - num_peers: 2 - - afi: "ipv4" - safi: "unicast" - vrf: "PROD" - num_peers: 2 - - afi: "ipv4" - safi: "unicast" - vrf: "default" - num_peers: 3 - - afi: "ipv4" - safi: "multicast" - vrf: "DEV" - num_peers: 3 - - VerifyBGPPeersHealth: - address_families: - - afi: "evpn" - - afi: "ipv4" - safi: "unicast" - vrf: "default" - - afi: "ipv6" - safi: "unicast" - vrf: "DEV" - - VerifyBGPSpecificPeers: - address_families: - - afi: "evpn" - peers: - - 10.1.0.1 - - 10.1.0.2 - - afi: "ipv4" - safi: "unicast" - peers: - - 10.1.254.1 - - 10.1.255.0 - - 10.1.255.2 - - 10.1.255.4 - - VerifyBGPExchangedRoutes: - bgp_peers: - - peer_address: 172.30.255.5 - vrf: default - advertised_routes: - - 192.0.254.5/32 - received_routes: - - 192.0.255.4/32 - - peer_address: 172.30.255.1 - vrf: default - advertised_routes: - - 192.0.255.1/32 - - 192.0.254.5/32 - received_routes: - - 192.0.254.3/32 - - VerifyBGPPeerMPCaps: - bgp_peers: - - peer_address: 172.30.11.1 - vrf: default - strict: False - capabilities: - - ipv4Unicast - - VerifyBGPPeerASNCap: - bgp_peers: - - peer_address: 172.30.11.1 - vrf: default - - VerifyBGPPeerRouteRefreshCap: - bgp_peers: - - peer_address: 172.30.11.1 - vrf: default - - VerifyBGPPeerMD5Auth: - bgp_peers: - - peer_address: 172.30.11.1 - vrf: default - - peer_address: 172.30.11.5 - vrf: default - - VerifyEVPNType2Route: - vxlan_endpoints: - - address: 192.168.20.102 - vni: 10020 - - address: aac1.ab5d.b41e - vni: 10010 - - VerifyBGPAdvCommunities: - bgp_peers: - - peer_address: 172.30.11.17 - vrf: default - - peer_address: 172.30.11.21 - vrf: default - - VerifyBGPTimers: - bgp_peers: - - peer_address: 172.30.11.1 - vrf: default - hold_time: 180 - keep_alive_time: 60 - - peer_address: 172.30.11.5 - vrf: default - hold_time: 180 - keep_alive_time: 60 - - VerifyBGPPeerDropStats: - bgp_peers: - - peer_address: 10.101.0.4 - vrf: default - drop_stats: - - inDropAsloop - - inDropClusterIdLoop - - inDropMalformedMpbgp - - inDropOrigId - - inDropNhLocal - - inDropNhAfV6 - - VerifyBGPPeerUpdateErrors: - bgp_peers: - - peer_address: 10.100.0.8 - vrf: default - update_errors: - - inUpdErrWithdraw - - inUpdErrIgnore - - VerifyBgpRouteMaps: - bgp_peers: - - peer_address: 10.100.4.1 - vrf: default - inbound_route_map: RM-MLAG-PEER-IN - outbound_route_map: RM-MLAG-PEER-IN - - VerifyBGPPeerRouteLimit: - bgp_peers: - - peer_address: 10.100.0.8 - vrf: default - maximum_routes: 12000 - warning_limit: 10000 - ospf: - - VerifyOSPFNeighborState: - - VerifyOSPFNeighborCount: - number: 3 - - VerifyOSPFMaxLSA: - isis: - - VerifyISISNeighborState: - - VerifyISISNeighborCount: - interfaces: - - name: Ethernet1 - level: 1 - count: 2 - - name: Ethernet2 - level: 2 - count: 1 - - name: Ethernet3 - count: 2 - # level is set to 2 by default - - VerifyISISInterfaceMode: - interfaces: - - name: Loopback0 - mode: passive - # vrf is set to default by default - - name: Ethernet2 - mode: passive - level: 2 - # vrf is set to default by default - - name: Ethernet1 - mode: point-to-point - vrf: default - # level is set to 2 by default - - VerifyISISSegmentRoutingAdjacencySegments: - instances: - - name: CORE-ISIS - vrf: default - segments: - - 
interface: Ethernet2 - address: 10.0.1.3 - sid_origin: dynamic - - VerifyISISSegmentRoutingDataplane: - instances: - - name: CORE-ISIS - vrf: default - dataplane: MPLS - - VerifyISISSegmentRoutingTunnels: - entries: - # Check only endpoint - - endpoint: 1.0.0.122/32 - # Check endpoint and via TI-LFA - - endpoint: 1.0.0.13/32 - vias: - - type: tunnel - tunnel_id: ti-lfa - # Check endpoint and via IP routers - - endpoint: 1.0.0.14/32 - vias: - - type: ip - nexthop: 1.1.1.1 @@ -79,6 +79,7 @@ watch: - anta plugins: + - gh-admonitions - mkdocstrings: default_handler: python custom_templates: docs/templates @@ -122,47 +123,44 @@ plugins: width: 90vw markdown_extensions: + - admonition - attr_list - - pymdownx.emoji: - emoji_index: !!python/name:material.extensions.emoji.twemoji - emoji_generator: !!python/name:material.extensions.emoji.to_svg - - smarty + - codehilite: + guess_lang: true - pymdownx.arithmatex - pymdownx.betterem: smart_enable: all - pymdownx.caret - pymdownx.critic - pymdownx.details + - pymdownx.emoji: + emoji_index: !!python/name:material.extensions.emoji.twemoji + emoji_generator: !!python/name:material.extensions.emoji.to_svg + - pymdownx.highlight - pymdownx.inlinehilite - pymdownx.magiclink - pymdownx.mark - pymdownx.smartsymbols + - pymdownx.snippets: + base_path: + - docs/snippets + - examples - pymdownx.superfences: custom_fences: - name: mermaid class: mermaid format: !!python/name:pymdownx.superfences.fence_code_format + - pymdownx.tabbed: + alternate_style: true - pymdownx.tasklist: custom_checkbox: true - pymdownx.tilde - # - fontawesome_markdown - - admonition - - codehilite: - guess_lang: true + - smarty - toc: separator: "-" # permalink: "#" permalink: true baselevel: 2 - - pymdownx.highlight - - pymdownx.snippets: - base_path: - - docs/snippets - - examples - - pymdownx.superfences - - pymdownx.superfences - - pymdownx.tabbed: - alternate_style: true # TOC docs_dir: docs/ @@ -178,6 +176,7 @@ nav: - Inventory from CVP: cli/inv-from-cvp.md - Inventory from Ansible: cli/inv-from-ansible.md - Get Inventory Information: cli/get-inventory-information.md + - Get Tests Information: cli/get-tests.md - Check commands: cli/check.md - Debug commands: cli/debug.md - Tag Management: cli/tag-management.md @@ -192,6 +191,7 @@ nav: - BFD: api/tests.bfd.md - Configuration: api/tests.configuration.md - Connectivity: api/tests.connectivity.md + - CVX: api/tests.cvx.md - Field Notices: api/tests.field_notices.md - Flow Tracking: api/tests.flow_tracking.md - GreenT: api/tests.greent.md diff --git a/pyproject.toml b/pyproject.toml index bc7ac71..1e85b01 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta" [project] name = "anta" -version = "v1.1.0" +version = "v1.2.0" readme = "docs/README.md" authors = [{ name = "Arista Networks ANTA maintainers", email = "anta-dev@arista.com" }] maintainers = [ @@ -23,13 +23,13 @@ dependencies = [ "asyncssh>=2.16", "cvprac>=1.3.1", "eval-type-backport>=0.1.3", # Support newer typing features in older Python versions (required until Python 3.9 support is removed) + "httpx>=0.27.0", "Jinja2>=3.1.2", "pydantic>=2.7", "pydantic-extra-types>=2.3.0", "PyYAML>=6.0", "requests>=2.31.0", - "rich>=13.5.2,<14", - "httpx>=0.27.0" + "rich>=13.5.2,<14" ] keywords = ["test", "anta", "Arista", "network", "automation", "networking", "devops", "netdevops"] classifiers = [ @@ -45,6 +45,7 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: 
Python :: 3.12", + "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3 :: Only", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Software Development :: Testing", @@ -69,13 +70,12 @@ dev = [ "pytest-cov>=4.1.0", "pytest-dependency", "pytest-codspeed>=2.2.0", - "respx", "pytest-html>=3.2.0", "pytest-httpx>=0.30.0", "pytest-metadata>=3.0.0", "pytest>=7.4.0", - "respx>=0.21.1", - "ruff>=0.5.4,<0.7.0", + "respx>=0.22.0", + "ruff>=0.5.4,<0.9.0", "tox>=4.10.0,<5.0.0", "types-PyYAML", "types-pyOpenSSL", @@ -97,7 +97,8 @@ doc = [ "mkdocs-material>=9.5.34", "mkdocstrings[python]>=0.26.0", "mkdocstrings-python>=1.11.0", - "black>=24.10.0" + "black>=24.10.0", + "mkdocs-github-admonitions-plugin>=0.0.3" ] [project.urls] @@ -108,18 +109,18 @@ Contributing = "https://anta.arista.com/main/contribution/" [project.scripts] anta = "anta.cli:cli" -################################ -# Tools -################################ [tool.setuptools.packages.find] include = ["anta*", "asynceapi*"] namespaces = false +[tool.setuptools.package-data] +"anta" = ["py.typed"] + ################################ # Version ################################ [tool.bumpver] -current_version = "1.1.0" +current_version = "1.2.0" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "bump: Version {old_version} -> {new_version}" commit = true @@ -238,7 +239,7 @@ envlist = clean, lint, type, - py{39,310,311,312}, + py{39,310,311,312,313}, report [gh-actions] @@ -247,6 +248,7 @@ python = 3.10: py310 3.11: erase, py311, report 3.12: py312 + 3.13: py313 [testenv] description = Run pytest with {basepython} @@ -257,6 +259,9 @@ extras = # tox -e <env> -- path/to/my/test::test commands = pytest {posargs} +# To test on non-POSIX system +# https://github.com/tox-dev/tox/issues/1455 +passenv = USERNAME [testenv:lint] description = Check the code style @@ -337,8 +342,7 @@ select = ["ALL", # By enabling a convention for docstrings, ruff automatically ignore some rules that need to be # added back if we want them. # https://docs.astral.sh/ruff/faq/#does-ruff-support-numpy-or-google-style-docstrings - # TODO: Augment the numpy convention rules to make sure we add all the params - # Uncomment below D417 + "D212", "D415", "D417", ] @@ -366,6 +370,7 @@ convention = "numpy" # we have not removed pylint completely, these settings should be kept in sync with our pylintrc file. # https://github.com/astral-sh/ruff/issues/970 max-branches = 13 +max-args = 10 [tool.ruff.lint.mccabe] # Unlike Flake8, default to a complexity level of 10. 
@@ -376,6 +381,7 @@ max-complexity = 10
     "RICH_COLOR_PALETTE"
 ]
 
+
 [tool.ruff.lint.flake8-type-checking]
 # These classes require that type annotations be available at runtime
 runtime-evaluated-base-classes = ["pydantic.BaseModel", "anta.models.AntaTest.Input"]
@@ -389,7 +395,6 @@ runtime-evaluated-base-classes = ["pydantic.BaseModel", "anta.models.AntaTest.In
 "tests/units/*" = [
     "ARG002", # Sometimes we need to declare unused arguments when a parameter is not used but declared in @pytest.mark.parametrize
     "FBT001", # Boolean-typed positional argument in function definition
-    "PLR0913", # Too many arguments to function call
     "PLR2004", # Magic value used in comparison, consider replacing {value} with a constant variable
     "S105", # Passwords are indeed hardcoded in tests
     "S106", # Passwords are indeed hardcoded in tests
@@ -411,7 +416,7 @@ runtime-evaluated-base-classes = ["pydantic.BaseModel", "anta.models.AntaTest.In
     "T201", # Allow print statements
 ]
 "anta/cli/*" = [
-    "PLR0913", # Allow more than 5 input arguments in CLI functions
+    "PLR0913", # CLI has many arguments defined in functions
     "ANN401", # TODO: Check if we can update the Any type hints in the CLI
 ]
 "anta/tests/field_notices.py" = [
@@ -428,13 +433,6 @@ runtime-evaluated-base-classes = ["pydantic.BaseModel", "anta.models.AntaTest.In
 ]
 "anta/tools.py" = [
     "ANN401", # Ok to use Any type hint in our custom get functions
-    "PLR0913", # Ok to have more than 5 arguments in our custom get functions
-]
-"anta/device.py" = [
-    "PLR0913", # Ok to have more than 5 arguments in the AntaDevice classes
-]
-"anta/inventory/__init__.py" = [
-    "PLR0913", # Ok to have more than 5 arguments in the AntaInventory class
 ]
 "examples/*.py" = [ # These are example scripts and linked in snippets
     "S105", # Possible hardcoded password
@@ -469,9 +467,11 @@ disable = [ # Any rule listed here can be disabled: https://github.com/astral-sh
     "reimported",
     "wrong-import-order",
     "wrong-import-position",
+    "unnecessary-lambda",
     "abstract-class-instantiated", # Overlap with https://mypy.readthedocs.io/en/stable/error_code_list.html#check-instantiation-of-abstract-classes-abstract
     "unexpected-keyword-arg", # Overlap with https://mypy.readthedocs.io/en/stable/error_code_list.html#check-arguments-in-calls-call-arg and other rules
-    "no-value-for-parameter" # Overlap with https://mypy.readthedocs.io/en/stable/error_code_list.html#check-arguments-in-calls-call-arg
+    "no-value-for-parameter", # Overlap with https://mypy.readthedocs.io/en/stable/error_code_list.html#check-arguments-in-calls-call-arg
+    "import-outside-toplevel"
 ]
 max-statements=61
 max-returns=8
diff --git a/tests/benchmark/conftest.py b/tests/benchmark/conftest.py
index 61f2fa1..04ce54c 100644
--- a/tests/benchmark/conftest.py
+++ b/tests/benchmark/conftest.py
@@ -4,12 +4,14 @@
 """Fixtures for benchmarking ANTA."""
 
 import logging
+from collections import defaultdict
 
 import pytest
 import respx
 from _pytest.terminal import TerminalReporter
 
 from anta.catalog import AntaCatalog
+from anta.result_manager import ResultManager
 
 from .utils import AntaMockEnvironment
 
@@ -17,6 +19,12 @@ logger = logging.getLogger(__name__)
 
 TEST_CASE_COUNT = None
 
+# Used to globally configure the benchmarks by specifying parameters for inventories
+BENCHMARK_PARAMETERS = [
+    pytest.param({"count": 1, "disable_cache": True, "reachable": True}, id="1-device"),
+    pytest.param({"count": 2, "disable_cache": True, "reachable": True}, id="2-devices"),
+]
+
 
 @pytest.fixture(name="anta_mock_env", scope="session")  # We want this fixture to have a
scope set to session to avoid reparsing all the unit tests data. def anta_mock_env_fixture() -> AntaMockEnvironment: @@ -35,6 +43,22 @@ def catalog(anta_mock_env: AntaMockEnvironment) -> AntaCatalog: return anta_mock_env.catalog +@pytest.fixture(name="session_results", scope="session") # We want this fixture to be reused across test modules within tests/benchmark +def session_results_fixture() -> defaultdict[str, ResultManager]: + """Return a dictionary of ResultManger objects for the benchmarks. + + The key is the test id as defined in the pytest_generate_tests in this module. + Used to pass a populated ResultManager from one benchmark to another. + """ + return defaultdict(lambda: ResultManager()) + + +@pytest.fixture +def results(request: pytest.FixtureRequest, session_results: defaultdict[str, ResultManager]) -> ResultManager: + """Return the unique ResultManger object for the current benchmark parameter.""" + return session_results[request.node.callspec.id] + + def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None: """Display the total number of ANTA unit test cases used to benchmark.""" terminalreporter.write_sep("=", f"{TEST_CASE_COUNT} ANTA test cases") @@ -49,9 +73,12 @@ def pytest_generate_tests(metafunc: pytest.Metafunc) -> None: return metafunc.parametrize( "inventory", - [ - pytest.param({"count": 1, "disable_cache": True, "reachable": True}, id="1-device"), - pytest.param({"count": 2, "disable_cache": True, "reachable": True}, id="2-devices"), - ], + BENCHMARK_PARAMETERS, + indirect=True, + ) + elif "results" in metafunc.fixturenames: + metafunc.parametrize( + "results", + BENCHMARK_PARAMETERS, indirect=True, ) diff --git a/tests/benchmark/test_anta.py b/tests/benchmark/test_anta.py index e82de64..7d1f21c 100644 --- a/tests/benchmark/test_anta.py +++ b/tests/benchmark/test_anta.py @@ -5,6 +5,7 @@ import asyncio import logging +from collections import defaultdict from unittest.mock import patch import pytest @@ -22,45 +23,61 @@ from .utils import collect, collect_commands logger = logging.getLogger(__name__) -def test_anta_dry_run(benchmark: BenchmarkFixture, event_loop: asyncio.AbstractEventLoop, catalog: AntaCatalog, inventory: AntaInventory) -> None: +def test_anta_dry_run( + benchmark: BenchmarkFixture, + event_loop: asyncio.AbstractEventLoop, + catalog: AntaCatalog, + inventory: AntaInventory, + request: pytest.FixtureRequest, + session_results: defaultdict[str, ResultManager], +) -> None: """Benchmark ANTA in Dry-Run Mode.""" # Disable logging during ANTA execution to avoid having these function time in benchmarks logging.disable() - def _() -> ResultManager: - manager = ResultManager() - catalog.clear_indexes() - event_loop.run_until_complete(main(manager, inventory, catalog, dry_run=True)) - return manager + results = session_results[request.node.callspec.id] - manager = benchmark(_) + @benchmark + def _() -> None: + results.reset() + catalog.clear_indexes() + event_loop.run_until_complete(main(results, inventory, catalog, dry_run=True)) logging.disable(logging.NOTSET) - if len(manager.results) != len(inventory) * len(catalog.tests): - pytest.fail(f"Expected {len(inventory) * len(catalog.tests)} tests but got {len(manager.results)}", pytrace=False) - bench_info = "\n--- ANTA NRFU Dry-Run Benchmark Information ---\n" f"Test count: {len(manager.results)}\n" "-----------------------------------------------" + + if len(results.results) != len(inventory) * len(catalog.tests): + pytest.fail(f"Expected {len(inventory) * len(catalog.tests)} tests but got 
{len(results.results)}", pytrace=False) + bench_info = "\n--- ANTA NRFU Dry-Run Benchmark Information ---\n" f"Test count: {len(results.results)}\n" "-----------------------------------------------" logger.info(bench_info) @patch("anta.models.AntaTest.collect", collect) @patch("anta.device.AntaDevice.collect_commands", collect_commands) +@pytest.mark.dependency(name="anta_benchmark", scope="package") @respx.mock # Mock eAPI responses -def test_anta(benchmark: BenchmarkFixture, event_loop: asyncio.AbstractEventLoop, catalog: AntaCatalog, inventory: AntaInventory) -> None: +def test_anta( + benchmark: BenchmarkFixture, + event_loop: asyncio.AbstractEventLoop, + catalog: AntaCatalog, + inventory: AntaInventory, + request: pytest.FixtureRequest, + session_results: defaultdict[str, ResultManager], +) -> None: """Benchmark ANTA.""" # Disable logging during ANTA execution to avoid having these function time in benchmarks logging.disable() - def _() -> ResultManager: - manager = ResultManager() - catalog.clear_indexes() - event_loop.run_until_complete(main(manager, inventory, catalog)) - return manager + results = session_results[request.node.callspec.id] - manager = benchmark(_) + @benchmark + def _() -> None: + results.reset() + catalog.clear_indexes() + event_loop.run_until_complete(main(results, inventory, catalog)) logging.disable(logging.NOTSET) - if len(catalog.tests) * len(inventory) != len(manager.results): + if len(catalog.tests) * len(inventory) != len(results.results): # This could mean duplicates exist. # TODO: consider removing this code and refactor unit test data as a dictionary with tuple keys instead of a list seen = set() @@ -74,17 +91,17 @@ def test_anta(benchmark: BenchmarkFixture, event_loop: asyncio.AbstractEventLoop for test in dupes: msg = f"Found duplicate in test catalog: {test}" logger.error(msg) - pytest.fail(f"Expected {len(catalog.tests) * len(inventory)} tests but got {len(manager.results)}", pytrace=False) + pytest.fail(f"Expected {len(catalog.tests) * len(inventory)} tests but got {len(results.results)}", pytrace=False) bench_info = ( "\n--- ANTA NRFU Benchmark Information ---\n" - f"Test results: {len(manager.results)}\n" - f"Success: {manager.get_total_results({AntaTestStatus.SUCCESS})}\n" - f"Failure: {manager.get_total_results({AntaTestStatus.FAILURE})}\n" - f"Skipped: {manager.get_total_results({AntaTestStatus.SKIPPED})}\n" - f"Error: {manager.get_total_results({AntaTestStatus.ERROR})}\n" - f"Unset: {manager.get_total_results({AntaTestStatus.UNSET})}\n" + f"Test results: {len(results.results)}\n" + f"Success: {results.get_total_results({AntaTestStatus.SUCCESS})}\n" + f"Failure: {results.get_total_results({AntaTestStatus.FAILURE})}\n" + f"Skipped: {results.get_total_results({AntaTestStatus.SKIPPED})}\n" + f"Error: {results.get_total_results({AntaTestStatus.ERROR})}\n" + f"Unset: {results.get_total_results({AntaTestStatus.UNSET})}\n" "---------------------------------------" ) logger.info(bench_info) - assert manager.get_total_results({AntaTestStatus.ERROR}) == 0 - assert manager.get_total_results({AntaTestStatus.UNSET}) == 0 + assert results.get_total_results({AntaTestStatus.ERROR}) == 0 + assert results.get_total_results({AntaTestStatus.UNSET}) == 0 diff --git a/tests/benchmark/test_reporter.py b/tests/benchmark/test_reporter.py new file mode 100644 index 0000000..ea74fb5 --- /dev/null +++ b/tests/benchmark/test_reporter.py @@ -0,0 +1,71 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. 
+# Use of this source code is governed by the Apache License 2.0
+# that can be found in the LICENSE file.
+"""Benchmark tests for anta.reporter."""
+
+import json
+import logging
+from pathlib import Path
+
+import pytest
+
+from anta.reporter import ReportJinja, ReportTable
+from anta.reporter.csv_reporter import ReportCsv
+from anta.reporter.md_reporter import MDReportGenerator
+from anta.result_manager import ResultManager
+
+logger = logging.getLogger(__name__)
+
+DATA_DIR: Path = Path(__file__).parents[1].resolve() / "data"
+
+
+@pytest.mark.benchmark
+@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
+def test_table_all(results: ResultManager) -> None:
+    """Benchmark ReportTable.report_all()."""
+    reporter = ReportTable()
+    reporter.report_all(results)
+
+
+@pytest.mark.benchmark
+@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
+def test_table_devices(results: ResultManager) -> None:
+    """Benchmark ReportTable.report_summary_devices()."""
+    reporter = ReportTable()
+    reporter.report_summary_devices(results)
+
+
+@pytest.mark.benchmark
+@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
+def test_table_tests(results: ResultManager) -> None:
+    """Benchmark ReportTable.report_summary_tests()."""
+    reporter = ReportTable()
+    reporter.report_summary_tests(results)
+
+
+@pytest.mark.benchmark
+@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
+def test_json(results: ResultManager) -> None:
+    """Benchmark JSON report."""
+    assert isinstance(results.json, str)
+
+
+@pytest.mark.benchmark
+@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
+def test_jinja(results: ResultManager) -> None:
+    """Benchmark ReportJinja."""
+    assert isinstance(ReportJinja(template_path=DATA_DIR / "template.j2").render(json.loads(results.json)), str)
+
+
+@pytest.mark.benchmark
+@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
+def test_csv(results: ResultManager, tmp_path: Path) -> None:
+    """Benchmark ReportCsv.generate()."""
+    ReportCsv.generate(results=results, csv_filename=tmp_path / "report.csv")
+
+
+@pytest.mark.benchmark
+@pytest.mark.dependency(depends=["anta_benchmark"], scope="package")
+def test_markdown(results: ResultManager, tmp_path: Path) -> None:
+    """Benchmark MDReportGenerator.generate()."""
+    MDReportGenerator.generate(results=results, md_filename=tmp_path / "report.md")
diff --git a/tests/benchmark/test_runner.py b/tests/benchmark/test_runner.py
index b020a85..a8639af 100644
--- a/tests/benchmark/test_runner.py
+++ b/tests/benchmark/test_runner.py
@@ -5,19 +5,21 @@
 
 from __future__ import annotations
 
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Any
 
 from anta.result_manager import ResultManager
 from anta.runner import get_coroutines, prepare_tests
 
 if TYPE_CHECKING:
     from collections import defaultdict
+    from collections.abc import Coroutine
 
     from pytest_codspeed import BenchmarkFixture
 
     from anta.catalog import AntaCatalog, AntaTestDefinition
     from anta.device import AntaDevice
     from anta.inventory import AntaInventory
+    from anta.result_manager.models import TestResult
 
 
 def test_prepare_tests(benchmark: BenchmarkFixture, catalog: AntaCatalog, inventory: AntaInventory) -> None:
@@ -40,9 +42,13 @@ def test_get_coroutines(benchmark: BenchmarkFixture, catalog: AntaCatalog, inven
 
     assert selected_tests is not None
 
-    coroutines = benchmark(lambda: get_coroutines(selected_tests=selected_tests, manager=ResultManager()))
-    for coros in coroutines:
-        coros.close()
+    def 
bench() -> list[Coroutine[Any, Any, TestResult]]: + coros = get_coroutines(selected_tests=selected_tests, manager=ResultManager()) + for c in coros: + c.close() + return coros + + coroutines = benchmark(bench) count = sum(len(tests) for tests in selected_tests.values()) assert count == len(coroutines) diff --git a/tests/data/test_catalog_double_failure.yml b/tests/data/test_catalog_double_failure.yml new file mode 100644 index 0000000..0ce48f8 --- /dev/null +++ b/tests/data/test_catalog_double_failure.yml @@ -0,0 +1,13 @@ +--- +anta.tests.interfaces: + - VerifyInterfacesSpeed: + interfaces: + - name: Ethernet2 + auto: False + speed: 10 + - name: Ethernet3 + auto: True + speed: 100 + - name: Ethernet4 + auto: False + speed: 2.5 diff --git a/tests/data/test_inventory_with_tags.yml b/tests/data/test_inventory_with_tags.yml index cbbcd75..16a9df4 100644 --- a/tests/data/test_inventory_with_tags.yml +++ b/tests/data/test_inventory_with_tags.yml @@ -3,7 +3,7 @@ anta_inventory: hosts: - name: leaf1 host: leaf1.anta.arista.com - tags: ["leaf"] + tags: ["leaf", "dc1"] - name: leaf2 host: leaf2.anta.arista.com tags: ["leaf"] diff --git a/tests/units/anta_tests/conftest.py b/tests/units/anta_tests/conftest.py index 5da7606..5e0c11b 100644 --- a/tests/units/anta_tests/conftest.py +++ b/tests/units/anta_tests/conftest.py @@ -21,7 +21,7 @@ def build_test_id(val: dict[str, Any]) -> str: def pytest_generate_tests(metafunc: pytest.Metafunc) -> None: - """Generate ANTA testts unit tests dynamically during test collection. + """Generate ANTA tests unit tests dynamically during test collection. It will parametrize test cases based on the `DATA` data structure defined in `tests.units.anta_tests` modules. See `tests/units/anta_tests/README.md` for more information on how to use it. diff --git a/tests/units/anta_tests/routing/test_bgp.py b/tests/units/anta_tests/routing/test_bgp.py index e256b04..59a6719 100644 --- a/tests/units/anta_tests/routing/test_bgp.py +++ b/tests/units/anta_tests/routing/test_bgp.py @@ -6,8 +6,11 @@ # pylint: disable=C0302 from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING, Any +import pytest + +from anta.input_models.routing.bgp import BgpAddressFamily from anta.tests.routing.bgp import ( VerifyBGPAdvCommunities, VerifyBGPExchangedRoutes, @@ -24,556 +27,397 @@ from anta.tests.routing.bgp import ( VerifyBGPSpecificPeers, VerifyBGPTimers, VerifyEVPNType2Route, + _check_bgp_neighbor_capability, ) from tests.units.anta_tests import test + +@pytest.mark.parametrize( + ("input_dict", "expected"), + [ + pytest.param({"advertised": True, "received": True, "enabled": True}, True, id="all True"), + pytest.param({"advertised": False, "received": True, "enabled": True}, False, id="advertised False"), + pytest.param({"advertised": True, "received": False, "enabled": True}, False, id="received False"), + pytest.param({"advertised": True, "received": True, "enabled": False}, False, id="enabled False"), + pytest.param({"advertised": True, "received": True}, False, id="missing enabled"), + pytest.param({}, False), + ], +) +def test_check_bgp_neighbor_capability(input_dict: dict[str, bool], expected: bool) -> None: + """Test check_bgp_neighbor_capability.""" + assert _check_bgp_neighbor_capability(input_dict) == expected + + DATA: list[dict[str, Any]] = [ { "name": "success", "test": VerifyBGPPeerCount, "eos_data": [ - # Need to order the output as the commands would be sorted after template rendering. 
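The parametrized cases for _check_bgp_neighbor_capability a few lines above pin down its contract: it returns True only when the advertised, received and enabled flags are all present and all True. A minimal sketch that satisfies those expectations (the real helper lives in anta.tests.routing.bgp; this is an illustration, not its actual code):

def check_bgp_neighbor_capability(capability_status: dict[str, bool]) -> bool:
    """Return True only if the capability is advertised, received and enabled."""
    # Missing keys default to False, which covers the "missing enabled" and empty-dict cases above.
    return all(capability_status.get(state, False) for state in ("advertised", "received", "enabled"))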
{ "vrfs": { "default": { + "vrf": "default", + "routerId": "10.1.0.3", + "asn": "65120", "peers": { - "10.1.255.0": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", + "10.1.0.1": { + "peerState": "Idle", + "peerAsn": "65100", + "ipv4Unicast": {"afiSafiState": "advertised", "nlrisReceived": 0, "nlrisAccepted": 0}, + "l2VpnEvpn": {"afiSafiState": "negotiated", "nlrisReceived": 42, "nlrisAccepted": 42}, }, - "10.1.255.2": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", + "10.1.0.2": { + "peerState": "Idle", + "peerAsn": "65100", + "ipv4Unicast": {"afiSafiState": "advertised", "nlrisReceived": 0, "nlrisAccepted": 0}, + "l2VpnEvpn": {"afiSafiState": "negotiated", "nlrisReceived": 42, "nlrisAccepted": 42}, }, }, }, - }, - }, - { - "vrfs": { - "MGMT": { + "DEV": { + "vrf": "DEV", + "routerId": "10.1.0.3", + "asn": "65120", "peers": { - "10.255.0.21": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, + "10.1.254.1": { + "peerState": "Idle", + "peerAsn": "65120", + "ipv4Unicast": {"afiSafiState": "negotiated", "nlrisReceived": 4, "nlrisAccepted": 4}, + } }, }, - }, + } }, + ], + "inputs": { + "address_families": [ + {"afi": "evpn", "num_peers": 2}, + {"afi": "ipv4", "safi": "unicast", "vrf": "default", "num_peers": 2}, + {"afi": "ipv4", "safi": "unicast", "vrf": "DEV", "num_peers": 1}, + ] + }, + "expected": {"result": "success"}, + }, + { + "name": "success-peer-state-check-true", + "test": VerifyBGPPeerCount, + "eos_data": [ { "vrfs": { "default": { + "vrf": "default", + "routerId": "10.1.0.3", + "asn": "65120", "peers": { - "10.255.0.1": { - "inMsgQueue": 0, - "outMsgQueue": 0, + "10.1.0.1": { "peerState": "Established", + "peerAsn": "65100", + "ipv4MplsVpn": {"afiSafiState": "advertised", "nlrisReceived": 0, "nlrisAccepted": 0}, + "l2VpnEvpn": {"afiSafiState": "negotiated", "nlrisReceived": 42, "nlrisAccepted": 42}, }, - "10.255.0.2": { - "description": "DC1-SPINE2_Ethernet1", - "inMsgQueue": 0, - "outMsgQueue": 0, + "10.1.0.2": { "peerState": "Established", + "peerAsn": "65100", + "ipv4MplsVpn": {"afiSafiState": "advertised", "nlrisReceived": 0, "nlrisAccepted": 0}, + "l2VpnEvpn": {"afiSafiState": "negotiated", "nlrisReceived": 42, "nlrisAccepted": 42}, }, - }, - }, - }, - }, - { - "vrfs": { - "default": { - "peers": { - "10.255.0.11": { - "inMsgQueue": 0, - "outMsgQueue": 0, + "10.1.254.1": { "peerState": "Established", + "peerAsn": "65120", + "ipv4Unicast": {"afiSafiState": "negotiated", "nlrisReceived": 17, "nlrisAccepted": 17}, }, - "10.255.0.12": { - "description": "DC1-SPINE2_Ethernet1", - "inMsgQueue": 0, - "outMsgQueue": 0, + "10.1.255.0": { "peerState": "Established", + "peerAsn": "65100", + "ipv4Unicast": {"afiSafiState": "negotiated", "nlrisReceived": 14, "nlrisAccepted": 14}, + }, + "10.1.255.2": { + "peerState": "Established", + "peerAsn": "65100", + "ipv4Unicast": {"afiSafiState": "negotiated", "nlrisReceived": 14, "nlrisAccepted": 14}, }, }, }, - }, - }, - { - "vrfs": { - "default": { + "DEV": { + "vrf": "DEV", + "routerId": "10.1.0.3", + "asn": "65120", "peers": { - "10.255.0.21": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "10.255.0.22": { - "inMsgQueue": 0, - "outMsgQueue": 0, + "10.1.254.1": { "peerState": "Established", - }, + "peerAsn": "65120", + "ipv4Unicast": {"afiSafiState": "negotiated", "nlrisReceived": 4, "nlrisAccepted": 4}, + } }, }, - }, + } }, ], "inputs": { "address_families": [ - # evpn first to make sure that the correct mapping output to input is kept. 
- {"afi": "evpn", "num_peers": 2}, - {"afi": "ipv4", "safi": "unicast", "vrf": "default", "num_peers": 2}, - {"afi": "ipv4", "safi": "sr-te", "vrf": "MGMT", "num_peers": 1}, - {"afi": "link-state", "num_peers": 2}, - {"afi": "path-selection", "num_peers": 2}, + {"afi": "evpn", "num_peers": 2, "check_peer_state": True}, + {"afi": "ipv4", "safi": "unicast", "vrf": "default", "num_peers": 3, "check_peer_state": True}, + {"afi": "ipv4", "safi": "unicast", "vrf": "DEV", "num_peers": 1, "check_peer_state": True}, ] }, "expected": {"result": "success"}, }, { - "name": "failure-wrong-count", + "name": "failure-vrf-not-configured", "test": VerifyBGPPeerCount, "eos_data": [ { "vrfs": { "default": { + "vrf": "default", + "routerId": "10.1.0.3", + "asn": "65120", "peers": { - "10.1.255.0": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "10.1.255.2": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - }, - }, - }, - }, - { - "vrfs": { - "MGMT": { - "peers": { - "10.255.0.21": { - "inMsgQueue": 0, - "outMsgQueue": 0, + "10.1.0.1": { "peerState": "Established", + "peerAsn": "65100", + "ipv4MplsVpn": {"afiSafiState": "advertised", "nlrisReceived": 0, "nlrisAccepted": 0}, + "l2VpnEvpn": {"afiSafiState": "negotiated", "nlrisReceived": 42, "nlrisAccepted": 42}, }, - }, - }, - }, - }, - { - "vrfs": { - "default": { - "peers": { - "10.255.0.1": { - "inMsgQueue": 0, - "outMsgQueue": 0, + "10.1.0.2": { "peerState": "Established", + "peerAsn": "65100", + "ipv4MplsVpn": {"afiSafiState": "advertised", "nlrisReceived": 0, "nlrisAccepted": 0}, + "l2VpnEvpn": {"afiSafiState": "negotiated", "nlrisReceived": 42, "nlrisAccepted": 42}, }, - "10.255.0.2": { - "description": "DC1-SPINE2_Ethernet1", - "inMsgQueue": 0, - "outMsgQueue": 0, + "10.1.254.1": { "peerState": "Established", + "peerAsn": "65120", + "ipv4Unicast": {"afiSafiState": "negotiated", "nlrisReceived": 17, "nlrisAccepted": 17}, }, - }, - }, - }, - }, - { - "vrfs": { - "default": { - "peers": { - "10.255.0.11": { - "inMsgQueue": 0, - "outMsgQueue": 0, + "10.1.255.0": { "peerState": "Established", + "peerAsn": "65100", + "ipv4Unicast": {"afiSafiState": "negotiated", "nlrisReceived": 14, "nlrisAccepted": 14}, }, - "10.255.0.12": { - "description": "DC1-SPINE2_Ethernet1", - "inMsgQueue": 0, - "outMsgQueue": 0, + "10.1.255.2": { "peerState": "Established", + "peerAsn": "65100", + "ipv4Unicast": {"afiSafiState": "negotiated", "nlrisReceived": 14, "nlrisAccepted": 14}, }, }, }, - }, - }, - { - "vrfs": { - "default": { + "DEV": { + "vrf": "DEV", + "routerId": "10.1.0.3", + "asn": "65120", "peers": { - "10.255.0.21": { - "inMsgQueue": 0, - "outMsgQueue": 0, + "10.1.254.1": { "peerState": "Established", - }, + "peerAsn": "65120", + "ipv4Unicast": {"afiSafiState": "negotiated", "nlrisReceived": 4, "nlrisAccepted": 4}, + } }, }, - }, - }, - ], - "inputs": { - "address_families": [ - {"afi": "ipv4", "safi": "unicast", "vrf": "default", "num_peers": 3}, - {"afi": "ipv4", "safi": "sr-te", "vrf": "MGMT", "num_peers": 2}, - {"afi": "evpn", "num_peers": 1}, - {"afi": "link-state", "num_peers": 3}, - {"afi": "path-selection", "num_peers": 3}, - ] - }, - "expected": { - "result": "failure", - "messages": [ - "Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'default': 'Expected: 3, Actual: 2'}}, " - "{'afi': 'ipv4', 'safi': 'sr-te', 'vrfs': {'MGMT': 'Expected: 2, Actual: 1'}}, " - "{'afi': 'evpn', 'vrfs': {'default': 'Expected: 1, Actual: 2'}}, " - "{'afi': 'link-state', 'vrfs': {'default': 'Expected: 3, Actual: 
2'}}, " - "{'afi': 'path-selection', 'vrfs': {'default': 'Expected: 3, Actual: 1'}}]" - ], - }, - }, - { - "name": "failure-no-peers", - "test": VerifyBGPPeerCount, - "eos_data": [ - { - "vrfs": { - "default": { - "peers": {}, - } - } - }, - { - "vrfs": { - "MGMT": { - "peers": {}, - } - } - }, - { - "vrfs": { - "default": { - "peers": {}, - } - } - }, - { - "vrfs": { - "default": { - "peers": {}, - } - } - }, - { - "vrfs": { - "default": { - "peers": {}, - } } }, ], "inputs": { "address_families": [ - {"afi": "ipv4", "safi": "unicast", "vrf": "default", "num_peers": 2}, - {"afi": "ipv4", "safi": "sr-te", "vrf": "MGMT", "num_peers": 1}, - {"afi": "evpn", "num_peers": 2}, - {"afi": "link-state", "num_peers": 2}, - {"afi": "path-selection", "num_peers": 2}, + {"afi": "evpn", "num_peers": 2, "check_peer_state": True}, + {"afi": "ipv4", "safi": "unicast", "vrf": "default", "num_peers": 3, "check_peer_state": True}, + {"afi": "ipv4", "safi": "unicast", "vrf": "PROD", "num_peers": 2, "check_peer_state": True}, ] }, "expected": { "result": "failure", "messages": [ - "Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'default': 'Expected: 2, Actual: 0'}}, " - "{'afi': 'ipv4', 'safi': 'sr-te', 'vrfs': {'MGMT': 'Expected: 1, Actual: 0'}}, " - "{'afi': 'evpn', 'vrfs': {'default': 'Expected: 2, Actual: 0'}}, " - "{'afi': 'link-state', 'vrfs': {'default': 'Expected: 2, Actual: 0'}}, " - "{'afi': 'path-selection', 'vrfs': {'default': 'Expected: 2, Actual: 0'}}]" + "AFI: ipv4 SAFI: unicast VRF: PROD - VRF not configured", ], }, }, { - "name": "failure-not-configured", - "test": VerifyBGPPeerCount, - "eos_data": [{"vrfs": {}}, {"vrfs": {}}, {"vrfs": {}}, {"vrfs": {}}, {"vrfs": {}}], - "inputs": { - "address_families": [ - {"afi": "ipv6", "safi": "multicast", "vrf": "DEV", "num_peers": 3}, - {"afi": "ipv4", "safi": "sr-te", "vrf": "MGMT", "num_peers": 1}, - {"afi": "evpn", "num_peers": 2}, - {"afi": "link-state", "num_peers": 2}, - {"afi": "path-selection", "num_peers": 2}, - ] - }, - "expected": { - "result": "failure", - "messages": [ - "Failures: [{'afi': 'ipv6', 'safi': 'multicast', 'vrfs': {'DEV': 'Not Configured'}}, " - "{'afi': 'ipv4', 'safi': 'sr-te', 'vrfs': {'MGMT': 'Not Configured'}}, " - "{'afi': 'evpn', 'vrfs': {'default': 'Not Configured'}}, " - "{'afi': 'link-state', 'vrfs': {'default': 'Not Configured'}}, " - "{'afi': 'path-selection', 'vrfs': {'default': 'Not Configured'}}]" - ], - }, - }, - { - "name": "success-vrf-all", + "name": "failure-peer-state-check-true", "test": VerifyBGPPeerCount, "eos_data": [ { "vrfs": { "default": { + "vrf": "default", + "routerId": "10.1.0.3", + "asn": "65120", "peers": { - "10.1.255.0": { - "inMsgQueue": 0, - "outMsgQueue": 0, + "10.1.0.1": { "peerState": "Established", + "peerAsn": "65100", + "ipv4MplsVpn": {"afiSafiState": "advertised", "nlrisReceived": 0, "nlrisAccepted": 0}, + "l2VpnEvpn": {"afiSafiState": "negotiated", "nlrisReceived": 42, "nlrisAccepted": 42}, + }, + "10.1.0.2": { + "peerState": "Established", + "peerAsn": "65100", + "ipv4MplsVpn": {"afiSafiState": "advertised", "nlrisReceived": 0, "nlrisAccepted": 0}, + "l2VpnEvpn": {"afiSafiState": "negotiated", "nlrisReceived": 42, "nlrisAccepted": 42}, }, - }, - }, - "PROD": { - "peers": { "10.1.254.1": { - "inMsgQueue": 0, - "outMsgQueue": 0, "peerState": "Established", + "peerAsn": "65120", + "ipv4Unicast": {"afiSafiState": "negotiated", "nlrisReceived": 17, "nlrisAccepted": 17}, }, - "192.168.1.11": { - "inMsgQueue": 0, - "outMsgQueue": 0, + "10.1.255.0": { "peerState": "Established", + 
"peerAsn": "65100", + "ipv4Unicast": {"afiSafiState": "negotiated", "nlrisReceived": 14, "nlrisAccepted": 14}, }, - }, - }, - }, - }, - { - "vrfs": { - "default": { - "peers": { - "10.1.255.10": { - "inMsgQueue": 0, - "outMsgQueue": 0, + "10.1.255.2": { "peerState": "Established", + "peerAsn": "65100", + "ipv4Unicast": {"afiSafiState": "negotiated", "nlrisReceived": 14, "nlrisAccepted": 14}, }, }, }, - "PROD": { + "DEV": { + "vrf": "DEV", + "routerId": "10.1.0.3", + "asn": "65120", "peers": { - "10.1.254.11": { - "inMsgQueue": 0, - "outMsgQueue": 0, + "10.1.254.1": { "peerState": "Established", - }, + "peerAsn": "65120", + "ipv4Unicast": {"afiSafiState": "negotiated", "nlrisReceived": 4, "nlrisAccepted": 4}, + } }, }, - }, + } }, ], "inputs": { "address_families": [ - {"afi": "ipv4", "safi": "unicast", "vrf": "all", "num_peers": 3}, - {"afi": "ipv4", "safi": "sr-te", "vrf": "all", "num_peers": 2}, + {"afi": "evpn", "num_peers": 2, "check_peer_state": True}, + {"afi": "vpn-ipv4", "num_peers": 2, "check_peer_state": True}, + {"afi": "ipv4", "safi": "unicast", "vrf": "default", "num_peers": 3, "check_peer_state": True}, + {"afi": "ipv4", "safi": "unicast", "vrf": "DEV", "num_peers": 1, "check_peer_state": True}, ] }, - "expected": {"result": "success"}, + "expected": { + "result": "failure", + "messages": [ + "AFI: vpn-ipv4 - Expected: 2, Actual: 0", + ], + }, }, { - "name": "failure-vrf-all", + "name": "failure-wrong-count-peer-state-check-true", "test": VerifyBGPPeerCount, "eos_data": [ { "vrfs": { "default": { + "vrf": "default", + "routerId": "10.1.0.3", + "asn": "65120", "peers": { - "10.1.255.0": { - "inMsgQueue": 0, - "outMsgQueue": 0, + "10.1.0.1": { "peerState": "Established", + "peerAsn": "65100", + "ipv4MplsVpn": {"afiSafiState": "advertised", "nlrisReceived": 0, "nlrisAccepted": 0}, + "l2VpnEvpn": {"afiSafiState": "negotiated", "nlrisReceived": 42, "nlrisAccepted": 42}, + }, + "10.1.0.2": { + "peerState": "Established", + "peerAsn": "65100", + "ipv4MplsVpn": {"afiSafiState": "advertised", "nlrisReceived": 0, "nlrisAccepted": 0}, + "l2VpnEvpn": {"afiSafiState": "negotiated", "nlrisReceived": 42, "nlrisAccepted": 42}, }, - }, - }, - "PROD": { - "peers": { "10.1.254.1": { - "inMsgQueue": 0, - "outMsgQueue": 0, "peerState": "Established", + "peerAsn": "65120", + "ipv4Unicast": {"afiSafiState": "negotiated", "nlrisReceived": 17, "nlrisAccepted": 17}, }, - "192.168.1.11": { - "inMsgQueue": 0, - "outMsgQueue": 0, + "10.1.255.0": { "peerState": "Established", + "peerAsn": "65100", + "ipv4Unicast": {"afiSafiState": "negotiated", "nlrisReceived": 14, "nlrisAccepted": 14}, }, - }, - }, - }, - }, - { - "vrfs": { - "default": { - "peers": { - "10.1.255.10": { - "inMsgQueue": 0, - "outMsgQueue": 0, + "10.1.255.2": { "peerState": "Established", + "peerAsn": "65100", + "ipv4Unicast": {"afiSafiState": "negotiated", "nlrisReceived": 14, "nlrisAccepted": 14}, }, }, }, - "PROD": { + "DEV": { + "vrf": "DEV", + "routerId": "10.1.0.3", + "asn": "65120", "peers": { "10.1.254.1": { - "inMsgQueue": 0, - "outMsgQueue": 0, "peerState": "Established", - }, - "192.168.1.12": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, + "peerAsn": "65120", + "ipv4Unicast": {"afiSafiState": "negotiated", "nlrisReceived": 4, "nlrisAccepted": 4}, + } }, }, - }, + } }, ], "inputs": { "address_families": [ - {"afi": "ipv4", "safi": "unicast", "vrf": "all", "num_peers": 5}, - {"afi": "ipv4", "safi": "sr-te", "vrf": "all", "num_peers": 2}, + {"afi": "evpn", "num_peers": 3, "check_peer_state": True}, + 
{"afi": "ipv4", "safi": "unicast", "vrf": "default", "num_peers": 3, "check_peer_state": True}, + {"afi": "ipv4", "safi": "unicast", "vrf": "DEV", "num_peers": 2, "check_peer_state": True}, ] }, "expected": { "result": "failure", "messages": [ - "Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'all': 'Expected: 5, Actual: 3'}}, " - "{'afi': 'ipv4', 'safi': 'sr-te', 'vrfs': {'all': 'Expected: 2, Actual: 3'}}]" + "AFI: evpn - Expected: 3, Actual: 2", + "AFI: ipv4 SAFI: unicast VRF: DEV - Expected: 2, Actual: 1", ], }, }, { - "name": "failure-multiple-afi", + "name": "failure-wrong-count", "test": VerifyBGPPeerCount, "eos_data": [ { "vrfs": { - "PROD": { - "peers": { - "10.1.254.1": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "192.168.1.11": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - }, - }, - }, - }, - {"vrfs": {}}, - { - "vrfs": { - "MGMT": { - "peers": { - "10.1.254.11": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "192.168.1.21": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - }, - }, - }, - }, - { - "vrfs": { "default": { + "vrf": "default", + "routerId": "10.1.0.3", + "asn": "65120", "peers": { "10.1.0.1": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "10.1.0.2": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - }, - }, - }, - }, - { - "vrfs": { - "default": { - "peers": { - "10.1.0.11": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "10.1.0.21": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", + "peerState": "Idle", + "peerAsn": "65100", + "ipv4Unicast": {"afiSafiState": "advertised", "nlrisReceived": 0, "nlrisAccepted": 0}, + "l2VpnEvpn": {"afiSafiState": "negotiated", "nlrisReceived": 42, "nlrisAccepted": 42}, }, }, }, - }, - }, - { - "vrfs": { - "default": { + "DEV": { + "vrf": "DEV", + "routerId": "10.1.0.3", + "asn": "65120", "peers": { - "10.1.0.2": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "10.1.0.22": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, + "10.1.254.1": { + "peerState": "Idle", + "peerAsn": "65120", + "ipv4Unicast": {"afiSafiState": "negotiated", "nlrisReceived": 4, "nlrisAccepted": 4}, + } }, }, - }, + } }, ], "inputs": { "address_families": [ - {"afi": "ipv4", "safi": "unicast", "vrf": "PROD", "num_peers": 3}, - {"afi": "ipv6", "safi": "unicast", "vrf": "default", "num_peers": 3}, - {"afi": "ipv4", "safi": "sr-te", "vrf": "MGMT", "num_peers": 3}, - {"afi": "evpn", "num_peers": 3}, - {"afi": "link-state", "num_peers": 4}, - {"afi": "path-selection", "num_peers": 1}, - ], + {"afi": "evpn", "num_peers": 2}, + {"afi": "ipv4", "safi": "unicast", "vrf": "default", "num_peers": 2}, + {"afi": "ipv4", "safi": "unicast", "vrf": "DEV", "num_peers": 2}, + ] }, "expected": { "result": "failure", "messages": [ - "Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'PROD': 'Expected: 3, Actual: 2'}}, " - "{'afi': 'ipv6', 'safi': 'unicast', 'vrfs': {'default': 'Not Configured'}}, " - "{'afi': 'ipv4', 'safi': 'sr-te', 'vrfs': {'MGMT': 'Expected: 3, Actual: 2'}}, " - "{'afi': 'evpn', 'vrfs': {'default': 'Expected: 3, Actual: 2'}}, " - "{'afi': 'link-state', 'vrfs': {'default': 'Expected: 4, Actual: 2'}}, " - "{'afi': 'path-selection', 'vrfs': {'default': 'Expected: 1, Actual: 2'}}]", + "AFI: evpn - Expected: 2, Actual: 1", + "AFI: ipv4 SAFI: unicast VRF: default - 
Expected: 2, Actual: 1", + "AFI: ipv4 SAFI: unicast VRF: DEV - Expected: 2, Actual: 1", ], }, }, @@ -584,163 +428,127 @@ DATA: list[dict[str, Any]] = [ { "vrfs": { "default": { - "peers": { - "10.1.255.0": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "10.1.255.2": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - }, - } - } - }, - { - "vrfs": { - "MGMT": { - "peers": { - "10.1.255.10": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "10.1.255.12": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", + "peerList": [ + { + "peerAddress": "10.100.0.12", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": True, "received": True, "enabled": True}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, - }, - } - } - }, - { - "vrfs": { - "default": { - "peers": { - "10.1.255.20": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", + { + "peerAddress": "10.100.0.13", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"l2VpnEvpn": {"advertised": True, "received": True, "enabled": True}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, - "10.1.255.22": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", + ] + }, + "DEV": { + "peerList": [ + { + "peerAddress": "10.100.0.12", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": True, "received": True, "enabled": True}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, - }, - } + ] + }, } - }, + } + ], + "inputs": { + "address_families": [ + {"afi": "evpn"}, + {"afi": "ipv4", "safi": "unicast", "vrf": "default"}, + {"afi": "ipv4", "safi": "unicast", "vrf": "DEV"}, + ] + }, + "expected": {"result": "success"}, + }, + { + "name": "failure-vrf-not-configured", + "test": VerifyBGPPeersHealth, + "eos_data": [ { - "vrfs": { - "default": { - "peers": { - "10.1.255.30": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "10.1.255.32": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - }, - } - } - }, + "vrfs": {}, + } ], "inputs": { "address_families": [ - # Path selection first to make sure input to output mapping is correct. 
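Read together, the new failure messages in this file suggest the per-peer health check proceeds in stages: VRF present, peers present, session established, AFI/SAFI negotiated, and finally empty TCP message queues. A rough sketch of that per-peer flow (the helper name and the single-capability lookup are simplifications for illustration, not ANTA's internals):

def peer_health_issues(peer: dict) -> list[str]:
    """Collect issues for one BGP peer, in the same order as the messages in the test data."""
    issues: list[str] = []
    if peer.get("state") != "Established":
        issues.append(f"Session state is not established - State: {peer.get('state')}")
        return issues  # the remaining checks only make sense for an established session
    # The real test looks up the capability matching the AFI/SAFI under test; taking the first
    # multiprotocol capability keeps this sketch short.
    caps = next(iter(peer.get("neighborCapabilities", {}).get("multiprotocolCaps", {}).values()), {})
    if not all(caps.get(flag, False) for flag in ("advertised", "received", "enabled")):
        issues.append("AFI/SAFI state is not negotiated")
    tcp = peer.get("peerTcpInfo", {})
    if tcp.get("inputQueueLength", 0) or tcp.get("outputQueueLength", 0):
        issues.append(f"Session has non-empty message queues - InQ: {tcp['inputQueueLength']}, OutQ: {tcp['outputQueueLength']}")
    return issues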
+ {"afi": "ipv4", "safi": "unicast", "vrf": "default"}, + {"afi": "ipv4", "safi": "sr-te", "vrf": "MGMT"}, {"afi": "path-selection"}, + {"afi": "link-state"}, + ] + }, + "expected": { + "result": "failure", + "messages": [ + "AFI: ipv4 SAFI: unicast VRF: default - VRF not configured", + "AFI: ipv4 SAFI: sr-te VRF: MGMT - VRF not configured", + "AFI: path-selection - VRF not configured", + "AFI: link-state - VRF not configured", + ], + }, + }, + { + "name": "failure-peer-not-found", + "test": VerifyBGPPeersHealth, + "eos_data": [{"vrfs": {"default": {"peerList": []}, "MGMT": {"peerList": []}}}], + "inputs": { + "address_families": [ {"afi": "ipv4", "safi": "unicast", "vrf": "default"}, {"afi": "ipv4", "safi": "sr-te", "vrf": "MGMT"}, + {"afi": "path-selection"}, {"afi": "link-state"}, ] }, - "expected": {"result": "success"}, + "expected": { + "result": "failure", + "messages": [ + "AFI: ipv4 SAFI: unicast VRF: default - No peers found", + "AFI: ipv4 SAFI: sr-te VRF: MGMT - No peers found", + "AFI: path-selection - No peers found", + "AFI: link-state - No peers found", + ], + }, }, { - "name": "failure-issues", + "name": "failure-session-not-established", "test": VerifyBGPPeersHealth, "eos_data": [ { "vrfs": { "default": { - "peers": { - "10.1.255.0": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Idle", - }, - "10.1.255.2": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - }, - } - } - }, - { - "vrfs": { - "MGMT": { - "peers": { - "10.1.255.10": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "10.1.255.12": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Idle", - }, - }, - } - } - }, - { - "vrfs": { - "default": { - "peers": { - "10.1.255.20": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Idle", + "peerList": [ + { + "peerAddress": "10.100.0.12", + "state": "Idle", + "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": True, "received": True, "enabled": True}}}, }, - "10.1.255.22": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", + { + "peerAddress": "10.100.0.13", + "state": "Idle", + "neighborCapabilities": {"multiprotocolCaps": {"dps": {"advertised": True, "received": True, "enabled": True}}}, }, - }, - } - } - }, - { - "vrfs": { - "default": { - "peers": { - "10.1.255.30": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", + { + "peerAddress": "10.100.0.14", + "state": "Active", + "neighborCapabilities": {"multiprotocolCaps": {"linkState": {"advertised": True, "received": True, "enabled": True}}}, }, - "10.1.255.32": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Idle", + ] + }, + "MGMT": { + "peerList": [ + { + "peerAddress": "10.100.0.12", + "state": "Active", + "neighborCapabilities": {"multiprotocolCaps": {"ipv4SrTe": {"advertised": True, "received": True, "enabled": True}}}, }, - }, - } + ] + }, } - }, + } ], "inputs": { "address_families": [ @@ -753,559 +561,411 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'default': {'10.1.255.0': {'peerState': 'Idle', 'inMsgQueue': 0, 'outMsgQueue': 0}}}}, " - "{'afi': 'ipv4', 'safi': 'sr-te', 'vrfs': {'MGMT': {'10.1.255.12': {'peerState': 'Idle', 'inMsgQueue': 0, 'outMsgQueue': 0}}}}, " - "{'afi': 'path-selection', 'vrfs': {'default': {'10.1.255.20': {'peerState': 'Idle', 'inMsgQueue': 0, 'outMsgQueue': 0}}}}, " - "{'afi': 'link-state', 'vrfs': {'default': {'10.1.255.32': {'peerState': 
'Idle', 'inMsgQueue': 0, 'outMsgQueue': 0}}}}]" + "AFI: ipv4 SAFI: unicast VRF: default Peer: 10.100.0.12 - Session state is not established - State: Idle", + "AFI: ipv4 SAFI: sr-te VRF: MGMT Peer: 10.100.0.12 - Session state is not established - State: Active", + "AFI: path-selection Peer: 10.100.0.13 - Session state is not established - State: Idle", + "AFI: link-state Peer: 10.100.0.14 - Session state is not established - State: Active", ], }, }, { - "name": "success-vrf-all", + "name": "failure-afi-not-negotiated", "test": VerifyBGPPeersHealth, "eos_data": [ { "vrfs": { "default": { - "peers": { - "10.1.255.0": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", + "peerList": [ + { + "peerAddress": "10.100.0.12", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": False, "received": False, "enabled": True}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, - "10.1.255.2": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", + { + "peerAddress": "10.100.0.13", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"dps": {"advertised": True, "received": False, "enabled": False}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, - }, - }, - "PROD": { - "peers": { - "10.1.254.1": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", + { + "peerAddress": "10.100.0.14", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"linkState": {"advertised": False, "received": False, "enabled": False}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, - "192.168.1.11": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", + ] + }, + "MGMT": { + "peerList": [ + { + "peerAddress": "10.100.0.12", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"ipv4SrTe": {"advertised": False, "received": False, "enabled": False}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, - }, + ] }, } - }, + } + ], + "inputs": { + "address_families": [ + {"afi": "ipv4", "safi": "unicast", "vrf": "default"}, + {"afi": "ipv4", "safi": "sr-te", "vrf": "MGMT"}, + {"afi": "path-selection"}, + {"afi": "link-state"}, + ] + }, + "expected": { + "result": "failure", + "messages": [ + "AFI: ipv4 SAFI: unicast VRF: default Peer: 10.100.0.12 - AFI/SAFI state is not negotiated - Advertised: False, Received: False, Enabled: True", + "AFI: ipv4 SAFI: sr-te VRF: MGMT Peer: 10.100.0.12 - AFI/SAFI state is not negotiated - Advertised: False, Received: False, Enabled: False", + "AFI: path-selection Peer: 10.100.0.13 - AFI/SAFI state is not negotiated - Advertised: True, Received: False, Enabled: False", + "AFI: link-state Peer: 10.100.0.14 - AFI/SAFI state is not negotiated - Advertised: False, Received: False, Enabled: False", + ], + }, + }, + { + "name": "failure-tcp-queues", + "test": VerifyBGPPeersHealth, + "eos_data": [ { "vrfs": { "default": { - "peers": { - "10.1.255.10": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", + "peerList": [ + { + "peerAddress": "10.100.0.12", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": True, "received": True, "enabled": True}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 4, "inputQueueLength": 2}, }, - "10.1.255.12": { - "inMsgQueue": 0, - 
"outMsgQueue": 0, - "peerState": "Established", + { + "peerAddress": "10.100.0.13", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"dps": {"advertised": True, "received": True, "enabled": True}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 1, "inputQueueLength": 1}, }, - }, - }, - "PROD": { - "peers": { - "10.1.254.11": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", + { + "peerAddress": "10.100.0.14", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"linkState": {"advertised": True, "received": True, "enabled": True}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 2, "inputQueueLength": 3}, }, - "192.168.1.111": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", + ] + }, + "MGMT": { + "peerList": [ + { + "peerAddress": "10.100.0.12", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"ipv4SrTe": {"advertised": True, "received": True, "enabled": True}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 1, "inputQueueLength": 5}, }, - }, + ] }, } - }, + } ], "inputs": { "address_families": [ - {"afi": "ipv4", "safi": "unicast", "vrf": "all"}, - {"afi": "ipv4", "safi": "sr-te", "vrf": "all"}, + {"afi": "ipv4", "safi": "unicast", "vrf": "default"}, + {"afi": "ipv4", "safi": "sr-te", "vrf": "MGMT"}, + {"afi": "path-selection"}, + {"afi": "link-state"}, ] }, "expected": { - "result": "success", + "result": "failure", + "messages": [ + "AFI: ipv4 SAFI: unicast VRF: default Peer: 10.100.0.12 - Session has non-empty message queues - InQ: 2, OutQ: 4", + "AFI: ipv4 SAFI: sr-te VRF: MGMT Peer: 10.100.0.12 - Session has non-empty message queues - InQ: 5, OutQ: 1", + "AFI: path-selection Peer: 10.100.0.13 - Session has non-empty message queues - InQ: 1, OutQ: 1", + "AFI: link-state Peer: 10.100.0.14 - Session has non-empty message queues - InQ: 3, OutQ: 2", + ], }, }, { - "name": "failure-issues-vrf-all", - "test": VerifyBGPPeersHealth, + "name": "success", + "test": VerifyBGPSpecificPeers, "eos_data": [ { "vrfs": { "default": { - "peers": { - "10.1.255.0": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Idle", + "peerList": [ + { + "peerAddress": "10.100.0.12", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": True, "received": True, "enabled": True}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, - "10.1.255.2": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", + { + "peerAddress": "10.100.0.13", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"l2VpnEvpn": {"advertised": True, "received": True, "enabled": True}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, - }, + ] }, - "PROD": { - "peers": { - "10.1.254.1": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "192.168.1.11": { - "inMsgQueue": 100, - "outMsgQueue": 200, - "peerState": "Established", + "MGMT": { + "peerList": [ + { + "peerAddress": "10.100.0.14", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": True, "received": True, "enabled": True}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, - }, + ] }, } - }, + } + ], + "inputs": { + "address_families": [ + {"afi": "ipv4", "safi": "unicast", "peers": ["10.100.0.12"]}, + {"afi": "evpn", 
"peers": ["10.100.0.13"]}, + {"afi": "ipv4", "safi": "unicast", "vrf": "MGMT", "peers": ["10.100.0.14"]}, + ] + }, + "expected": {"result": "success"}, + }, + { + "name": "failure-peer-not-configured", + "test": VerifyBGPSpecificPeers, + "eos_data": [ { "vrfs": { "default": { - "peers": { - "10.1.255.10": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Idle", - }, - "10.1.255.12": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - }, + "peerList": [ + { + "peerAddress": "10.100.0.20", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"l2VpnEvpn": {"advertised": True, "received": True, "enabled": True}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, + } + ] }, - "PROD": { - "peers": { - "10.1.254.11": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "192.168.1.111": { - "inMsgQueue": 100, - "outMsgQueue": 200, - "peerState": "Established", + "MGMT": { + "peerList": [ + { + "peerAddress": "10.100.0.10", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": True, "received": True, "enabled": True}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, - }, + ] }, } - }, + } ], "inputs": { "address_families": [ - {"afi": "ipv4", "safi": "unicast", "vrf": "all"}, - {"afi": "ipv4", "safi": "sr-te", "vrf": "all"}, + {"afi": "ipv4", "safi": "unicast", "peers": ["10.100.0.12"]}, + {"afi": "evpn", "peers": ["10.100.0.13"]}, + {"afi": "ipv4", "safi": "unicast", "vrf": "MGMT", "peers": ["10.100.0.14"]}, ] }, "expected": { "result": "failure", "messages": [ - "Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'default': {'10.1.255.0': {'peerState': 'Idle', 'inMsgQueue': 0, 'outMsgQueue': 0}}, " - "'PROD': {'192.168.1.11': {'peerState': 'Established', 'inMsgQueue': 100, 'outMsgQueue': 200}}}}, " - "{'afi': 'ipv4', 'safi': 'sr-te', 'vrfs': {'default': {'10.1.255.10': {'peerState': 'Idle', 'inMsgQueue': 0, 'outMsgQueue': 0}}, " - "'PROD': {'192.168.1.111': {'peerState': 'Established', 'inMsgQueue': 100, 'outMsgQueue': 200}}}}]" + "AFI: ipv4 SAFI: unicast VRF: default Peer: 10.100.0.12 - Not configured", + "AFI: evpn Peer: 10.100.0.13 - Not configured", + "AFI: ipv4 SAFI: unicast VRF: MGMT Peer: 10.100.0.14 - Not configured", ], }, }, { - "name": "failure-not-configured", - "test": VerifyBGPPeersHealth, - "eos_data": [{"vrfs": {}}, {"vrfs": {}}, {"vrfs": {}}, {"vrfs": {}}], + "name": "failure-vrf-not-configured", + "test": VerifyBGPSpecificPeers, + "eos_data": [ + { + "vrfs": {}, + } + ], "inputs": { "address_families": [ - {"afi": "ipv4", "safi": "unicast", "vrf": "DEV"}, - {"afi": "ipv4", "safi": "sr-te", "vrf": "MGMT"}, - {"afi": "link-state"}, - {"afi": "path-selection"}, + {"afi": "ipv4", "safi": "unicast", "peers": ["10.100.0.12"]}, + {"afi": "evpn", "peers": ["10.100.0.13"]}, + {"afi": "ipv4", "safi": "unicast", "vrf": "MGMT", "peers": ["10.100.0.14"]}, ] }, "expected": { "result": "failure", "messages": [ - "Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'DEV': 'Not Configured'}}, " - "{'afi': 'ipv4', 'safi': 'sr-te', 'vrfs': {'MGMT': 'Not Configured'}}, " - "{'afi': 'link-state', 'vrfs': {'default': 'Not Configured'}}, " - "{'afi': 'path-selection', 'vrfs': {'default': 'Not Configured'}}]" + "AFI: ipv4 SAFI: unicast VRF: default - VRF not configured", + "AFI: evpn - VRF not configured", + "AFI: ipv4 SAFI: unicast VRF: MGMT - VRF not configured", ], }, }, { - 
"name": "failure-no-peers", - "test": VerifyBGPPeersHealth, + "name": "failure-session-not-established", + "test": VerifyBGPSpecificPeers, "eos_data": [ { "vrfs": { "default": { - "vrf": "default", - "routerId": "10.1.0.3", - "asn": "65120", - "peers": {}, - } - } - }, - { - "vrfs": { + "peerList": [ + { + "peerAddress": "10.100.0.12", + "state": "Idle", + "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": True, "received": True, "enabled": True}}}, + } + ] + }, "MGMT": { - "vrf": "MGMT", - "routerId": "10.1.0.3", - "asn": "65120", - "peers": {}, - } - } - }, - { - "vrfs": { - "default": { - "vrf": "default", - "routerId": "10.1.0.3", - "asn": "65120", - "peers": {}, - } - } - }, - { - "vrfs": { - "default": { - "vrf": "default", - "routerId": "10.1.0.3", - "asn": "65120", - "peers": {}, - } + "peerList": [ + { + "peerAddress": "10.100.0.14", + "state": "Idle", + "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": True, "received": True, "enabled": True}}}, + }, + ] + }, } - }, + } ], "inputs": { "address_families": [ - {"afi": "ipv4", "safi": "multicast"}, - {"afi": "ipv4", "safi": "sr-te", "vrf": "MGMT"}, - {"afi": "link-state"}, - {"afi": "path-selection"}, + {"afi": "ipv4", "safi": "unicast", "peers": ["10.100.0.12"]}, + {"afi": "ipv4", "safi": "unicast", "vrf": "MGMT", "peers": ["10.100.0.14"]}, ] }, "expected": { "result": "failure", "messages": [ - "Failures: [{'afi': 'ipv4', 'safi': 'multicast', 'vrfs': {'default': 'No Peers'}}, {'afi': 'ipv4', 'safi': 'sr-te', 'vrfs': {'MGMT': 'No Peers'}}, " - "{'afi': 'link-state', 'vrfs': {'default': 'No Peers'}}, {'afi': 'path-selection', 'vrfs': {'default': 'No Peers'}}]" + "AFI: ipv4 SAFI: unicast VRF: default Peer: 10.100.0.12 - Session state is not established - State: Idle", + "AFI: ipv4 SAFI: unicast VRF: MGMT Peer: 10.100.0.14 - Session state is not established - State: Idle", ], }, }, { - "name": "success", + "name": "failure-afi-safi-not-negotiated", "test": VerifyBGPSpecificPeers, "eos_data": [ { "vrfs": { "default": { - "peers": { - "10.1.255.0": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "10.1.255.2": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - }, - } - } - }, - { - "vrfs": { + "peerList": [ + { + "peerAddress": "10.100.0.12", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": False, "received": False, "enabled": True}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, + } + ] + }, "MGMT": { - "peers": { - "10.1.255.10": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "10.1.255.12": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - }, - } - } - }, - { - "vrfs": { - "default": { - "peers": { - "10.1.255.20": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "10.1.255.22": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - }, - } - } - }, - { - "vrfs": { - "default": { - "peers": { - "10.1.255.30": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "10.1.255.32": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", + "peerList": [ + { + "peerAddress": "10.100.0.14", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": False, "received": False, "enabled": False}}}, + "peerTcpInfo": {"state": "ESTABLISHED", 
"outputQueueLength": 0, "inputQueueLength": 0}, }, - }, - } + ] + }, } - }, + } ], "inputs": { "address_families": [ - # Path selection first to make sure input to output mapping is correct. - {"afi": "path-selection", "peers": ["10.1.255.20", "10.1.255.22"]}, - { - "afi": "ipv4", - "safi": "unicast", - "vrf": "default", - "peers": ["10.1.255.0", "10.1.255.2"], - }, - { - "afi": "ipv4", - "safi": "sr-te", - "vrf": "MGMT", - "peers": ["10.1.255.10", "10.1.255.12"], - }, - {"afi": "link-state", "peers": ["10.1.255.30", "10.1.255.32"]}, + {"afi": "ipv4", "safi": "unicast", "peers": ["10.100.0.12"]}, + {"afi": "ipv4", "safi": "unicast", "vrf": "MGMT", "peers": ["10.100.0.14"]}, ] }, - "expected": {"result": "success"}, + "expected": { + "result": "failure", + "messages": [ + "AFI: ipv4 SAFI: unicast VRF: default Peer: 10.100.0.12 - AFI/SAFI state is not negotiated - Advertised: False, Received: False, Enabled: True", + "AFI: ipv4 SAFI: unicast VRF: MGMT Peer: 10.100.0.14 - AFI/SAFI state is not negotiated - Advertised: False, Received: False, Enabled: False", + ], + }, }, { - "name": "failure-issues", + "name": "failure-afi-safi-not-correct", "test": VerifyBGPSpecificPeers, "eos_data": [ { "vrfs": { "default": { - "peers": { - "10.1.255.0": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Idle", - }, - "10.1.255.2": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - }, - } - } - }, - { - "vrfs": { + "peerList": [ + { + "peerAddress": "10.100.0.12", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"l2VpnEvpn": {"advertised": False, "received": False, "enabled": True}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, + } + ] + }, "MGMT": { - "peers": { - "10.1.255.10": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "10.1.255.12": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Idle", - }, - }, - } - } - }, - { - "vrfs": { - "default": { - "peers": { - "10.1.255.20": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Idle", - }, - "10.1.255.22": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - }, - } - } - }, - { - "vrfs": { - "default": { - "peers": { - "10.1.255.30": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Established", - }, - "10.1.255.32": { - "inMsgQueue": 0, - "outMsgQueue": 0, - "peerState": "Idle", + "peerList": [ + { + "peerAddress": "10.100.0.14", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"l2VpnEvpn": {"advertised": False, "received": False, "enabled": False}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, - }, - } + ] + }, } - }, + } ], "inputs": { "address_families": [ - { - "afi": "ipv4", - "safi": "unicast", - "vrf": "default", - "peers": ["10.1.255.0", "10.1.255.2"], - }, - { - "afi": "ipv4", - "safi": "sr-te", - "vrf": "MGMT", - "peers": ["10.1.255.10", "10.1.255.12"], - }, - {"afi": "path-selection", "peers": ["10.1.255.20", "10.1.255.22"]}, - {"afi": "link-state", "peers": ["10.1.255.30", "10.1.255.32"]}, + {"afi": "ipv4", "safi": "unicast", "peers": ["10.100.0.12"]}, + {"afi": "ipv4", "safi": "unicast", "vrf": "MGMT", "peers": ["10.100.0.14"]}, ] }, "expected": { "result": "failure", "messages": [ - "Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'default': {'10.1.255.0': {'peerState': 'Idle', 'inMsgQueue': 0, 'outMsgQueue': 0}}}}, " - "{'afi': 'ipv4', 'safi': 'sr-te', 'vrfs': {'MGMT': 
{'10.1.255.12': {'peerState': 'Idle', 'inMsgQueue': 0, 'outMsgQueue': 0}}}}, " - "{'afi': 'path-selection', 'vrfs': {'default': {'10.1.255.20': {'peerState': 'Idle', 'inMsgQueue': 0, 'outMsgQueue': 0}}}}, " - "{'afi': 'link-state', 'vrfs': {'default': {'10.1.255.32': {'peerState': 'Idle', 'inMsgQueue': 0, 'outMsgQueue': 0}}}}]" + "AFI: ipv4 SAFI: unicast VRF: default Peer: 10.100.0.12 - AFI/SAFI state is not negotiated", + "AFI: ipv4 SAFI: unicast VRF: MGMT Peer: 10.100.0.14 - AFI/SAFI state is not negotiated", ], }, }, { - "name": "failure-not-configured", - "test": VerifyBGPSpecificPeers, - "eos_data": [{"vrfs": {}}, {"vrfs": {}}, {"vrfs": {}}, {"vrfs": {}}], - "inputs": { - "address_families": [ - { - "afi": "ipv4", - "safi": "unicast", - "vrf": "DEV", - "peers": ["10.1.255.0"], - }, - { - "afi": "ipv4", - "safi": "sr-te", - "vrf": "MGMT", - "peers": ["10.1.255.10"], - }, - {"afi": "link-state", "peers": ["10.1.255.20"]}, - {"afi": "path-selection", "peers": ["10.1.255.30"]}, - ] - }, - "expected": { - "result": "failure", - "messages": [ - "Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'DEV': 'Not Configured'}}, " - "{'afi': 'ipv4', 'safi': 'sr-te', 'vrfs': {'MGMT': 'Not Configured'}}, {'afi': 'link-state', 'vrfs': {'default': 'Not Configured'}}, " - "{'afi': 'path-selection', 'vrfs': {'default': 'Not Configured'}}]" - ], - }, - }, - { - "name": "failure-no-peers", + "name": "failure-tcp-queues", "test": VerifyBGPSpecificPeers, "eos_data": [ { "vrfs": { "default": { - "vrf": "default", - "routerId": "10.1.0.3", - "asn": "65120", - "peers": {}, - } - } - }, - { - "vrfs": { + "peerList": [ + { + "peerAddress": "10.100.0.12", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": True, "received": True, "enabled": True}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 3, "inputQueueLength": 3}, + } + ] + }, "MGMT": { - "vrf": "MGMT", - "routerId": "10.1.0.3", - "asn": "65120", - "peers": {}, - } - } - }, - { - "vrfs": { - "default": { - "vrf": "default", - "routerId": "10.1.0.3", - "asn": "65120", - "peers": {}, - } - } - }, - { - "vrfs": { - "default": { - "vrf": "default", - "routerId": "10.1.0.3", - "asn": "65120", - "peers": {}, - } + "peerList": [ + { + "peerAddress": "10.100.0.14", + "state": "Established", + "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": True, "received": True, "enabled": True}}}, + "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 2, "inputQueueLength": 2}, + }, + ] + }, } - }, + } ], "inputs": { "address_families": [ - {"afi": "ipv4", "safi": "multicast", "peers": ["10.1.255.0"]}, - { - "afi": "ipv4", - "safi": "sr-te", - "vrf": "MGMT", - "peers": ["10.1.255.10"], - }, - {"afi": "link-state", "peers": ["10.1.255.20"]}, - {"afi": "path-selection", "peers": ["10.1.255.30"]}, + {"afi": "ipv4", "safi": "unicast", "peers": ["10.100.0.12"]}, + {"afi": "ipv4", "safi": "unicast", "vrf": "MGMT", "peers": ["10.100.0.14"]}, ] }, "expected": { "result": "failure", "messages": [ - "Failures: [{'afi': 'ipv4', 'safi': 'multicast', 'vrfs': {'default': {'10.1.255.0': {'peerNotFound': True}}}}, " - "{'afi': 'ipv4', 'safi': 'sr-te', 'vrfs': {'MGMT': {'10.1.255.10': {'peerNotFound': True}}}}, " - "{'afi': 'link-state', 'vrfs': {'default': {'10.1.255.20': {'peerNotFound': True}}}}, " - "{'afi': 'path-selection', 'vrfs': {'default': {'10.1.255.30': {'peerNotFound': True}}}}]" + "AFI: ipv4 SAFI: unicast VRF: default Peer: 10.100.0.12 - Session has non-empty message 
queues - InQ: 3, OutQ: 3", + "AFI: ipv4 SAFI: unicast VRF: MGMT Peer: 10.100.0.14 - Session has non-empty message queues - InQ: 2, OutQ: 2", ], }, }, @@ -1508,234 +1168,10 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BGP peers are not found or routes are not exchanged properly:\n" - "{'bgp_peers': {'172.30.11.11': {'default': 'Not configured'}, '172.30.11.12': {'default': 'Not configured'}}}" - ], - }, - }, - { - "name": "failure-no-peer", - "test": VerifyBGPExchangedRoutes, - "eos_data": [ - {"vrfs": {}}, - { - "vrfs": { - "default": { - "bgpRouteEntries": { - "192.0.254.3/32": { - "bgpRoutePaths": [ - { - "routeType": { - "valid": True, - "active": True, - }, - } - ] - }, - "192.0.254.5/32": { - "bgpRoutePaths": [ - { - "routeType": { - "valid": True, - "active": True, - }, - } - ] - }, - }, - } - } - }, - {"vrfs": {}}, - { - "vrfs": { - "default": { - "bgpRouteEntries": { - "192.0.254.3/32": { - "bgpRoutePaths": [ - { - "routeType": { - "valid": True, - "active": True, - }, - } - ], - }, - "192.0.255.4/32": { - "bgpRoutePaths": [ - { - "routeType": { - "valid": True, - "active": True, - }, - } - ], - }, - }, - } - } - }, - ], - "inputs": { - "bgp_peers": [ - { - "peer_address": "172.30.11.11", - "vrf": "MGMT", - "advertised_routes": ["192.0.254.3/32"], - "received_routes": ["192.0.255.3/32"], - }, - { - "peer_address": "172.30.11.5", - "vrf": "default", - "advertised_routes": ["192.0.254.3/32", "192.0.254.5/32"], - "received_routes": ["192.0.254.3/32", "192.0.255.4/32"], - }, - ] - }, - "expected": { - "result": "failure", - "messages": ["Following BGP peers are not found or routes are not exchanged properly:\n{'bgp_peers': {'172.30.11.11': {'MGMT': 'Not configured'}}}"], - }, - }, - { - "name": "failure-missing-routes", - "test": VerifyBGPExchangedRoutes, - "eos_data": [ - { - "vrfs": { - "default": { - "bgpRouteEntries": { - "192.0.254.3/32": { - "bgpRoutePaths": [ - { - "routeType": { - "valid": True, - "active": True, - }, - } - ] - }, - "192.0.254.5/32": { - "bgpRoutePaths": [ - { - "routeType": { - "valid": True, - "active": True, - }, - } - ] - }, - }, - } - } - }, - { - "vrfs": { - "default": { - "bgpRouteEntries": { - "192.0.254.3/32": { - "bgpRoutePaths": [ - { - "routeType": { - "valid": True, - "active": True, - }, - } - ] - }, - "192.0.254.5/32": { - "bgpRoutePaths": [ - { - "routeType": { - "valid": True, - "active": True, - }, - } - ] - }, - }, - } - } - }, - { - "vrfs": { - "default": { - "bgpRouteEntries": { - "192.0.254.3/32": { - "bgpRoutePaths": [ - { - "routeType": { - "valid": True, - "active": True, - }, - } - ], - }, - "192.0.255.4/32": { - "bgpRoutePaths": [ - { - "routeType": { - "valid": True, - "active": True, - }, - } - ], - }, - }, - } - } - }, - { - "vrfs": { - "default": { - "bgpRouteEntries": { - "192.0.254.3/32": { - "bgpRoutePaths": [ - { - "routeType": { - "valid": True, - "active": True, - }, - } - ], - }, - "192.0.255.4/32": { - "bgpRoutePaths": [ - { - "routeType": { - "valid": True, - "active": True, - }, - } - ], - }, - }, - } - } - }, - ], - "inputs": { - "bgp_peers": [ - { - "peer_address": "172.30.11.1", - "vrf": "default", - "advertised_routes": ["192.0.254.3/32", "192.0.254.51/32"], - "received_routes": ["192.0.254.31/32", "192.0.255.4/32"], - }, - { - "peer_address": "172.30.11.5", - "vrf": "default", - "advertised_routes": ["192.0.254.31/32", "192.0.254.5/32"], - "received_routes": ["192.0.254.3/32", "192.0.255.41/32"], - }, - ] - }, - "expected": { - "result": "failure", - "messages": [ - 
"Following BGP peers are not found or routes are not exchanged properly:\n{'bgp_peers': " - "{'172.30.11.1': {'default': {'advertised_routes': {'192.0.254.51/32': 'Not found'}, 'received_routes': {'192.0.254.31/32': 'Not found'}}}, " - "'172.30.11.5': {'default': {'advertised_routes': {'192.0.254.31/32': 'Not found'}, 'received_routes': {'192.0.255.41/32': 'Not found'}}}}}" + "Peer: 172.30.11.11 VRF: default Advertised route: 192.0.254.3/32 - Not found", + "Peer: 172.30.11.11 VRF: default Received route: 192.0.255.3/32 - Not found", + "Peer: 172.30.11.12 VRF: default Advertised route: 192.0.254.31/32 - Not found", + "Peer: 172.30.11.12 VRF: default Received route: 192.0.255.31/32 - Not found", ], }, }, @@ -1875,11 +1311,14 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BGP peers are not found or routes are not exchanged properly:\n{'bgp_peers': " - "{'172.30.11.1': {'default': {'advertised_routes': {'192.0.254.3/32': {'valid': True, 'active': False}, '192.0.254.51/32': 'Not found'}, " - "'received_routes': {'192.0.254.31/32': 'Not found', '192.0.255.4/32': {'valid': False, 'active': False}}}}, " - "'172.30.11.5': {'default': {'advertised_routes': {'192.0.254.31/32': 'Not found', '192.0.254.5/32': {'valid': True, 'active': False}}, " - "'received_routes': {'192.0.254.3/32': {'valid': False, 'active': True}, '192.0.255.41/32': 'Not found'}}}}}" + "Peer: 172.30.11.1 VRF: default Advertised route: 192.0.254.3/32 - Valid: False, Active: True", + "Peer: 172.30.11.1 VRF: default Advertised route: 192.0.254.51/32 - Not found", + "Peer: 172.30.11.1 VRF: default Received route: 192.0.254.31/32 - Not found", + "Peer: 172.30.11.1 VRF: default Received route: 192.0.255.4/32 - Valid: False, Active: False", + "Peer: 172.30.11.5 VRF: default Advertised route: 192.0.254.31/32 - Not found", + "Peer: 172.30.11.5 VRF: default Advertised route: 192.0.254.5/32 - Valid: False, Active: True", + "Peer: 172.30.11.5 VRF: default Received route: 192.0.254.3/32 - Valid: True, Active: False", + "Peer: 172.30.11.5 VRF: default Received route: 192.0.255.41/32 - Not found", ], }, }, @@ -1991,9 +1430,7 @@ DATA: list[dict[str, Any]] = [ }, "expected": { "result": "failure", - "messages": [ - "Following BGP peer multiprotocol capabilities are not found or not ok:\n{'bgp_peers': {'172.30.11.1': {'MGMT': {'status': 'Not configured'}}}}" - ], + "messages": ["Peer: 172.30.11.1 VRF: MGMT - VRF not configured"], }, }, { @@ -2054,8 +1491,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BGP peer multiprotocol capabilities are not found or not ok:\n" - "{'bgp_peers': {'172.30.11.10': {'default': {'status': 'Not configured'}}, '172.30.11.1': {'MGMT': {'status': 'Not configured'}}}}" + "Peer: 172.30.11.10 VRF: default - Not found", + "Peer: 172.30.11.1 VRF: MGMT - Not found", ], }, }, @@ -2095,9 +1532,7 @@ DATA: list[dict[str, Any]] = [ }, "expected": { "result": "failure", - "messages": [ - "Following BGP peer multiprotocol capabilities are not found or not ok:\n{'bgp_peers': {'172.30.11.1': {'default': {'l2VpnEvpn': 'not found'}}}}" - ], + "messages": ["Peer: 172.30.11.1 VRF: default - l2VpnEvpn not found"], }, }, { @@ -2190,13 +1625,15 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BGP peer multiprotocol capabilities are not found or not ok:\n" - "{'bgp_peers': {'172.30.11.1': {'default': {'ipv4Unicast': {'advertised': False, 'received': False, 'enabled': False}, " - "'ipv4MplsVpn': 
{'advertised': False, 'received': True, 'enabled': False}, 'l2VpnEvpn': 'not found'}}, " - "'172.30.11.10': {'MGMT': {'ipv4Unicast': 'not found', 'ipv4MplsVpn': {'advertised': False, 'received': False, 'enabled': True}, " - "'l2VpnEvpn': {'advertised': True, 'received': False, 'enabled': False}}}, " - "'172.30.11.11': {'MGMT': {'ipv4Unicast': {'advertised': False, 'received': False, 'enabled': False}, " - "'ipv4MplsVpn': {'advertised': False, 'received': False, 'enabled': False}, 'l2VpnEvpn': 'not found'}}}}" + "Peer: 172.30.11.1 VRF: default - ipv4Unicast not negotiated - Advertised: False, Received: False, Enabled: False", + "Peer: 172.30.11.1 VRF: default - ipv4MplsVpn not negotiated - Advertised: False, Received: True, Enabled: False", + "Peer: 172.30.11.1 VRF: default - l2VpnEvpn not found", + "Peer: 172.30.11.10 VRF: MGMT - ipv4Unicast not found", + "Peer: 172.30.11.10 VRF: MGMT - ipv4MplsVpn not negotiated - Advertised: False, Received: False, Enabled: True", + "Peer: 172.30.11.10 VRF: MGMT - l2VpnEvpn not negotiated - Advertised: True, Received: False, Enabled: False", + "Peer: 172.30.11.11 VRF: MGMT - ipv4Unicast not negotiated - Advertised: False, Received: False, Enabled: False", + "Peer: 172.30.11.11 VRF: MGMT - ipv4MplsVpn not negotiated - Advertised: False, Received: False, Enabled: False", + "Peer: 172.30.11.11 VRF: MGMT - l2VpnEvpn not found", ], }, }, @@ -2339,10 +1776,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BGP peer multiprotocol capabilities are not found or not ok:\n{'bgp_peers': {'172.30.11.1': " - "{'default': {'status': 'Expected only `ipv4Unicast` capabilities should be listed but found `ipv4Unicast, ipv4MplsLabels` instead.'}}," - " '172.30.11.10': {'MGMT': {'status': 'Expected only `ipv4MplsVpn, l2VpnEvpn` capabilities should be listed but found `ipv4Unicast, " - "ipv4MplsVpn` instead.'}}}}" + "Peer: 172.30.11.1 VRF: default - Mismatch - Expected: ipv4Unicast Actual: ipv4Unicast, ipv4MplsLabels", + "Peer: 172.30.11.10 VRF: MGMT - Mismatch - Expected: ipv4MplsVpn, l2VpnEvpn Actual: ipv4Unicast, ipv4MplsVpn", ], }, }, @@ -2398,63 +1833,6 @@ DATA: list[dict[str, Any]] = [ "expected": {"result": "success"}, }, { - "name": "failure-no-vrf", - "test": VerifyBGPPeerASNCap, - "eos_data": [ - { - "vrfs": { - "default": { - "peerList": [ - { - "peerAddress": "172.30.11.1", - "neighborCapabilities": { - "fourOctetAsnCap": { - "advertised": True, - "received": True, - "enabled": True, - }, - }, - } - ] - } - }, - "MGMT": { - "peerList": [ - { - "peerAddress": "172.30.11.10", - "neighborCapabilities": { - "fourOctetAsnCap": { - "advertised": True, - "received": True, - "enabled": True, - }, - }, - } - ] - }, - } - ], - "inputs": { - "bgp_peers": [ - { - "peer_address": "172.30.11.1", - "vrf": "MGMT", - }, - { - "peer_address": "172.30.11.10", - "vrf": "default", - }, - ] - }, - "expected": { - "result": "failure", - "messages": [ - "Following BGP peer four octet asn capabilities are not found or not ok:\n" - "{'bgp_peers': {'172.30.11.1': {'MGMT': {'status': 'Not configured'}}, '172.30.11.10': {'default': {'status': 'Not configured'}}}}" - ], - }, - }, - { "name": "failure-no-peer", "test": VerifyBGPPeerASNCap, "eos_data": [ @@ -2489,9 +1867,7 @@ DATA: list[dict[str, Any]] = [ }, "expected": { "result": "failure", - "messages": [ - "Following BGP peer four octet asn capabilities are not found or not ok:\n{'bgp_peers': {'172.30.11.10': {'default': {'status': 'Not configured'}}}}" - ], + "messages": ["Peer: 172.30.11.10 
VRF: default - Not found"], }, }, { @@ -2544,8 +1920,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BGP peer four octet asn capabilities are not found or not ok:\n" - "{'bgp_peers': {'172.30.11.1': {'default': {'fourOctetAsnCap': 'not found'}}, '172.30.11.10': {'MGMT': {'fourOctetAsnCap': 'not found'}}}}" + "Peer: 172.30.11.1 VRF: default - 4-octet ASN capability not found", + "Peer: 172.30.11.10 VRF: MGMT - 4-octet ASN capability not found", ], }, }, @@ -2595,9 +1971,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BGP peer four octet asn capabilities are not found or not ok:\n" - "{'bgp_peers': {'172.30.11.1': {'default': {'fourOctetAsnCap': {'advertised': False, 'received': False, 'enabled': False}}}, " - "'172.30.11.10': {'MGMT': {'fourOctetAsnCap': {'advertised': True, 'received': False, 'enabled': True}}}}}" + "Peer: 172.30.11.1 VRF: default - 4-octet ASN capability not negotiated - Advertised: False, Received: False, Enabled: False", + "Peer: 172.30.11.10 VRF: MGMT - 4-octet ASN capability not negotiated - Advertised: True, Received: False, Enabled: True", ], }, }, @@ -2653,25 +2028,6 @@ DATA: list[dict[str, Any]] = [ "expected": {"result": "success"}, }, { - "name": "failure-no-vrf", - "test": VerifyBGPPeerRouteRefreshCap, - "eos_data": [{"vrfs": {}}], - "inputs": { - "bgp_peers": [ - { - "peer_address": "172.30.11.1", - "vrf": "MGMT", - } - ] - }, - "expected": { - "result": "failure", - "messages": [ - "Following BGP peer route refresh capabilities are not found or not ok:\n{'bgp_peers': {'172.30.11.1': {'MGMT': {'status': 'Not configured'}}}}" - ], - }, - }, - { "name": "failure-no-peer", "test": VerifyBGPPeerRouteRefreshCap, "eos_data": [ @@ -2727,8 +2083,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BGP peer route refresh capabilities are not found or not ok:\n" - "{'bgp_peers': {'172.30.11.12': {'default': {'status': 'Not configured'}}, '172.30.11.1': {'CS': {'status': 'Not configured'}}}}" + "Peer: 172.30.11.12 VRF: default - Not found", + "Peer: 172.30.11.1 VRF: CS - Not found", ], }, }, @@ -2782,8 +2138,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BGP peer route refresh capabilities are not found or not ok:\n" - "{'bgp_peers': {'172.30.11.1': {'default': {'routeRefreshCap': 'not found'}}, '172.30.11.11': {'CS': {'routeRefreshCap': 'not found'}}}}" + "Peer: 172.30.11.1 VRF: default - Route refresh capability not found", + "Peer: 172.30.11.11 VRF: CS - Route refresh capability not found", ], }, }, @@ -2833,8 +2189,7 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BGP peer route refresh capabilities are not found or not ok:\n" - "{'bgp_peers': {'172.30.11.1': {'default': {'routeRefreshCap': {'advertised': False, 'received': False, 'enabled': False}}}}}" + "Peer: 172.30.11.1 VRF: default - Route refresh capability not negotiated - Advertised: False, Received: False, Enabled: False", ], }, }, @@ -2880,40 +2235,6 @@ DATA: list[dict[str, Any]] = [ "expected": {"result": "success"}, }, { - "name": "failure-no-vrf", - "test": VerifyBGPPeerMD5Auth, - "eos_data": [ - { - "vrfs": { - "default": { - "peerList": [ - { - "peerAddress": "172.30.11.10", - "state": "Established", - "md5AuthEnabled": True, - } - ] - }, - } - } - ], - "inputs": { - "bgp_peers": [ - { - "peer_address": "172.30.11.1", - "vrf": "MGMT", - } - ] - }, - "expected": { 
- "result": "failure", - "messages": [ - "Following BGP peers are not configured, not established or MD5 authentication is not enabled:\n" - "{'bgp_peers': {'172.30.11.1': {'MGMT': {'status': 'Not configured'}}}}" - ], - }, - }, - { "name": "failure-no-peer", "test": VerifyBGPPeerMD5Auth, "eos_data": [ @@ -2947,16 +2268,16 @@ DATA: list[dict[str, Any]] = [ "vrf": "default", }, { - "peer_address": "172.30.11.11", - "vrf": "default", + "peer_address": "172.30.11.12", + "vrf": "CS", }, ] }, "expected": { "result": "failure", "messages": [ - "Following BGP peers are not configured, not established or MD5 authentication is not enabled:\n" - "{'bgp_peers': {'172.30.11.10': {'default': {'status': 'Not configured'}}, '172.30.11.11': {'default': {'status': 'Not configured'}}}}" + "Peer: 172.30.11.10 VRF: default - Not found", + "Peer: 172.30.11.12 VRF: CS - Not found", ], }, }, @@ -2980,7 +2301,7 @@ DATA: list[dict[str, Any]] = [ { "peerAddress": "172.30.11.10", "state": "Idle", - "md5AuthEnabled": False, + "md5AuthEnabled": True, } ] }, @@ -3002,9 +2323,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BGP peers are not configured, not established or MD5 authentication is not enabled:\n" - "{'bgp_peers': {'172.30.11.1': {'default': {'state': 'Idle', 'md5_auth_enabled': True}}, " - "'172.30.11.10': {'MGMT': {'state': 'Idle', 'md5_auth_enabled': False}}}}" + "Peer: 172.30.11.1 VRF: default - Session state is not established - State: Idle", + "Peer: 172.30.11.10 VRF: MGMT - Session state is not established - State: Idle", ], }, }, @@ -3054,9 +2374,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BGP peers are not configured, not established or MD5 authentication is not enabled:\n" - "{'bgp_peers': {'172.30.11.1': {'default': {'state': 'Established', 'md5_auth_enabled': None}}, " - "'172.30.11.11': {'MGMT': {'state': 'Established', 'md5_auth_enabled': False}}}}" + "Peer: 172.30.11.1 VRF: default - Session does not have MD5 authentication enabled", + "Peer: 172.30.11.11 VRF: MGMT - Session does not have MD5 authentication enabled", ], }, }, @@ -3155,8 +2474,8 @@ DATA: list[dict[str, Any]] = [ "evpnRoutePaths": [ { "routeType": { - "active": True, - "valid": True, + "active": False, + "valid": False, }, }, ] @@ -3303,7 +2622,7 @@ DATA: list[dict[str, Any]] = [ "inputs": {"vxlan_endpoints": [{"address": "192.168.20.102", "vni": 10020}]}, "expected": { "result": "failure", - "messages": ["The following VXLAN endpoint do not have any EVPN Type-2 route: [('192.168.20.102', 10020)]"], + "messages": ["Address: 192.168.20.102 VNI: 10020 - No EVPN Type-2 route"], }, }, { @@ -3331,103 +2650,7 @@ DATA: list[dict[str, Any]] = [ "inputs": {"vxlan_endpoints": [{"address": "192.168.20.102", "vni": 10020}]}, "expected": { "result": "failure", - "messages": [ - "The following EVPN Type-2 routes do not have at least one valid and active path: ['RD: 10.1.0.5:500 mac-ip 10020 aac1.ab4e.bec2 192.168.20.102']" - ], - }, - }, - { - "name": "failure-multiple-routes-not-active", - "test": VerifyEVPNType2Route, - "eos_data": [ - { - "vrf": "default", - "routerId": "10.1.0.3", - "asn": 65120, - "evpnRoutes": { - "RD: 10.1.0.5:500 mac-ip 10020 aac1.ab4e.bec2 192.168.20.102": { - "evpnRoutePaths": [ - { - "routeType": { - "active": False, - "valid": True, - }, - }, - ] - }, - "RD: 10.1.0.6:500 mac-ip 10020 aac1.ab4e.bec2 192.168.20.102": { - "evpnRoutePaths": [ - { - "routeType": { - "active": False, - "valid": False, - }, - }, - ] - 
}, - }, - }, - ], - "inputs": {"vxlan_endpoints": [{"address": "192.168.20.102", "vni": 10020}]}, - "expected": { - "result": "failure", - "messages": [ - "The following EVPN Type-2 routes do not have at least one valid and active path: " - "['RD: 10.1.0.5:500 mac-ip 10020 aac1.ab4e.bec2 192.168.20.102', " - "'RD: 10.1.0.6:500 mac-ip 10020 aac1.ab4e.bec2 192.168.20.102']" - ], - }, - }, - { - "name": "failure-multiple-routes-multiple-paths-not-active", - "test": VerifyEVPNType2Route, - "eos_data": [ - { - "vrf": "default", - "routerId": "10.1.0.3", - "asn": 65120, - "evpnRoutes": { - "RD: 10.1.0.5:500 mac-ip 10020 aac1.ab4e.bec2 192.168.20.102": { - "evpnRoutePaths": [ - { - "routeType": { - "active": True, - "valid": True, - }, - }, - { - "routeType": { - "active": False, - "valid": True, - }, - }, - ] - }, - "RD: 10.1.0.6:500 mac-ip 10020 aac1.ab4e.bec2 192.168.20.102": { - "evpnRoutePaths": [ - { - "routeType": { - "active": False, - "valid": False, - }, - }, - { - "routeType": { - "active": False, - "valid": False, - }, - }, - ] - }, - }, - }, - ], - "inputs": {"vxlan_endpoints": [{"address": "192.168.20.102", "vni": 10020}]}, - "expected": { - "result": "failure", - "messages": [ - "The following EVPN Type-2 routes do not have at least one valid and active path: ['RD: 10.1.0.6:500 mac-ip 10020 aac1.ab4e.bec2 192.168.20.102']" - ], + "messages": ["Address: 192.168.20.102 VNI: 10020 - No valid and active path"], }, }, { @@ -3477,67 +2700,7 @@ DATA: list[dict[str, Any]] = [ }, "expected": { "result": "failure", - "messages": [ - "The following EVPN Type-2 routes do not have at least one valid and active path: " - "['RD: 10.1.0.5:500 mac-ip 10020 aac1.ab4e.bec2 192.168.20.102', " - "'RD: 10.1.0.5:500 mac-ip 10010 aac1.ab5d.b41e']" - ], - }, - }, - { - "name": "failure-multiple-endpoints-one-no-routes", - "test": VerifyEVPNType2Route, - "eos_data": [ - {"vrf": "default", "routerId": "10.1.0.3", "asn": 65120, "evpnRoutes": {}}, - { - "vrf": "default", - "routerId": "10.1.0.3", - "asn": 65120, - "evpnRoutes": { - "RD: 10.1.0.5:500 mac-ip 10010 aac1.ab5d.b41e 192.168.10.101": { - "evpnRoutePaths": [ - { - "routeType": { - "active": False, - "valid": False, - }, - }, - ] - }, - }, - }, - ], - "inputs": { - "vxlan_endpoints": [ - {"address": "aac1.ab4e.bec2", "vni": 10020}, - {"address": "192.168.10.101", "vni": 10010}, - ] - }, - "expected": { - "result": "failure", - "messages": [ - "The following VXLAN endpoint do not have any EVPN Type-2 route: [('aa:c1:ab:4e:be:c2', 10020)]", - "The following EVPN Type-2 routes do not have at least one valid and active path: " - "['RD: 10.1.0.5:500 mac-ip 10010 aac1.ab5d.b41e 192.168.10.101']", - ], - }, - }, - { - "name": "failure-multiple-endpoints-no-routes", - "test": VerifyEVPNType2Route, - "eos_data": [ - {"vrf": "default", "routerId": "10.1.0.3", "asn": 65120, "evpnRoutes": {}}, - {"vrf": "default", "routerId": "10.1.0.3", "asn": 65120, "evpnRoutes": {}}, - ], - "inputs": { - "vxlan_endpoints": [ - {"address": "aac1.ab4e.bec2", "vni": 10020}, - {"address": "192.168.10.101", "vni": 10010}, - ] - }, - "expected": { - "result": "failure", - "messages": ["The following VXLAN endpoint do not have any EVPN Type-2 route: [('aa:c1:ab:4e:be:c2', 10020), ('192.168.10.101', 10010)]"], + "messages": ["Address: 192.168.20.102 VNI: 10020 - No valid and active path", "Address: aa:c1:ab:5d:b4:1e VNI: 10010 - No valid and active path"], }, }, { @@ -3587,43 +2750,6 @@ DATA: list[dict[str, Any]] = [ "expected": {"result": "success"}, }, { - "name": "failure-no-vrf", - 
"test": VerifyBGPAdvCommunities, - "eos_data": [ - { - "vrfs": { - "default": { - "peerList": [ - { - "peerAddress": "172.30.11.1", - "advertisedCommunities": { - "standard": True, - "extended": True, - "large": True, - }, - } - ] - }, - } - } - ], - "inputs": { - "bgp_peers": [ - { - "peer_address": "172.30.11.17", - "vrf": "MGMT", - } - ] - }, - "expected": { - "result": "failure", - "messages": [ - "Following BGP peers are not configured or advertised communities are not standard, extended, and large:\n" - "{'bgp_peers': {'172.30.11.17': {'MGMT': {'status': 'Not configured'}}}}" - ], - }, - }, - { "name": "failure-no-peer", "test": VerifyBGPAdvCommunities, "eos_data": [ @@ -3671,8 +2797,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BGP peers are not configured or advertised communities are not standard, extended, and large:\n" - "{'bgp_peers': {'172.30.11.10': {'default': {'status': 'Not configured'}}, '172.30.11.12': {'MGMT': {'status': 'Not configured'}}}}" + "Peer: 172.30.11.10 VRF: default - Not found", + "Peer: 172.30.11.12 VRF: MGMT - Not found", ], }, }, @@ -3724,9 +2850,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BGP peers are not configured or advertised communities are not standard, extended, and large:\n" - "{'bgp_peers': {'172.30.11.1': {'default': {'advertised_communities': {'standard': False, 'extended': False, 'large': False}}}, " - "'172.30.11.10': {'CS': {'advertised_communities': {'standard': True, 'extended': True, 'large': False}}}}}" + "Peer: 172.30.11.1 VRF: default - Standard: False, Extended: False, Large: False", + "Peer: 172.30.11.10 VRF: CS - Standard: True, Extended: True, Large: False", ], }, }, @@ -3781,15 +2906,7 @@ DATA: list[dict[str, Any]] = [ "eos_data": [ { "vrfs": { - "default": { - "peerList": [ - { - "peerAddress": "172.30.11.1", - "holdTime": 180, - "keepaliveTime": 60, - } - ] - }, + "default": {"peerList": []}, "MGMT": {"peerList": []}, } } @@ -3804,7 +2921,7 @@ DATA: list[dict[str, Any]] = [ }, { "peer_address": "172.30.11.11", - "vrf": "MGMT", + "vrf": "default", "hold_time": 180, "keep_alive_time": 60, }, @@ -3813,8 +2930,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BGP peers are not configured or hold and keep-alive timers are not correct:\n" - "{'172.30.11.1': {'MGMT': 'Not configured'}, '172.30.11.11': {'MGMT': 'Not configured'}}" + "Peer: 172.30.11.1 VRF: MGMT - Not found", + "Peer: 172.30.11.11 VRF: default - Not found", ], }, }, @@ -3864,9 +2981,9 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BGP peers are not configured or hold and keep-alive timers are not correct:\n" - "{'172.30.11.1': {'default': {'hold_time': 160, 'keep_alive_time': 60}}, " - "'172.30.11.11': {'MGMT': {'hold_time': 120, 'keep_alive_time': 40}}}" + "Peer: 172.30.11.1 VRF: default - Hold time mismatch - Expected: 180, Actual: 160", + "Peer: 172.30.11.11 VRF: MGMT - Hold time mismatch - Expected: 180, Actual: 120", + "Peer: 172.30.11.11 VRF: MGMT - Keepalive time mismatch - Expected: 60, Actual: 40", ], }, }, @@ -3894,10 +3011,6 @@ DATA: list[dict[str, Any]] = [ } ] }, - }, - }, - { - "vrfs": { "MGMT": { "peerList": [ { @@ -3934,10 +3047,7 @@ DATA: list[dict[str, Any]] = [ { "name": "failure-not-found", "test": VerifyBGPPeerDropStats, - "eos_data": [ - {"vrfs": {}}, - {"vrfs": {}}, - ], + "eos_data": [{"vrfs": {}}], "inputs": { "bgp_peers": [ { @@ -3951,8 +3061,8 
@@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "The following BGP peers are not configured or have non-zero NLRI drop statistics counters:\n" - "{'10.100.0.8': {'default': 'Not configured'}, '10.100.0.9': {'MGMT': 'Not configured'}}" + "Peer: 10.100.0.8 VRF: default - Not found", + "Peer: 10.100.0.9 VRF: MGMT - Not found", ], }, }, @@ -3980,10 +3090,6 @@ DATA: list[dict[str, Any]] = [ } ] }, - }, - }, - { - "vrfs": { "MGMT": { "peerList": [ { @@ -4018,9 +3124,10 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "The following BGP peers are not configured or have non-zero NLRI drop statistics counters:\n" - "{'10.100.0.8': {'default': {'prefixDroppedMartianV4': 1, 'prefixDroppedMaxRouteLimitViolatedV4': 1}}, " - "'10.100.0.9': {'MGMT': {'inDropOrigId': 1, 'inDropNhLocal': 1}}}" + "Peer: 10.100.0.8 VRF: default - Non-zero NLRI drop statistics counter - prefixDroppedMartianV4: 1", + "Peer: 10.100.0.8 VRF: default - Non-zero NLRI drop statistics counter - prefixDroppedMaxRouteLimitViolatedV4: 1", + "Peer: 10.100.0.9 VRF: MGMT - Non-zero NLRI drop statistics counter - inDropOrigId: 1", + "Peer: 10.100.0.9 VRF: MGMT - Non-zero NLRI drop statistics counter - inDropNhLocal: 1", ], }, }, @@ -4048,10 +3155,6 @@ DATA: list[dict[str, Any]] = [ } ] }, - }, - }, - { - "vrfs": { "MGMT": { "peerList": [ { @@ -4105,10 +3208,6 @@ DATA: list[dict[str, Any]] = [ } ] }, - }, - }, - { - "vrfs": { "MGMT": { "peerList": [ { @@ -4139,49 +3238,14 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "The following BGP peers are not configured or have non-zero NLRI drop statistics counters:\n" - "{'10.100.0.8': {'default': {'inDropAsloop': 3, 'inDropOrigId': 1, 'inDropNhLocal': 1, " - "'prefixDroppedMartianV4': 1, 'prefixDroppedMaxRouteLimitViolatedV4': 1}}, " - "'10.100.0.9': {'MGMT': {'inDropAsloop': 2, 'inDropOrigId': 1, 'inDropNhLocal': 1}}}" - ], - }, - }, - { - "name": "failure-drop-stat-not-found", - "test": VerifyBGPPeerDropStats, - "eos_data": [ - { - "vrfs": { - "default": { - "peerList": [ - { - "peerAddress": "10.100.0.8", - "dropStats": { - "inDropAsloop": 3, - "inDropClusterIdLoop": 0, - "inDropMalformedMpbgp": 0, - "inDropOrigId": 1, - "inDropNhLocal": 1, - "inDropNhAfV6": 0, - "prefixDroppedMaxRouteLimitViolatedV4": 1, - "prefixDroppedMartianV6": 0, - }, - } - ] - }, - }, - }, - ], - "inputs": { - "bgp_peers": [ - {"peer_address": "10.100.0.8", "vrf": "default", "drop_stats": ["inDropAsloop", "inDropOrigId", "inDropNhLocal", "prefixDroppedMartianV4"]} - ] - }, - "expected": { - "result": "failure", - "messages": [ - "The following BGP peers are not configured or have non-zero NLRI drop statistics counters:\n" - "{'10.100.0.8': {'default': {'inDropAsloop': 3, 'inDropOrigId': 1, 'inDropNhLocal': 1, 'prefixDroppedMartianV4': 'Not Found'}}}" + "Peer: 10.100.0.8 VRF: default - Non-zero NLRI drop statistics counter - inDropAsloop: 3", + "Peer: 10.100.0.8 VRF: default - Non-zero NLRI drop statistics counter - inDropOrigId: 1", + "Peer: 10.100.0.8 VRF: default - Non-zero NLRI drop statistics counter - inDropNhLocal: 1", + "Peer: 10.100.0.8 VRF: default - Non-zero NLRI drop statistics counter - prefixDroppedMartianV4: 1", + "Peer: 10.100.0.8 VRF: default - Non-zero NLRI drop statistics counter - prefixDroppedMaxRouteLimitViolatedV4: 1", + "Peer: 10.100.0.9 VRF: MGMT - Non-zero NLRI drop statistics counter - inDropAsloop: 2", + "Peer: 10.100.0.9 VRF: MGMT - Non-zero NLRI drop statistics counter - 
inDropOrigId: 1", + "Peer: 10.100.0.9 VRF: MGMT - Non-zero NLRI drop statistics counter - inDropNhLocal: 1", ], }, }, @@ -4205,10 +3269,6 @@ DATA: list[dict[str, Any]] = [ } ] }, - }, - }, - { - "vrfs": { "MGMT": { "peerList": [ { @@ -4239,7 +3299,6 @@ DATA: list[dict[str, Any]] = [ "test": VerifyBGPPeerUpdateErrors, "eos_data": [ {"vrfs": {}}, - {"vrfs": {}}, ], "inputs": { "bgp_peers": [ @@ -4250,8 +3309,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "The following BGP peers are not configured or have non-zero update error counters:\n" - "{'10.100.0.8': {'default': 'Not configured'}, '10.100.0.9': {'MGMT': 'Not configured'}}" + "Peer: 10.100.0.8 VRF: default - Not found", + "Peer: 10.100.0.9 VRF: MGMT - Not found", ], }, }, @@ -4275,10 +3334,6 @@ DATA: list[dict[str, Any]] = [ } ] }, - }, - }, - { - "vrfs": { "MGMT": { "peerList": [ { @@ -4305,9 +3360,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "The following BGP peers are not configured or have non-zero update error counters:\n" - "{'10.100.0.8': {'default': {'disabledAfiSafi': 'ipv4Unicast'}}, " - "'10.100.0.9': {'MGMT': {'inUpdErrWithdraw': 1}}}" + "Peer: 10.100.0.8 VRF: default - Non-zero update error counter - disabledAfiSafi: ipv4Unicast", + "Peer: 10.100.0.9 VRF: MGMT - Non-zero update error counter - inUpdErrWithdraw: 1", ], }, }, @@ -4331,10 +3385,6 @@ DATA: list[dict[str, Any]] = [ } ] }, - }, - }, - { - "vrfs": { "MGMT": { "peerList": [ { @@ -4380,10 +3430,6 @@ DATA: list[dict[str, Any]] = [ } ] }, - }, - }, - { - "vrfs": { "MGMT": { "peerList": [ { @@ -4414,9 +3460,10 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "The following BGP peers are not configured or have non-zero update error counters:\n" - "{'10.100.0.8': {'default': {'inUpdErrWithdraw': 1, 'disabledAfiSafi': 'ipv4Unicast'}}, " - "'10.100.0.9': {'MGMT': {'inUpdErrWithdraw': 1, 'inUpdErrDisableAfiSafi': 1}}}" + "Peer: 10.100.0.8 VRF: default - Non-zero update error counter - inUpdErrWithdraw: 1", + "Peer: 10.100.0.8 VRF: default - Non-zero update error counter - disabledAfiSafi: ipv4Unicast", + "Peer: 10.100.0.9 VRF: MGMT - Non-zero update error counter - inUpdErrWithdraw: 1", + "Peer: 10.100.0.9 VRF: MGMT - Non-zero update error counter - inUpdErrDisableAfiSafi: 1", ], }, }, @@ -4439,10 +3486,6 @@ DATA: list[dict[str, Any]] = [ } ] }, - }, - }, - { - "vrfs": { "MGMT": { "peerList": [ { @@ -4472,9 +3515,10 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "The following BGP peers are not configured or have non-zero update error counters:\n" - "{'10.100.0.8': {'default': {'inUpdErrWithdraw': 'Not Found', 'disabledAfiSafi': 'ipv4Unicast'}}, " - "'10.100.0.9': {'MGMT': {'inUpdErrWithdraw': 1, 'inUpdErrDisableAfiSafi': 'Not Found'}}}" + "Peer: 10.100.0.8 VRF: default - Non-zero update error counter - inUpdErrWithdraw: Not Found", + "Peer: 10.100.0.8 VRF: default - Non-zero update error counter - disabledAfiSafi: ipv4Unicast", + "Peer: 10.100.0.9 VRF: MGMT - Non-zero update error counter - inUpdErrWithdraw: 1", + "Peer: 10.100.0.9 VRF: MGMT - Non-zero update error counter - inUpdErrDisableAfiSafi: Not Found", ], }, }, @@ -4493,10 +3537,6 @@ DATA: list[dict[str, Any]] = [ } ] }, - }, - }, - { - "vrfs": { "MGMT": { "peerList": [ { @@ -4532,10 +3572,6 @@ DATA: list[dict[str, Any]] = [ } ] }, - }, - }, - { - "vrfs": { "MGMT": { "peerList": [ { @@ -4557,9 +3593,10 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": 
"failure", "messages": [ - "The following BGP peers are not configured or has an incorrect or missing route map in either the inbound or outbound direction:\n" - "{'10.100.0.8': {'default': {'Inbound route-map': 'RM-MLAG-PEER', 'Outbound route-map': 'RM-MLAG-PEER'}}, " - "'10.100.0.10': {'MGMT': {'Inbound route-map': 'RM-MLAG-PEER', 'Outbound route-map': 'RM-MLAG-PEER'}}}" + "Peer: 10.100.0.8 VRF: default - Inbound route-map mismatch - Expected: RM-MLAG-PEER-IN, Actual: RM-MLAG-PEER", + "Peer: 10.100.0.8 VRF: default - Outbound route-map mismatch - Expected: RM-MLAG-PEER-OUT, Actual: RM-MLAG-PEER", + "Peer: 10.100.0.10 VRF: MGMT - Inbound route-map mismatch - Expected: RM-MLAG-PEER-IN, Actual: RM-MLAG-PEER", + "Peer: 10.100.0.10 VRF: MGMT - Outbound route-map mismatch - Expected: RM-MLAG-PEER-OUT, Actual: RM-MLAG-PEER", ], }, }, @@ -4578,10 +3615,6 @@ DATA: list[dict[str, Any]] = [ } ] }, - }, - }, - { - "vrfs": { "MGMT": { "peerList": [ { @@ -4603,8 +3636,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "The following BGP peers are not configured or has an incorrect or missing route map in either the inbound or outbound direction:\n" - "{'10.100.0.8': {'default': {'Inbound route-map': 'RM-MLAG-PEER'}}, '10.100.0.10': {'MGMT': {'Inbound route-map': 'RM-MLAG-PEER'}}}" + "Peer: 10.100.0.8 VRF: default - Inbound route-map mismatch - Expected: RM-MLAG-PEER-IN, Actual: RM-MLAG-PEER", + "Peer: 10.100.0.10 VRF: MGMT - Inbound route-map mismatch - Expected: RM-MLAG-PEER-IN, Actual: RM-MLAG-PEER", ], }, }, @@ -4621,10 +3654,6 @@ DATA: list[dict[str, Any]] = [ } ] }, - }, - }, - { - "vrfs": { "MGMT": { "peerList": [ { @@ -4644,9 +3673,10 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "The following BGP peers are not configured or has an incorrect or missing route map in either the inbound or outbound direction:\n" - "{'10.100.0.8': {'default': {'Inbound route-map': 'Not Configured', 'Outbound route-map': 'Not Configured'}}, " - "'10.100.0.10': {'MGMT': {'Inbound route-map': 'Not Configured', 'Outbound route-map': 'Not Configured'}}}" + "Peer: 10.100.0.8 VRF: default - Inbound route-map mismatch - Expected: RM-MLAG-PEER-IN, Actual: Not Configured", + "Peer: 10.100.0.8 VRF: default - Outbound route-map mismatch - Expected: RM-MLAG-PEER-OUT, Actual: Not Configured", + "Peer: 10.100.0.10 VRF: MGMT - Inbound route-map mismatch - Expected: RM-MLAG-PEER-IN, Actual: Not Configured", + "Peer: 10.100.0.10 VRF: MGMT - Outbound route-map mismatch - Expected: RM-MLAG-PEER-OUT, Actual: Not Configured", ], }, }, @@ -4657,10 +3687,6 @@ DATA: list[dict[str, Any]] = [ { "vrfs": { "default": {"peerList": []}, - }, - }, - { - "vrfs": { "MGMT": {"peerList": []}, }, }, @@ -4674,8 +3700,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "The following BGP peers are not configured or has an incorrect or missing route map in either the inbound or outbound direction:\n" - "{'10.100.0.8': {'default': 'Not configured'}, '10.100.0.10': {'MGMT': 'Not configured'}}" + "Peer: 10.100.0.8 VRF: default - Not found", + "Peer: 10.100.0.10 VRF: MGMT - Not found", ], }, }, @@ -4694,10 +3720,6 @@ DATA: list[dict[str, Any]] = [ } ] }, - }, - }, - { - "vrfs": { "MGMT": { "peerList": [ { @@ -4725,10 +3747,6 @@ DATA: list[dict[str, Any]] = [ { "vrfs": { "default": {}, - }, - }, - { - "vrfs": { "MGMT": {}, }, }, @@ -4742,8 +3760,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "The following BGP 
peer(s) are not configured or maximum routes and maximum routes warning limit is not correct:\n" - "{'10.100.0.8': {'default': 'Not configured'}, '10.100.0.9': {'MGMT': 'Not configured'}}" + "Peer: 10.100.0.8 VRF: default - Not found", + "Peer: 10.100.0.9 VRF: MGMT - Not found", ], }, }, @@ -4762,10 +3780,6 @@ DATA: list[dict[str, Any]] = [ } ] }, - }, - }, - { - "vrfs": { "MGMT": { "peerList": [ { @@ -4787,9 +3801,10 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "The following BGP peer(s) are not configured or maximum routes and maximum routes warning limit is not correct:\n" - "{'10.100.0.8': {'default': {'Maximum total routes': 13000, 'Warning limit': 11000}}, " - "'10.100.0.9': {'MGMT': {'Maximum total routes': 11000, 'Warning limit': 10000}}}" + "Peer: 10.100.0.8 VRF: default - Maximum routes mismatch - Expected: 12000, Actual: 13000", + "Peer: 10.100.0.8 VRF: default - Maximum route warning limit mismatch - Expected: 10000, Actual: 11000", + "Peer: 10.100.0.9 VRF: MGMT - Maximum routes mismatch - Expected: 10000, Actual: 11000", + "Peer: 10.100.0.9 VRF: MGMT - Maximum route warning limit mismatch - Expected: 9000, Actual: 10000", ], }, }, @@ -4807,10 +3822,6 @@ DATA: list[dict[str, Any]] = [ } ] }, - }, - }, - { - "vrfs": { "MGMT": { "peerList": [ { @@ -4830,9 +3841,9 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "The following BGP peer(s) are not configured or maximum routes and maximum routes warning limit is not correct:\n" - "{'10.100.0.8': {'default': {'Warning limit': 'Not Found'}}, " - "'10.100.0.9': {'MGMT': {'Maximum total routes': 'Not Found', 'Warning limit': 'Not Found'}}}" + "Peer: 10.100.0.8 VRF: default - Maximum route warning limit mismatch - Expected: 10000, Actual: Not Found", + "Peer: 10.100.0.9 VRF: MGMT - Maximum routes mismatch - Expected: 10000, Actual: Not Found", + "Peer: 10.100.0.9 VRF: MGMT - Maximum route warning limit mismatch - Expected: 9000, Actual: Not Found", ], }, }, diff --git a/tests/units/anta_tests/routing/test_generic.py b/tests/units/anta_tests/routing/test_generic.py index 20f83b9..4e9d654 100644 --- a/tests/units/anta_tests/routing/test_generic.py +++ b/tests/units/anta_tests/routing/test_generic.py @@ -11,7 +11,7 @@ from typing import Any import pytest from pydantic import ValidationError -from anta.tests.routing.generic import VerifyRoutingProtocolModel, VerifyRoutingTableEntry, VerifyRoutingTableSize +from anta.tests.routing.generic import VerifyIPv4RouteType, VerifyRoutingProtocolModel, VerifyRoutingTableEntry, VerifyRoutingTableSize from tests.units.anta_tests import test DATA: list[dict[str, Any]] = [ @@ -304,6 +304,50 @@ DATA: list[dict[str, Any]] = [ "inputs": {"vrf": "default", "routes": ["10.1.0.1", "10.1.0.2"], "collect": "all"}, "expected": {"result": "failure", "messages": ["The following route(s) are missing from the routing table of VRF default: ['10.1.0.2']"]}, }, + { + "name": "success-valid-route-type", + "test": VerifyIPv4RouteType, + "eos_data": [ + { + "vrfs": { + "default": {"routes": {"10.10.0.1/32": {"routeType": "eBGP"}, "10.100.0.12/31": {"routeType": "connected"}}}, + "MGMT": {"routes": {"10.100.1.5/32": {"routeType": "iBGP"}}}, + } + } + ], + "inputs": { + "routes_entries": [ + {"vrf": "default", "prefix": "10.10.0.1/32", "route_type": "eBGP"}, + {"vrf": "default", "prefix": "10.100.0.12/31", "route_type": "connected"}, + {"vrf": "MGMT", "prefix": "10.100.1.5/32", "route_type": "iBGP"}, + ] + }, + "expected": {"result": "success"}, + }, 
+ { + "name": "failure-route-not-found", + "test": VerifyIPv4RouteType, + "eos_data": [{"vrfs": {"default": {"routes": {}}}}], + "inputs": {"routes_entries": [{"vrf": "default", "prefix": "10.10.0.1/32", "route_type": "eBGP"}]}, + "expected": {"result": "failure", "messages": ["Prefix: 10.10.0.1/32 VRF: default - Route not found"]}, + }, + { + "name": "failure-invalid-route-type", + "test": VerifyIPv4RouteType, + "eos_data": [{"vrfs": {"default": {"routes": {"10.10.0.1/32": {"routeType": "eBGP"}}}}}], + "inputs": {"routes_entries": [{"vrf": "default", "prefix": "10.10.0.1/32", "route_type": "iBGP"}]}, + "expected": { + "result": "failure", + "messages": ["Prefix: 10.10.0.1/32 VRF: default - Incorrect route type - Expected: iBGP Actual: eBGP"], + }, + }, + { + "name": "failure-vrf-not-configured", + "test": VerifyIPv4RouteType, + "eos_data": [{"vrfs": {}}], + "inputs": {"routes_entries": [{"vrf": "default", "prefix": "10.10.0.1/32", "route_type": "eBGP"}]}, + "expected": {"result": "failure", "messages": ["Prefix: 10.10.0.1/32 VRF: default - VRF not configured"]}, + }, ] diff --git a/tests/units/anta_tests/test_avt.py b/tests/units/anta_tests/test_avt.py index 80fbce0..d9cdaa1 100644 --- a/tests/units/anta_tests/test_avt.py +++ b/tests/units/anta_tests/test_avt.py @@ -361,48 +361,63 @@ DATA: list[dict[str, Any]] = [ "avts": { "DEFAULT-AVT-POLICY-CONTROL-PLANE": { "avtPaths": { - "direct:10": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "direct:9": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "multihop:1": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "multihop:3": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - } - } - } - } - } - }, - { - "vrfs": { - "data": { - "avts": { - "DATA-AVT-POLICY-CONTROL-PLANE": { - "avtPaths": { - "direct:10": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "direct:9": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "direct:8": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "multihop:1": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "multihop:3": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, + "direct:10": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.2", + }, + "direct:9": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.2", + }, + "multihop:1": { + "flags": {"directPath": False, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.2", + }, + "multihop:3": { + "flags": {"directPath": False, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.2", + }, } } - } - } - } - }, - { - "vrfs": { + }, + }, "data": { "avts": { "DATA-AVT-POLICY-CONTROL-PLANE": { "avtPaths": { - "direct:10": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "direct:9": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "direct:8": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": 
"10.101.255.2"}, - "multihop:1": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "multihop:3": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, + "direct:10": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.1", + }, + "direct:9": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.1", + }, + "direct:8": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.2", + "destination": "10.101.255.1", + }, + "multihop:1": { + "flags": {"directPath": False, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.2", + "destination": "10.101.255.1", + }, + "multihop:3": { + "flags": {"directPath": False, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.2", + "destination": "10.101.255.1", + }, } - } + }, } - } + }, } }, ], @@ -420,36 +435,85 @@ DATA: list[dict[str, Any]] = [ "test": VerifyAVTSpecificPath, "eos_data": [ {"vrfs": {}}, + ], + "inputs": { + "avt_paths": [ + {"avt_name": "MGMT-AVT-POLICY-DEFAULT", "vrf": "default", "destination": "10.101.255.2", "next_hop": "10.101.255.1", "path_type": "multihop"}, + {"avt_name": "DATA-AVT-POLICY-CONTROL-PLANE", "vrf": "data", "destination": "10.101.255.1", "next_hop": "10.101.255.2", "path_type": "multihop"}, + ] + }, + "expected": { + "result": "failure", + "messages": ["AVT MGMT-AVT-POLICY-DEFAULT VRF: default (Destination: 10.101.255.2, Next-hop: 10.101.255.1) - No AVT path configured"], + }, + }, + { + "name": "failure-path_type_check_true", + "test": VerifyAVTSpecificPath, + "eos_data": [ { "vrfs": { + "default": { + "avts": { + "DEFAULT-AVT-POLICY-CONTROL-PLANE": { + "avtPaths": { + "direct:10": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.2", + }, + "direct:9": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.2", + }, + } + } + }, + }, "data": { "avts": { "DATA-AVT-POLICY-CONTROL-PLANE": { "avtPaths": { - "direct:10": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "direct:9": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "multihop:1": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "multihop:3": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, + "direct:10": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.3", + }, + "direct:9": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.3", + }, } - } + }, } - } + }, } }, ], "inputs": { "avt_paths": [ - {"avt_name": "MGMT-AVT-POLICY-DEFAULT", "vrf": "default", "destination": "10.101.255.2", "next_hop": "10.101.255.1", "path_type": "multihop"}, - {"avt_name": "DATA-AVT-POLICY-CONTROL-PLANE", "vrf": "data", "destination": "10.101.255.1", "next_hop": "10.101.255.2", "path_type": "multihop"}, + { + "avt_name": "DEFAULT-AVT-POLICY-CONTROL-PLANE", + "vrf": "default", + "destination": "10.101.255.2", + "next_hop": "10.101.255.11", + "path_type": "multihop", + }, + {"avt_name": 
"DATA-AVT-POLICY-CONTROL-PLANE", "vrf": "data", "destination": "10.101.255.1", "next_hop": "10.101.255.21", "path_type": "direct"}, ] }, "expected": { "result": "failure", - "messages": ["AVT configuration for peer '10.101.255.2' under topology 'MGMT-AVT-POLICY-DEFAULT' in VRF 'default' is not found."], + "messages": [ + "AVT DEFAULT-AVT-POLICY-CONTROL-PLANE VRF: default (Destination: 10.101.255.2, Next-hop: 10.101.255.11) Path Type: multihop - Path not found", + "AVT DATA-AVT-POLICY-CONTROL-PLANE VRF: data (Destination: 10.101.255.1, Next-hop: 10.101.255.21) Path Type: direct - Path not found", + ], }, }, { - "name": "failure-no-path-with-correct-next-hop", + "name": "failure-path_type_check_false", "test": VerifyAVTSpecificPath, "eos_data": [ { @@ -458,30 +522,38 @@ DATA: list[dict[str, Any]] = [ "avts": { "DEFAULT-AVT-POLICY-CONTROL-PLANE": { "avtPaths": { - "direct:10": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "direct:9": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "multihop:1": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "multihop:3": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, + "direct:10": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.2", + }, + "direct:9": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.2", + }, } } - } - } - } - }, - { - "vrfs": { + }, + }, "data": { "avts": { "DATA-AVT-POLICY-CONTROL-PLANE": { "avtPaths": { - "direct:10": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "direct:9": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "multihop:1": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "multihop:3": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, + "direct:10": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.3", + }, + "direct:9": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.3", + }, } - } + }, } - } + }, } }, ], @@ -492,18 +564,15 @@ DATA: list[dict[str, Any]] = [ "vrf": "default", "destination": "10.101.255.2", "next_hop": "10.101.255.11", - "path_type": "multihop", }, - {"avt_name": "DATA-AVT-POLICY-CONTROL-PLANE", "vrf": "data", "destination": "10.101.255.1", "next_hop": "10.101.255.21", "path_type": "direct"}, + {"avt_name": "DATA-AVT-POLICY-CONTROL-PLANE", "vrf": "data", "destination": "10.101.255.1", "next_hop": "10.101.255.21"}, ] }, "expected": { "result": "failure", "messages": [ - "No 'multihop' path found with next-hop address '10.101.255.11' for AVT peer '10.101.255.2' under " - "topology 'DEFAULT-AVT-POLICY-CONTROL-PLANE' in VRF 'default'.", - "No 'direct' path found with next-hop address '10.101.255.21' for AVT peer '10.101.255.1' under " - "topology 'DATA-AVT-POLICY-CONTROL-PLANE' in VRF 'data'.", + "AVT DEFAULT-AVT-POLICY-CONTROL-PLANE VRF: default (Destination: 10.101.255.2, Next-hop: 10.101.255.11) - Path not found", + "AVT DATA-AVT-POLICY-CONTROL-PLANE VRF: data (Destination: 10.101.255.1, Next-hop: 10.101.255.21) - 
Path not found", ], }, }, @@ -517,30 +586,48 @@ DATA: list[dict[str, Any]] = [ "avts": { "DEFAULT-AVT-POLICY-CONTROL-PLANE": { "avtPaths": { - "direct:10": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "direct:9": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "multihop:1": {"flags": {"directPath": True, "valid": False, "active": False}, "nexthopAddr": "10.101.255.1"}, - "multihop:3": {"flags": {"directPath": False, "valid": True, "active": False}, "nexthopAddr": "10.101.255.1"}, + "multihop:3": { + "flags": {"directPath": False, "valid": False, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.2", + }, } } - } - } - } - }, - { - "vrfs": { + }, + }, "data": { "avts": { "DATA-AVT-POLICY-CONTROL-PLANE": { "avtPaths": { - "direct:10": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "direct:9": {"flags": {"directPath": True, "valid": False, "active": True}, "nexthopAddr": "10.101.255.1"}, - "multihop:1": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "multihop:3": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, + "direct:10": { + "flags": {"directPath": True, "valid": False, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.1", + }, + "direct:9": { + "flags": {"directPath": True, "valid": True, "active": False}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.1", + }, + "direct:8": { + "flags": {"directPath": True, "valid": False, "active": False}, + "nexthopAddr": "10.101.255.2", + "destination": "10.101.255.1", + }, + "multihop:1": { + "flags": {"directPath": False, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.2", + "destination": "10.101.255.1", + }, + "multihop:3": { + "flags": {"directPath": False, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.2", + "destination": "10.101.255.1", + }, } - } + }, } - } + }, } }, ], @@ -559,8 +646,12 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "AVT path 'multihop:3' for topology 'DEFAULT-AVT-POLICY-CONTROL-PLANE' in VRF 'default' is inactive.", - "AVT path 'direct:9' for topology 'DATA-AVT-POLICY-CONTROL-PLANE' in VRF 'data' is invalid.", + "AVT DEFAULT-AVT-POLICY-CONTROL-PLANE VRF: default (Destination: 10.101.255.2, Next-hop: 10.101.255.1) - " + "Incorrect path multihop:3 - Valid: False, Active: True", + "AVT DATA-AVT-POLICY-CONTROL-PLANE VRF: data (Destination: 10.101.255.1, Next-hop: 10.101.255.1) - " + "Incorrect path direct:10 - Valid: False, Active: True", + "AVT DATA-AVT-POLICY-CONTROL-PLANE VRF: data (Destination: 10.101.255.1, Next-hop: 10.101.255.1) - " + "Incorrect path direct:9 - Valid: True, Active: False", ], }, }, diff --git a/tests/units/anta_tests/test_bfd.py b/tests/units/anta_tests/test_bfd.py index 9bd6465..952e838 100644 --- a/tests/units/anta_tests/test_bfd.py +++ b/tests/units/anta_tests/test_bfd.py @@ -107,8 +107,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BFD peers are not configured or timers are not correct:\n" - "{'192.0.255.7': {'CS': 'Not Configured'}, '192.0.255.70': {'MGMT': 'Not Configured'}}" + "Peer: 192.0.255.7 VRF: CS - Not found", + "Peer: 192.0.255.70 VRF: MGMT - Not found", ], }, }, @@ -160,9 +160,11 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", 
"messages": [ - "Following BFD peers are not configured or timers are not correct:\n" - "{'192.0.255.7': {'default': {'tx_interval': 1300, 'rx_interval': 1200, 'multiplier': 4}}, " - "'192.0.255.70': {'MGMT': {'tx_interval': 120, 'rx_interval': 120, 'multiplier': 5}}}" + "Peer: 192.0.255.7 VRF: default - Incorrect Transmit interval - Expected: 1200 Actual: 1300", + "Peer: 192.0.255.7 VRF: default - Incorrect Multiplier - Expected: 3 Actual: 4", + "Peer: 192.0.255.70 VRF: MGMT - Incorrect Transmit interval - Expected: 1200 Actual: 120", + "Peer: 192.0.255.70 VRF: MGMT - Incorrect Receive interval - Expected: 1200 Actual: 120", + "Peer: 192.0.255.70 VRF: MGMT - Incorrect Multiplier - Expected: 3 Actual: 5", ], }, }, @@ -239,8 +241,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BFD peers are not configured, status is not up or remote disc is zero:\n" - "{'192.0.255.7': {'CS': 'Not Configured'}, '192.0.255.70': {'MGMT': 'Not Configured'}}" + "Peer: 192.0.255.7 VRF: CS - Not found", + "Peer: 192.0.255.70 VRF: MGMT - Not found", ], }, }, @@ -255,7 +257,7 @@ DATA: list[dict[str, Any]] = [ "192.0.255.7": { "peerStats": { "": { - "status": "Down", + "status": "down", "remoteDisc": 108328132, } } @@ -267,7 +269,7 @@ DATA: list[dict[str, Any]] = [ "192.0.255.70": { "peerStats": { "": { - "status": "Down", + "status": "down", "remoteDisc": 0, } } @@ -281,9 +283,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BFD peers are not configured, status is not up or remote disc is zero:\n" - "{'192.0.255.7': {'default': {'status': 'Down', 'remote_disc': 108328132}}, " - "'192.0.255.70': {'MGMT': {'status': 'Down', 'remote_disc': 0}}}" + "Peer: 192.0.255.7 VRF: default - Session not properly established - State: down Remote Discriminator: 108328132", + "Peer: 192.0.255.70 VRF: MGMT - Session not properly established - State: down Remote Discriminator: 0", ], }, }, @@ -414,7 +415,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BFD peers are not up:\n192.0.255.7 is down in default VRF with remote disc 0.\n192.0.255.71 is down in MGMT VRF with remote disc 0." + "Peer: 192.0.255.7 VRF: default - Session not properly established - State: down Remote Discriminator: 0", + "Peer: 192.0.255.71 VRF: MGMT - Session not properly established - State: down Remote Discriminator: 0", ], }, }, @@ -458,7 +460,10 @@ DATA: list[dict[str, Any]] = [ "inputs": {}, "expected": { "result": "failure", - "messages": ["Following BFD peers were down:\n192.0.255.7 in default VRF has remote disc 0.\n192.0.255.71 in default VRF has remote disc 0."], + "messages": [ + "Peer: 192.0.255.7 VRF: default - Session not properly established - State: up Remote Discriminator: 0", + "Peer: 192.0.255.71 VRF: default - Session not properly established - State: up Remote Discriminator: 0", + ], }, }, { @@ -512,8 +517,9 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Following BFD peers were down:\n192.0.255.7 in default VRF was down 3 hours ago.\n" - "192.0.255.71 in default VRF was down 3 hours ago.\n192.0.255.17 in default VRF was down 3 hours ago." 
+ "Peer: 192.0.255.7 VRF: default - Session failure detected within the expected uptime threshold (3 hours ago)", + "Peer: 192.0.255.71 VRF: default - Session failure detected within the expected uptime threshold (3 hours ago)", + "Peer: 192.0.255.17 VRF: default - Session failure detected within the expected uptime threshold (3 hours ago)", ], }, }, @@ -609,15 +615,14 @@ DATA: list[dict[str, Any]] = [ "inputs": { "bfd_peers": [ {"peer_address": "192.0.255.7", "vrf": "default", "protocols": ["isis"]}, - {"peer_address": "192.0.255.70", "vrf": "MGMT", "protocols": ["isis"]}, + {"peer_address": "192.0.255.70", "vrf": "MGMT", "protocols": ["isis", "ospf"]}, ] }, "expected": { "result": "failure", "messages": [ - "The following BFD peers are not configured or have non-registered protocol(s):\n" - "{'192.0.255.7': {'default': ['isis']}, " - "'192.0.255.70': {'MGMT': ['isis']}}" + "Peer: 192.0.255.7 VRF: default - `isis` routing protocol(s) not configured", + "Peer: 192.0.255.70 VRF: MGMT - `isis` `ospf` routing protocol(s) not configured", ], }, }, @@ -641,8 +646,8 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "The following BFD peers are not configured or have non-registered protocol(s):\n" - "{'192.0.255.7': {'default': 'Not Configured'}, '192.0.255.70': {'MGMT': 'Not Configured'}}" + "Peer: 192.0.255.7 VRF: default - Not found", + "Peer: 192.0.255.70 VRF: MGMT - Not found", ], }, }, diff --git a/tests/units/anta_tests/test_connectivity.py b/tests/units/anta_tests/test_connectivity.py index beeaae6..eac3084 100644 --- a/tests/units/anta_tests/test_connectivity.py +++ b/tests/units/anta_tests/test_connectivity.py @@ -153,7 +153,7 @@ DATA: list[dict[str, Any]] = [ ], }, ], - "expected": {"result": "failure", "messages": ["Connectivity test failed for the following source-destination pairs: [('10.0.0.5', '10.0.0.11')]"]}, + "expected": {"result": "failure", "messages": ["Host 10.0.0.11 (src: 10.0.0.5, vrf: default, size: 100B, repeat: 2) - Unreachable"]}, }, { "name": "failure-interface", @@ -187,7 +187,7 @@ DATA: list[dict[str, Any]] = [ ], }, ], - "expected": {"result": "failure", "messages": ["Connectivity test failed for the following source-destination pairs: [('Management0', '10.0.0.11')]"]}, + "expected": {"result": "failure", "messages": ["Host 10.0.0.11 (src: Management0, vrf: default, size: 100B, repeat: 2) - Unreachable"]}, }, { "name": "failure-size", @@ -209,17 +209,11 @@ DATA: list[dict[str, Any]] = [ ], }, ], - "expected": {"result": "failure", "messages": ["Connectivity test failed for the following source-destination pairs: [('Management0', '10.0.0.1')]"]}, + "expected": {"result": "failure", "messages": ["Host 10.0.0.1 (src: Management0, vrf: default, size: 1501B, repeat: 5, df-bit: enabled) - Unreachable"]}, }, { "name": "success", "test": VerifyLLDPNeighbors, - "inputs": { - "neighbors": [ - {"port": "Ethernet1", "neighbor_device": "DC1-SPINE1", "neighbor_port": "Ethernet1"}, - {"port": "Ethernet2", "neighbor_device": "DC1-SPINE2", "neighbor_port": "Ethernet1"}, - ], - }, "eos_data": [ { "lldpNeighbors": { @@ -256,16 +250,17 @@ DATA: list[dict[str, Any]] = [ }, }, ], + "inputs": { + "neighbors": [ + {"port": "Ethernet1", "neighbor_device": "DC1-SPINE1", "neighbor_port": "Ethernet1"}, + {"port": "Ethernet2", "neighbor_device": "DC1-SPINE2", "neighbor_port": "Ethernet1"}, + ], + }, "expected": {"result": "success"}, }, { "name": "success-multiple-neighbors", "test": VerifyLLDPNeighbors, - "inputs": { - "neighbors": [ - {"port": 
"Ethernet1", "neighbor_device": "DC1-SPINE2", "neighbor_port": "Ethernet1"}, - ], - }, "eos_data": [ { "lldpNeighbors": { @@ -298,17 +293,16 @@ DATA: list[dict[str, Any]] = [ }, }, ], + "inputs": { + "neighbors": [ + {"port": "Ethernet1", "neighbor_device": "DC1-SPINE2", "neighbor_port": "Ethernet1"}, + ], + }, "expected": {"result": "success"}, }, { "name": "failure-port-not-configured", "test": VerifyLLDPNeighbors, - "inputs": { - "neighbors": [ - {"port": "Ethernet1", "neighbor_device": "DC1-SPINE1", "neighbor_port": "Ethernet1"}, - {"port": "Ethernet2", "neighbor_device": "DC1-SPINE2", "neighbor_port": "Ethernet1"}, - ], - }, "eos_data": [ { "lldpNeighbors": { @@ -330,17 +324,17 @@ DATA: list[dict[str, Any]] = [ }, }, ], - "expected": {"result": "failure", "messages": ["Port(s) not configured:\n Ethernet2"]}, - }, - { - "name": "failure-no-neighbor", - "test": VerifyLLDPNeighbors, "inputs": { "neighbors": [ {"port": "Ethernet1", "neighbor_device": "DC1-SPINE1", "neighbor_port": "Ethernet1"}, {"port": "Ethernet2", "neighbor_device": "DC1-SPINE2", "neighbor_port": "Ethernet1"}, ], }, + "expected": {"result": "failure", "messages": ["Port Ethernet2 (Neighbor: DC1-SPINE2, Neighbor Port: Ethernet1) - Port not found"]}, + }, + { + "name": "failure-no-neighbor", + "test": VerifyLLDPNeighbors, "eos_data": [ { "lldpNeighbors": { @@ -363,17 +357,17 @@ DATA: list[dict[str, Any]] = [ }, }, ], - "expected": {"result": "failure", "messages": ["No LLDP neighbor(s) on port(s):\n Ethernet2"]}, - }, - { - "name": "failure-wrong-neighbor", - "test": VerifyLLDPNeighbors, "inputs": { "neighbors": [ {"port": "Ethernet1", "neighbor_device": "DC1-SPINE1", "neighbor_port": "Ethernet1"}, {"port": "Ethernet2", "neighbor_device": "DC1-SPINE2", "neighbor_port": "Ethernet1"}, ], }, + "expected": {"result": "failure", "messages": ["Port Ethernet2 (Neighbor: DC1-SPINE2, Neighbor Port: Ethernet1) - No LLDP neighbors"]}, + }, + { + "name": "failure-wrong-neighbor", + "test": VerifyLLDPNeighbors, "eos_data": [ { "lldpNeighbors": { @@ -410,18 +404,20 @@ DATA: list[dict[str, Any]] = [ }, }, ], - "expected": {"result": "failure", "messages": ["Wrong LLDP neighbor(s) on port(s):\n Ethernet2\n DC1-SPINE2_Ethernet2"]}, - }, - { - "name": "failure-multiple", - "test": VerifyLLDPNeighbors, "inputs": { "neighbors": [ {"port": "Ethernet1", "neighbor_device": "DC1-SPINE1", "neighbor_port": "Ethernet1"}, {"port": "Ethernet2", "neighbor_device": "DC1-SPINE2", "neighbor_port": "Ethernet1"}, - {"port": "Ethernet3", "neighbor_device": "DC1-SPINE3", "neighbor_port": "Ethernet1"}, ], }, + "expected": { + "result": "failure", + "messages": ["Port Ethernet2 (Neighbor: DC1-SPINE2, Neighbor Port: Ethernet1) - Wrong LLDP neighbors: DC1-SPINE2/Ethernet2"], + }, + }, + { + "name": "failure-multiple", + "test": VerifyLLDPNeighbors, "eos_data": [ { "lldpNeighbors": { @@ -444,23 +440,25 @@ DATA: list[dict[str, Any]] = [ }, }, ], + "inputs": { + "neighbors": [ + {"port": "Ethernet1", "neighbor_device": "DC1-SPINE1", "neighbor_port": "Ethernet1"}, + {"port": "Ethernet2", "neighbor_device": "DC1-SPINE2", "neighbor_port": "Ethernet1"}, + {"port": "Ethernet3", "neighbor_device": "DC1-SPINE3", "neighbor_port": "Ethernet1"}, + ], + }, "expected": { "result": "failure", "messages": [ - "Wrong LLDP neighbor(s) on port(s):\n Ethernet1\n DC1-SPINE1_Ethernet2\n" - "No LLDP neighbor(s) on port(s):\n Ethernet2\n" - "Port(s) not configured:\n Ethernet3" + "Port Ethernet1 (Neighbor: DC1-SPINE1, Neighbor Port: Ethernet1) - Wrong LLDP neighbors: 
DC1-SPINE1/Ethernet2", + "Port Ethernet2 (Neighbor: DC1-SPINE2, Neighbor Port: Ethernet1) - No LLDP neighbors", + "Port Ethernet3 (Neighbor: DC1-SPINE3, Neighbor Port: Ethernet1) - Port not found", ], }, }, { "name": "failure-multiple-neighbors", "test": VerifyLLDPNeighbors, - "inputs": { - "neighbors": [ - {"port": "Ethernet1", "neighbor_device": "DC1-SPINE3", "neighbor_port": "Ethernet1"}, - ], - }, "eos_data": [ { "lldpNeighbors": { @@ -493,6 +491,14 @@ DATA: list[dict[str, Any]] = [ }, }, ], - "expected": {"result": "failure", "messages": ["Wrong LLDP neighbor(s) on port(s):\n Ethernet1\n DC1-SPINE1_Ethernet1\n DC1-SPINE2_Ethernet1"]}, + "inputs": { + "neighbors": [ + {"port": "Ethernet1", "neighbor_device": "DC1-SPINE3", "neighbor_port": "Ethernet1"}, + ], + }, + "expected": { + "result": "failure", + "messages": ["Port Ethernet1 (Neighbor: DC1-SPINE3, Neighbor Port: Ethernet1) - Wrong LLDP neighbors: DC1-SPINE1/Ethernet1, DC1-SPINE2/Ethernet1"], + }, }, ] diff --git a/tests/units/anta_tests/test_cvx.py b/tests/units/anta_tests/test_cvx.py new file mode 100644 index 0000000..46d83b0 --- /dev/null +++ b/tests/units/anta_tests/test_cvx.py @@ -0,0 +1,525 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Data for testing anta.tests.cvx.""" + +from __future__ import annotations + +from typing import Any + +from anta.tests.cvx import VerifyActiveCVXConnections, VerifyCVXClusterStatus, VerifyManagementCVX, VerifyMcsClientMounts, VerifyMcsServerMounts +from tests.units.anta_tests import test + +DATA: list[dict[str, Any]] = [ + { + "name": "success", + "test": VerifyMcsClientMounts, + "eos_data": [{"mountStates": [{"path": "mcs/v1/toSwitch/28-99-3a-8f-93-7b", "type": "Mcs::DeviceConfigV1", "state": "mountStateMountComplete"}]}], + "inputs": None, + "expected": {"result": "success"}, + }, + { + "name": "success-haclient", + "test": VerifyMcsClientMounts, + "eos_data": [ + { + "mountStates": [ + {"path": "mcs/v1/apiCfgRedState", "type": "Mcs::ApiConfigRedundancyState", "state": "mountStateMountComplete"}, + {"path": "mcs/v1/toSwitch/00-1c-73-74-c0-8b", "type": "Mcs::DeviceConfigV1", "state": "mountStateMountComplete"}, + ] + }, + ], + "inputs": None, + "expected": {"result": "success"}, + }, + { + "name": "success-partial-non-mcs", + "test": VerifyMcsClientMounts, + "eos_data": [ + { + "mountStates": [ + {"path": "blah/blah/blah", "type": "blah::blah", "state": "mountStatePreservedUnmounted"}, + {"path": "mcs/v1/toSwitch/00-1c-73-74-c0-8b", "type": "Mcs::DeviceConfigV1", "state": "mountStateMountComplete"}, + ] + }, + ], + "inputs": None, + "expected": {"result": "success"}, + }, + { + "name": "failure-nomounts", + "test": VerifyMcsClientMounts, + "eos_data": [ + {"mountStates": []}, + ], + "inputs": None, + "expected": {"result": "failure", "messages": ["MCS Client mount states are not present"]}, + }, + { + "name": "failure-mountStatePreservedUnmounted", + "test": VerifyMcsClientMounts, + "eos_data": [{"mountStates": [{"path": "mcs/v1/toSwitch/28-99-3a-8f-93-7b", "type": "Mcs::DeviceConfigV1", "state": "mountStatePreservedUnmounted"}]}], + "inputs": None, + "expected": {"result": "failure", "messages": ["MCS Client mount states are not valid: mountStatePreservedUnmounted"]}, + }, + { + "name": "failure-partial-haclient", + "test": VerifyMcsClientMounts, + "eos_data": [ + { + "mountStates": [ + {"path": "mcs/v1/apiCfgRedState", "type": "Mcs::ApiConfigRedundancyState", "state": 
"mountStateMountComplete"}, + {"path": "mcs/v1/toSwitch/00-1c-73-74-c0-8b", "type": "Mcs::DeviceConfigV1", "state": "mountStatePreservedUnmounted"}, + ] + }, + ], + "inputs": None, + "expected": {"result": "failure", "messages": ["MCS Client mount states are not valid: mountStatePreservedUnmounted"]}, + }, + { + "name": "failure-full-haclient", + "test": VerifyMcsClientMounts, + "eos_data": [ + { + "mountStates": [ + {"path": "blah/blah/blah", "type": "blah::blahState", "state": "mountStatePreservedUnmounted"}, + {"path": "mcs/v1/toSwitch/00-1c-73-74-c0-8b", "type": "Mcs::DeviceConfigV1", "state": "mountStatePreservedUnmounted"}, + ] + }, + ], + "inputs": None, + "expected": {"result": "failure", "messages": ["MCS Client mount states are not valid: mountStatePreservedUnmounted"]}, + }, + { + "name": "failure-non-mcs-client", + "test": VerifyMcsClientMounts, + "eos_data": [ + {"mountStates": [{"path": "blah/blah/blah", "type": "blah::blahState", "state": "mountStatePreservedUnmounted"}]}, + ], + "inputs": None, + "expected": {"result": "failure", "messages": ["MCS Client mount states are not present"]}, + }, + { + "name": "failure-partial-mcs-client", + "test": VerifyMcsClientMounts, + "eos_data": [ + { + "mountStates": [ + {"path": "blah/blah/blah", "type": "blah::blahState", "state": "mountStatePreservedUnmounted"}, + {"path": "blah/blah/blah", "type": "Mcs::DeviceConfigV1", "state": "mountStatePreservedUnmounted"}, + ] + }, + ], + "inputs": None, + "expected": {"result": "failure", "messages": ["MCS Client mount states are not valid: mountStatePreservedUnmounted"]}, + }, + { + "name": "success-enabled", + "test": VerifyManagementCVX, + "eos_data": [ + { + "clusterStatus": { + "enabled": True, + } + } + ], + "inputs": {"enabled": True}, + "expected": {"result": "success"}, + }, + { + "name": "success-disabled", + "test": VerifyManagementCVX, + "eos_data": [ + { + "clusterStatus": { + "enabled": False, + } + } + ], + "inputs": {"enabled": False}, + "expected": {"result": "success"}, + }, + { + "name": "failure - no enabled state", + "test": VerifyManagementCVX, + "eos_data": [{"clusterStatus": {}}], + "inputs": {"enabled": False}, + "expected": {"result": "failure", "messages": ["Management CVX status is not valid: None"]}, + }, + { + "name": "failure - no clusterStatus", + "test": VerifyManagementCVX, + "eos_data": [{}], + "inputs": {"enabled": False}, + "expected": {"result": "failure", "messages": ["Management CVX status is not valid: None"]}, + }, + { + "name": "success", + "test": VerifyMcsServerMounts, + "eos_data": [ + { + "connections": [ + { + "hostname": "media-leaf-1", + "mounts": [ + { + "service": "Mcs", + "mountStates": [ + { + "pathStates": [ + {"path": "mcs/v1/apiCfgRedStatus", "type": "Mcs::ApiConfigRedundancyStatus", "state": "mountStateMountComplete"}, + {"path": "mcs/v1/activeflows", "type": "Mcs::ActiveFlows", "state": "mountStateMountComplete"}, + {"path": "mcs/switch/status", "type": "Mcs::Client::Status", "state": "mountStateMountComplete"}, + ] + } + ], + } + ], + } + ] + } + ], + "inputs": {"connections_count": 1}, + "expected": {"result": "success"}, + }, + { + "name": "failure-no-mounts", + "test": VerifyMcsServerMounts, + "eos_data": [{"connections": [{"hostname": "media-leaf-1", "mounts": []}]}], + "inputs": {"connections_count": 1}, + "expected": { + "result": "failure", + "messages": ["No mount status for media-leaf-1", "Incorrect CVX successful connections count. 
Expected: 1, Actual : 0"], + }, + }, + { + "name": "failure-unexpected-number-paths", + "test": VerifyMcsServerMounts, + "eos_data": [ + { + "connections": [ + { + "hostname": "media-leaf-1", + "mounts": [ + { + "service": "Mcs", + "mountStates": [ + { + "pathStates": [ + {"path": "mcs/v1/apiCfgRedStatus", "type": "Mcs::ApiStatus", "state": "mountStateMountComplete"}, + {"path": "mcs/v1/activeflows", "type": "Mcs::ActiveFlows", "state": "mountStateMountComplete"}, + ] + } + ], + } + ], + } + ] + } + ], + "inputs": {"connections_count": 1}, + "expected": { + "result": "failure", + "messages": [ + "Incorrect number of mount path states for media-leaf-1 - Expected: 3, Actual: 2", + "Unexpected MCS path type for media-leaf-1: 'Mcs::ApiStatus'.", + ], + }, + }, + { + "name": "failure-unexpected-path-type", + "test": VerifyMcsServerMounts, + "eos_data": [ + { + "connections": [ + { + "hostname": "media-leaf-1", + "mounts": [ + { + "service": "Mcs", + "mountStates": [ + { + "pathStates": [ + {"path": "mcs/v1/apiCfgRedStatus", "type": "Mcs::ApiStatus", "state": "mountStateMountComplete"}, + {"path": "mcs/v1/activeflows", "type": "Mcs::ActiveFlows", "state": "mountStateMountComplete"}, + {"path": "mcs/switch/status", "type": "Mcs::Client::Status", "state": "mountStateMountComplete"}, + ] + } + ], + } + ], + } + ] + } + ], + "inputs": {"connections_count": 1}, + "expected": {"result": "failure", "messages": ["Unexpected MCS path type for media-leaf-1: 'Mcs::ApiStatus'"]}, + }, + { + "name": "failure-invalid-mount-state", + "test": VerifyMcsServerMounts, + "eos_data": [ + { + "connections": [ + { + "hostname": "media-leaf-1", + "mounts": [ + { + "service": "Mcs", + "mountStates": [ + { + "pathStates": [ + {"path": "mcs/v1/apiCfgRedStatus", "type": "Mcs::ApiConfigRedundancyStatus", "state": "mountStateMountFailed"}, + {"path": "mcs/v1/activeflows", "type": "Mcs::ActiveFlows", "state": "mountStateMountComplete"}, + {"path": "mcs/switch/status", "type": "Mcs::Client::Status", "state": "mountStateMountComplete"}, + ] + } + ], + } + ], + } + ] + } + ], + "inputs": {"connections_count": 1}, + "expected": { + "result": "failure", + "messages": ["MCS server mount state for path 'Mcs::ApiConfigRedundancyStatus' is not valid is for media-leaf-1: 'mountStateMountFailed'"], + }, + }, + { + "name": "failure-no-mcs-mount", + "test": VerifyMcsServerMounts, + "eos_data": [ + { + "connections": [ + { + "hostname": "media-leaf-1", + "mounts": [ + { + "service": "blah-blah", + "mountStates": [{"pathStates": [{"path": "blah-blah-path", "type": "blah-blah-type", "state": "blah-blah-state"}]}], + } + ], + } + ] + } + ], + "inputs": {"connections_count": 1}, + "expected": {"result": "failure", "messages": ["MCS mount state not detected", "Incorrect CVX successful connections count. 
Expected: 1, Actual : 0"]}, + }, + { + "name": "failure-connections", + "test": VerifyMcsServerMounts, + "eos_data": [{}], + "inputs": {"connections_count": 1}, + "expected": {"result": "failure", "messages": ["CVX connections are not available."]}, + }, + { + "name": "success", + "test": VerifyActiveCVXConnections, + "eos_data": [ + { + "connections": [ + { + "switchId": "fc:bd:67:c3:16:55", + "hostname": "lyv563", + "oobConnectionActive": True, + }, + { + "switchId": "00:1c:73:3c:e3:9e", + "hostname": "tg264", + "oobConnectionActive": True, + }, + ] + } + ], + "inputs": {"connections_count": 2}, + "expected": {"result": "success"}, + }, + { + "name": "failure", + "test": VerifyActiveCVXConnections, + "eos_data": [ + { + "connections": [ + { + "switchId": "fc:bd:67:c3:16:55", + "hostname": "lyv563", + "oobConnectionActive": False, + }, + { + "switchId": "00:1c:73:3c:e3:9e", + "hostname": "tg264", + "oobConnectionActive": True, + }, + ] + } + ], + "inputs": {"connections_count": 2}, + "expected": {"result": "failure", "messages": ["CVX active connections count. Expected: 2, Actual : 1"]}, + }, + { + "name": "failure-no-connections", + "test": VerifyActiveCVXConnections, + "eos_data": [{}], + "inputs": {"connections_count": 2}, + "expected": {"result": "failure", "messages": ["CVX connections are not available"]}, + }, + { + "name": "success-all", + "test": VerifyCVXClusterStatus, + "eos_data": [ + { + "enabled": True, + "clusterMode": True, + "clusterStatus": { + "role": "Master", + "peerStatus": { + "cvx-red-2": {"peerName": "cvx-red-2", "registrationState": "Registration complete"}, + "cvx-red-3": {"peerName": "cvx-red-3", "registrationState": "Registration complete"}, + }, + }, + } + ], + "inputs": { + "role": "Master", + "peer_status": [ + {"peer_name": "cvx-red-2", "registrationState": "Registration complete"}, + {"peer_name": "cvx-red-3", "registrationState": "Registration complete"}, + ], + }, + "expected": {"result": "success"}, + }, + { + "name": "failure-invalid-role", + "test": VerifyCVXClusterStatus, + "eos_data": [ + { + "enabled": True, + "clusterMode": True, + "clusterStatus": { + "role": "Standby", + "peerStatus": { + "cvx-red-2": {"peerName": "cvx-red-2", "registrationState": "Registration complete"}, + "cvx-red-3": {"peerName": "cvx-red-3", "registrationState": "Registration complete"}, + }, + }, + } + ], + "inputs": { + "role": "Master", + "peer_status": [ + {"peer_name": "cvx-red-2", "registrationState": "Registration complete"}, + {"peer_name": "cvx-red-3", "registrationState": "Registration complete"}, + ], + }, + "expected": {"result": "failure", "messages": ["CVX Role is not valid: Standby"]}, + }, + { + "name": "failure-cvx-enabled", + "test": VerifyCVXClusterStatus, + "eos_data": [ + { + "enabled": False, + "clusterMode": True, + "clusterStatus": { + "role": "Master", + "peerStatus": {}, + }, + } + ], + "inputs": { + "role": "Master", + "peer_status": [], + }, + "expected": {"result": "failure", "messages": ["CVX Server status is not enabled"]}, + }, + { + "name": "failure-cluster-enabled", + "test": VerifyCVXClusterStatus, + "eos_data": [ + { + "enabled": True, + "clusterMode": False, + "clusterStatus": {}, + } + ], + "inputs": { + "role": "Master", + "peer_status": [], + }, + "expected": {"result": "failure", "messages": ["CVX Server is not a cluster"]}, + }, + { + "name": "failure-missing-peers", + "test": VerifyCVXClusterStatus, + "eos_data": [ + { + "enabled": True, + "clusterMode": True, + "clusterStatus": { + "role": "Master", + "peerStatus": { + 
"cvx-red-2": {"peerName": "cvx-red-2", "registrationState": "Registration complete"}, + }, + }, + } + ], + "inputs": { + "role": "Master", + "peer_status": [ + {"peer_name": "cvx-red-2", "registrationState": "Registration complete"}, + {"peer_name": "cvx-red-3", "registrationState": "Registration complete"}, + ], + }, + "expected": {"result": "failure", "messages": ["Unexpected number of peers 1 vs 2", "cvx-red-3 is not present"]}, + }, + { + "name": "failure-invalid-peers", + "test": VerifyCVXClusterStatus, + "eos_data": [ + { + "enabled": True, + "clusterMode": True, + "clusterStatus": { + "role": "Master", + "peerStatus": {}, + }, + } + ], + "inputs": { + "role": "Master", + "peer_status": [ + {"peer_name": "cvx-red-2", "registrationState": "Registration complete"}, + {"peer_name": "cvx-red-3", "registrationState": "Registration complete"}, + ], + }, + "expected": {"result": "failure", "messages": ["Unexpected number of peers 0 vs 2", "cvx-red-2 is not present", "cvx-red-3 is not present"]}, + }, + { + "name": "failure-registration-error", + "test": VerifyCVXClusterStatus, + "eos_data": [ + { + "enabled": True, + "clusterMode": True, + "clusterStatus": { + "role": "Master", + "peerStatus": { + "cvx-red-2": {"peerName": "cvx-red-2", "registrationState": "Registration error"}, + "cvx-red-3": {"peerName": "cvx-red-3", "registrationState": "Registration complete"}, + }, + }, + } + ], + "inputs": { + "role": "Master", + "peer_status": [ + {"peer_name": "cvx-red-2", "registrationState": "Registration complete"}, + {"peer_name": "cvx-red-3", "registrationState": "Registration complete"}, + ], + }, + "expected": {"result": "failure", "messages": ["cvx-red-2 registration state is not complete: Registration error"]}, + }, +] diff --git a/tests/units/anta_tests/test_interfaces.py b/tests/units/anta_tests/test_interfaces.py index ea8106e..f3b4ee0 100644 --- a/tests/units/anta_tests/test_interfaces.py +++ b/tests/units/anta_tests/test_interfaces.py @@ -1108,7 +1108,7 @@ DATA: list[dict[str, Any]] = [ "inputs": {"interfaces": [{"name": "Ethernet2", "status": "up"}, {"name": "Ethernet8", "status": "up"}, {"name": "Ethernet3", "status": "up"}]}, "expected": { "result": "failure", - "messages": ["The following interface(s) are not configured: ['Ethernet8']"], + "messages": ["Ethernet8 - Not configured"], }, }, { @@ -1126,7 +1126,7 @@ DATA: list[dict[str, Any]] = [ "inputs": {"interfaces": [{"name": "Ethernet2", "status": "up"}, {"name": "Ethernet8", "status": "up"}, {"name": "Ethernet3", "status": "up"}]}, "expected": { "result": "failure", - "messages": ["The following interface(s) are not in the expected state: ['Ethernet8 is down/down'"], + "messages": ["Ethernet8 - Expected: up/up, Actual: down/down"], }, }, { @@ -1150,7 +1150,7 @@ DATA: list[dict[str, Any]] = [ }, "expected": { "result": "failure", - "messages": ["The following interface(s) are not in the expected state: ['Ethernet8 is up/down'"], + "messages": ["Ethernet8 - Expected: up/up, Actual: up/down"], }, }, { @@ -1166,7 +1166,7 @@ DATA: list[dict[str, Any]] = [ "inputs": {"interfaces": [{"name": "PortChannel100", "status": "up"}]}, "expected": { "result": "failure", - "messages": ["The following interface(s) are not in the expected state: ['Port-Channel100 is down/lowerLayerDown'"], + "messages": ["Port-Channel100 - Expected: up/up, Actual: down/lowerLayerDown"], }, }, { @@ -1190,7 +1190,38 @@ DATA: list[dict[str, Any]] = [ }, "expected": { "result": "failure", - "messages": ["The following interface(s) are not in the expected state: 
['Ethernet2 is up/unknown'"], + "messages": [ + "Ethernet2 - Expected: up/down, Actual: up/unknown", + "Ethernet8 - Expected: up/up, Actual: up/down", + ], + }, + }, + { + "name": "failure-interface-status-down", + "test": VerifyInterfacesStatus, + "eos_data": [ + { + "interfaceDescriptions": { + "Ethernet8": {"interfaceStatus": "up", "description": "", "lineProtocolStatus": "down"}, + "Ethernet2": {"interfaceStatus": "up", "description": "", "lineProtocolStatus": "unknown"}, + "Ethernet3": {"interfaceStatus": "up", "description": "", "lineProtocolStatus": "up"}, + } + } + ], + "inputs": { + "interfaces": [ + {"name": "Ethernet2", "status": "down"}, + {"name": "Ethernet8", "status": "down"}, + {"name": "Ethernet3", "status": "down"}, + ] + }, + "expected": { + "result": "failure", + "messages": [ + "Ethernet2 - Expected: down, Actual: up", + "Ethernet8 - Expected: down, Actual: up", + "Ethernet3 - Expected: down, Actual: up", + ], }, }, { @@ -1938,8 +1969,8 @@ DATA: list[dict[str, Any]] = [ "interfaces": { "Ethernet2": { "interfaceAddress": { - "primaryIp": {"address": "172.30.11.0", "maskLen": 31}, - "secondaryIpsOrderedList": [{"address": "10.10.10.0", "maskLen": 31}, {"address": "10.10.10.10", "maskLen": 31}], + "primaryIp": {"address": "172.30.11.1", "maskLen": 31}, + "secondaryIpsOrderedList": [{"address": "10.10.10.1", "maskLen": 31}, {"address": "10.10.10.10", "maskLen": 31}], } } } @@ -1957,7 +1988,7 @@ DATA: list[dict[str, Any]] = [ ], "inputs": { "interfaces": [ - {"name": "Ethernet2", "primary_ip": "172.30.11.0/31", "secondary_ips": ["10.10.10.0/31", "10.10.10.10/31"]}, + {"name": "Ethernet2", "primary_ip": "172.30.11.1/31", "secondary_ips": ["10.10.10.1/31", "10.10.10.10/31"]}, {"name": "Ethernet12", "primary_ip": "172.30.11.10/31", "secondary_ips": ["10.10.10.10/31", "10.10.10.20/31"]}, ] }, @@ -2480,6 +2511,43 @@ DATA: list[dict[str, Any]] = [ "expected": {"result": "success"}, }, { + "name": "success-short-timeout", + "test": VerifyLACPInterfacesStatus, + "eos_data": [ + { + "portChannels": { + "Port-Channel5": { + "interfaces": { + "Ethernet5": { + "actorPortStatus": "bundled", + "partnerPortState": { + "activity": True, + "timeout": True, + "aggregation": True, + "synchronization": True, + "collecting": True, + "distributing": True, + }, + "actorPortState": { + "activity": True, + "timeout": True, + "aggregation": True, + "synchronization": True, + "collecting": True, + "distributing": True, + }, + } + } + } + }, + "interface": "Ethernet5", + "orphanPorts": {}, + } + ], + "inputs": {"interfaces": [{"name": "Ethernet5", "portchannel": "Port-Channel5", "lacp_rate_fast": True}]}, + "expected": {"result": "success"}, + }, + { "name": "failure-not-bundled", "test": VerifyLACPInterfacesStatus, "eos_data": [ @@ -2500,7 +2568,7 @@ DATA: list[dict[str, Any]] = [ "inputs": {"interfaces": [{"name": "Ethernet5", "portchannel": "Po5"}]}, "expected": { "result": "failure", - "messages": ["For Interface Ethernet5:\nExpected `bundled` as the local port status, but found `No Aggregate` instead.\n"], + "messages": ["Interface: Ethernet5 Port-Channel: Port-Channel5 - Not bundled - Port Status: No Aggregate"], }, }, { @@ -2514,7 +2582,7 @@ DATA: list[dict[str, Any]] = [ "inputs": {"interfaces": [{"name": "Ethernet5", "portchannel": "Po 5"}]}, "expected": { "result": "failure", - "messages": ["Interface 'Ethernet5' is not configured to be a member of LACP 'Port-Channel5'."], + "messages": ["Interface: Ethernet5 Port-Channel: Port-Channel5 - Not configured"], }, }, { @@ -2555,13 +2623,55 @@ 
DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "For Interface Ethernet5:\n" - "Actor port details:\nExpected `True` as the activity, but found `False` instead." - "\nExpected `True` as the aggregation, but found `False` instead." - "\nExpected `True` as the synchronization, but found `False` instead." - "\nPartner port details:\nExpected `True` as the activity, but found `False` instead.\n" - "Expected `True` as the aggregation, but found `False` instead.\n" - "Expected `True` as the synchronization, but found `False` instead.\n" + "Interface: Ethernet5 Port-Channel: Port-Channel5 - Actor port details mismatch - Activity: False, Aggregation: False, " + "Synchronization: False, Collecting: True, Distributing: True, Timeout: False", + "Interface: Ethernet5 Port-Channel: Port-Channel5 - Partner port details mismatch - Activity: False, Aggregation: False, " + "Synchronization: False, Collecting: True, Distributing: True, Timeout: False", + ], + }, + }, + { + "name": "failure-short-timeout", + "test": VerifyLACPInterfacesStatus, + "eos_data": [ + { + "portChannels": { + "Port-Channel5": { + "interfaces": { + "Ethernet5": { + "actorPortStatus": "bundled", + "partnerPortState": { + "activity": True, + "timeout": False, + "aggregation": True, + "synchronization": True, + "collecting": True, + "distributing": True, + }, + "actorPortState": { + "activity": True, + "timeout": False, + "aggregation": True, + "synchronization": True, + "collecting": True, + "distributing": True, + }, + } + } + } + }, + "interface": "Ethernet5", + "orphanPorts": {}, + } + ], + "inputs": {"interfaces": [{"name": "Ethernet5", "portchannel": "port-channel 5", "lacp_rate_fast": True}]}, + "expected": { + "result": "failure", + "messages": [ + "Interface: Ethernet5 Port-Channel: Port-Channel5 - Actor port details mismatch - Activity: True, Aggregation: True, " + "Synchronization: True, Collecting: True, Distributing: True, Timeout: False", + "Interface: Ethernet5 Port-Channel: Port-Channel5 - Partner port details mismatch - Activity: True, Aggregation: True, " + "Synchronization: True, Collecting: True, Distributing: True, Timeout: False", ], }, }, diff --git a/tests/units/anta_tests/test_security.py b/tests/units/anta_tests/test_security.py index 0d4a478..472eb7e 100644 --- a/tests/units/anta_tests/test_security.py +++ b/tests/units/anta_tests/test_security.py @@ -1079,7 +1079,7 @@ DATA: list[dict[str, Any]] = [ }, ] }, - "expected": {"result": "failure", "messages": ["No IPv4 security connection configured for peer `10.255.0.1`."]}, + "expected": {"result": "failure", "messages": ["Peer: 10.255.0.1 VRF: default - Not configured"]}, }, { "name": "failure-not-established", @@ -1127,14 +1127,10 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Expected state of IPv4 security connection `source:172.18.3.2 destination:172.18.2.2 vrf:default` for peer `10.255.0.1` is `Established` " - "but found `Idle` instead.", - "Expected state of IPv4 security connection `source:100.64.2.2 destination:100.64.1.2 vrf:default` for peer `10.255.0.1` is `Established` " - "but found `Idle` instead.", - "Expected state of IPv4 security connection `source:100.64.2.2 destination:100.64.1.2 vrf:MGMT` for peer `10.255.0.2` is `Established` " - "but found `Idle` instead.", - "Expected state of IPv4 security connection `source:172.18.2.2 destination:172.18.1.2 vrf:MGMT` for peer `10.255.0.2` is `Established` " - "but found `Idle` instead.", + "Peer: 10.255.0.1 VRF: default Source: 
172.18.3.2 Destination: 172.18.2.2 - Connection down - Expected: Established, Actual: Idle", + "Peer: 10.255.0.1 VRF: default Source: 100.64.2.2 Destination: 100.64.1.2 - Connection down - Expected: Established, Actual: Idle", + "Peer: 10.255.0.2 VRF: MGMT Source: 100.64.2.2 Destination: 100.64.1.2 - Connection down - Expected: Established, Actual: Idle", + "Peer: 10.255.0.2 VRF: MGMT Source: 172.18.2.2 Destination: 172.18.1.2 - Connection down - Expected: Established, Actual: Idle", ], }, }, @@ -1194,12 +1190,10 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "Expected state of IPv4 security connection `source:172.18.3.2 destination:172.18.2.2 vrf:default` for peer `10.255.0.1` is `Established` " - "but found `Idle` instead.", - "Expected state of IPv4 security connection `source:100.64.3.2 destination:100.64.2.2 vrf:default` for peer `10.255.0.1` is `Established` " - "but found `Idle` instead.", - "IPv4 security connection `source:100.64.4.2 destination:100.64.1.2 vrf:default` for peer `10.255.0.2` is not found.", - "IPv4 security connection `source:172.18.4.2 destination:172.18.1.2 vrf:default` for peer `10.255.0.2` is not found.", + "Peer: 10.255.0.1 VRF: default Source: 172.18.3.2 Destination: 172.18.2.2 - Connection down - Expected: Established, Actual: Idle", + "Peer: 10.255.0.1 VRF: default Source: 100.64.3.2 Destination: 100.64.2.2 - Connection down - Expected: Established, Actual: Idle", + "Peer: 10.255.0.2 VRF: default Source: 100.64.4.2 Destination: 100.64.1.2 - Connection not found.", + "Peer: 10.255.0.2 VRF: default Source: 172.18.4.2 Destination: 172.18.1.2 - Connection not found.", ], }, }, diff --git a/tests/units/anta_tests/test_services.py b/tests/units/anta_tests/test_services.py index 3f13dfc..639c5c6 100644 --- a/tests/units/anta_tests/test_services.py +++ b/tests/units/anta_tests/test_services.py @@ -59,31 +59,23 @@ DATA: list[dict[str, Any]] = [ "test": VerifyDNSServers, "eos_data": [ { - "nameServerConfigs": [{"ipAddr": "10.14.0.1", "vrf": "default", "priority": 0}, {"ipAddr": "10.14.0.11", "vrf": "MGMT", "priority": 1}], + "nameServerConfigs": [ + {"ipAddr": "10.14.0.1", "vrf": "default", "priority": 0}, + {"ipAddr": "10.14.0.11", "vrf": "MGMT", "priority": 1}, + {"ipAddr": "fd12:3456:789a::1", "vrf": "default", "priority": 0}, + ], } ], "inputs": { - "dns_servers": [{"server_address": "10.14.0.1", "vrf": "default", "priority": 0}, {"server_address": "10.14.0.11", "vrf": "MGMT", "priority": 1}] + "dns_servers": [ + {"server_address": "10.14.0.1", "vrf": "default", "priority": 0}, + {"server_address": "10.14.0.11", "vrf": "MGMT", "priority": 1}, + {"server_address": "fd12:3456:789a::1", "vrf": "default", "priority": 0}, + ] }, "expected": {"result": "success"}, }, { - "name": "failure-dns-missing", - "test": VerifyDNSServers, - "eos_data": [ - { - "nameServerConfigs": [{"ipAddr": "10.14.0.1", "vrf": "default", "priority": 0}, {"ipAddr": "10.14.0.11", "vrf": "MGMT", "priority": 1}], - } - ], - "inputs": { - "dns_servers": [{"server_address": "10.14.0.10", "vrf": "default", "priority": 0}, {"server_address": "10.14.0.21", "vrf": "MGMT", "priority": 1}] - }, - "expected": { - "result": "failure", - "messages": ["DNS server `10.14.0.10` is not configured with any VRF.", "DNS server `10.14.0.21` is not configured with any VRF."], - }, - }, - { "name": "failure-no-dns-found", "test": VerifyDNSServers, "eos_data": [ @@ -96,7 +88,7 @@ DATA: list[dict[str, Any]] = [ }, "expected": { "result": "failure", - "messages": ["DNS server 
`10.14.0.10` is not configured with any VRF.", "DNS server `10.14.0.21` is not configured with any VRF."], + "messages": ["Server 10.14.0.10 (VRF: default, Priority: 0) - Not configured", "Server 10.14.0.21 (VRF: MGMT, Priority: 1) - Not configured"], }, }, { @@ -117,9 +109,9 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "For DNS server `10.14.0.1`, the expected priority is `0`, but `1` was found instead.", - "DNS server `10.14.0.11` is not configured with VRF `default`.", - "DNS server `10.14.0.110` is not configured with any VRF.", + "Server 10.14.0.1 (VRF: CS, Priority: 0) - Incorrect priority - Priority: 1", + "Server 10.14.0.11 (VRF: default, Priority: 0) - Not configured", + "Server 10.14.0.110 (VRF: MGMT, Priority: 0) - Not configured", ], }, }, diff --git a/tests/units/anta_tests/test_stun.py b/tests/units/anta_tests/test_stun.py index 005ae35..2383483 100644 --- a/tests/units/anta_tests/test_stun.py +++ b/tests/units/anta_tests/test_stun.py @@ -7,13 +7,13 @@ from __future__ import annotations from typing import Any -from anta.tests.stun import VerifyStunClient, VerifyStunServer +from anta.tests.stun import VerifyStunClientTranslation, VerifyStunServer from tests.units.anta_tests import test DATA: list[dict[str, Any]] = [ { "name": "success", - "test": VerifyStunClient, + "test": VerifyStunClientTranslation, "eos_data": [ { "bindings": { @@ -60,7 +60,7 @@ DATA: list[dict[str, Any]] = [ }, { "name": "failure-incorrect-public-ip", - "test": VerifyStunClient, + "test": VerifyStunClientTranslation, "eos_data": [ { "bindings": { @@ -88,14 +88,14 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "For STUN source `100.64.3.2:4500`:\nExpected `192.164.3.2` as the public ip, but found `192.64.3.2` instead.", - "For STUN source `172.18.3.2:4500`:\nExpected `192.118.3.2` as the public ip, but found `192.18.3.2` instead.", + "Client 100.64.3.2 Port: 4500 - Incorrect public-facing address - Expected: 192.164.3.2 Actual: 192.64.3.2", + "Client 172.18.3.2 Port: 4500 - Incorrect public-facing address - Expected: 192.118.3.2 Actual: 192.18.3.2", ], }, }, { "name": "failure-no-client", - "test": VerifyStunClient, + "test": VerifyStunClientTranslation, "eos_data": [ {"bindings": {}}, {"bindings": {}}, @@ -108,12 +108,12 @@ DATA: list[dict[str, Any]] = [ }, "expected": { "result": "failure", - "messages": ["STUN client transaction for source `100.64.3.2:4500` is not found.", "STUN client transaction for source `172.18.3.2:4500` is not found."], + "messages": ["Client 100.64.3.2 Port: 4500 - STUN client translation not found.", "Client 172.18.3.2 Port: 4500 - STUN client translation not found."], }, }, { "name": "failure-incorrect-public-port", - "test": VerifyStunClient, + "test": VerifyStunClientTranslation, "eos_data": [ {"bindings": {}}, { @@ -134,16 +134,15 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "STUN client transaction for source `100.64.3.2:4500` is not found.", - "For STUN source `172.18.3.2:4500`:\n" - "Expected `192.118.3.2` as the public ip, but found `192.18.3.2` instead.\n" - "Expected `6006` as the public port, but found `4800` instead.", + "Client 100.64.3.2 Port: 4500 - STUN client translation not found.", + "Client 172.18.3.2 Port: 4500 - Incorrect public-facing address - Expected: 192.118.3.2 Actual: 192.18.3.2", + "Client 172.18.3.2 Port: 4500 - Incorrect public-facing port - Expected: 6006 Actual: 4800", ], }, }, { "name": "failure-all-type", - "test": 
VerifyStunClient, + "test": VerifyStunClientTranslation, "eos_data": [ {"bindings": {}}, { @@ -164,12 +163,9 @@ DATA: list[dict[str, Any]] = [ "expected": { "result": "failure", "messages": [ - "STUN client transaction for source `100.64.3.2:4500` is not found.", - "For STUN source `172.18.4.2:4800`:\n" - "Expected `172.18.4.2` as the source ip, but found `172.18.3.2` instead.\n" - "Expected `4800` as the source port, but found `4500` instead.\n" - "Expected `192.118.3.2` as the public ip, but found `192.18.3.2` instead.\n" - "Expected `6006` as the public port, but found `4800` instead.", + "Client 100.64.3.2 Port: 4500 - STUN client translation not found.", + "Client 172.18.4.2 Port: 4800 - Incorrect public-facing address - Expected: 192.118.3.2 Actual: 192.18.3.2", + "Client 172.18.4.2 Port: 4800 - Incorrect public-facing port - Expected: 6006 Actual: 4800", ], }, }, diff --git a/tests/units/anta_tests/test_system.py b/tests/units/anta_tests/test_system.py index 1eda8a1..f610a8e 100644 --- a/tests/units/anta_tests/test_system.py +++ b/tests/units/anta_tests/test_system.py @@ -347,6 +347,39 @@ poll interval unknown "expected": {"result": "success"}, }, { + "name": "success-ip-dns", + "test": VerifyNTPAssociations, + "eos_data": [ + { + "peers": { + "1.1.1.1 (1.ntp.networks.com)": { + "condition": "sys.peer", + "peerIpAddr": "1.1.1.1", + "stratumLevel": 1, + }, + "2.2.2.2 (2.ntp.networks.com)": { + "condition": "candidate", + "peerIpAddr": "2.2.2.2", + "stratumLevel": 2, + }, + "3.3.3.3 (3.ntp.networks.com)": { + "condition": "candidate", + "peerIpAddr": "3.3.3.3", + "stratumLevel": 2, + }, + } + } + ], + "inputs": { + "ntp_servers": [ + {"server_address": "1.1.1.1", "preferred": True, "stratum": 1}, + {"server_address": "2.2.2.2", "stratum": 2}, + {"server_address": "3.3.3.3", "stratum": 2}, + ] + }, + "expected": {"result": "success"}, + }, + { "name": "failure", "test": VerifyNTPAssociations, "eos_data": [ @@ -380,9 +413,9 @@ poll interval unknown "expected": { "result": "failure", "messages": [ - "For NTP peer 1.1.1.1:\nExpected `sys.peer` as the condition, but found `candidate` instead.\nExpected `1` as the stratum, but found `2` instead.\n" - "For NTP peer 2.2.2.2:\nExpected `candidate` as the condition, but found `sys.peer` instead.\n" - "For NTP peer 3.3.3.3:\nExpected `candidate` as the condition, but found `sys.peer` instead.\nExpected `2` as the stratum, but found `3` instead." + "1.1.1.1 (Preferred: True, Stratum: 1) - Bad association - Condition: candidate, Stratum: 2", + "2.2.2.2 (Preferred: False, Stratum: 2) - Bad association - Condition: sys.peer, Stratum: 2", + "3.3.3.3 (Preferred: False, Stratum: 2) - Bad association - Condition: sys.peer, Stratum: 3", ], }, }, @@ -399,7 +432,7 @@ poll interval unknown }, "expected": { "result": "failure", - "messages": ["None of NTP peers are not configured."], + "messages": ["No NTP peers configured"], }, }, { @@ -430,7 +463,7 @@ poll interval unknown }, "expected": { "result": "failure", - "messages": ["NTP peer 3.3.3.3 is not configured."], + "messages": ["3.3.3.3 (Preferred: False, Stratum: 1) - Not configured"], }, }, { @@ -457,8 +490,9 @@ poll interval unknown "expected": { "result": "failure", "messages": [ - "For NTP peer 1.1.1.1:\nExpected `sys.peer` as the condition, but found `candidate` instead.\n" - "NTP peer 2.2.2.2 is not configured.\nNTP peer 3.3.3.3 is not configured." 
+ "1.1.1.1 (Preferred: True, Stratum: 1) - Bad association - Condition: candidate, Stratum: 1", + "2.2.2.2 (Preferred: False, Stratum: 1) - Not configured", + "3.3.3.3 (Preferred: False, Stratum: 1) - Not configured", ], }, }, diff --git a/tests/units/cli/conftest.py b/tests/units/cli/conftest.py index e63e60e..71c23e9 100644 --- a/tests/units/cli/conftest.py +++ b/tests/units/cli/conftest.py @@ -39,6 +39,7 @@ MOCK_CLI_JSON: dict[str, asynceapi.EapiCommandError | dict[str, Any]] = { errmsg="Invalid command", not_exec=[], ), + "show interfaces": {}, } MOCK_CLI_TEXT: dict[str, asynceapi.EapiCommandError | str] = { diff --git a/tests/units/cli/get/local_module/__init__.py b/tests/units/cli/get/local_module/__init__.py new file mode 100644 index 0000000..f93ff2b --- /dev/null +++ b/tests/units/cli/get/local_module/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Module used for test purposes.""" diff --git a/tests/units/cli/get/test_commands.py b/tests/units/cli/get/test_commands.py index ff3d922..0e263f7 100644 --- a/tests/units/cli/get/test_commands.py +++ b/tests/units/cli/get/test_commands.py @@ -114,6 +114,27 @@ def test_from_cvp( assert result.exit_code == ExitCode.OK +def test_from_cvp_os_error(tmp_path: Path, click_runner: CliRunner, caplog: pytest.LogCaptureFixture) -> None: + """Test from_cvp when an OSError occurs.""" + output: Path = tmp_path / "output.yml" + cli_args = ["get", "from-cvp", "--output", str(output), "--host", "42.42.42.42", "--username", "anta", "--password", "anta"] + + with ( + patch("anta.cli.get.commands.get_cv_token", autospec=True, side_effect=None), + patch("cvprac.cvp_client.CvpClient.connect", autospec=True, side_effect=None) as mocked_cvp_connect, + patch("cvprac.cvp_client.CvpApi.get_inventory", autospec=True, return_value=[]) as mocked_get_inventory, + patch("cvprac.cvp_client.CvpApi.get_devices_in_container", autospec=True, return_value=[]), + patch("anta.cli.get.utils.Path.open", side_effect=OSError("Permission denied")), + ): + result = click_runner.invoke(anta, cli_args) + + mocked_cvp_connect.assert_called_once() + mocked_get_inventory.assert_called_once() + assert not output.exists() + assert "Could not write inventory to path" in caplog.text + assert result.exit_code == ExitCode.USAGE_ERROR + + @pytest.mark.parametrize( ("ansible_inventory", "ansible_group", "expected_exit", "expected_log"), [ @@ -257,8 +278,7 @@ def test_from_ansible_overwrite( else: temp_env["ANTA_INVENTORY"] = None tmp_inv = tmp_output - cli_args.extend(["--output", str(tmp_output)]) - + cli_args.extend(["--output", str(tmp_inv)]) if overwrite: cli_args.append("--overwrite") @@ -275,3 +295,162 @@ def test_from_ansible_overwrite( elif expected_exit == ExitCode.INTERNAL_ERROR: assert expected_log assert expected_log in result.output + + +@pytest.mark.parametrize( + ("module", "test_name", "short", "count", "expected_output", "expected_exit_code"), + [ + pytest.param( + None, + None, + False, + False, + "VerifyAcctConsoleMethods", + ExitCode.OK, + id="Get all tests", + ), + pytest.param( + "anta.tests.aaa", + None, + False, + False, + "VerifyAcctConsoleMethods", + ExitCode.OK, + id="Get tests, filter on module", + ), + pytest.param( + None, + "VerifyNTPAssociations", + False, + False, + "VerifyNTPAssociations", + ExitCode.OK, + id="Get tests, filter on exact test name", + ), + pytest.param( + None, + "VerifyNTP", + False, + False, + 
"anta.tests.system", + ExitCode.OK, + id="Get tests, filter on included test name", + ), + pytest.param( + None, + "VerifyNTP", + True, + False, + "VerifyNTPAssociations", + ExitCode.OK, + id="Get tests --short", + ), + pytest.param( + "unknown_module", + None, + True, + False, + "Module `unknown_module` was not found!", + ExitCode.USAGE_ERROR, + id="Get tests wrong module", + ), + pytest.param( + "unknown_module.unknown", + None, + True, + False, + "Module `unknown_module.unknown` was not found!", + ExitCode.USAGE_ERROR, + id="Get tests wrong submodule", + ), + pytest.param( + ".unknown_module", + None, + True, + False, + "`anta get tests --module <module>` does not support relative imports", + ExitCode.USAGE_ERROR, + id="Use relative module name", + ), + pytest.param( + None, + "VerifySomething", + True, + False, + "No test 'VerifySomething' found in 'anta.tests'", + ExitCode.OK, + id="Get tests wrong test name", + ), + pytest.param( + "anta.tests.aaa", + "VerifyNTP", + True, + False, + "No test 'VerifyNTP' found in 'anta.tests.aaa'", + ExitCode.OK, + id="Get tests test exists but not in module", + ), + pytest.param( + "anta.tests.system", + "VerifyNTPAssociations", + False, + True, + "There is 1 test available in 'anta.tests.system'.", + ExitCode.OK, + id="Get single test count", + ), + pytest.param( + "anta.tests.stun", + None, + False, + True, + "There are 3 tests available in 'anta.tests.stun'", + ExitCode.OK, + id="Get multiple test count", + ), + ], +) +def test_get_tests( + click_runner: CliRunner, module: str | None, test_name: str | None, *, short: bool, count: bool, expected_output: str, expected_exit_code: str +) -> None: + """Test `anta get tests`.""" + cli_args = [ + "get", + "tests", + ] + if module is not None: + cli_args.extend(["--module", module]) + + if test_name is not None: + cli_args.extend(["--test", test_name]) + + if short: + cli_args.append("--short") + + if count: + cli_args.append("--count") + + result = click_runner.invoke(anta, cli_args) + + assert result.exit_code == expected_exit_code + assert expected_output in result.output + + +def test_get_tests_local_module(click_runner: CliRunner) -> None: + """Test injecting CWD in sys. + + The test overwrite CWD to return this file parents and local_module is located there. + """ + cli_args = ["get", "tests", "--module", "local_module"] + + cwd = Path.cwd() + local_module_parent_path = Path(__file__).parent + with patch("anta.cli.get.utils.Path.cwd", return_value=local_module_parent_path): + result = click_runner.invoke(anta, cli_args) + + assert result.exit_code == ExitCode.OK + + # In the rare case where people would be running `pytest .` in this directory + if cwd != local_module_parent_path: + assert "injecting CWD in PYTHONPATH and retrying..." 
in result.output + assert "No test found in 'local_module'" in result.output diff --git a/tests/units/cli/get/test_utils.py b/tests/units/cli/get/test_utils.py index 46ce14f..9cff4ce 100644 --- a/tests/units/cli/get/test_utils.py +++ b/tests/units/cli/get/test_utils.py @@ -7,14 +7,15 @@ from __future__ import annotations from contextlib import AbstractContextManager, nullcontext from pathlib import Path -from typing import Any +from typing import Any, ClassVar from unittest.mock import MagicMock, patch import pytest import requests -from anta.cli.get.utils import create_inventory_from_ansible, create_inventory_from_cvp, get_cv_token +from anta.cli.get.utils import create_inventory_from_ansible, create_inventory_from_cvp, extract_examples, find_tests_examples, get_cv_token, print_test from anta.inventory import AntaInventory +from anta.models import AntaCommand, AntaTemplate, AntaTest DATA_DIR: Path = Path(__file__).parents[3].resolve() / "data" @@ -160,3 +161,91 @@ def test_create_inventory_from_ansible( assert not target_file.exists() if expected_log: assert expected_log in caplog.text + + +class MissingExampleTest(AntaTest): + """ANTA test that always succeed but has no Examples section.""" + + categories: ClassVar[list[str]] = [] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [] + + @AntaTest.anta_test + def test(self) -> None: + """Test function.""" + self.result.is_success() + + +class EmptyExampleTest(AntaTest): + """ANTA test that always succeed but has an empty Examples section. + + Examples + -------- + """ + + # For the test purpose we want am empty section as custom tests could not be using ruff. + # ruff: noqa: D414 + + categories: ClassVar[list[str]] = [] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [] + + @AntaTest.anta_test + def test(self) -> None: + """Test function.""" + self.result.is_success() + + +class TypoExampleTest(AntaTest): + """ANTA test that always succeed but has a Typo in the test name in the example. + + Notice capital P in TyPo below. + + Examples + -------- + ```yaml + tests.units.cli.get.test_utils: + - TyPoExampleTest: + ``` + """ + + # For the test purpose we want am empty section as custom tests could not be using ruff. + # ruff: noqa: D414 + + categories: ClassVar[list[str]] = [] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [] + + @AntaTest.anta_test + def test(self) -> None: + """Test function.""" + self.result.is_success() + + +def test_find_tests_examples() -> None: + """Test find_tests_examples. + + Only testing the failure scenarii not tested through test_commands. + TODO: expand + """ + with pytest.raises(ValueError, match="Error when importing"): + find_tests_examples("blah", "UnusedTestName") + + +def test_print_test() -> None: + """Test print_test.""" + with pytest.raises(ValueError, match="Could not find the name of the test"): + print_test(TypoExampleTest) + with pytest.raises(LookupError, match="is missing an Example"): + print_test(MissingExampleTest) + with pytest.raises(LookupError, match="is missing an Example"): + print_test(EmptyExampleTest) + + +def test_extract_examples() -> None: + """Test extract_examples. + + Only testing the case where the 'Examples' is missing as everything else + is covered already in test_commands.py. 
+ """ + assert MissingExampleTest.__doc__ is not None + assert EmptyExampleTest.__doc__ is not None + assert extract_examples(MissingExampleTest.__doc__) is None + assert extract_examples(EmptyExampleTest.__doc__) is None diff --git a/tests/units/cli/nrfu/test_commands.py b/tests/units/cli/nrfu/test_commands.py index 6a2624c..372c86a 100644 --- a/tests/units/cli/nrfu/test_commands.py +++ b/tests/units/cli/nrfu/test_commands.py @@ -17,7 +17,7 @@ from anta.cli.utils import ExitCode if TYPE_CHECKING: from click.testing import CliRunner -DATA_DIR: Path = Path(__file__).parent.parent.parent.parent.resolve() / "data" +DATA_DIR: Path = Path(__file__).parents[3].resolve() / "data" def test_anta_nrfu_table_help(click_runner: CliRunner) -> None: @@ -76,6 +76,19 @@ def test_anta_nrfu_text(click_runner: CliRunner) -> None: assert "leaf1 :: VerifyEOSVersion :: SUCCESS" in result.output +def test_anta_nrfu_text_multiple_failures(click_runner: CliRunner) -> None: + """Test anta nrfu text with multiple failures, catalog is given via env.""" + result = click_runner.invoke(anta, ["nrfu", "text"], env={"ANTA_CATALOG": str(DATA_DIR / "test_catalog_double_failure.yml")}) + assert result.exit_code == ExitCode.TESTS_FAILED + assert ( + """spine1 :: VerifyInterfacesSpeed :: FAILURE + Interface `Ethernet2` is not found. + Interface `Ethernet3` is not found. + Interface `Ethernet4` is not found.""" + in result.output + ) + + def test_anta_nrfu_json(click_runner: CliRunner) -> None: """Test anta nrfu, catalog is given via env.""" result = click_runner.invoke(anta, ["nrfu", "json"]) diff --git a/tests/units/input_models/__init__.py b/tests/units/input_models/__init__.py new file mode 100644 index 0000000..62747a6 --- /dev/null +++ b/tests/units/input_models/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Tests for anta.input_models module.""" diff --git a/tests/units/input_models/routing/__init__.py b/tests/units/input_models/routing/__init__.py new file mode 100644 index 0000000..b56adb5 --- /dev/null +++ b/tests/units/input_models/routing/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Test for anta.input_models.routing submodule.""" diff --git a/tests/units/input_models/routing/test_bgp.py b/tests/units/input_models/routing/test_bgp.py new file mode 100644 index 0000000..66c37af --- /dev/null +++ b/tests/units/input_models/routing/test_bgp.py @@ -0,0 +1,238 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. 
+"""Tests for anta.input_models.routing.bgp.py.""" + +# pylint: disable=C0302 +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest +from pydantic import ValidationError + +from anta.input_models.routing.bgp import BgpAddressFamily, BgpPeer +from anta.tests.routing.bgp import ( + VerifyBGPExchangedRoutes, + VerifyBGPPeerCount, + VerifyBGPPeerMPCaps, + VerifyBGPPeerRouteLimit, + VerifyBgpRouteMaps, + VerifyBGPSpecificPeers, + VerifyBGPTimers, +) + +if TYPE_CHECKING: + from anta.custom_types import Afi, Safi + + +class TestBgpAddressFamily: + """Test anta.input_models.routing.bgp.BgpAddressFamily.""" + + @pytest.mark.parametrize( + ("afi", "safi", "vrf"), + [ + pytest.param("ipv4", "unicast", "MGMT", id="afi"), + pytest.param("evpn", None, "default", id="safi"), + pytest.param("ipv4", "unicast", "default", id="vrf"), + ], + ) + def test_valid(self, afi: Afi, safi: Safi, vrf: str) -> None: + """Test BgpAddressFamily valid inputs.""" + BgpAddressFamily(afi=afi, safi=safi, vrf=vrf) + + @pytest.mark.parametrize( + ("afi", "safi", "vrf"), + [ + pytest.param("ipv4", None, "default", id="afi"), + pytest.param("evpn", "multicast", "default", id="safi"), + pytest.param("evpn", None, "MGMT", id="vrf"), + ], + ) + def test_invalid(self, afi: Afi, safi: Safi, vrf: str) -> None: + """Test BgpAddressFamily invalid inputs.""" + with pytest.raises(ValidationError): + BgpAddressFamily(afi=afi, safi=safi, vrf=vrf) + + +class TestVerifyBGPPeerCountInput: + """Test anta.tests.routing.bgp.VerifyBGPPeerCount.Input.""" + + @pytest.mark.parametrize( + ("address_families"), + [ + pytest.param([{"afi": "evpn", "num_peers": 2}], id="valid"), + ], + ) + def test_valid(self, address_families: list[BgpAddressFamily]) -> None: + """Test VerifyBGPPeerCount.Input valid inputs.""" + VerifyBGPPeerCount.Input(address_families=address_families) + + @pytest.mark.parametrize( + ("address_families"), + [ + pytest.param([{"afi": "evpn", "num_peers": 0}], id="zero-peer"), + pytest.param([{"afi": "evpn"}], id="None"), + ], + ) + def test_invalid(self, address_families: list[BgpAddressFamily]) -> None: + """Test VerifyBGPPeerCount.Input invalid inputs.""" + with pytest.raises(ValidationError): + VerifyBGPPeerCount.Input(address_families=address_families) + + +class TestVerifyBGPSpecificPeersInput: + """Test anta.tests.routing.bgp.VerifyBGPSpecificPeers.Input.""" + + @pytest.mark.parametrize( + ("address_families"), + [ + pytest.param([{"afi": "evpn", "peers": ["10.1.0.1", "10.1.0.2"]}], id="valid"), + ], + ) + def test_valid(self, address_families: list[BgpAddressFamily]) -> None: + """Test VerifyBGPSpecificPeers.Input valid inputs.""" + VerifyBGPSpecificPeers.Input(address_families=address_families) + + @pytest.mark.parametrize( + ("address_families"), + [ + pytest.param([{"afi": "evpn"}], id="None"), + ], + ) + def test_invalid(self, address_families: list[BgpAddressFamily]) -> None: + """Test VerifyBGPSpecificPeers.Input invalid inputs.""" + with pytest.raises(ValidationError): + VerifyBGPSpecificPeers.Input(address_families=address_families) + + +class TestVerifyBGPExchangedRoutesInput: + """Test anta.tests.routing.bgp.VerifyBGPExchangedRoutes.Input.""" + + @pytest.mark.parametrize( + ("bgp_peers"), + [ + pytest.param( + [{"peer_address": "172.30.255.5", "vrf": "default", "advertised_routes": ["192.0.254.5/32"], "received_routes": ["192.0.255.4/32"]}], + id="valid_both_received_advertised", + ), + ], + ) + def test_valid(self, bgp_peers: list[BgpPeer]) -> None: + """Test 
VerifyBGPExchangedRoutes.Input valid inputs.""" + VerifyBGPExchangedRoutes.Input(bgp_peers=bgp_peers) + + @pytest.mark.parametrize( + ("bgp_peers"), + [ + pytest.param([{"peer_address": "172.30.255.5", "vrf": "default"}], id="invalid"), + pytest.param([{"peer_address": "172.30.255.5", "vrf": "default", "advertised_routes": ["192.0.254.5/32"]}], id="invalid_received_route"), + pytest.param([{"peer_address": "172.30.255.5", "vrf": "default", "received_routes": ["192.0.254.5/32"]}], id="invalid_advertised_route"), + ], + ) + def test_invalid(self, bgp_peers: list[BgpPeer]) -> None: + """Test VerifyBGPExchangedRoutes.Input invalid inputs.""" + with pytest.raises(ValidationError): + VerifyBGPExchangedRoutes.Input(bgp_peers=bgp_peers) + + +class TestVerifyBGPPeerMPCapsInput: + """Test anta.tests.routing.bgp.VerifyBGPPeerMPCaps.Input.""" + + @pytest.mark.parametrize( + ("bgp_peers"), + [ + pytest.param([{"peer_address": "172.30.255.5", "vrf": "default", "capabilities": ["ipv4Unicast"]}], id="valid"), + ], + ) + def test_valid(self, bgp_peers: list[BgpPeer]) -> None: + """Test VerifyBGPPeerMPCaps.Input valid inputs.""" + VerifyBGPPeerMPCaps.Input(bgp_peers=bgp_peers) + + @pytest.mark.parametrize( + ("bgp_peers"), + [ + pytest.param([{"peer_address": "172.30.255.5", "vrf": "default"}], id="invalid"), + ], + ) + def test_invalid(self, bgp_peers: list[BgpPeer]) -> None: + """Test VerifyBGPPeerMPCaps.Input invalid inputs.""" + with pytest.raises(ValidationError): + VerifyBGPPeerMPCaps.Input(bgp_peers=bgp_peers) + + +class TestVerifyBGPTimersInput: + """Test anta.tests.routing.bgp.VerifyBGPTimers.Input.""" + + @pytest.mark.parametrize( + ("bgp_peers"), + [ + pytest.param([{"peer_address": "172.30.255.5", "vrf": "default", "hold_time": 180, "keep_alive_time": 60}], id="valid"), + ], + ) + def test_valid(self, bgp_peers: list[BgpPeer]) -> None: + """Test VerifyBGPTimers.Input valid inputs.""" + VerifyBGPTimers.Input(bgp_peers=bgp_peers) + + @pytest.mark.parametrize( + ("bgp_peers"), + [ + pytest.param([{"peer_address": "172.30.255.5", "vrf": "default"}], id="invalid"), + pytest.param([{"peer_address": "172.30.255.5", "vrf": "default", "hold_time": 180}], id="invalid_keep_alive"), + pytest.param([{"peer_address": "172.30.255.5", "vrf": "default", "keep_alive_time": 180}], id="invalid_hold_time"), + ], + ) + def test_invalid(self, bgp_peers: list[BgpPeer]) -> None: + """Test VerifyBGPTimers.Input invalid inputs.""" + with pytest.raises(ValidationError): + VerifyBGPTimers.Input(bgp_peers=bgp_peers) + + +class TestVerifyBgpRouteMapsInput: + """Test anta.tests.routing.bgp.VerifyBgpRouteMaps.Input.""" + + @pytest.mark.parametrize( + ("bgp_peers"), + [ + pytest.param([{"peer_address": "172.30.255.5", "vrf": "default", "inbound_route_map": "Test", "outbound_route_map": "Test"}], id="valid"), + ], + ) + def test_valid(self, bgp_peers: list[BgpPeer]) -> None: + """Test VerifyBgpRouteMaps.Input valid inputs.""" + VerifyBgpRouteMaps.Input(bgp_peers=bgp_peers) + + @pytest.mark.parametrize( + ("bgp_peers"), + [ + pytest.param([{"peer_address": "172.30.255.5", "vrf": "default"}], id="invalid"), + ], + ) + def test_invalid(self, bgp_peers: list[BgpPeer]) -> None: + """Test VerifyBgpRouteMaps.Input invalid inputs.""" + with pytest.raises(ValidationError): + VerifyBgpRouteMaps.Input(bgp_peers=bgp_peers) + + +class TestVerifyBGPPeerRouteLimitInput: + """Test anta.tests.routing.bgp.VerifyBGPPeerRouteLimit.Input.""" + + @pytest.mark.parametrize( + ("bgp_peers"), + [ + pytest.param([{"peer_address": "172.30.255.5", "vrf": 
"default", "maximum_routes": 10000}], id="valid"), + ], + ) + def test_valid(self, bgp_peers: list[BgpPeer]) -> None: + """Test VerifyBGPPeerRouteLimit.Input valid inputs.""" + VerifyBGPPeerRouteLimit.Input(bgp_peers=bgp_peers) + + @pytest.mark.parametrize( + ("bgp_peers"), + [ + pytest.param([{"peer_address": "172.30.255.5", "vrf": "default"}], id="invalid"), + ], + ) + def test_invalid(self, bgp_peers: list[BgpPeer]) -> None: + """Test VerifyBGPPeerRouteLimit.Input invalid inputs.""" + with pytest.raises(ValidationError): + VerifyBGPPeerRouteLimit.Input(bgp_peers=bgp_peers) diff --git a/tests/units/input_models/test_interfaces.py b/tests/units/input_models/test_interfaces.py new file mode 100644 index 0000000..87d742d --- /dev/null +++ b/tests/units/input_models/test_interfaces.py @@ -0,0 +1,33 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Tests for anta.input_models.interfaces.py.""" + +# pylint: disable=C0302 +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest + +from anta.input_models.interfaces import InterfaceState + +if TYPE_CHECKING: + from anta.custom_types import Interface, PortChannelInterface + + +class TestInterfaceState: + """Test anta.input_models.interfaces.InterfaceState.""" + + # pylint: disable=too-few-public-methods + + @pytest.mark.parametrize( + ("name", "portchannel", "expected"), + [ + pytest.param("Ethernet1", "Port-Channel42", "Interface: Ethernet1 Port-Channel: Port-Channel42", id="with port-channel"), + pytest.param("Ethernet1", None, "Interface: Ethernet1", id="no port-channel"), + ], + ) + def test_valid__str__(self, name: Interface, portchannel: PortChannelInterface | None, expected: str) -> None: + """Test InterfaceState __str__.""" + assert str(InterfaceState(name=name, portchannel=portchannel)) == expected diff --git a/tests/units/reporter/conftest.py b/tests/units/reporter/conftest.py index ae7d3df..d0eed36 100644 --- a/tests/units/reporter/conftest.py +++ b/tests/units/reporter/conftest.py @@ -5,4 +5,4 @@ from tests.units.result_manager.conftest import list_result_factory, result_manager, result_manager_factory, test_result_factory -__all__ = ["result_manager", "result_manager_factory", "list_result_factory", "test_result_factory"] +__all__ = ["list_result_factory", "result_manager", "result_manager_factory", "test_result_factory"] diff --git a/tests/units/reporter/test__init__.py b/tests/units/reporter/test__init__.py index af26b54..71cccdd 100644 --- a/tests/units/reporter/test__init__.py +++ b/tests/units/reporter/test__init__.py @@ -188,5 +188,5 @@ class TestReportJinja: def test_fail__init__file_not_found(self) -> None: """Test __init__ failure if file is not found.""" - with pytest.raises(FileNotFoundError, match="template file is not found: /gnu/terry/pratchett"): + with pytest.raises(FileNotFoundError, match=r"template file is not found: [/|\\]gnu[/|\\]terry[/|\\]pratchett"): ReportJinja(Path("/gnu/terry/pratchett")) diff --git a/tests/units/reporter/test_csv.py b/tests/units/reporter/test_csv.py index 1d59dae..d88098e 100644 --- a/tests/units/reporter/test_csv.py +++ b/tests/units/reporter/test_csv.py @@ -8,6 +8,7 @@ import csv import pathlib from typing import Any, Callable +from unittest.mock import patch import pytest @@ -49,8 +50,8 @@ class TestReportCsv: # Generate the CSV report ReportCsv.generate(result_manager, csv_filename) - # Read the generated CSV file - with 
pathlib.Path.open(csv_filename, encoding="utf-8") as csvfile: + # Read the generated CSV file - newline required on Windows.. + with pathlib.Path.open(csv_filename, encoding="utf-8", newline="") as csvfile: reader = csv.reader(csvfile, delimiter=",") rows = list(reader) @@ -82,11 +83,9 @@ class TestReportCsv: max_test_entries = 10 result_manager = result_manager_factory(max_test_entries) - # Create a temporary CSV file path and make tmp_path read_only - tmp_path.chmod(0o400) csv_filename = tmp_path / "read_only.csv" - with pytest.raises(OSError, match="Permission denied"): + with patch("pathlib.Path.open", side_effect=OSError("Any OSError")), pytest.raises(OSError, match="Any OSError"): # Generate the CSV report ReportCsv.generate(result_manager, csv_filename) diff --git a/tests/units/reporter/test_md_reporter.py b/tests/units/reporter/test_md_reporter.py index a607733..c0676bb 100644 --- a/tests/units/reporter/test_md_reporter.py +++ b/tests/units/reporter/test_md_reporter.py @@ -5,7 +5,7 @@ from __future__ import annotations -from io import StringIO +from io import BytesIO, TextIOWrapper from pathlib import Path import pytest @@ -46,7 +46,7 @@ def test_md_report_base() -> None: results = ResultManager() - with StringIO() as mock_file: + with TextIOWrapper(BytesIO(b"1 2 3")) as mock_file: report = FakeMDReportBase(mock_file, results) assert report.generate_heading_name() == "Fake MD Report Base" diff --git a/tests/units/result_manager/test__init__.py b/tests/units/result_manager/test__init__.py index 1fd51cb..e41a436 100644 --- a/tests/units/result_manager/test__init__.py +++ b/tests/units/result_manager/test__init__.py @@ -6,6 +6,7 @@ from __future__ import annotations import json +import logging import re from contextlib import AbstractContextManager, nullcontext from typing import TYPE_CHECKING, Callable @@ -379,3 +380,103 @@ class TestResultManager: assert len(result_manager.get_devices()) == 2 assert all(t in result_manager.get_devices() for t in ["Device1", "Device2"]) + + def test_stats_computation_methods(self, test_result_factory: Callable[[], TestResult], caplog: pytest.LogCaptureFixture) -> None: + """Test ResultManager internal stats computation methods.""" + result_manager = ResultManager() + + # Initially stats should be unsynced + assert result_manager._stats_in_sync is False + + # Test _reset_stats + result_manager._reset_stats() + assert result_manager._stats_in_sync is False + assert len(result_manager._device_stats) == 0 + assert len(result_manager._category_stats) == 0 + assert len(result_manager._test_stats) == 0 + + # Add some test results + test1 = test_result_factory() + test1.name = "device1" + test1.result = AntaTestStatus.SUCCESS + test1.categories = ["system"] + test1.test = "test1" + + test2 = test_result_factory() + test2.name = "device2" + test2.result = AntaTestStatus.FAILURE + test2.categories = ["interfaces"] + test2.test = "test2" + + result_manager.add(test1) + result_manager.add(test2) + + # Stats should still be unsynced after adding results + assert result_manager._stats_in_sync is False + + # Test _compute_stats directly + with caplog.at_level(logging.INFO): + result_manager._compute_stats() + assert "Computing statistics for all results" in caplog.text + assert result_manager._stats_in_sync is True + + # Verify stats content + assert len(result_manager._device_stats) == 2 + assert len(result_manager._category_stats) == 2 + assert len(result_manager._test_stats) == 2 + assert result_manager._device_stats["device1"].tests_success_count == 1 + assert 
result_manager._device_stats["device2"].tests_failure_count == 1 + assert result_manager._category_stats["system"].tests_success_count == 1 + assert result_manager._category_stats["interfaces"].tests_failure_count == 1 + assert result_manager._test_stats["test1"].devices_success_count == 1 + assert result_manager._test_stats["test2"].devices_failure_count == 1 + + def test_stats_property_computation(self, test_result_factory: Callable[[], TestResult], caplog: pytest.LogCaptureFixture) -> None: + """Test that stats are computed only once when accessed via properties.""" + result_manager = ResultManager() + + # Add some test results + test1 = test_result_factory() + test1.name = "device1" + test1.result = AntaTestStatus.SUCCESS + test1.categories = ["system"] + result_manager.add(test1) + + test2 = test_result_factory() + test2.name = "device2" + test2.result = AntaTestStatus.FAILURE + test2.categories = ["interfaces"] + result_manager.add(test2) + + # Stats should be unsynced after adding results + assert result_manager._stats_in_sync is False + assert "Computing statistics" not in caplog.text + + # Access device_stats property - should trigger computation + with caplog.at_level(logging.INFO): + _ = result_manager.device_stats + assert "Computing statistics for all results" in caplog.text + assert result_manager._stats_in_sync is True + + # Clear the log + caplog.clear() + + # Access other stats properties - should not trigger computation again + with caplog.at_level(logging.INFO): + _ = result_manager.category_stats + _ = result_manager.test_stats + _ = result_manager.sorted_category_stats + assert "Computing statistics" not in caplog.text + + # Add another result - should mark stats as unsynced + test3 = test_result_factory() + test3.name = "device3" + test3.result = "error" + result_manager.add(test3) + assert result_manager._stats_in_sync is False + + # Access stats again - should trigger recomputation + with caplog.at_level(logging.INFO): + _ = result_manager.device_stats + assert "Computing statistics for all results" in caplog.text + assert result_manager._stats_in_sync is True diff --git a/tests/units/test_custom_types.py b/tests/units/test_custom_types.py index 6970171..95c5234 100644 --- a/tests/units/test_custom_types.py +++ b/tests/units/test_custom_types.py @@ -192,8 +192,7 @@ def test_regexp_eos_blacklist_cmds(test_string: str, expected: bool) -> None: """Test REGEXP_EOS_BLACKLIST_CMDS.""" def matches_any_regex(string: str, regex_list: list[str]) -> bool: - """ - Check if a string matches at least one regular expression in a list. + """Check if a string matches at least one regular expression in a list. :param string: The string to check. :param regex_list: A list of regular expressions. diff --git a/tests/units/test_decorators.py b/tests/units/test_decorators.py new file mode 100644 index 0000000..c267df1 --- /dev/null +++ b/tests/units/test_decorators.py @@ -0,0 +1,77 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. 
+"""test anta.decorators.py.""" + +from __future__ import annotations + +import logging +from typing import TYPE_CHECKING, ClassVar + +import pytest + +from anta.decorators import deprecated_test_class, skip_on_platforms +from anta.models import AntaCommand, AntaTemplate, AntaTest + +if TYPE_CHECKING: + from anta.device import AntaDevice + + +class ExampleTest(AntaTest): + """ANTA test that always succeed.""" + + categories: ClassVar[list[str]] = [] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [] + + @AntaTest.anta_test + def test(self) -> None: + """Test function.""" + self.result.is_success() + + +@pytest.mark.parametrize( + "new_tests", + [ + pytest.param(None, id="No new_tests"), + pytest.param(["NewExampleTest"], id="one new_tests"), + pytest.param(["NewExampleTest1", "NewExampleTest2"], id="multiple new_tests"), + ], +) +def test_deprecated_test_class(caplog: pytest.LogCaptureFixture, device: AntaDevice, new_tests: list[str] | None) -> None: + """Test deprecated_test_class decorator.""" + caplog.set_level(logging.INFO) + + decorated_test_class = deprecated_test_class(new_tests=new_tests)(ExampleTest) + + # Initialize the decorated test + decorated_test_class(device) + + if new_tests is None: + assert "ExampleTest test is deprecated." in caplog.messages + else: + assert f"ExampleTest test is deprecated. Consider using the following new tests: {', '.join(new_tests)}." in caplog.messages + + +@pytest.mark.parametrize( + ("platforms", "device_platform", "expected_result"), + [ + pytest.param([], "cEOS-lab", "success", id="empty platforms"), + pytest.param(["cEOS-lab"], "cEOS-lab", "skipped", id="skip on one platform - match"), + pytest.param(["cEOS-lab"], "vEOS", "success", id="skip on one platform - no match"), + pytest.param(["cEOS-lab", "vEOS"], "cEOS-lab", "skipped", id="skip on multiple platforms - match"), + ], +) +async def test_skip_on_platforms(device: AntaDevice, platforms: list[str], device_platform: str, expected_result: str) -> None: + """Test skip_on_platforms decorator. + + Leverage the ExampleTest defined at the top of the module. 
+ """ + # Apply the decorator - ignoring mypy warning - this is for testing + ExampleTest.test = skip_on_platforms(platforms)(ExampleTest.test) # type: ignore[method-assign] + + device.hw_model = device_platform + + test_instance = ExampleTest(device) + await test_instance.test() + + assert test_instance.result.result == expected_result diff --git a/tests/units/test_device.py b/tests/units/test_device.py index faf6144..17669df 100644 --- a/tests/units/test_device.py +++ b/tests/units/test_device.py @@ -6,13 +6,15 @@ from __future__ import annotations import asyncio +from contextlib import AbstractContextManager +from contextlib import nullcontext as does_not_raise from pathlib import Path from typing import TYPE_CHECKING, Any from unittest.mock import patch import pytest from asyncssh import SSHClientConnection, SSHClientConnectionOptions -from httpx import ConnectError, HTTPError +from httpx import ConnectError, HTTPError, TimeoutException from rich import print as rprint from anta.device import AntaDevice, AsyncEOSDevice @@ -24,13 +26,37 @@ if TYPE_CHECKING: from _pytest.mark.structures import ParameterSet INIT_PARAMS: list[ParameterSet] = [ - pytest.param({"host": "42.42.42.42", "username": "anta", "password": "anta"}, {"name": "42.42.42.42"}, id="no name, no port"), - pytest.param({"host": "42.42.42.42", "username": "anta", "password": "anta", "port": 666}, {"name": "42.42.42.42:666"}, id="no name, port"), + pytest.param({"host": "42.42.42.42", "username": "anta", "password": "anta"}, {"name": "42.42.42.42"}, does_not_raise(), id="no name, no port"), + pytest.param({"host": "42.42.42.42", "username": "anta", "password": "anta", "port": 666}, {"name": "42.42.42.42:666"}, does_not_raise(), id="no name, port"), pytest.param( - {"host": "42.42.42.42", "username": "anta", "password": "anta", "name": "test.anta.ninja", "disable_cache": True}, {"name": "test.anta.ninja"}, id="name" + {"host": "42.42.42.42", "username": "anta", "password": "anta", "name": "test.anta.ninja", "disable_cache": True}, + {"name": "test.anta.ninja"}, + does_not_raise(), + id="name", ), pytest.param( - {"host": "42.42.42.42", "username": "anta", "password": "anta", "name": "test.anta.ninja", "insecure": True}, {"name": "test.anta.ninja"}, id="insecure" + {"host": "42.42.42.42", "username": "anta", "password": "anta", "name": "test.anta.ninja", "insecure": True}, + {"name": "test.anta.ninja"}, + does_not_raise(), + id="insecure", + ), + pytest.param( + {"host": None, "username": "anta", "password": "anta", "name": "test.anta.ninja"}, + None, + pytest.raises(ValueError, match="'host' is required to create an AsyncEOSDevice"), + id="host is None", + ), + pytest.param( + {"host": "42.42.42.42", "username": None, "password": "anta", "name": "test.anta.ninja"}, + None, + pytest.raises(ValueError, match="'username' is required to instantiate device 'test.anta.ninja'"), + id="username is None", + ), + pytest.param( + {"host": "42.42.42.42", "username": "anta", "password": None, "name": "test.anta.ninja"}, + None, + pytest.raises(ValueError, match="'password' is required to instantiate device 'test.anta.ninja'"), + id="password is None", ), ] EQUALITY_PARAMS: list[ParameterSet] = [ @@ -48,7 +74,10 @@ EQUALITY_PARAMS: list[ParameterSet] = [ id="not-equal-port", ), pytest.param( - {"host": "42.42.42.41", "username": "anta", "password": "anta"}, {"host": "42.42.42.42", "username": "anta", "password": "anta"}, False, id="not-equal-host" + {"host": "42.42.42.41", "username": "anta", "password": "anta"}, + {"host": "42.42.42.42", 
"username": "anta", "password": "anta"}, + False, + id="not-equal-host", ), ] ASYNCEAPI_COLLECT_PARAMS: list[ParameterSet] = [ @@ -287,7 +316,58 @@ ASYNCEAPI_COLLECT_PARAMS: list[ParameterSet] = [ }, }, {"output": None, "errors": ["Authorization denied for command 'show version'"]}, - id="asynceapi.EapiCommandError", + id="asynceapi.EapiCommandError - Authorization denied", + ), + pytest.param( + {}, + { + "command": "show version", + "patch_kwargs": { + "side_effect": EapiCommandError( + passed=[], + failed="show version", + errors=["not supported on this hardware platform"], + errmsg="Invalid command", + not_exec=[], + ) + }, + }, + {"output": None, "errors": ["not supported on this hardware platform"]}, + id="asynceapi.EapiCommandError - not supported", + ), + pytest.param( + {}, + { + "command": "show version", + "patch_kwargs": { + "side_effect": EapiCommandError( + passed=[], + failed="show version", + errors=["BGP inactive"], + errmsg="Invalid command", + not_exec=[], + ) + }, + }, + {"output": None, "errors": ["BGP inactive"]}, + id="asynceapi.EapiCommandError - known EOS error", + ), + pytest.param( + {}, + { + "command": "show version", + "patch_kwargs": { + "side_effect": EapiCommandError( + passed=[], + failed="show version", + errors=["Invalid input (privileged mode required)"], + errmsg="Invalid command", + not_exec=[], + ) + }, + }, + {"output": None, "errors": ["Invalid input (privileged mode required)"]}, + id="asynceapi.EapiCommandError - requires privileges", ), pytest.param( {}, @@ -301,6 +381,12 @@ ASYNCEAPI_COLLECT_PARAMS: list[ParameterSet] = [ {"output": None, "errors": ["ConnectError: Cannot open port"]}, id="httpx.ConnectError", ), + pytest.param( + {}, + {"command": "show version", "patch_kwargs": {"side_effect": TimeoutException("Test")}}, + {"output": None, "errors": ["TimeoutException: Test"]}, + id="httpx.TimeoutException", + ), ] ASYNCEAPI_COPY_PARAMS: list[ParameterSet] = [ pytest.param({}, {"sources": [Path("/mnt/flash"), Path("/var/log/agents")], "destination": Path(), "direction": "from"}, id="from"), @@ -531,22 +617,24 @@ class TestAntaDevice: class TestAsyncEOSDevice: """Test for anta.device.AsyncEOSDevice.""" - @pytest.mark.parametrize(("device", "expected"), INIT_PARAMS) - def test__init__(self, device: dict[str, Any], expected: dict[str, Any]) -> None: + @pytest.mark.parametrize(("device", "expected", "expected_raise"), INIT_PARAMS) + def test__init__(self, device: dict[str, Any], expected: dict[str, Any] | None, expected_raise: AbstractContextManager[Exception]) -> None: """Test the AsyncEOSDevice constructor.""" - dev = AsyncEOSDevice(**device) + with expected_raise: + dev = AsyncEOSDevice(**device) - assert dev.name == expected["name"] - if device.get("disable_cache") is True: - assert dev.cache is None - assert dev.cache_locks is None - else: # False or None - assert dev.cache is not None - assert dev.cache_locks is not None - hash(dev) + assert expected is not None + assert dev.name == expected["name"] + if device.get("disable_cache") is True: + assert dev.cache is None + assert dev.cache_locks is None + else: # False or None + assert dev.cache is not None + assert dev.cache_locks is not None + hash(dev) - with patch("anta.device.__DEBUG__", new=True): - rprint(dev) + with patch("anta.device.__DEBUG__", new=True): + rprint(dev) @pytest.mark.parametrize(("device1", "device2", "expected"), EQUALITY_PARAMS) def test__eq(self, device1: dict[str, Any], device2: dict[str, Any], expected: bool) -> None: diff --git a/tests/units/test_models.py 
b/tests/units/test_models.py index d604b48..d12d859 100644 --- a/tests/units/test_models.py +++ b/tests/units/test_models.py @@ -26,8 +26,6 @@ if TYPE_CHECKING: class FakeTest(AntaTest): """ANTA test that always succeed.""" - name = "FakeTest" - description = "ANTA test that always succeed" categories: ClassVar[list[str]] = [] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [] @@ -40,8 +38,6 @@ class FakeTest(AntaTest): class FakeTestWithFailedCommand(AntaTest): """ANTA test with a command that failed.""" - name = "FakeTestWithFailedCommand" - description = "ANTA test with a command that failed" categories: ClassVar[list[str]] = [] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show version", errors=["failed command"])] @@ -54,8 +50,6 @@ class FakeTestWithFailedCommand(AntaTest): class FakeTestWithUnsupportedCommand(AntaTest): """ANTA test with an unsupported command.""" - name = "FakeTestWithUnsupportedCommand" - description = "ANTA test with an unsupported command" categories: ClassVar[list[str]] = [] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ AntaCommand( @@ -70,11 +64,26 @@ class FakeTestWithUnsupportedCommand(AntaTest): self.result.is_success() +class FakeTestWithKnownEOSError(AntaTest): + """ANTA test triggering a known EOS Error that should translate to failure of the test.""" + + categories: ClassVar[list[str]] = [] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ + AntaCommand( + command="show bgp evpn route-type mac-ip aa:c1:ab:de:50:ad vni 10010", + errors=["BGP inactive"], + ) + ] + + @AntaTest.anta_test + def test(self) -> None: + """Test function.""" + self.result.is_success() + + class FakeTestWithInput(AntaTest): """ANTA test with inputs that always succeed.""" - name = "FakeTestWithInput" - description = "ANTA test with inputs that always succeed" categories: ClassVar[list[str]] = [] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [] @@ -92,8 +101,6 @@ class FakeTestWithInput(AntaTest): class FakeTestWithTemplate(AntaTest): """ANTA test with template that always succeed.""" - name = "FakeTestWithTemplate" - description = "ANTA test with template that always succeed" categories: ClassVar[list[str]] = [] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show interface {interface}")] @@ -115,8 +122,6 @@ class FakeTestWithTemplate(AntaTest): class FakeTestWithTemplateNoRender(AntaTest): """ANTA test with template that miss the render() method.""" - name = "FakeTestWithTemplateNoRender" - description = "ANTA test with template that miss the render() method" categories: ClassVar[list[str]] = [] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show interface {interface}")] @@ -134,8 +139,6 @@ class FakeTestWithTemplateNoRender(AntaTest): class FakeTestWithTemplateBadRender1(AntaTest): """ANTA test with template that raises a AntaTemplateRenderError exception.""" - name = "FakeTestWithTemplateBadRender" - description = "ANTA test with template that raises a AntaTemplateRenderError exception" categories: ClassVar[list[str]] = [] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show interface {interface}")] @@ -157,8 +160,6 @@ class FakeTestWithTemplateBadRender1(AntaTest): class FakeTestWithTemplateBadRender2(AntaTest): """ANTA test with template that raises an arbitrary exception in render().""" - name = "FakeTestWithTemplateBadRender2" - description = "ANTA test with template that raises an arbitrary exception in render()" 
categories: ClassVar[list[str]] = [] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show interface {interface}")] @@ -180,8 +181,6 @@ class FakeTestWithTemplateBadRender2(AntaTest): class FakeTestWithTemplateBadRender3(AntaTest): """ANTA test with template that gives extra template parameters in render().""" - name = "FakeTestWithTemplateBadRender3" - description = "ANTA test with template that gives extra template parameters in render()" categories: ClassVar[list[str]] = [] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show interface {interface}")] @@ -203,8 +202,6 @@ class FakeTestWithTemplateBadRender3(AntaTest): class FakeTestWithTemplateBadTest(AntaTest): """ANTA test with template that tries to access an undefined template parameter in test().""" - name = "FakeTestWithTemplateBadTest" - description = "ANTA test with template that tries to access an undefined template parameter in test()" categories: ClassVar[list[str]] = [] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show interface {interface}")] @@ -227,8 +224,6 @@ class FakeTestWithTemplateBadTest(AntaTest): class SkipOnPlatformTest(AntaTest): """ANTA test that is skipped.""" - name = "SkipOnPlatformTest" - description = "ANTA test that is skipped on a specific platform" categories: ClassVar[list[str]] = [] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [] @@ -242,8 +237,6 @@ class SkipOnPlatformTest(AntaTest): class UnSkipOnPlatformTest(AntaTest): """ANTA test that is skipped.""" - name = "UnSkipOnPlatformTest" - description = "ANTA test that is skipped on a specific platform" categories: ClassVar[list[str]] = [] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [] @@ -257,8 +250,6 @@ class UnSkipOnPlatformTest(AntaTest): class SkipOnPlatformTestWithInput(AntaTest): """ANTA test skipped on platforms but with Input.""" - name = "SkipOnPlatformTestWithInput" - description = "ANTA test skipped on platforms but with Input" categories: ClassVar[list[str]] = [] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [] @@ -277,8 +268,6 @@ class SkipOnPlatformTestWithInput(AntaTest): class DeprecatedTestWithoutNewTest(AntaTest): """ANTA test that is deprecated without new test.""" - name = "DeprecatedTestWitouthNewTest" - description = "ANTA test that is deprecated without new test" categories: ClassVar[list[str]] = [] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [] @@ -292,8 +281,6 @@ class DeprecatedTestWithoutNewTest(AntaTest): class DeprecatedTestWithNewTest(AntaTest): """ANTA test that is deprecated with new test.""" - name = "DeprecatedTestWithNewTest" - description = "ANTA deprecated test with New Test" categories: ClassVar[list[str]] = [] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [] @@ -307,8 +294,6 @@ class DeprecatedTestWithNewTest(AntaTest): class FakeTestWithMissingTest(AntaTest): """ANTA test with missing test() method implementation.""" - name = "FakeTestWithMissingTest" - description = "ANTA test with missing test() method implementation" categories: ClassVar[list[str]] = [] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [] @@ -516,6 +501,18 @@ ANTATEST_DATA: list[dict[str, Any]] = [ }, }, }, + { + "name": "known EOS error command", + "test": FakeTestWithKnownEOSError, + "inputs": None, + "expected": { + "__init__": {"result": "unset"}, + "test": { + "result": "failure", + "messages": ["BGP inactive"], + }, + }, + }, ] BLACKLIST_COMMANDS_PARAMS = ["reload", "reload --force", 
"write", "wr mem"] @@ -526,65 +523,61 @@ class TestAntaTest: def test__init_subclass__(self) -> None: """Test __init_subclass__.""" - with pytest.raises(NotImplementedError) as exec_info: + with pytest.raises(AttributeError) as exec_info: - class _WrongTestNoName(AntaTest): - """ANTA test that is missing a name.""" + class _WrongTestNoCategories(AntaTest): + """ANTA test that is missing categories.""" - description = "ANTA test that is missing a name" - categories: ClassVar[list[str]] = [] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [] @AntaTest.anta_test def test(self) -> None: self.result.is_success() - assert exec_info.value.args[0] == "Class tests.units.test_models._WrongTestNoName is missing required class attribute name" + assert exec_info.value.args[0] == "Class tests.units.test_models._WrongTestNoCategories is missing required class attribute(s): categories" - with pytest.raises(NotImplementedError) as exec_info: + with pytest.raises(AttributeError) as exec_info: - class _WrongTestNoDescription(AntaTest): - """ANTA test that is missing a description.""" + class _WrongTestNoCommands(AntaTest): + """ANTA test that is missing commands.""" - name = "WrongTestNoDescription" categories: ClassVar[list[str]] = [] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [] @AntaTest.anta_test def test(self) -> None: self.result.is_success() - assert exec_info.value.args[0] == "Class tests.units.test_models._WrongTestNoDescription is missing required class attribute description" + assert exec_info.value.args[0] == "Class tests.units.test_models._WrongTestNoCommands is missing required class attribute(s): commands" - with pytest.raises(NotImplementedError) as exec_info: + with pytest.raises( + AttributeError, + match="Cannot set the description for class _WrongTestNoDescription, either set it in the class definition or add a docstring to the class.", + ): - class _WrongTestNoCategories(AntaTest): - """ANTA test that is missing categories.""" + class _WrongTestNoDescription(AntaTest): + # ANTA test that is missing a description and does not have a doctstring. 
- name = "WrongTestNoCategories" - description = "ANTA test that is missing categories" commands: ClassVar[list[AntaCommand | AntaTemplate]] = [] + categories: ClassVar[list[str]] = [] @AntaTest.anta_test def test(self) -> None: self.result.is_success() - assert exec_info.value.args[0] == "Class tests.units.test_models._WrongTestNoCategories is missing required class attribute categories" + class _TestOverwriteNameAndDescription(AntaTest): + """ANTA test where both the test name and description are overwritten in the class definition.""" - with pytest.raises(NotImplementedError) as exec_info: - - class _WrongTestNoCommands(AntaTest): - """ANTA test that is missing commands.""" - - name = "WrongTestNoCommands" - description = "ANTA test that is missing commands" - categories: ClassVar[list[str]] = [] + name: ClassVar[str] = "CustomName" + description: ClassVar[str] = "Custom description" + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [] + categories: ClassVar[list[str]] = [] - @AntaTest.anta_test - def test(self) -> None: - self.result.is_success() + @AntaTest.anta_test + def test(self) -> None: + self.result.is_success() - assert exec_info.value.args[0] == "Class tests.units.test_models._WrongTestNoCommands is missing required class attribute commands" + assert _TestOverwriteNameAndDescription.name == "CustomName" + assert _TestOverwriteNameAndDescription.description == "Custom description" def test_abc(self) -> None: """Test that an error is raised if AntaTest is not implemented.""" @@ -626,8 +619,6 @@ class TestAntaTest: class FakeTestWithBlacklist(AntaTest): """Fake Test for blacklist.""" - name = "FakeTestWithBlacklist" - description = "ANTA test that has blacklisted command" categories: ClassVar[list[str]] = [] commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command=command)] @@ -651,7 +642,7 @@ class TestAntaTest: assert test.result.custom_field == "a custom field" -class TestAntaComamnd: +class TestAntaCommand: """Test for anta.models.AntaCommand.""" # ruff: noqa: B018 @@ -710,6 +701,32 @@ class TestAntaComamnd: ) assert command.requires_privileges is False command = AntaCommand(command="show aaa methods accounting") - with pytest.raises(RuntimeError) as exec_info: + with pytest.raises( + RuntimeError, match="Command 'show aaa methods accounting' has not been collected and has not returned an error. Call AntaDevice.collect()." + ): command.requires_privileges - assert exec_info.value.args[0] == "Command 'show aaa methods accounting' has not been collected and has not returned an error. Call AntaDevice.collect()." 
+ + @pytest.mark.parametrize( + ("command_str", "error", "is_known"), + [ + ("show ip interface Ethernet1", "Ethernet1 does not support IP", True), + ("ping vrf MGMT 1.1.1.1 source Management0 size 100 df-bit repeat 2", "VRF 'MGMT' is not active", True), + ("ping vrf MGMT 1.1.1.1 source Management1 size 100 df-bit repeat 2", "No source interface Management1", True), + ("show bgp evpn route-type mac-ip aa:c1:ab:de:50:ad vni 10010", "BGP inactive", True), + ("show isis BLAH neighbors", "IS-IS (BLAH) is disabled because: IS-IS Network Entity Title (NET) configuration is not present", True), + ("show ip interface Ethernet1", None, False), + ], + ) + def test_returned_known_eos_error(self, command_str: str, error: str | None, is_known: bool) -> None: + """Test the returned_known_eos_error property.""" + # Adding fake output when no error is present to mimic that the command has been collected + command = AntaCommand(command=command_str, errors=[error] if error else [], output=None if error else "{}") + assert command.returned_known_eos_error is is_known + + def test_returned_known_eos_error_failure(self) -> None: + """Test the returned_known_eos_error property unset.""" + command = AntaCommand(command="show ip interface Ethernet1") + with pytest.raises( + RuntimeError, match="Command 'show ip interface Ethernet1' has not been collected and has not returned an error. Call AntaDevice.collect()." + ): + command.returned_known_eos_error diff --git a/tests/units/test_runner.py b/tests/units/test_runner.py index b80259c..23f4102 100644 --- a/tests/units/test_runner.py +++ b/tests/units/test_runner.py @@ -6,7 +6,7 @@ from __future__ import annotations import logging -import resource +import os import sys from pathlib import Path from unittest.mock import patch @@ -16,10 +16,16 @@ import pytest from anta.catalog import AntaCatalog from anta.inventory import AntaInventory from anta.result_manager import ResultManager -from anta.runner import adjust_rlimit_nofile, main, prepare_tests +from anta.runner import main, prepare_tests from .test_models import FakeTest, FakeTestWithMissingTest +if os.name == "posix": + # The function is not defined on non-POSIX system + import resource + + from anta.runner import adjust_rlimit_nofile + DATA_DIR: Path = Path(__file__).parent.parent.resolve() / "data" FAKE_CATALOG: AntaCatalog = AntaCatalog.from_list([(FakeTest, None)]) @@ -65,8 +71,10 @@ async def test_no_selected_device(caplog: pytest.LogCaptureFixture, inventory: A assert msg in caplog.messages +@pytest.mark.skipif(os.name != "posix", reason="Cannot run this test on Windows") def test_adjust_rlimit_nofile_valid_env(caplog: pytest.LogCaptureFixture) -> None: """Test adjust_rlimit_nofile with valid environment variables.""" + # pylint: disable=E0606 with ( caplog.at_level(logging.DEBUG), patch.dict("os.environ", {"ANTA_NOFILE": "20480"}), @@ -96,6 +104,7 @@ def test_adjust_rlimit_nofile_valid_env(caplog: pytest.LogCaptureFixture) -> Non setrlimit_mock.assert_called_once_with(resource.RLIMIT_NOFILE, (20480, 1048576)) +@pytest.mark.skipif(os.name != "posix", reason="Cannot run this test on Windows") def test_adjust_rlimit_nofile_invalid_env(caplog: pytest.LogCaptureFixture) -> None: """Test adjust_rlimit_nofile with valid environment variables.""" with ( @@ -129,6 +138,31 @@ def test_adjust_rlimit_nofile_invalid_env(caplog: pytest.LogCaptureFixture) -> N setrlimit_mock.assert_called_once_with(resource.RLIMIT_NOFILE, (16384, 1048576)) +@pytest.mark.skipif(os.name == "posix", reason="Run this test on Windows only") 
+async def test_check_runner_log_for_windows(caplog: pytest.LogCaptureFixture, inventory: AntaInventory) -> None: + """Test log output for Windows host regarding rlimit.""" + caplog.set_level(logging.INFO) + manager = ResultManager() + # Using dry-run to shorten the test + await main(manager, inventory, FAKE_CATALOG, dry_run=True) + assert "Running on a non-POSIX system, cannot adjust the maximum number of file descriptors." in caplog.records[-3].message + + +# We could instead merge multiple coverage report together but that requires more work than just this. +@pytest.mark.skipif(os.name != "posix", reason="Fake non-posix for coverage") +async def test_check_runner_log_for_windows_fake(caplog: pytest.LogCaptureFixture, inventory: AntaInventory) -> None: + """Test log output for Windows host regarding rlimit.""" + with patch("os.name", new="win32"): + del sys.modules["anta.runner"] + from anta.runner import main # pylint: disable=W0621 + + caplog.set_level(logging.INFO) + manager = ResultManager() + # Using dry-run to shorten the test + await main(manager, inventory, FAKE_CATALOG, dry_run=True) + assert "Running on a non-POSIX system, cannot adjust the maximum number of file descriptors." in caplog.records[-3].message + + @pytest.mark.parametrize( ("inventory", "tags", "tests", "devices_count", "tests_count"), [ @@ -138,6 +172,7 @@ def test_adjust_rlimit_nofile_invalid_env(caplog: pytest.LogCaptureFixture) -> N pytest.param({"filename": "test_inventory_with_tags.yml"}, None, {"VerifyMlagStatus", "VerifyUptime"}, 3, 5, id="filtered-tests"), pytest.param({"filename": "test_inventory_with_tags.yml"}, {"leaf"}, {"VerifyMlagStatus", "VerifyUptime"}, 2, 4, id="1-tag-filtered-tests"), pytest.param({"filename": "test_inventory_with_tags.yml"}, {"invalid"}, None, 0, 0, id="invalid-tag"), + pytest.param({"filename": "test_inventory_with_tags.yml"}, {"dc1"}, None, 0, 0, id="device-tag-no-tests"), ], indirect=["inventory"], ) diff --git a/tests/units/test_tools.py b/tests/units/test_tools.py index 16f0443..b1f96a5 100644 --- a/tests/units/test_tools.py +++ b/tests/units/test_tools.py @@ -11,7 +11,7 @@ from typing import Any import pytest -from anta.tools import convert_categories, custom_division, get_dict_superset, get_failed_logs, get_item, get_value +from anta.tools import convert_categories, custom_division, format_data, get_dict_superset, get_failed_logs, get_item, get_value TEST_GET_FAILED_LOGS_DATA = [ {"id": 1, "name": "Alice", "age": 30, "email": "alice@example.com"}, @@ -513,3 +513,17 @@ def test_convert_categories(test_input: list[str], expected_raise: AbstractConte """Test convert_categories.""" with expected_raise: assert convert_categories(test_input) == expected_result + + +@pytest.mark.parametrize( + ("input_data", "expected_output"), + [ + pytest.param({"advertised": True, "received": True, "enabled": True}, "Advertised: True, Received: True, Enabled: True", id="multiple entry, all True"), + pytest.param({"advertised": False, "received": False}, "Advertised: False, Received: False", id="multiple entry, all False"), + pytest.param({}, "", id="empty dict"), + pytest.param({"test": True}, "Test: True", id="single entry"), + ], +) +def test_format_data(input_data: dict[str, bool], expected_output: str) -> None: + """Test format_data.""" + assert format_data(input_data) == expected_output |
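The test_models.py and test_utils.py hunks above exercise two 1.2.0 behaviors: `AntaTest` subclasses no longer declare the `name` and `description` class attributes (both are derived from the class name and the first docstring line, and can still be overridden, as `_TestOverwriteNameAndDescription` shows), and the `anta get` utilities (`extract_examples`, `find_tests_examples`, `print_test`) look up an `Examples` section in the test docstring. A minimal sketch of a custom test written against these conventions follows; the module path, the `VerifyExampleUptime` class, and its uptime check are illustrative assumptions, not part of this commit.

````python
"""Illustrative custom test module - class name and logic are hypothetical."""

from __future__ import annotations

from typing import ClassVar

from anta.models import AntaCommand, AntaTemplate, AntaTest


class VerifyExampleUptime(AntaTest):
    """Verifies that the device has been up for more than one day.

    Expected Results
    ----------------
    * Success: The test will pass if the uptime is above 86400 seconds.
    * Failure: The test will fail otherwise.

    Examples
    --------
    ```yaml
    my_package.system:
      - VerifyExampleUptime:
    ```
    """

    # No `name` or `description` here: since 1.2.0 they default to the class name
    # and the first docstring line, and may still be set explicitly as ClassVar[str].
    categories: ClassVar[list[str]] = ["system"]
    commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show uptime")]

    @AntaTest.anta_test
    def test(self) -> None:
        """Check the collected uptime."""
        # Assumes the JSON output of `show uptime` exposes an `upTime` counter in seconds.
        uptime = self.instance_commands[0].json_output.get("upTime", 0)
        if uptime > 86400:
            self.result.is_success()
        else:
            self.result.is_failure(f"Device uptime is only {uptime} seconds")
````

Such a test would be referenced in a catalog exactly as its `Examples` block shows, which is the format `find_tests_examples` and `print_test` validate in the `anta get` unit tests above.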