diff options
Diffstat (limited to 'tests/topotests/munet/mutest')
-rw-r--r-- | tests/topotests/munet/mutest/__main__.py | 89
-rw-r--r-- | tests/topotests/munet/mutest/userapi.py | 102
2 files changed, 161 insertions, 30 deletions
diff --git a/tests/topotests/munet/mutest/__main__.py b/tests/topotests/munet/mutest/__main__.py index c870311..a78c69e 100644 --- a/tests/topotests/munet/mutest/__main__.py +++ b/tests/topotests/munet/mutest/__main__.py @@ -20,9 +20,13 @@ from copy import deepcopy from pathlib import Path from typing import Union +from munet import mulog from munet import parser +from munet.args import add_testing_args from munet.base import Bridge from munet.base import get_event_loop +from munet.cli import async_cli +from munet.compat import PytestConfig from munet.mutest import userapi as uapi from munet.native import L3NodeMixin from munet.native import Munet @@ -36,7 +40,9 @@ root_logger = logging.getLogger("") exec_formatter = logging.Formatter("%(asctime)s %(levelname)5s: %(name)s: %(message)s") -async def get_unet(config: dict, croot: Path, rundir: Path, unshare: bool = False): +async def get_unet( + config: dict, croot: Path, rundir: Path, args: Namespace, unshare: bool = False +): """Create and run a new Munet topology. The topology is built from the given ``config`` to run inside the path indicated @@ -48,6 +54,7 @@ async def get_unet(config: dict, croot: Path, rundir: Path, unshare: bool = Fals value will be modified and stored in the built ``Munet`` object. croot: common root of all tests, used to search for ``kinds.yaml`` files. rundir: the path to the run directory for this topology. + args: argparse args unshare: True to unshare the process into it's own private namespace. 
Yields: @@ -58,7 +65,11 @@ async def get_unet(config: dict, croot: Path, rundir: Path, unshare: bool = Fals try: try: unet = await async_build_topology( - config, rundir=str(rundir), unshare_inline=unshare + config, + rundir=str(rundir), + args=args, + pytestconfig=PytestConfig(args), + unshare_inline=unshare, ) except Exception as error: logging.debug("unet build failed: %s", error, exc_info=True) @@ -221,9 +232,13 @@ async def execute_test( targets["."] = unet tc = uapi.TestCase( - str(test_num), test_name, test, targets, logger, reslog, args.full_summary + str(test_num), test_name, test, targets, args, logger, reslog, args.full_summary ) - passed, failed, e = tc.execute() + try: + passed, failed, e = tc.execute() + except uapi.CLIOnErrorError as error: + await async_cli(unet) + passed, failed, e = 0, 0, error run_time = time.time() - tc.info.start_time @@ -278,6 +293,10 @@ async def run_tests(args): start_time = time.time() try: for dirpath in tests: + if args.validate_only: + parser.validate_config(configs[dirpath], reslog, args) + continue + test_files = tests[dirpath] for test in test_files: tnum += 1 @@ -294,10 +313,12 @@ async def run_tests(args): root_logger.addHandler(exec_handler) try: - async for unet in get_unet(config, common, rundir): + async for unet in get_unet(config, common, rundir, args): + if not printed_header: print_header(reslog, unet) printed_header = True + passed, failed, e = await execute_test( unet, test, args, tnum, exec_handler ) @@ -321,6 +342,9 @@ async def run_tests(args): except KeyboardInterrupt: pass + if args.validate_only: + return False + run_time = time.time() - start_time tnum = 0 tpassed = 0 @@ -357,8 +381,10 @@ async def run_tests(args): for result in results: test_name, passed, failed, e = result tnum += 1 - s = "FAIL" if failed or e else "PASS" - reslog.info(" %s %s:%s", s, tnum, test_name) + if failed or e: + reslog.warning(" FAIL %s:%s", tnum, test_name) + else: + reslog.info(" PASS %s:%s", tnum, test_name) 
reslog.info("-" * 70) reslog.info( @@ -386,35 +412,47 @@ async def async_main(args): def main(): ap = ArgumentParser() ap.add_argument( - "--dist", - type=int, - nargs="?", - const=-1, - default=0, - action="store", - metavar="NUM-THREADS", - help="Run in parallel, value is num. of threads or no value for auto", + "-v", dest="verbose", action="count", default=0, help="More -v's, more verbose" ) - ap.add_argument("-d", "--rundir", help="runtime directory for tempfiles, logs, etc") ap.add_argument( + "-V", "--version", action="store_true", help="print the verison number and exit" + ) + ap.add_argument("paths", nargs="*", help="Paths to collect tests from") + + rap = ap.add_argument_group(title="Runtime", description="runtime related options") + rap.add_argument( + "-d", "--rundir", help="runtime directory for tempfiles, logs, etc" + ) + add_testing_args(rap.add_argument) + + eap = ap.add_argument_group(title="Uncommon", description="uncommonly used options") + eap.add_argument( "--file-select", default="mutest_*.py", help="shell glob for finding tests" ) - ap.add_argument("--log-config", help="logging config file (yaml, toml, json, ...)") - ap.add_argument( - "-V", + eap.add_argument( "--full-summary", action="store_true", help="print full summary headers from docstrings", ) - ap.add_argument( - "-v", dest="verbose", action="count", default=0, help="More -v's, more verbose" + eap.add_argument("--log-config", help="logging config file (yaml, toml, json, ...)") + eap.add_argument( + "--validate-only", + action="store_true", + help="Validate the munet configs against the schema definition", ) - ap.add_argument("paths", nargs="*", help="Paths to collect tests from") + args = ap.parse_args() + if args.version: + from importlib import metadata # pylint: disable=C0415 + + print(metadata.version("munet")) + sys.exit(0) + rundir = args.rundir if args.rundir else "/tmp/mutest" - args.rundir = Path(rundir) - os.environ["MUNET_RUNDIR"] = rundir + rundir = Path(rundir).absolute() 
+ args.rundir = rundir + os.environ["MUNET_RUNDIR"] = str(rundir) subprocess.run(f"mkdir -p {rundir} && chmod 755 {rundir}", check=True, shell=True) config = parser.setup_logging(args, config_base="logconf-mutest") @@ -425,6 +463,9 @@ def main(): fconfig.get("format"), fconfig.get("datefmt") ) + if not hasattr(sys.stderr, "isatty") or not sys.stderr.isatty(): + mulog.do_color = False + loop = None status = 4 try: diff --git a/tests/topotests/munet/mutest/userapi.py b/tests/topotests/munet/mutest/userapi.py index 7967dd0..abc63af 100644 --- a/tests/topotests/munet/mutest/userapi.py +++ b/tests/topotests/munet/mutest/userapi.py @@ -65,8 +65,11 @@ import json import logging import pprint import re +import subprocess +import sys import time +from argparse import Namespace from pathlib import Path from typing import Any from typing import Union @@ -76,6 +79,51 @@ from deepdiff import DeepDiff as json_cmp from munet.base import Commander +class ScriptError(Exception): + """An unrecoverable script failure.""" + + +class CLIOnErrorError(Exception): + """Enter CLI after error.""" + + +def pause_test(desc=""): + isatty = sys.stdout.isatty() + if not isatty: + desc = f" for {desc}" if desc else "" + logging.info("NO PAUSE on non-tty terminal%s", desc) + return + + while True: + if desc: + print(f"\n== PAUSING: {desc} ==") + try: + user = input('PAUSED, "cli" for CLI, "pdb" to debug, "Enter" to continue: ') + except EOFError: + print("^D...continuing") + break + user = user.strip() + if user == "cli": + raise CLIOnErrorError() + if user == "pdb": + breakpoint() # pylint: disable=W1515 + elif user: + print(f'Unrecognized input: "{user}"') + else: + break + + +def act_on_result(success, args, desc=""): + if args.pause: + pause_test(desc) + elif success: + return + if args.cli_on_error: + raise CLIOnErrorError() + if args.pause_on_error: + pause_test(desc) + + class TestCaseInfo: """Object to hold nestable TestCase Results.""" @@ -140,6 +188,7 @@ class TestCase: name: str, path: 
Path, targets: dict, + args: Namespace, output_logger: logging.Logger = None, result_logger: logging.Logger = None, full_summary: bool = False, @@ -157,6 +206,7 @@ class TestCase: self.__in_section = False self.targets = targets + self.args = args self.last = "" self.last_m = None @@ -285,7 +335,10 @@ class TestCase: # Extract any docstring as a title. if print_header: - title = locals()[f"_{name}"].__doc__.lstrip() + title = locals()[f"_{name}"].__doc__ + if title is None: + title = "" + title = title.lstrip() if self.__short_doc_header and (title := title.lstrip()): if (idx := title.find("\n")) != -1: title = title[:idx].strip() @@ -299,6 +352,10 @@ class TestCase: # Here's where we can do async in the future if we want. # result = await locals()[f"_{name}"](_ok_result) + except ScriptError as error: + return error + except CLIOnErrorError: + raise except Exception as error: logging.error( "Unexpected exception executing %s: %s", name, error, exc_info=True @@ -381,7 +438,9 @@ class TestCase: target: the target to execute the command on. cmd: string to execut on the target. """ - out = self.targets[target].cmd_nostatus(cmd, warn=False) + out = self.targets[target].cmd_nostatus( + cmd, stdin=subprocess.DEVNULL, warn=False + ) self.last = out = out.rstrip() report = out if out else "<no output>" self.logf("COMMAND OUTPUT:\n%s", report) @@ -398,12 +457,14 @@ class TestCase: target: the target to execute the command on. cmd: string to execute on the target. """ - out = self.targets[target].cmd_nostatus(cmd, warn=False) + out = self.targets[target].cmd_nostatus( + cmd, stdin=subprocess.DEVNULL, warn=False + ) self.last = out = out.rstrip() try: js = json.loads(out) except Exception as error: - js = {} + js = None self.olog.warning( "JSON load failed. Check command output is in JSON format: %s", error, @@ -482,20 +543,33 @@ class TestCase: exact_match: if True then the json must exactly match. 
""" js = self._command_json(target, cmd) + if js is None: + # Always fail on bad json, even if user expected failure + # return expect_fail, {} + return False, {} + try: + # Convert to string to validate the input is valid JSON + if not isinstance(match, str): + match = json.dumps(match) expect = json.loads(match) except Exception as error: expect = {} self.olog.warning( "JSON load failed. Check match value is in JSON format: %s", error ) + # Always fail on bad json, even if user expected failure + # return expect_fail, {} + return False, {} if exact_match: deep_diff = json_cmp(expect, js) # Convert DeepDiff completely into dicts or lists at all levels json_diff = json.loads(deep_diff.to_json()) else: - deep_diff = json_cmp(expect, js, ignore_order=True) + deep_diff = json_cmp( + expect, js, ignore_order=True, cutoff_intersection_for_pairs=1 + ) # Convert DeepDiff completely into dicts or lists at all levels json_diff = json.loads(deep_diff.to_json()) # Remove new fields in json object from diff @@ -570,6 +644,7 @@ class TestCase: """ path = Path(pathname) path = self.info.path.parent.joinpath(path) + do_cli = False self.oplogf( "include: new path: %s create section: %s currently __in_section: %s", @@ -589,7 +664,12 @@ class TestCase: self.info.path = path self.oplogf("include: swapped info path: new %s old %s", path, old_path) - self.__exec_script(path, print_header=new_section, add_newline=new_section) + try: + e = self.__exec_script( + path, print_header=new_section, add_newline=new_section + ) + except CLIOnErrorError: + do_cli = True if new_section: # Something within the section creating include has also created a section @@ -616,6 +696,11 @@ class TestCase: self.info.path = old_path self.oplogf("include: restored info path: %s", old_path) + if do_cli: + raise CLIOnErrorError() + if e: + raise ScriptError(e) + def __end_section(self): self.oplogf("__end_section: __in_section: %s", self.__in_section) info = self.__pop_execinfo() @@ -719,6 +804,7 @@ class 
TestCase: ) if desc: self.__post_result(target, success, desc) + act_on_result(success, self.args, desc) return success, ret def test_step(self, expr_or_value: Any, desc: str, target: str = "") -> bool: @@ -728,6 +814,7 @@ class TestCase: """ success = bool(expr_or_value) self.__post_result(target, success, desc) + act_on_result(success, self.args, desc) return success def match_step_json( @@ -760,6 +847,7 @@ class TestCase: ) if desc: self.__post_result(target, success, desc) + act_on_result(success, self.args, desc) return success, ret def wait_step( @@ -808,6 +896,7 @@ class TestCase: ) if desc: self.__post_result(target, success, desc) + act_on_result(success, self.args, desc) return success, ret def wait_step_json( @@ -846,6 +935,7 @@ class TestCase: ) if desc: self.__post_result(target, success, desc) + act_on_result(success, self.args, desc) return success, ret |