path: root/tests/topotests/munet
Diffstat (limited to 'tests/topotests/munet')
-rw-r--r--  tests/topotests/munet/__main__.py           63
-rw-r--r--  tests/topotests/munet/args.py               89
-rw-r--r--  tests/topotests/munet/base.py               66
-rw-r--r--  tests/topotests/munet/cleanup.py            34
-rw-r--r--  tests/topotests/munet/cli.py                12
-rw-r--r--  tests/topotests/munet/logconf-mutest.yaml    5
-rw-r--r--  tests/topotests/munet/mucmd.py              16
-rw-r--r--  tests/topotests/munet/mulog.py              26
-rw-r--r--  tests/topotests/munet/munet-schema.json     51
-rw-r--r--  tests/topotests/munet/mutest/__main__.py    89
-rw-r--r--  tests/topotests/munet/mutest/userapi.py    102
-rw-r--r--  tests/topotests/munet/native.py            467
-rw-r--r--  tests/topotests/munet/parser.py             10
-rw-r--r--  tests/topotests/munet/testing/fixtures.py   36
-rw-r--r--  tests/topotests/munet/testing/hooks.py      93
15 files changed, 833 insertions, 326 deletions
diff --git a/tests/topotests/munet/__main__.py b/tests/topotests/munet/__main__.py
index 4419ab9..145eb26 100644
--- a/tests/topotests/munet/__main__.py
+++ b/tests/topotests/munet/__main__.py
@@ -16,8 +16,10 @@ import sys
from . import cli
from . import parser
+from .args import add_launch_args
from .base import get_event_loop
from .cleanup import cleanup_previous
+from .cleanup import is_running_in_rundir
from .compat import PytestConfig
@@ -106,66 +108,43 @@ def main(*args):
cap.add_argument(
"--project-root", help="directory to stop searching for kinds config at"
)
+
rap = ap.add_argument_group(title="Runtime", description="runtime related options")
+ add_launch_args(rap.add_argument)
+
+ # Move to munet.args?
rap.add_argument(
"-C",
"--cleanup",
action="store_true",
help="Remove the entire rundir (not just node subdirs) prior to running.",
)
- rap.add_argument(
- "--gdb", metavar="NODE-LIST", help="comma-sep list of hosts to run gdb on"
- )
- rap.add_argument(
- "--gdb-breakpoints",
- metavar="BREAKPOINT-LIST",
- help="comma-sep list of breakpoints to set",
- )
- rap.add_argument(
- "--host",
- action="store_true",
- help="no isolation for top namespace, bridges exposed to default namespace",
- )
- rap.add_argument(
- "--pcap",
- metavar="TARGET-LIST",
- help="comma-sep list of capture targets (NETWORK or NODE:IFNAME)",
- )
- rap.add_argument(
- "--shell", metavar="NODE-LIST", help="comma-sep list of nodes to open shells on"
- )
- rap.add_argument(
- "--stderr",
- metavar="NODE-LIST",
- help="comma-sep list of nodes to open windows viewing stderr",
- )
- rap.add_argument(
- "--stdout",
- metavar="NODE-LIST",
- help="comma-sep list of nodes to open windows viewing stdout",
- )
+ # Move to munet.args?
rap.add_argument(
"--topology-only",
action="store_true",
help="Do not run any node commands",
)
- rap.add_argument("--unshare-inline", action="store_true", help=argparse.SUPPRESS)
rap.add_argument(
"--validate-only",
action="store_true",
help="Validate the config against the schema definition",
)
+ rap.add_argument("--unshare-inline", action="store_true", help=argparse.SUPPRESS)
+
rap.add_argument("-v", "--verbose", action="store_true", help="be verbose")
rap.add_argument(
"-V", "--version", action="store_true", help="print the verison number and exit"
)
+
eap = ap.add_argument_group(title="Uncommon", description="uncommonly used options")
eap.add_argument("--log-config", help="logging config file (yaml, toml, json, ...)")
eap.add_argument(
- "--no-kill",
+ "--kill",
action="store_true",
- help="Do not kill previous running processes",
+ help="Kill previous running processes using same rundir and exit",
)
+ eap.add_argument("--no-kill", action="store_true", help=argparse.SUPPRESS)
eap.add_argument(
"--no-cli", action="store_true", help="Do not run the interactive CLI"
)
@@ -180,8 +159,18 @@ def main(*args):
sys.exit(0)
rundir = args.rundir if args.rundir else "/tmp/munet"
+ rundir = os.path.abspath(rundir)
args.rundir = rundir
+ if args.kill:
+ logging.info("Killing any previous run using rundir: {rundir}")
+ cleanup_previous(args.rundir)
+ elif is_running_in_rundir(args.rundir):
+ logging.fatal(
+ "Munet processes using rundir: %s, use `--kill` to cleanup first", rundir
+ )
+ return 1
+
if args.cleanup:
if os.path.exists(rundir):
if not os.path.exists(f"{rundir}/config.json"):
@@ -194,6 +183,9 @@ def main(*args):
else:
subprocess.run(["/usr/bin/rm", "-rf", rundir], check=True)
+ if args.kill:
+ return 0
+
subprocess.run(f"mkdir -p {rundir} && chmod 755 {rundir}", check=True, shell=True)
os.environ["MUNET_RUNDIR"] = rundir
@@ -208,9 +200,6 @@ def main(*args):
logger.critical("No nodes defined in config file")
return 1
- if not args.no_kill:
- cleanup_previous()
-
loop = None
status = 4
try:
diff --git a/tests/topotests/munet/args.py b/tests/topotests/munet/args.py
new file mode 100644
index 0000000..49ad891
--- /dev/null
+++ b/tests/topotests/munet/args.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+#
+# April 14 2024, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2024, LabN Consulting, L.L.C.
+#
+"""Common CLI execute argument."""
+
+
+def add_launch_args(add_func):
+
+ add_func("--gdb", metavar="NODE-LIST", help="comma-sep list of hosts to run gdb on")
+ add_func(
+ "--gdb-breakpoints",
+ metavar="BREAKPOINT-LIST",
+ help="comma-sep list of breakpoints to set",
+ )
+ add_func(
+ "--gdb-use-emacs",
+ action="store_true",
+ help="Use emacsclient to run gdb instead of a shell",
+ )
+
+ add_func(
+ "--host",
+ action="store_true",
+ help="no isolation for top namespace, bridges exposed to default namespace",
+ )
+ add_func(
+ "--pcap",
+ metavar="TARGET-LIST",
+ help="comma-sep list of capture targets (NETWORK or NODE:IFNAME) or 'all'",
+ )
+ add_func(
+ "--shell", metavar="NODE-LIST", help="comma-sep list of nodes to open shells on"
+ )
+ add_func(
+ "--stderr",
+ metavar="NODE-LIST",
+ help="comma-sep list of nodes to open windows viewing stderr",
+ )
+ add_func(
+ "--stdout",
+ metavar="NODE-LIST",
+ help="comma-sep list of nodes to open windows viewing stdout",
+ )
+
+
+def add_testing_args(add_func):
+ add_func(
+ "--cli-on-error",
+ action="store_true",
+ help="CLI on test failure",
+ )
+
+ add_func(
+ "--coverage",
+ action="store_true",
+ help="Enable coverage gathering if supported",
+ )
+
+ add_func(
+ "--cov-build-dir",
+ help="Specify the build dir for locating coverage data files",
+ )
+
+ add_launch_args(add_func)
+
+ add_func(
+ "--pause",
+ action="store_true",
+ help="Pause after each test",
+ )
+ add_func(
+ "--pause-at-end",
+ action="store_true",
+ help="Pause before taking munet down",
+ )
+ add_func(
+ "--pause-on-error",
+ action="store_true",
+ help="Pause after (disables default when --shell or -vtysh given)",
+ )
+ add_func(
+ "--no-pause-on-error",
+ dest="pause_on_error",
+ action="store_false",
+ help="Do not pause after (disables default when --shell or -vtysh given)",
+ )
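The new args.py module centralizes these option definitions so the munet and mutest entry points register identical flags. A minimal sketch of how add_launch_args is consumed, mirroring the call in __main__.py above; the parser and argument group here are illustrative, not part of the module:

    import argparse

    from munet.args import add_launch_args

    ap = argparse.ArgumentParser()
    rap = ap.add_argument_group(title="Runtime")
    # The helper receives any callable with argparse's add_argument signature,
    # so the same definitions can be attached to a parser, a group, or similar.
    add_launch_args(rap.add_argument)

    args = ap.parse_args(["--gdb", "r1,r2", "--pcap", "all"])
    print(args.gdb, args.pcap)  # prints: r1,r2 all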
diff --git a/tests/topotests/munet/base.py b/tests/topotests/munet/base.py
index 72b5df5..e77eb15 100644
--- a/tests/topotests/munet/base.py
+++ b/tests/topotests/munet/base.py
@@ -21,6 +21,7 @@ import subprocess
import sys
import tempfile
import time as time_mod
+
from collections import defaultdict
from pathlib import Path
from typing import Union
@@ -28,8 +29,10 @@ from typing import Union
from . import config as munet_config
from . import linux
+
try:
import pexpect
+
from pexpect.fdpexpect import fdspawn
from pexpect.popen_spawn import PopenSpawn
@@ -273,6 +276,9 @@ def get_event_loop():
"""
policy = asyncio.get_event_loop_policy()
loop = policy.get_event_loop()
+ if not hasattr(os, "pidfd_open"):
+ return loop
+
owatcher = policy.get_child_watcher()
logging.debug(
"event_loop_fixture: global policy %s, current loop %s, current watcher %s",
@@ -463,6 +469,8 @@ class Commander: # pylint: disable=R0904
env = {**(kwargs["env"] if "env" in kwargs else os.environ)}
if "MUNET_NODENAME" not in env:
env["MUNET_NODENAME"] = self.name
+ if "MUNET_PID" not in env and "MUNET_PID" in os.environ:
+ env["MUNET_PID"] = os.environ["MUNET_PID"]
kwargs["env"] = env
defaults.update(kwargs)
@@ -774,8 +782,14 @@ class Commander: # pylint: disable=R0904
ps1 = re.escape(ps1)
ps2 = re.escape(ps2)
-
- extra = "PAGER=cat; export PAGER; TERM=dumb; unset HISTFILE; set +o emacs +o vi"
+ extra = [
+ "TERM=dumb",
+ "set +o emacs",
+ "set +o vi",
+ "unset HISTFILE",
+ "PAGER=cat",
+ "export PAGER",
+ ]
pchg = "PS1='{0}' PS2='{1}' PROMPT_COMMAND=''\n".format(ps1p, ps2p)
p.send(pchg)
return ShellWrapper(p, ps1, ps2, extra_init_cmd=extra, will_echo=will_echo)
@@ -928,15 +942,25 @@ class Commander: # pylint: disable=R0904
def _cmd_status(self, cmds, raises=False, warn=True, stdin=None, **kwargs):
"""Execute a command."""
+ timeout = None
+ if "timeout" in kwargs:
+ timeout = kwargs["timeout"]
+ del kwargs["timeout"]
+
pinput, stdin = Commander._cmd_status_input(stdin)
p, actual_cmd = self._popen("cmd_status", cmds, stdin=stdin, **kwargs)
- o, e = p.communicate(pinput)
+ o, e = p.communicate(pinput, timeout=timeout)
return self._cmd_status_finish(p, cmds, actual_cmd, o, e, raises, warn)
async def _async_cmd_status(
self, cmds, raises=False, warn=True, stdin=None, text=None, **kwargs
):
"""Execute a command."""
+ timeout = None
+ if "timeout" in kwargs:
+ timeout = kwargs["timeout"]
+ del kwargs["timeout"]
+
pinput, stdin = Commander._cmd_status_input(stdin)
p, actual_cmd = await self._async_popen(
"async_cmd_status", cmds, stdin=stdin, **kwargs
@@ -949,7 +973,12 @@ class Commander: # pylint: disable=R0904
if encoding is not None and isinstance(pinput, str):
pinput = pinput.encode(encoding)
- o, e = await p.communicate(pinput)
+ try:
+ o, e = await asyncio.wait_for(p.communicate(), timeout=timeout)
+ except (TimeoutError, asyncio.TimeoutError) as error:
+ raise subprocess.TimeoutExpired(
+ cmd=actual_cmd, timeout=timeout, output=None, stderr=None
+ ) from error
if encoding is not None:
o = o.decode(encoding) if o is not None else o
e = e.decode(encoding) if e is not None else e
@@ -1214,7 +1243,13 @@ class Commander: # pylint: disable=R0904
if self.is_vm and self.use_ssh and not ns_only: # pylint: disable=E1101
if isinstance(cmd, str):
cmd = shlex.split(cmd)
- cmd = ["/usr/bin/env", f"MUNET_NODENAME={self.name}"] + cmd
+            envcmd = [
+                "/usr/bin/env",
+                f"MUNET_NODENAME={self.name}",
+            ]
+            if "MUNET_PID" in os.environ:
+                envcmd.append(f"MUNET_PID={os.environ.get('MUNET_PID')}")
+            cmd = envcmd + cmd
# get the ssh cmd
cmd = self._get_pre_cmd(False, True, ns_only=ns_only) + [shlex.join(cmd)]
@@ -1234,6 +1269,8 @@ class Commander: # pylint: disable=R0904
envvars = f"MUNET_NODENAME={self.name} NODENAME={self.name}"
if hasattr(self, "rundir"):
envvars += f" RUNDIR={self.rundir}"
+ if "MUNET_PID" in os.environ:
+ envvars += f" MUNET_PID={os.environ.get('MUNET_PID')}"
if hasattr(self.unet, "config_dirname") and self.unet.config_dirname:
envvars += f" CONFIGDIR={self.unet.config_dirname}"
elif "CONFIGDIR" in os.environ:
@@ -2514,7 +2551,7 @@ class Bridge(SharedNamespace, InterfaceMixin):
self.logger.debug("Bridge: Creating")
- assert len(self.name) <= 16 # Make sure fits in IFNAMSIZE
+ # assert len(self.name) <= 16 # Make sure fits in IFNAMSIZE
self.cmd_raises(f"ip link delete {name} || true")
self.cmd_raises(f"ip link add {name} type bridge")
if self.mtu:
@@ -2638,10 +2675,6 @@ class BaseMunet(LinuxNamespace):
self.cfgopt = munet_config.ConfigOptionsProxy(pytestconfig)
- super().__init__(
- name, mount=True, net=isolated, uts=isolated, pid=pid, unet=None, **kwargs
- )
-
# This allows us to cleanup any leftover running munet's
if "MUNET_PID" in os.environ:
if os.environ["MUNET_PID"] != str(our_pid):
@@ -2652,6 +2685,10 @@ class BaseMunet(LinuxNamespace):
)
os.environ["MUNET_PID"] = str(our_pid)
+ super().__init__(
+ name, mount=True, net=isolated, uts=isolated, pid=pid, unet=None, **kwargs
+ )
+
# this is for testing purposes do not use
if not BaseMunet.g_unet:
BaseMunet.g_unet = self
@@ -2759,7 +2796,7 @@ class BaseMunet(LinuxNamespace):
self.logger.error('"%s" len %s > 16', nsif1, len(nsif1))
elif len(nsif2) > 16:
self.logger.error('"%s" len %s > 16', nsif2, len(nsif2))
- assert len(nsif1) <= 16 and len(nsif2) <= 16 # Make sure fits in IFNAMSIZE
+ assert len(nsif1) < 16 and len(nsif2) < 16 # Make sure fits in IFNAMSIZE
self.logger.debug("%s: Creating veth pair for link %s", self, lname)
@@ -2987,8 +3024,11 @@ if True: # pylint: disable=using-constant-test
self._expectf = self.child.expect
if extra_init_cmd:
- self.expect_prompt()
- self.child.sendline(extra_init_cmd)
+ if isinstance(extra_init_cmd, str):
+ extra_init_cmd = [extra_init_cmd]
+ for ecmd in extra_init_cmd:
+ self.expect_prompt()
+ self.child.sendline(ecmd)
self.expect_prompt()
def expect_prompt(self, timeout=-1):
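These base.py changes thread an optional timeout keyword through both command paths: it is popped from kwargs, handed to communicate() (sync) or asyncio.wait_for() (async), and the async case is converted into subprocess.TimeoutExpired so callers see a single exception type. A hedged caller-side sketch, assuming a node object whose cmd_status() returns (rc, stdout, stderr) and forwards extra kwargs:

    import subprocess

    def run_with_deadline(node, cmd, deadline=10):
        """Run cmd on a munet node, bounding it to `deadline` seconds."""
        try:
            rc, out, err = node.cmd_status(cmd, timeout=deadline)
            return rc, out, err
        except subprocess.TimeoutExpired as error:
            # Both the sync and async paths now surface this exception.
            return 124, "", f"timed out after {error.timeout}s"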
diff --git a/tests/topotests/munet/cleanup.py b/tests/topotests/munet/cleanup.py
index c641cda..12ea6e2 100644
--- a/tests/topotests/munet/cleanup.py
+++ b/tests/topotests/munet/cleanup.py
@@ -59,25 +59,33 @@ def _get_our_pids():
return {}
-def _get_other_pids():
- piddict = get_pids_with_env("MUNET_PID")
- unet_pids = {d["MUNET_PID"] for d in piddict.values()}
+def _get_other_pids(rundir):
+ if rundir:
+ # get only munet pids using the given rundir
+ piddict = get_pids_with_env("MUNET_RUNDIR", str(rundir))
+ else:
+ # Get all munet pids
+ piddict = get_pids_with_env("MUNET_PID")
+ unet_pids = {d["MUNET_PID"] for d in piddict.values() if "MUNET_PID" in d}
pids_by_upid = {p: set() for p in unet_pids}
for pid, envdict in piddict.items():
+ if "MUNET_PID" not in envdict:
+ continue
unet_pid = envdict["MUNET_PID"]
pids_by_upid[unet_pid].add(pid)
# Filter out any child pid sets whos munet pid is still running
return {x: y for x, y in pids_by_upid.items() if x not in y}
-def _get_pids_by_upid(ours):
+def _get_pids_by_upid(ours, rundir):
if ours:
+ assert rundir is None
return _get_our_pids()
- return _get_other_pids()
+ return _get_other_pids(rundir)
-def _cleanup_pids(ours):
- pids_by_upid = _get_pids_by_upid(ours).items()
+def _cleanup_pids(ours, rundir):
+ pids_by_upid = _get_pids_by_upid(ours, rundir).items()
if not pids_by_upid:
return
@@ -94,7 +102,7 @@ def _cleanup_pids(ours):
# return
# time.sleep(1)
- pids_by_upid = _get_pids_by_upid(ours).items()
+ pids_by_upid = _get_pids_by_upid(ours, rundir).items()
_kill_piddict(pids_by_upid, signal.SIGKILL)
@@ -103,12 +111,16 @@ def cleanup_current():
Currently this only scans for old processes.
"""
- _cleanup_pids(True)
+ _cleanup_pids(True, None)
-def cleanup_previous():
+def cleanup_previous(rundir=None):
"""Attempt to cleanup preview runs.
Currently this only scans for old processes.
"""
- _cleanup_pids(False)
+ _cleanup_pids(False, rundir)
+
+
+def is_running_in_rundir(rundir):
+ return bool(get_pids_with_env("MUNET_RUNDIR", str(rundir)))
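cleanup_previous() now accepts an optional rundir so only processes advertising that run directory (via the MUNET_RUNDIR environment variable) are killed, and the new is_running_in_rundir() reports whether any such processes exist. A small sketch of the guard pattern the new __main__.py code above uses; the wrapper function is illustrative:

    import logging
    import sys

    from munet.cleanup import cleanup_previous, is_running_in_rundir

    def ensure_rundir_free(rundir, kill=False):
        """Refuse to reuse a rundir that still has live munet processes."""
        if kill:
            logging.info("Killing any previous run using rundir: %s", rundir)
            cleanup_previous(rundir)
        elif is_running_in_rundir(rundir):
            logging.fatal("Munet processes using rundir: %s, use --kill first", rundir)
            sys.exit(1)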
diff --git a/tests/topotests/munet/cli.py b/tests/topotests/munet/cli.py
index 133644e..01a7091 100644
--- a/tests/topotests/munet/cli.py
+++ b/tests/topotests/munet/cli.py
@@ -106,9 +106,13 @@ def is_host_regex(restr):
def get_host_regex(restr):
- if len(restr) < 3 or restr[0] != "/" or restr[-1] != "/":
+ try:
+ if len(restr) < 3 or restr[0] != "/" or restr[-1] != "/":
+ return None
+ return re.compile(restr[1:-1])
+ except re.error:
+ logging.error("Invalid regex")
return None
- return re.compile(restr[1:-1])
def host_in(restr, names):
@@ -126,8 +130,8 @@ def expand_host(restr, names):
hosts = []
regexp = get_host_regex(restr)
if not regexp:
- assert restr in names
- hosts.append(restr)
+ if restr in names:
+ hosts.append(restr)
else:
for name in names:
if regexp.fullmatch(name):
diff --git a/tests/topotests/munet/logconf-mutest.yaml b/tests/topotests/munet/logconf-mutest.yaml
index b450fb9..c0b636c 100644
--- a/tests/topotests/munet/logconf-mutest.yaml
+++ b/tests/topotests/munet/logconf-mutest.yaml
@@ -1,5 +1,8 @@
version: 1
formatters:
+ result_color:
+ class: munet.mulog.ResultColorFormatter
+ format: '%(levelname)5s: %(message)s'
brief:
format: '%(levelname)5s: %(message)s'
operfmt:
@@ -22,7 +25,7 @@ handlers:
info_console:
level: INFO
class: logging.StreamHandler
- formatter: brief
+ formatter: result_color
stream: ext://sys.stderr
oper_console:
level: DEBUG
diff --git a/tests/topotests/munet/mucmd.py b/tests/topotests/munet/mucmd.py
index 5518c6d..cd356f3 100644
--- a/tests/topotests/munet/mucmd.py
+++ b/tests/topotests/munet/mucmd.py
@@ -9,7 +9,6 @@
import argparse
import json
import os
-import subprocess
import sys
from pathlib import Path
@@ -90,19 +89,14 @@ def main(*args):
ecmd = "/usr/bin/nsenter"
eargs = [ecmd]
- output = subprocess.check_output(["/usr/bin/nsenter", "--help"], encoding="utf-8")
- if " -a," in output:
- eargs.append("-a")
- else:
- # -U doesn't work
- for flag in ["-u", "-i", "-m", "-n", "-C", "-T"]:
- if f" {flag}," in output:
- eargs.append(flag)
+    # Start mucmd the same way the base process is started
+ eargs.append(f"--mount=/proc/{pid}/ns/mnt")
+ eargs.append(f"--net=/proc/{pid}/ns/net")
eargs.append(f"--pid=/proc/{pid}/ns/pid_for_children")
+ eargs.append(f"--uts=/proc/{pid}/ns/uts")
eargs.append(f"--wd={rundir}")
- eargs.extend(["-t", pid])
eargs += args.shellcmd
- # print("Using ", eargs)
+ #print("Using ", eargs)
return os.execvpe(ecmd, eargs, {**env, **envcfg})
diff --git a/tests/topotests/munet/mulog.py b/tests/topotests/munet/mulog.py
index f840eae..968acd9 100644
--- a/tests/topotests/munet/mulog.py
+++ b/tests/topotests/munet/mulog.py
@@ -12,6 +12,9 @@ import logging
from pathlib import Path
+do_color = True
+
+
class MultiFileHandler(logging.FileHandler):
"""A logging handler that logs to new files based on the logger name.
@@ -118,5 +121,28 @@ class ColorFormatter(logging.Formatter):
super().__init__(fmt, datefmt, style, **kwargs)
def format(self, record):
+ if not do_color:
+ return super().format(record)
formatter = self.formatters.get(record.levelno)
return formatter.format(record)
+
+
+class ResultColorFormatter(logging.Formatter):
+ """A formatter that colorizes PASS/FAIL strings based on level."""
+
+ green = "\x1b[32m"
+ red = "\x1b[31m"
+ reset = "\x1b[0m"
+
+ def format(self, record):
+ s = super().format(record)
+ if not do_color:
+ return s
+ idx = s.find("FAIL")
+ if idx >= 0 and record.levelno > logging.INFO:
+ s = s[:idx] + self.red + "FAIL" + self.reset + s[idx + 4 :]
+ elif record.levelno == logging.INFO:
+ idx = s.find("PASS")
+ if idx >= 0:
+ s = s[:idx] + self.green + "PASS" + self.reset + s[idx + 4 :]
+ return s
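ResultColorFormatter colorizes the PASS/FAIL markers in mutest result lines (green for INFO-level PASS, red for higher-level FAIL) and honors the new module-level do_color flag, which mutest's __main__.py clears when stderr is not a tty. A minimal programmatic equivalent of the logconf-mutest.yaml wiring above; the logger name is made up:

    import logging
    import sys

    from munet import mulog

    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(mulog.ResultColorFormatter("%(levelname)5s: %(message)s"))

    log = logging.getLogger("mutest.results")
    log.addHandler(handler)
    log.setLevel(logging.INFO)

    mulog.do_color = sys.stderr.isatty()    # skip ANSI colors when redirected
    log.info("  PASS 1:example.basic")      # PASS rendered green on a tty
    log.warning("  FAIL 2:example.broken")  # FAIL rendered red on a tty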
diff --git a/tests/topotests/munet/munet-schema.json b/tests/topotests/munet/munet-schema.json
index a1dcd87..6ebc368 100644
--- a/tests/topotests/munet/munet-schema.json
+++ b/tests/topotests/munet/munet-schema.json
@@ -93,12 +93,24 @@
"image": {
"type": "string"
},
+ "hostnet": {
+ "type": "boolean"
+ },
"server": {
"type": "string"
},
"server-port": {
"type": "number"
},
+ "ssh-identity-file": {
+ "type": "string"
+ },
+ "ssh-user": {
+ "type": "string"
+ },
+ "ssh-password": {
+ "type": "string"
+ },
"qemu": {
"type": "object",
"properties": {
@@ -108,6 +120,15 @@
"disk": {
"type": "string"
},
+ "disk-driver": {
+ "type": "string"
+ },
+ "disk-template": {
+ "type": "string"
+ },
+ "initial-cmd": {
+ "type": "string"
+ },
"kerenel": {
"type": "string"
},
@@ -141,6 +162,9 @@
"password": {
"type": "string"
},
+ "initial-password": {
+ "type": "string"
+ },
"expects": {
"type": "array",
"items": {
@@ -362,6 +386,9 @@
},
"ipv6": {
"type": "string"
+ },
+ "external": {
+ "type": "boolean"
}
}
}
@@ -401,12 +428,24 @@
"image": {
"type": "string"
},
+ "hostnet": {
+ "type": "boolean"
+ },
"server": {
"type": "string"
},
"server-port": {
"type": "number"
},
+ "ssh-identity-file": {
+ "type": "string"
+ },
+ "ssh-user": {
+ "type": "string"
+ },
+ "ssh-password": {
+ "type": "string"
+ },
"qemu": {
"type": "object",
"properties": {
@@ -416,6 +455,15 @@
"disk": {
"type": "string"
},
+ "disk-driver": {
+ "type": "string"
+ },
+ "disk-template": {
+ "type": "string"
+ },
+ "initial-cmd": {
+ "type": "string"
+ },
"kerenel": {
"type": "string"
},
@@ -449,6 +497,9 @@
"password": {
"type": "string"
},
+ "initial-password": {
+ "type": "string"
+ },
"expects": {
"type": "array",
"items": {
diff --git a/tests/topotests/munet/mutest/__main__.py b/tests/topotests/munet/mutest/__main__.py
index c870311..a78c69e 100644
--- a/tests/topotests/munet/mutest/__main__.py
+++ b/tests/topotests/munet/mutest/__main__.py
@@ -20,9 +20,13 @@ from copy import deepcopy
from pathlib import Path
from typing import Union
+from munet import mulog
from munet import parser
+from munet.args import add_testing_args
from munet.base import Bridge
from munet.base import get_event_loop
+from munet.cli import async_cli
+from munet.compat import PytestConfig
from munet.mutest import userapi as uapi
from munet.native import L3NodeMixin
from munet.native import Munet
@@ -36,7 +40,9 @@ root_logger = logging.getLogger("")
exec_formatter = logging.Formatter("%(asctime)s %(levelname)5s: %(name)s: %(message)s")
-async def get_unet(config: dict, croot: Path, rundir: Path, unshare: bool = False):
+async def get_unet(
+ config: dict, croot: Path, rundir: Path, args: Namespace, unshare: bool = False
+):
"""Create and run a new Munet topology.
The topology is built from the given ``config`` to run inside the path indicated
@@ -48,6 +54,7 @@ async def get_unet(config: dict, croot: Path, rundir: Path, unshare: bool = Fals
value will be modified and stored in the built ``Munet`` object.
croot: common root of all tests, used to search for ``kinds.yaml`` files.
rundir: the path to the run directory for this topology.
+ args: argparse args
unshare: True to unshare the process into it's own private namespace.
Yields:
@@ -58,7 +65,11 @@ async def get_unet(config: dict, croot: Path, rundir: Path, unshare: bool = Fals
try:
try:
unet = await async_build_topology(
- config, rundir=str(rundir), unshare_inline=unshare
+ config,
+ rundir=str(rundir),
+ args=args,
+ pytestconfig=PytestConfig(args),
+ unshare_inline=unshare,
)
except Exception as error:
logging.debug("unet build failed: %s", error, exc_info=True)
@@ -221,9 +232,13 @@ async def execute_test(
targets["."] = unet
tc = uapi.TestCase(
- str(test_num), test_name, test, targets, logger, reslog, args.full_summary
+ str(test_num), test_name, test, targets, args, logger, reslog, args.full_summary
)
- passed, failed, e = tc.execute()
+ try:
+ passed, failed, e = tc.execute()
+ except uapi.CLIOnErrorError as error:
+ await async_cli(unet)
+ passed, failed, e = 0, 0, error
run_time = time.time() - tc.info.start_time
@@ -278,6 +293,10 @@ async def run_tests(args):
start_time = time.time()
try:
for dirpath in tests:
+ if args.validate_only:
+ parser.validate_config(configs[dirpath], reslog, args)
+ continue
+
test_files = tests[dirpath]
for test in test_files:
tnum += 1
@@ -294,10 +313,12 @@ async def run_tests(args):
root_logger.addHandler(exec_handler)
try:
- async for unet in get_unet(config, common, rundir):
+ async for unet in get_unet(config, common, rundir, args):
+
if not printed_header:
print_header(reslog, unet)
printed_header = True
+
passed, failed, e = await execute_test(
unet, test, args, tnum, exec_handler
)
@@ -321,6 +342,9 @@ async def run_tests(args):
except KeyboardInterrupt:
pass
+ if args.validate_only:
+ return False
+
run_time = time.time() - start_time
tnum = 0
tpassed = 0
@@ -357,8 +381,10 @@ async def run_tests(args):
for result in results:
test_name, passed, failed, e = result
tnum += 1
- s = "FAIL" if failed or e else "PASS"
- reslog.info(" %s %s:%s", s, tnum, test_name)
+ if failed or e:
+ reslog.warning(" FAIL %s:%s", tnum, test_name)
+ else:
+ reslog.info(" PASS %s:%s", tnum, test_name)
reslog.info("-" * 70)
reslog.info(
@@ -386,35 +412,47 @@ async def async_main(args):
def main():
ap = ArgumentParser()
ap.add_argument(
- "--dist",
- type=int,
- nargs="?",
- const=-1,
- default=0,
- action="store",
- metavar="NUM-THREADS",
- help="Run in parallel, value is num. of threads or no value for auto",
+ "-v", dest="verbose", action="count", default=0, help="More -v's, more verbose"
)
- ap.add_argument("-d", "--rundir", help="runtime directory for tempfiles, logs, etc")
ap.add_argument(
+ "-V", "--version", action="store_true", help="print the verison number and exit"
+ )
+ ap.add_argument("paths", nargs="*", help="Paths to collect tests from")
+
+ rap = ap.add_argument_group(title="Runtime", description="runtime related options")
+ rap.add_argument(
+ "-d", "--rundir", help="runtime directory for tempfiles, logs, etc"
+ )
+ add_testing_args(rap.add_argument)
+
+ eap = ap.add_argument_group(title="Uncommon", description="uncommonly used options")
+ eap.add_argument(
"--file-select", default="mutest_*.py", help="shell glob for finding tests"
)
- ap.add_argument("--log-config", help="logging config file (yaml, toml, json, ...)")
- ap.add_argument(
- "-V",
+ eap.add_argument(
"--full-summary",
action="store_true",
help="print full summary headers from docstrings",
)
- ap.add_argument(
- "-v", dest="verbose", action="count", default=0, help="More -v's, more verbose"
+ eap.add_argument("--log-config", help="logging config file (yaml, toml, json, ...)")
+ eap.add_argument(
+ "--validate-only",
+ action="store_true",
+ help="Validate the munet configs against the schema definition",
)
- ap.add_argument("paths", nargs="*", help="Paths to collect tests from")
+
args = ap.parse_args()
+ if args.version:
+ from importlib import metadata # pylint: disable=C0415
+
+ print(metadata.version("munet"))
+ sys.exit(0)
+
rundir = args.rundir if args.rundir else "/tmp/mutest"
- args.rundir = Path(rundir)
- os.environ["MUNET_RUNDIR"] = rundir
+ rundir = Path(rundir).absolute()
+ args.rundir = rundir
+ os.environ["MUNET_RUNDIR"] = str(rundir)
subprocess.run(f"mkdir -p {rundir} && chmod 755 {rundir}", check=True, shell=True)
config = parser.setup_logging(args, config_base="logconf-mutest")
@@ -425,6 +463,9 @@ def main():
fconfig.get("format"), fconfig.get("datefmt")
)
+ if not hasattr(sys.stderr, "isatty") or not sys.stderr.isatty():
+ mulog.do_color = False
+
loop = None
status = 4
try:
diff --git a/tests/topotests/munet/mutest/userapi.py b/tests/topotests/munet/mutest/userapi.py
index 7967dd0..abc63af 100644
--- a/tests/topotests/munet/mutest/userapi.py
+++ b/tests/topotests/munet/mutest/userapi.py
@@ -65,8 +65,11 @@ import json
import logging
import pprint
import re
+import subprocess
+import sys
import time
+from argparse import Namespace
from pathlib import Path
from typing import Any
from typing import Union
@@ -76,6 +79,51 @@ from deepdiff import DeepDiff as json_cmp
from munet.base import Commander
+class ScriptError(Exception):
+ """An unrecoverable script failure."""
+
+
+class CLIOnErrorError(Exception):
+ """Enter CLI after error."""
+
+
+def pause_test(desc=""):
+ isatty = sys.stdout.isatty()
+ if not isatty:
+ desc = f" for {desc}" if desc else ""
+ logging.info("NO PAUSE on non-tty terminal%s", desc)
+ return
+
+ while True:
+ if desc:
+ print(f"\n== PAUSING: {desc} ==")
+ try:
+ user = input('PAUSED, "cli" for CLI, "pdb" to debug, "Enter" to continue: ')
+ except EOFError:
+ print("^D...continuing")
+ break
+ user = user.strip()
+ if user == "cli":
+ raise CLIOnErrorError()
+ if user == "pdb":
+ breakpoint() # pylint: disable=W1515
+ elif user:
+ print(f'Unrecognized input: "{user}"')
+ else:
+ break
+
+
+def act_on_result(success, args, desc=""):
+ if args.pause:
+ pause_test(desc)
+ elif success:
+ return
+ if args.cli_on_error:
+ raise CLIOnErrorError()
+ if args.pause_on_error:
+ pause_test(desc)
+
+
class TestCaseInfo:
"""Object to hold nestable TestCase Results."""
@@ -140,6 +188,7 @@ class TestCase:
name: str,
path: Path,
targets: dict,
+ args: Namespace,
output_logger: logging.Logger = None,
result_logger: logging.Logger = None,
full_summary: bool = False,
@@ -157,6 +206,7 @@ class TestCase:
self.__in_section = False
self.targets = targets
+ self.args = args
self.last = ""
self.last_m = None
@@ -285,7 +335,10 @@ class TestCase:
# Extract any docstring as a title.
if print_header:
- title = locals()[f"_{name}"].__doc__.lstrip()
+ title = locals()[f"_{name}"].__doc__
+ if title is None:
+ title = ""
+ title = title.lstrip()
if self.__short_doc_header and (title := title.lstrip()):
if (idx := title.find("\n")) != -1:
title = title[:idx].strip()
@@ -299,6 +352,10 @@ class TestCase:
# Here's where we can do async in the future if we want.
# result = await locals()[f"_{name}"](_ok_result)
+ except ScriptError as error:
+ return error
+ except CLIOnErrorError:
+ raise
except Exception as error:
logging.error(
"Unexpected exception executing %s: %s", name, error, exc_info=True
@@ -381,7 +438,9 @@ class TestCase:
target: the target to execute the command on.
cmd: string to execut on the target.
"""
- out = self.targets[target].cmd_nostatus(cmd, warn=False)
+ out = self.targets[target].cmd_nostatus(
+ cmd, stdin=subprocess.DEVNULL, warn=False
+ )
self.last = out = out.rstrip()
report = out if out else "<no output>"
self.logf("COMMAND OUTPUT:\n%s", report)
@@ -398,12 +457,14 @@ class TestCase:
target: the target to execute the command on.
cmd: string to execute on the target.
"""
- out = self.targets[target].cmd_nostatus(cmd, warn=False)
+ out = self.targets[target].cmd_nostatus(
+ cmd, stdin=subprocess.DEVNULL, warn=False
+ )
self.last = out = out.rstrip()
try:
js = json.loads(out)
except Exception as error:
- js = {}
+ js = None
self.olog.warning(
"JSON load failed. Check command output is in JSON format: %s",
error,
@@ -482,20 +543,33 @@ class TestCase:
exact_match: if True then the json must exactly match.
"""
js = self._command_json(target, cmd)
+ if js is None:
+ # Always fail on bad json, even if user expected failure
+ # return expect_fail, {}
+ return False, {}
+
try:
+ # Convert to string to validate the input is valid JSON
+ if not isinstance(match, str):
+ match = json.dumps(match)
expect = json.loads(match)
except Exception as error:
expect = {}
self.olog.warning(
"JSON load failed. Check match value is in JSON format: %s", error
)
+ # Always fail on bad json, even if user expected failure
+ # return expect_fail, {}
+ return False, {}
if exact_match:
deep_diff = json_cmp(expect, js)
# Convert DeepDiff completely into dicts or lists at all levels
json_diff = json.loads(deep_diff.to_json())
else:
- deep_diff = json_cmp(expect, js, ignore_order=True)
+ deep_diff = json_cmp(
+ expect, js, ignore_order=True, cutoff_intersection_for_pairs=1
+ )
# Convert DeepDiff completely into dicts or lists at all levels
json_diff = json.loads(deep_diff.to_json())
# Remove new fields in json object from diff
@@ -570,6 +644,7 @@ class TestCase:
"""
path = Path(pathname)
path = self.info.path.parent.joinpath(path)
+ do_cli = False
self.oplogf(
"include: new path: %s create section: %s currently __in_section: %s",
@@ -589,7 +664,12 @@ class TestCase:
self.info.path = path
self.oplogf("include: swapped info path: new %s old %s", path, old_path)
- self.__exec_script(path, print_header=new_section, add_newline=new_section)
+ try:
+ e = self.__exec_script(
+ path, print_header=new_section, add_newline=new_section
+ )
+ except CLIOnErrorError:
+ do_cli = True
if new_section:
# Something within the section creating include has also created a section
@@ -616,6 +696,11 @@ class TestCase:
self.info.path = old_path
self.oplogf("include: restored info path: %s", old_path)
+ if do_cli:
+ raise CLIOnErrorError()
+ if e:
+ raise ScriptError(e)
+
def __end_section(self):
self.oplogf("__end_section: __in_section: %s", self.__in_section)
info = self.__pop_execinfo()
@@ -719,6 +804,7 @@ class TestCase:
)
if desc:
self.__post_result(target, success, desc)
+ act_on_result(success, self.args, desc)
return success, ret
def test_step(self, expr_or_value: Any, desc: str, target: str = "") -> bool:
@@ -728,6 +814,7 @@ class TestCase:
"""
success = bool(expr_or_value)
self.__post_result(target, success, desc)
+ act_on_result(success, self.args, desc)
return success
def match_step_json(
@@ -760,6 +847,7 @@ class TestCase:
)
if desc:
self.__post_result(target, success, desc)
+ act_on_result(success, self.args, desc)
return success, ret
def wait_step(
@@ -808,6 +896,7 @@ class TestCase:
)
if desc:
self.__post_result(target, success, desc)
+ act_on_result(success, self.args, desc)
return success, ret
def wait_step_json(
@@ -846,6 +935,7 @@ class TestCase:
)
if desc:
self.__post_result(target, success, desc)
+ act_on_result(success, self.args, desc)
return success, ret
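With these hooks every *_step helper now calls act_on_result(), so --pause, --pause-on-error and --cli-on-error apply uniformly after each step, and pause_test() raises CLIOnErrorError when the user types "cli", which the mutest runner catches to drop into the munet CLI. A rough sketch of just that decision logic, using a stand-in argparse Namespace rather than the real TestCase plumbing:

    from argparse import Namespace

    from munet.mutest.userapi import CLIOnErrorError, act_on_result

    args = Namespace(pause=False, pause_on_error=False, cli_on_error=True)

    try:
        # A failing step with --cli-on-error set raises CLIOnErrorError,
        # which execute_test() above turns into an interactive CLI session.
        act_on_result(False, args, "bgp session established")
    except CLIOnErrorError:
        print("would enter the munet CLI here")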
diff --git a/tests/topotests/munet/native.py b/tests/topotests/munet/native.py
index 4fbbb85..5747d5e 100644
--- a/tests/topotests/munet/native.py
+++ b/tests/topotests/munet/native.py
@@ -8,8 +8,10 @@
# pylint: disable=protected-access
"""A module that defines objects for standalone use."""
import asyncio
+import base64
import errno
import getpass
+import glob
import ipaddress
import logging
import os
@@ -26,8 +28,10 @@ from . import cli
from .base import BaseMunet
from .base import Bridge
from .base import Commander
+from .base import InterfaceMixin
from .base import LinuxNamespace
from .base import MunetError
+from .base import SharedNamespace
from .base import Timeout
from .base import _async_get_exec_path
from .base import _get_exec_path
@@ -130,6 +134,22 @@ def convert_ranges_to_bitmask(ranges):
return bitmask
+class ExternalNetwork(SharedNamespace, InterfaceMixin):
+ """A network external to munet."""
+
+ def __init__(self, name=None, unet=None, logger=None, mtu=None, config=None):
+ """Create an external network."""
+ del logger # avoid linter
+ del mtu # avoid linter
+ # Do we want to use os.getpid() rather than unet.pid?
+ super().__init__(name, pid=unet.pid, nsflags=unet.nsflags, unet=unet)
+ self.config = config if config else {}
+
+ async def _async_delete(self):
+ self.logger.debug("%s: deleting", self)
+ await super()._async_delete()
+
+
class L2Bridge(Bridge):
"""A linux bridge with no IP network address."""
@@ -394,6 +414,10 @@ class NodeMixin:
async def async_cleanup_cmd(self):
"""Run the configured cleanup commands for this node."""
+ if self.cleanup_called:
+ return
+ self.cleanup_called = True
+
return await self._async_cleanup_cmd()
def has_ready_cmd(self) -> bool:
@@ -433,14 +457,14 @@ class NodeMixin:
outopt = outopt if outopt is not None else ""
if outopt == "all" or self.name in outopt.split(","):
outname = stdout.name if hasattr(stdout, "name") else stdout
- self.run_in_window(f"tail -F {outname}", title=f"O:{self.name}")
+ self.run_in_window(f"tail -n+1 -F {outname}", title=f"O:{self.name}")
if stderr:
erropt = self.unet.cfgopt.getoption("--stderr")
erropt = erropt if erropt is not None else ""
if erropt == "all" or self.name in erropt.split(","):
errname = stderr.name if hasattr(stderr, "name") else stderr
- self.run_in_window(f"tail -F {errname}", title=f"E:{self.name}")
+ self.run_in_window(f"tail -n+1 -F {errname}", title=f"E:{self.name}")
def pytest_hook_open_shell(self):
if not self.unet:
@@ -549,17 +573,38 @@ class NodeMixin:
await super()._async_delete()
+class HostnetNode(NodeMixin, LinuxNamespace):
+ """A node for running commands in the host network namespace."""
+
+ def __init__(self, name, pid=True, **kwargs):
+ if "net" in kwargs:
+ del kwargs["net"]
+ super().__init__(name, pid=pid, net=False, **kwargs)
+
+ self.logger.debug("%s: creating", self)
+
+ self.mgmt_ip = None
+ self.mgmt_ip6 = None
+ self.set_ns_cwd(self.rundir)
+
+ super().pytest_hook_open_shell()
+ self.logger.info("%s: created", self)
+
+ def get_ifname(self, netname): # pylint: disable=useless-return
+ del netname
+ return None
+
+ async def _async_delete(self):
+ self.logger.debug("%s: deleting", self)
+ await super()._async_delete()
+
+
class SSHRemote(NodeMixin, Commander):
"""SSHRemote a node representing an ssh connection to something."""
def __init__(
self,
name,
- server,
- port=22,
- user=None,
- password=None,
- idfile=None,
**kwargs,
):
super().__init__(name, **kwargs)
@@ -574,32 +619,33 @@ class SSHRemote(NodeMixin, Commander):
self.mgmt_ip = None
self.mgmt_ip6 = None
- self.port = port
-
- if user:
- self.user = user
- elif "SUDO_USER" in os.environ:
- self.user = os.environ["SUDO_USER"]
- else:
+ self.server = self.config["server"]
+ self.port = int(self.config.get("server-port", 22))
+ self.sudo_user = os.environ.get("SUDO_USER")
+ self.user = self.config.get("ssh-user")
+ if not self.user:
+ self.user = self.sudo_user
+ if not self.user:
self.user = getpass.getuser()
- self.password = password
- self.idfile = idfile
-
- self.server = f"{self.user}@{server}"
+ self.password = self.config.get("ssh-password")
+ self.idfile = self.config.get("ssh-identity-file")
+ self.use_host_network = None
# Setup our base `pre-cmd` values
#
# We maybe should add environment variable transfer here in particular
# MUNET_NODENAME. The problem is the user has to explicitly approve
# of SendEnv variables.
- self.__base_cmd = [
- get_exec_path_host("sudo"),
- "-E",
- f"-u{self.user}",
- get_exec_path_host("ssh"),
- ]
- if port != 22:
- self.__base_cmd.append(f"-p{port}")
+ self.__base_cmd = []
+ if self.idfile and self.sudo_user:
+ self.__base_cmd += [
+ get_exec_path_host("sudo"),
+ "-E",
+ f"-u{self.sudo_user}",
+ ]
+ self.__base_cmd.append(get_exec_path_host("ssh"))
+ if self.port != 22:
+ self.__base_cmd.append(f"-p{self.port}")
self.__base_cmd.append("-q")
self.__base_cmd.append("-oStrictHostKeyChecking=no")
self.__base_cmd.append("-oUserKnownHostsFile=/dev/null")
@@ -609,18 +655,34 @@ class SSHRemote(NodeMixin, Commander):
# self.__base_cmd.append("-oSendVar='TEST'")
self.__base_cmd_pty = list(self.__base_cmd)
self.__base_cmd_pty.append("-t")
- self.__base_cmd.append(self.server)
- self.__base_cmd_pty.append(self.server)
+ server_str = f"{self.user}@{self.server}"
+ self.__base_cmd.append(server_str)
+ self.__base_cmd_pty.append(server_str)
# self.set_pre_cmd(pre_cmd, pre_cmd_tty)
self.logger.info("%s: created", self)
- def has_ready_cmd(self) -> bool:
- return bool(self.config.get("ready-cmd", "").strip())
-
def _get_pre_cmd(self, use_str, use_pty, ns_only=False, **kwargs):
- pre_cmd = []
- if self.unet:
+ # None on first use, set after
+ if self.use_host_network is None:
+ # We have networks now so try and ping the server in the namespace
+ if not self.unet:
+ self.use_host_network = True
+ else:
+ rc, _, _ = self.unet.cmd_status(f"ping -w1 -c1 {self.server}")
+ if rc:
+ self.use_host_network = True
+ else:
+ self.use_host_network = False
+
+ if self.use_host_network:
+ self.logger.debug("Using host namespace for ssh connection")
+ else:
+ self.logger.debug("Using munet namespace for ssh connection")
+
+ if self.use_host_network:
+ pre_cmd = []
+ else:
pre_cmd = self.unet._get_pre_cmd(False, use_pty, ns_only=False, **kwargs)
if ns_only:
return pre_cmd
@@ -976,17 +1038,16 @@ ff02::2\tip6-allrouters
)
self.unet.rootcmd.cmd_status(f"ip link set {dname} name {hname}")
- rc, o, _ = self.unet.rootcmd.cmd_status("ip -o link show")
- m = re.search(rf"\d+:\s+{re.escape(hname)}:.*", o)
- if m:
- self.unet.rootcmd.cmd_nostatus(f"ip link set {hname} down ")
- self.unet.rootcmd.cmd_raises(f"ip link set {hname} netns {self.pid}")
+ # Make sure the interface is there.
+ self.unet.rootcmd.cmd_raises(f"ip -o link show {hname}")
+ self.unet.rootcmd.cmd_nostatus(f"ip link set {hname} down ")
+ self.unet.rootcmd.cmd_raises(f"ip link set {hname} netns {self.pid}")
+
# Wait for interface to show up in namespace
for retry in range(0, 10):
rc, o, _ = self.cmd_status(f"ip -o link show {hname}")
if not rc:
- if re.search(rf"\d+: {re.escape(hname)}:.*", o):
- break
+ break
if retry > 0:
await asyncio.sleep(1)
self.cmd_raises(f"ip link set {hname} name {lname}")
@@ -998,12 +1059,11 @@ ff02::2\tip6-allrouters
lname = self.host_intfs[hname]
self.cmd_raises(f"ip link set {lname} down")
self.cmd_raises(f"ip link set {lname} name {hname}")
- self.cmd_status(f"ip link set netns 1 dev {hname}")
- # The above is failing sometimes and not sure why
- # logging.error(
- # "XXX after setns %s",
- # self.unet.rootcmd.cmd_nostatus(f"ip link show {hname}"),
- # )
+ # We need to NOT run this command in the new pid namespace so that pid 1 is the
+ # root init process and so the interface gets returned to the root namespace
+ self.unet.rootcmd.cmd_raises(
+ f"nsenter -t {self.pid} -n ip link set netns 1 dev {hname}"
+ )
del self.host_intfs[hname]
async def add_phy_intf(self, devaddr, lname):
@@ -1522,11 +1582,14 @@ class L3ContainerNode(L3NodeMixin, LinuxNamespace):
async def async_cleanup_cmd(self):
"""Run the configured cleanup commands for this node."""
+ if self.cleanup_called:
+ return
self.cleanup_called = True
if "cleanup-cmd" not in self.config:
return
+        # Unlike other node types, the container needs cmd_p to still be running
if not self.cmd_p:
self.logger.warning("async_cleanup_cmd: container no longer running")
return
@@ -1639,7 +1702,15 @@ class L3QemuVM(L3NodeMixin, LinuxNamespace):
rundir=os.path.join(self.rundir, self.name),
configdir=self.unet.config_dirname,
)
- self.ssh_keyfile = self.qemu_config.get("sshkey")
+ self.ssh_keyfile = self.config.get("ssh-identity-file")
+ if not self.ssh_keyfile:
+ self.ssh_keyfile = self.qemu_config.get("sshkey")
+
+ self.ssh_user = self.config.get("ssh-user")
+ if not self.ssh_user:
+ self.ssh_user = self.qemu_config.get("sshuser", "root")
+
+ self.disk_created = False
@property
def is_vm(self):
@@ -1680,10 +1751,9 @@ class L3QemuVM(L3NodeMixin, LinuxNamespace):
self.__base_cmd_pty = list(self.__base_cmd)
self.__base_cmd_pty.append("-t")
- user = self.qemu_config.get("sshuser", "root")
- self.__base_cmd.append(f"{user}@{mgmt_ip}")
+ self.__base_cmd.append(f"{self.ssh_user}@{mgmt_ip}")
self.__base_cmd.append("--")
- self.__base_cmd_pty.append(f"{user}@{mgmt_ip}")
+ self.__base_cmd_pty.append(f"{self.ssh_user}@{mgmt_ip}")
# self.__base_cmd_pty.append("--")
return True
@@ -1810,15 +1880,15 @@ class L3QemuVM(L3NodeMixin, LinuxNamespace):
if args:
self.extra_mounts += args
- async def run_cmd(self):
+ async def _run_cmd(self, cmd_node):
"""Run the configured commands for this node inside VM."""
self.logger.debug(
"[rundir %s exists %s]", self.rundir, os.path.exists(self.rundir)
)
- cmd = self.config.get("cmd", "").strip()
+ cmd = self.config.get(cmd_node, "").strip()
if not cmd:
- self.logger.debug("%s: no `cmd` to run", self)
+ self.logger.debug("%s: no `%s` to run", self, cmd_node)
return None
shell_cmd = self.config.get("shell", "/bin/bash")
@@ -1837,15 +1907,17 @@ class L3QemuVM(L3NodeMixin, LinuxNamespace):
cmd += "\n"
# Write a copy to the rundir
- cmdpath = os.path.join(self.rundir, "cmd.shebang")
+ cmdpath = os.path.join(self.rundir, f"{cmd_node}.shebang")
with open(cmdpath, mode="w+", encoding="utf-8") as cmdfile:
cmdfile.write(cmd)
commander.cmd_raises(f"chmod 755 {cmdpath}")
# Now write a copy inside the VM
- self.conrepl.cmd_status("cat > /tmp/cmd.shebang << EOF\n" + cmd + "\nEOF")
- self.conrepl.cmd_status("chmod 755 /tmp/cmd.shebang")
- cmds = "/tmp/cmd.shebang"
+ self.conrepl.cmd_status(
+ f"cat > /tmp/{cmd_node}.shebang << EOF\n" + cmd + "\nEOF"
+ )
+ self.conrepl.cmd_status(f"chmod 755 /tmp/{cmd_node}.shebang")
+ cmds = f"/tmp/{cmd_node}.shebang"
else:
cmd = cmd.replace("%CONFIGDIR%", str(self.unet.config_dirname))
cmd = cmd.replace("%RUNDIR%", str(self.rundir))
@@ -1883,20 +1955,30 @@ class L3QemuVM(L3NodeMixin, LinuxNamespace):
# When run_command supports async_ arg we can use the above...
self.cmd_p = now_proc(self.cmdrepl.run_command(cmds, timeout=120))
-
- # stdout and err both combined into logfile from the spawned repl
- stdout = os.path.join(self.rundir, "_cmdcon-log.txt")
- self.pytest_hook_run_cmd(stdout, None)
else:
# If we only have a console we can't run in parallel, so run to completion
self.cmd_p = now_proc(self.conrepl.run_command(cmds, timeout=120))
return self.cmd_p
+ async def run_cmd(self):
+ if self.disk_created:
+ await self._run_cmd("initial-cmd")
+ await self._run_cmd("cmd")
+
+ # stdout and err both combined into logfile from the spawned repl
+ if self.cmdrepl:
+ stdout = os.path.join(self.rundir, "_cmdcon-log.txt")
+ self.pytest_hook_run_cmd(stdout, None)
+
# InterfaceMixin override
# We need a name unique in the shared namespace.
def get_ns_ifname(self, ifname):
- return self.name + ifname
+ ifname = self.name + ifname
+ ifname = re.sub("gigabitethernet", "GE", ifname, flags=re.I)
+ if len(ifname) >= 16:
+ ifname = ifname[0:7] + ifname[-8:]
+ return ifname
async def add_host_intf(self, hname, lname, mtu=None):
# L3QemuVM needs it's own add_host_intf for macvtap, We need to create the tap
@@ -2044,24 +2126,50 @@ class L3QemuVM(L3NodeMixin, LinuxNamespace):
async def gather_coverage_data(self):
con = self.conrepl
+ gcda_root = "/sys/kernel/debug/gcov"
+ dest = "/tmp/gcov-data.tgz"
- gcda = "/sys/kernel/debug/gcov"
- tmpdir = con.cmd_raises("mktemp -d").strip()
- dest = "/gcov-data.tgz"
- con.cmd_raises(rf"find {gcda} -type d -exec mkdir -p {tmpdir}/{{}} \;")
- con.cmd_raises(
- rf"find {gcda} -name '*.gcda' -exec sh -c 'cat < $0 > {tmpdir}/$0' {{}} \;"
- )
- con.cmd_raises(
- rf"find {gcda} -name '*.gcno' -exec sh -c 'cp -d $0 {tmpdir}/$0' {{}} \;"
- )
- con.cmd_raises(rf"tar cf - -C {tmpdir} sys | gzip -c > {dest}")
- con.cmd_raises(rf"rm -rf {tmpdir}")
- self.logger.info("Saved coverage data in VM at %s", dest)
+ if gcda_root != "/sys/kernel/debug/gcov":
+ con.cmd_raises(
+ rf"cd {gcda_root} && find * -name '*.gc??' "
+ "| tar -cf - -T - | gzip -c > {dest}"
+ )
+ else:
+            # Some tars don't try to read 0-length files, so we need to copy them.
+ tmpdir = con.cmd_raises("mktemp -d").strip()
+ con.cmd_raises(
+ rf"cd {gcda_root} && find -type d -exec mkdir -p {tmpdir}/{{}} \;"
+ )
+ con.cmd_raises(
+ rf"cd {gcda_root} && "
+ rf"find -name '*.gcda' -exec sh -c 'cat < $0 > {tmpdir}/$0' {{}} \;"
+ )
+ con.cmd_raises(
+ rf"cd {gcda_root} && "
+ rf"find -name '*.gcno' -exec sh -c 'cp -d $0 {tmpdir}/$0' {{}} \;"
+ )
+ con.cmd_raises(
+ rf"cd {tmpdir} && "
+ rf"find * -name '*.gc??' | tar -cf - -T - | gzip -c > {dest}"
+ )
+ con.cmd_raises(rf"rm -rf {tmpdir}")
+
+ self.logger.debug("Saved coverage data in VM at %s", dest)
+ ldest = os.path.join(self.rundir, "gcov-data.tgz")
if self.use_ssh:
- ldest = os.path.join(self.rundir, "gcov-data.tgz")
self.cmd_raises(["/bin/cat", dest], stdout=open(ldest, "wb"))
- self.logger.info("Saved coverage data on host at %s", ldest)
+ self.logger.debug("Saved coverage data on host at %s", ldest)
+ else:
+ output = con.cmd_raises(rf"base64 {dest}")
+ with open(ldest, "wb") as f:
+ f.write(base64.b64decode(output))
+ self.logger.debug("Saved coverage data on host at %s", ldest)
+ self.logger.info("Extracting coverage for %s into %s", self.name, ldest)
+
+ # We need to place the gcda files where munet expects to find them
+ gcdadir = Path(os.environ["GCOV_PREFIX"]) / self.name
+ self.unet.cmd_raises_nsonly(f"mkdir -p {gcdadir}")
+ self.unet.cmd_raises_nsonly(f"tar -C {gcdadir} -xzf {ldest}")
async def _opencons(
self,
@@ -2119,6 +2227,7 @@ class L3QemuVM(L3NodeMixin, LinuxNamespace):
expects=expects,
sends=sends,
timeout=timeout,
+ init_newline=True,
trace=True,
)
)
@@ -2247,30 +2356,45 @@ class L3QemuVM(L3NodeMixin, LinuxNamespace):
if not nnics:
args += ["-nic", "none"]
- dtpl = qc.get("disk-template")
+ dtplpath = dtpl = qc.get("disk-template")
diskpath = disk = qc.get("disk")
- if dtpl and not disk:
- disk = qc["disk"] = f"{self.name}-{os.path.basename(dtpl)}"
- diskpath = os.path.join(self.rundir, disk)
+ if diskpath:
+ if diskpath[0] != "/":
+ diskpath = os.path.join(self.unet.config_dirname, diskpath)
+
+ if dtpl and (not disk or not os.path.exists(diskpath)):
+ if not disk:
+ disk = qc["disk"] = f"{self.name}-{os.path.basename(dtpl)}"
+ diskpath = os.path.join(self.rundir, disk)
if self.path_exists(diskpath):
logging.debug("Disk '%s' file exists, using.", diskpath)
else:
- dtplpath = os.path.abspath(
- os.path.join(
- os.path.dirname(self.unet.config["config_pathname"]), dtpl
- )
- )
+ if dtplpath[0] != "/":
+ dtplpath = os.path.join(self.unet.config_dirname, dtpl)
logging.info("Create disk '%s' from template '%s'", diskpath, dtplpath)
self.cmd_raises(
f"qemu-img create -f qcow2 -F qcow2 -b {dtplpath} {diskpath}"
)
+ self.disk_created = True
+ disk_driver = qc.get("disk-driver", "virtio")
if diskpath:
- args.extend(
- ["-drive", f"file={diskpath},if=none,id=sata-disk0,format=qcow2"]
- )
- args.extend(["-device", "ahci,id=ahci"])
- args.extend(["-device", "ide-hd,bus=ahci.0,drive=sata-disk0"])
+ if disk_driver == "virtio":
+ args.extend(["-drive", f"file={diskpath},if=virtio,format=qcow2"])
+ else:
+ args.extend(
+ ["-drive", f"file={diskpath},if=none,id=sata-disk0,format=qcow2"]
+ )
+ args.extend(["-device", "ahci,id=ahci"])
+ args.extend(["-device", "ide-hd,bus=ahci.0,drive=sata-disk0"])
+
+ cidiskpath = qc.get("cloud-init-disk")
+ if cidiskpath:
+ if cidiskpath[0] != "/":
+ cidiskpath = os.path.join(self.unet.config_dirname, cidiskpath)
+ args.extend(["-drive", f"file={cidiskpath},if=virtio,format=qcow2"])
+
+ # args.extend(["-display", "vnc=0.0.0.0:40"])
use_stdio = cc.get("stdio", True)
has_cmd = self.config.get("cmd")
@@ -2360,6 +2484,10 @@ class L3QemuVM(L3NodeMixin, LinuxNamespace):
if use_cmdcon:
confiles.append("_cmdcon")
+ password = cc.get("password", "")
+ if self.disk_created:
+ password = cc.get("initial-password", password)
+
#
# Connect to the console socket, retrying
#
@@ -2369,7 +2497,7 @@ class L3QemuVM(L3NodeMixin, LinuxNamespace):
prompt=prompt,
is_bourne=not bool(prompt),
user=cc.get("user", "root"),
- password=cc.get("password", ""),
+ password=password,
expects=cc.get("expects"),
sends=cc.get("sends"),
timeout=int(cc.get("timeout", 60)),
@@ -2425,6 +2553,8 @@ class L3QemuVM(L3NodeMixin, LinuxNamespace):
async def async_cleanup_cmd(self):
"""Run the configured cleanup commands for this node."""
+ if self.cleanup_called:
+ return
self.cleanup_called = True
if "cleanup-cmd" not in self.config:
@@ -2815,7 +2945,9 @@ ff02::2\tip6-allrouters
else:
node2.set_lan_addr(node1, c2)
- if "physical" not in c1 and not node1.is_vm:
+ if isinstance(node1, ExternalNetwork):
+ pass
+ elif "physical" not in c1 and not node1.is_vm:
node1.set_intf_constraints(if1, **c1)
if "physical" not in c2 and not node2.is_vm:
node2.set_intf_constraints(if2, **c2)
@@ -2828,14 +2960,8 @@ ff02::2\tip6-allrouters
cls = L3QemuVM
elif config and config.get("server"):
cls = SSHRemote
- kwargs["server"] = config["server"]
- kwargs["port"] = int(config.get("server-port", 22))
- if "ssh-identity-file" in config:
- kwargs["idfile"] = config.get("ssh-identity-file")
- if "ssh-user" in config:
- kwargs["user"] = config.get("ssh-user")
- if "ssh-password" in config:
- kwargs["password"] = config.get("ssh-password")
+ elif config and config.get("hostnet"):
+ cls = HostnetNode
else:
cls = L3NamespaceNode
return super().add_host(name, cls=cls, config=config, **kwargs)
@@ -2845,20 +2971,113 @@ ff02::2\tip6-allrouters
if config is None:
config = {}
- cls = L3Bridge if config.get("ip") else L2Bridge
+ if config.get("external"):
+ cls = ExternalNetwork
+ elif config.get("ip"):
+ cls = L3Bridge
+ else:
+ cls = L2Bridge
mtu = kwargs.get("mtu", config.get("mtu"))
return super().add_switch(name, cls=cls, config=config, mtu=mtu, **kwargs)
- async def run(self):
+ def coverage_setup(self):
+ bdir = self.cfgopt.getoption("--cov-build-dir")
+ if not bdir:
+ # Try and find the build dir using common prefix of gcno files
+ common = None
+ cwd = os.getcwd()
+ for f in glob.iglob(rf"{cwd}/**/*.gcno", recursive=True):
+ if not common:
+ common = os.path.dirname(f)
+ else:
+ common = os.path.commonprefix([common, f])
+ if not common:
+ break
+            bdir = common
+        assert (
+            bdir
+        ), "Can't locate build directory for coverage data, use --cov-build-dir"
+
+ bdir = Path(bdir).resolve()
+ rundir = Path(self.rundir).resolve()
+ gcdadir = rundir / "gcda"
+ os.environ["GCOV_BUILD_DIR"] = str(bdir)
+ os.environ["GCOV_PREFIX_STRIP"] = str(len(bdir.parts) - 1)
+ os.environ["GCOV_PREFIX"] = str(gcdadir)
+
+ # commander.cmd_raises(f"find {bdir} -name '*.gc??' -exec chmod o+rw {{}} +")
+ group_id = bdir.stat().st_gid
+ commander.cmd_raises(f"mkdir -p {gcdadir}")
+ commander.cmd_raises(f"chown -R root:{group_id} {gcdadir}")
+ commander.cmd_raises(f"chmod 2775 {gcdadir}")
+
+ async def coverage_finish(self):
+ rundir = Path(self.rundir).resolve()
+ bdir = Path(os.environ["GCOV_BUILD_DIR"])
+ gcdadir = Path(os.environ["GCOV_PREFIX"])
+
+ # Create .gcno symlinks if they don't already exist, for kernel they will
+ self.logger.info("Creating .gcno symlinks from '%s' to '%s'", gcdadir, bdir)
+ commander.cmd_raises(
+ f'cd "{gcdadir}"; bdir="{bdir}"'
+ + """
+for f in $(find . -name '*.gcda'); do
+ f=${f#./};
+ f=${f%.gcda}.gcno;
+ if [ ! -h "$f" ]; then
+ ln -fs $bdir/$f $f;
+ touch -h -r $bdir/$f $f;
+ echo $f;
+ fi;
+done"""
+ )
+
+ # Get the results into a summary file
+ data_file = rundir / "coverage.info"
+ self.logger.info("Gathering coverage data into: %s", data_file)
+ commander.cmd_raises(
+ f"lcov --directory {gcdadir} --capture --output-file {data_file}"
+ )
+
+ # Get coverage info filtered to a specific set of files
+ report_file = rundir / "coverage.info"
+ self.logger.debug("Generating coverage summary: %s", report_file)
+ output = commander.cmd_raises(f"lcov --summary {data_file}")
+ self.logger.info("\nCOVERAGE-SUMMARY-START\n%s\nCOVERAGE-SUMMARY-END", output)
+ # terminalreporter.write(
+ # f"\nCOVERAGE-SUMMARY-START\n{output}\nCOVERAGE-SUMMARY-END\n"
+ # )
+
+ async def load_images(self, images):
tasks = []
+ for image in images:
+ logging.debug("Checking for image %s", image)
+ rc, _, _ = self.rootcmd.cmd_status(
+ f"podman image inspect {image}", warn=False
+ )
+ if not rc:
+ continue
+ logging.info("Pulling missing image %s", image)
+ aw = self.rootcmd.async_cmd_raises(f"podman pull {image}")
+ tasks.append(asyncio.create_task(aw))
+ if not tasks:
+ return
+ _, pending = await asyncio.wait(tasks, timeout=600)
+ assert not pending, "Failed to pull container images"
+ async def run(self):
+ tasks = []
hosts = self.hosts.values()
+
+ images = {x.container_image for x in hosts if hasattr(x, "container_image")}
+ await self.load_images(images)
+
launch_nodes = [x for x in hosts if hasattr(x, "launch")]
launch_nodes = [x for x in launch_nodes if x.config.get("qemu")]
- run_nodes = [x for x in hosts if hasattr(x, "has_run_cmd") and x.has_run_cmd()]
- ready_nodes = [
- x for x in hosts if hasattr(x, "has_ready_cmd") and x.has_ready_cmd()
- ]
+ run_nodes = [x for x in hosts if x.has_run_cmd()]
+ ready_nodes = [x for x in hosts if x.has_ready_cmd()]
+
+ if self.cfgopt.getoption("--coverage"):
+ self.coverage_setup()
pcapopt = self.cfgopt.getoption("--pcap")
pcapopt = pcapopt if pcapopt else ""
@@ -2920,10 +3139,10 @@ ff02::2\tip6-allrouters
await asyncio.sleep(0.25)
logging.debug("%s is ready!", x)
+ tasks = [asyncio.create_task(wait_until_ready(x)) for x in ready_nodes]
+
logging.debug("Waiting for ready on nodes: %s", ready_nodes)
- _, pending = await asyncio.wait(
- [wait_until_ready(x) for x in ready_nodes], timeout=30
- )
+ _, pending = await asyncio.wait(tasks, timeout=30)
if pending:
logging.warning("Timeout waiting for ready: %s", pending)
for nr in pending:
@@ -2940,15 +3159,6 @@ ff02::2\tip6-allrouters
self.logger.debug("%s: deleting.", self)
- if self.cfgopt.getoption("--coverage"):
- nodes = (
- x for x in self.hosts.values() if hasattr(x, "gather_coverage_data")
- )
- try:
- await asyncio.gather(*(x.gather_coverage_data() for x in nodes))
- except Exception as error:
- logging.warning("Error gathering coverage data: %s", error)
-
pause = bool(self.cfgopt.getoption("--pause-at-end"))
pause = pause or bool(self.cfgopt.getoption("--pause"))
if pause:
@@ -2959,6 +3169,25 @@ ff02::2\tip6-allrouters
except Exception as error:
self.logger.error("\n...continuing after error: %s", error)
+ # Run cleanup-cmd's.
+ nodes = (x for x in self.hosts.values() if x.has_cleanup_cmd())
+ try:
+ await asyncio.gather(*(x.async_cleanup_cmd() for x in nodes))
+ except Exception as error:
+ logging.warning("Error running cleanup cmds: %s", error)
+
+ # Gather any coverage data
+ if self.cfgopt.getoption("--coverage"):
+ nodes = (
+ x for x in self.hosts.values() if hasattr(x, "gather_coverage_data")
+ )
+ try:
+ await asyncio.gather(*(x.gather_coverage_data() for x in nodes))
+ except Exception as error:
+ logging.warning("Error gathering coverage data: %s", error)
+
+ await self.coverage_finish()
+
# XXX should we cancel launch and run tasks?
try:
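Among the native.py changes, L3QemuVM.get_ns_ifname now shortens the per-namespace interface name so it fits Linux's IFNAMSIZ limit, abbreviating "gigabitethernet" to "GE" and keeping the first 7 plus last 8 characters when still too long. A standalone re-implementation of just that rule for illustration (not the class method itself):

    import re

    def shorten_ns_ifname(node_name: str, ifname: str) -> str:
        """Mirror the shortening rules used by L3QemuVM.get_ns_ifname."""
        name = node_name + ifname
        name = re.sub("gigabitethernet", "GE", name, flags=re.I)
        if len(name) >= 16:
            name = name[0:7] + name[-8:]  # 15 chars total, within IFNAMSIZ
        return name

    print(shorten_ns_ifname("r1", "GigabitEthernet0/0/0"))  # -> "r1GE0/0/0"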
diff --git a/tests/topotests/munet/parser.py b/tests/topotests/munet/parser.py
index 4fc0c75..a8b73a2 100644
--- a/tests/topotests/munet/parser.py
+++ b/tests/topotests/munet/parser.py
@@ -230,7 +230,7 @@ def load_kinds(args, search=None):
if args:
os.chdir(args.rundir)
- args_config = args.kinds_config if args else None
+ args_config = args.kinds_config if args and hasattr(args, "kinds_config") else None
try:
if search is None:
search = [cwd]
@@ -305,7 +305,7 @@ async def async_build_topology(
# create search directories from common root if given
cpath = Path(config["config_pathname"]).absolute()
- project_root = args.project_root if args else None
+ project_root = args.project_root if args and hasattr(args, "project_root") else None
if not search_root:
search_root = find_project_root(cpath, project_root)
if not search_root:
@@ -341,7 +341,11 @@ async def async_build_topology(
pytestconfig=pytestconfig,
isolated=isolated,
pid=top_level_pidns,
- unshare_inline=args.unshare_inline if args else unshare_inline,
+ unshare_inline=(
+ args.unshare_inline
+ if args and hasattr(args, "unshare_inline")
+ else unshare_inline
+ ),
logger=logger,
)
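
The parser.py changes above repeatedly guard optional argparse attributes with `args and hasattr(args, ...)`, because the same functions are reached both from munet's own CLI (where those attributes exist) and from pytest (where they may not). A hedged sketch of the equivalent guard written with getattr() defaults; the helper name and fallback values are invented for illustration:

    from argparse import Namespace

    def resolve_optional_args(args, default_unshare_inline=False):
        # getattr() with a default collapses "args and hasattr(args, X)" into
        # one expression while preserving the same fallbacks.
        kinds_config = getattr(args, "kinds_config", None) if args else None
        project_root = getattr(args, "project_root", None) if args else None
        unshare_inline = (
            getattr(args, "unshare_inline", default_unshare_inline)
            if args
            else default_unshare_inline
        )
        return kinds_config, project_root, unshare_inline

    print(resolve_optional_args(Namespace(project_root="/work/frr")))
    # -> (None, '/work/frr', False)
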
diff --git a/tests/topotests/munet/testing/fixtures.py b/tests/topotests/munet/testing/fixtures.py
index 3c6d946..3c6ddf9 100644
--- a/tests/topotests/munet/testing/fixtures.py
+++ b/tests/topotests/munet/testing/fixtures.py
@@ -25,7 +25,6 @@ from ..base import BaseMunet
from ..base import Bridge
from ..base import get_event_loop
from ..cleanup import cleanup_current
-from ..cleanup import cleanup_previous
from ..native import L3NodeMixin
from ..parser import async_build_topology
from ..parser import get_config
@@ -130,9 +129,12 @@ def session_autouse():
else:
is_worker = True
- if not is_worker:
- # This is unfriendly to multi-instance
- cleanup_previous()
+ # We don't want to kill every munet process, and we don't have the rundir
+ # here yet. This was more useful back when we used to leave processes
+ # around a lot more.
+ # if not is_worker:
+ # # This is unfriendly to multi-instance
+ # cleanup_previous()
# We never pop as we want to keep logging
_push_log_handler("session", "/tmp/unet-test/pytest-session.log")
@@ -150,8 +152,9 @@ def session_autouse():
@pytest.fixture(autouse=True, scope="module")
def module_autouse(request):
- logpath = get_test_logdir(request.node.name, True)
- logpath = os.path.join("/tmp/unet-test", logpath, "pytest-exec.log")
+ root_path = os.environ.get("MUNET_RUNDIR", "/tmp/unet-test")
+ logpath = get_test_logdir(request.node.nodeid, True)
+ logpath = os.path.join(root_path, logpath, "pytest-exec.log")
with log_handler("module", logpath):
sdir = os.path.dirname(os.path.realpath(request.fspath))
with chdir(sdir, "module autouse fixture"):
@@ -161,7 +164,7 @@ def module_autouse(request):
raise Exception("Base Munet was not cleaned up/deleted")
-@pytest.fixture(scope="module")
+@pytest.fixture(scope="session")
def event_loop():
"""Create an instance of the default event loop for the session."""
loop = get_event_loop()
@@ -174,7 +177,8 @@ def event_loop():
@pytest.fixture(scope="module")
def rundir_module():
- d = os.path.join("/tmp/unet-test", get_test_logdir(module=True))
+ root_path = os.environ.get("MUNET_RUNDIR", "/tmp/unet-test")
+ d = os.path.join(root_path, get_test_logdir(module=True))
logging.debug("conftest: test module rundir %s", d)
return d
@@ -213,18 +217,14 @@ async def _unet_impl(
param,
exc_info=True,
)
- pytest.skip(
- f"unet fixture: unet build failed: {error}", allow_module_level=True
- )
- raise
+ pytest.fail(f"unet fixture: unet build failed: {error}")
try:
tasks = await _unet.run()
except Exception as error:
logging.debug("unet fixture: unet run failed: %s", error, exc_info=True)
await _unet.async_delete()
- pytest.skip(f"unet fixture: unet run failed: {error}", allow_module_level=True)
- raise
+ pytest.fail(f"unet fixture: unet run failed: {error}")
logging.debug("unet fixture: containers running")
@@ -379,7 +379,8 @@ async def astepf(pytestconfig):
@pytest.fixture(scope="function")
def rundir():
- d = os.path.join("/tmp/unet-test", get_test_logdir(module=False))
+ root_path = os.environ.get("MUNET_RUNDIR", "/tmp/unet-test")
+ d = os.path.join(root_path, get_test_logdir(module=False))
logging.debug("conftest: test function rundir %s", d)
return d
@@ -387,9 +388,8 @@ def rundir():
# Configure logging
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_setup(item):
- d = os.path.join(
- "/tmp/unet-test", get_test_logdir(nodeid=item.nodeid, module=False)
- )
+ root_path = os.environ.get("MUNET_RUNDIR", "/tmp/unet-test")
+ d = os.path.join(root_path, get_test_logdir(nodeid=item.nodeid, module=False))
config = item.config
logging_plugin = config.pluginmanager.get_plugin("logging-plugin")
filename = Path(d, "pytest-exec.log")
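
The fixtures above now build every log path under the MUNET_RUNDIR environment variable, falling back to the old /tmp/unet-test default when it is unset. A small sketch of that lookup, using a hypothetical rundir_root() helper (the real fixtures inline os.environ.get() at each call site):

    import os

    def rundir_root(default="/tmp/unet-test"):
        # MUNET_RUNDIR lets multiple munet instances keep their artifacts apart.
        return os.environ.get("MUNET_RUNDIR", default)

    def exec_logpath(test_logdir):
        # test_logdir stands in for the value get_test_logdir() returns.
        return os.path.join(rundir_root(), test_logdir, "pytest-exec.log")

    print(exec_logpath("example_test_module"))
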
diff --git a/tests/topotests/munet/testing/hooks.py b/tests/topotests/munet/testing/hooks.py
index 985eef9..f6f2115 100644
--- a/tests/topotests/munet/testing/hooks.py
+++ b/tests/topotests/munet/testing/hooks.py
@@ -18,6 +18,7 @@ import traceback
import pytest
+from ..args import add_testing_args
from ..base import BaseMunet # pylint: disable=import-error
from ..cli import cli # pylint: disable=import-error
from .util import pause_test
@@ -29,85 +30,7 @@ from .util import pause_test
def pytest_addoption(parser):
- parser.addoption(
- "--cli-on-error",
- action="store_true",
- help="CLI on test failure",
- )
-
- parser.addoption(
- "--coverage",
- action="store_true",
- help="Enable coverage gathering if supported",
- )
-
- parser.addoption(
- "--gdb",
- default="",
- metavar="HOST[,HOST...]",
- help="Comma-separated list of nodes to launch gdb on, or 'all'",
- )
- parser.addoption(
- "--gdb-breakpoints",
- default="",
- metavar="BREAKPOINT[,BREAKPOINT...]",
- help="Comma-separated list of breakpoints",
- )
- parser.addoption(
- "--gdb-use-emacs",
- action="store_true",
- help="Use emacsclient to run gdb instead of a shell",
- )
-
- parser.addoption(
- "--pcap",
- default="",
- metavar="NET[,NET...]",
- help="Comma-separated list of networks to capture packets on, or 'all'",
- )
-
- parser.addoption(
- "--pause",
- action="store_true",
- help="Pause after each test",
- )
- parser.addoption(
- "--pause-at-end",
- action="store_true",
- help="Pause before taking munet down",
- )
- parser.addoption(
- "--pause-on-error",
- action="store_true",
- help="Pause after (disables default when --shell or -vtysh given)",
- )
- parser.addoption(
- "--no-pause-on-error",
- dest="pause_on_error",
- action="store_false",
- help="Do not pause after (disables default when --shell or -vtysh given)",
- )
-
- parser.addoption(
- "--shell",
- default="",
- metavar="NODE[,NODE...]",
- help="Comma-separated list of nodes to spawn shell on, or 'all'",
- )
-
- parser.addoption(
- "--stdout",
- default="",
- metavar="NODE[,NODE...]",
- help="Comma-separated list of nodes to open tail-f stdout window on, or 'all'",
- )
-
- parser.addoption(
- "--stderr",
- default="",
- metavar="NODE[,NODE...]",
- help="Comma-separated list of nodes to open tail-f stderr window on, or 'all'",
- )
+ add_testing_args(parser.addoption)
def pytest_configure(config):
@@ -146,6 +69,18 @@ def pytest_configure(config):
elif b and not is_xdist and not have_windows:
pytest.exit(f"{winopt} use requires byobu/TMUX/SCREEN/XTerm")
+ cli_pause = (
+ config.getoption("--cli-on-error")
+ or config.getoption("--pause")
+ or config.getoption("--pause-at-end")
+ or config.getoption("--pause-on-error")
+ )
+ if config.getoption("--capture") == "fd" and cli_pause:
+ pytest.exit(
+ "CLI is not compatible with `--capture=fd`; "
+ "please run again with `-s` or another `--capture` value"
+ )
+
def pytest_runtest_makereport(item, call):
"""Pause or invoke CLI as directed by config."""