summaryrefslogtreecommitdiffstats
path: root/python
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--python/Makefile.am12
-rw-r--r--python/pacemaker/__init__.py6
-rw-r--r--python/pacemaker/_cts/CTS.py137
-rw-r--r--python/pacemaker/_cts/__init__.py6
-rw-r--r--python/pacemaker/_cts/audits.py350
-rw-r--r--python/pacemaker/_cts/cib.py65
-rw-r--r--python/pacemaker/_cts/cibxml.py547
-rw-r--r--python/pacemaker/_cts/clustermanager.py212
-rw-r--r--python/pacemaker/_cts/cmcorosync.py17
-rw-r--r--python/pacemaker/_cts/corosync.py52
-rw-r--r--python/pacemaker/_cts/environment.py128
-rw-r--r--python/pacemaker/_cts/errors.py22
-rw-r--r--python/pacemaker/_cts/input.py8
-rw-r--r--python/pacemaker/_cts/logging.py45
-rw-r--r--python/pacemaker/_cts/network.py34
-rw-r--r--python/pacemaker/_cts/patterns.py188
-rw-r--r--python/pacemaker/_cts/process.py18
-rw-r--r--python/pacemaker/_cts/remote.py143
-rw-r--r--python/pacemaker/_cts/scenarios.py154
-rw-r--r--python/pacemaker/_cts/test.py295
-rw-r--r--python/pacemaker/_cts/tests/__init__.py14
-rw-r--r--python/pacemaker/_cts/tests/componentfail.py23
-rw-r--r--python/pacemaker/_cts/tests/ctstest.py112
-rw-r--r--python/pacemaker/_cts/tests/fliptest.py18
-rw-r--r--python/pacemaker/_cts/tests/maintenancemode.py38
-rw-r--r--python/pacemaker/_cts/tests/nearquorumpointtest.py30
-rw-r--r--python/pacemaker/_cts/tests/partialstart.py23
-rw-r--r--python/pacemaker/_cts/tests/reattach.py51
-rw-r--r--python/pacemaker/_cts/tests/remotebasic.py18
-rw-r--r--python/pacemaker/_cts/tests/remotedriver.py152
-rw-r--r--python/pacemaker/_cts/tests/remotemigrate.py21
-rw-r--r--python/pacemaker/_cts/tests/remoterscfailure.py26
-rw-r--r--python/pacemaker/_cts/tests/remotestonithd.py26
-rw-r--r--python/pacemaker/_cts/tests/resourcerecover.py35
-rw-r--r--python/pacemaker/_cts/tests/restartonebyone.py18
-rw-r--r--python/pacemaker/_cts/tests/restarttest.py18
-rw-r--r--python/pacemaker/_cts/tests/resynccib.py23
-rw-r--r--python/pacemaker/_cts/tests/simulstart.py18
-rw-r--r--python/pacemaker/_cts/tests/simulstartlite.py31
-rw-r--r--python/pacemaker/_cts/tests/simulstop.py18
-rw-r--r--python/pacemaker/_cts/tests/simulstoplite.py33
-rw-r--r--python/pacemaker/_cts/tests/splitbraintest.py36
-rw-r--r--python/pacemaker/_cts/tests/standbytest.py22
-rw-r--r--python/pacemaker/_cts/tests/startonebyone.py18
-rw-r--r--python/pacemaker/_cts/tests/starttest.py24
-rw-r--r--python/pacemaker/_cts/tests/stonithdtest.py24
-rw-r--r--python/pacemaker/_cts/tests/stoponebyone.py18
-rw-r--r--python/pacemaker/_cts/tests/stoptest.py26
-rw-r--r--python/pacemaker/_cts/timer.py44
-rw-r--r--python/pacemaker/_cts/watcher.py489
-rw-r--r--python/pacemaker/buildoptions.py.in49
-rw-r--r--python/pacemaker/exitstatus.py99
-rw-r--r--python/tests/test_cts_network.py1
-rw-r--r--python/tests/test_exitstatus.py1
54 files changed, 1910 insertions, 2126 deletions
diff --git a/python/Makefile.am b/python/Makefile.am
index 803fb0c..71e587e 100644
--- a/python/Makefile.am
+++ b/python/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2023 the Pacemaker project contributors
+# Copyright 2023-2024 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -24,3 +24,13 @@ check-local:
.PHONY: pylint
pylint:
pylint $(SUBDIRS)
+
+# Disabled warnings:
+# E501 - Line too long
+#
+# Disable unused imports on __init__.py files (we likely just have them
+# there for re-exporting).
+# Disable docstrings warnings on unit tests.
+.PHONY: pyflake
+pyflake:
+ flake8 --ignore=E501 --per-file-ignores="__init__.py:F401 tests/*:D100,D101,D102,D104" $(SUBDIRS)
diff --git a/python/pacemaker/__init__.py b/python/pacemaker/__init__.py
index e5d992e..e6b1b2a 100644
--- a/python/pacemaker/__init__.py
+++ b/python/pacemaker/__init__.py
@@ -1,8 +1,6 @@
-"""
-API reference documentation for the `pacemaker` package.
-"""
+"""API reference documentation for the `pacemaker` package."""
-__copyright__ = "Copyright 2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2023-2024 the Pacemaker project contributors"
__license__ = "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)"
from pacemaker.buildoptions import BuildOptions
diff --git a/python/pacemaker/_cts/CTS.py b/python/pacemaker/_cts/CTS.py
index 166ea10..bc46525 100644
--- a/python/pacemaker/_cts/CTS.py
+++ b/python/pacemaker/_cts/CTS.py
@@ -1,7 +1,7 @@
-""" Main classes for Pacemaker's Cluster Test Suite (CTS) """
+"""Main classes for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["CtsLab", "NodeStatus", "Process"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import sys
@@ -14,53 +14,52 @@ from pacemaker._cts.input import should_continue
from pacemaker._cts.logging import LogFactory
from pacemaker._cts.remote import RemoteFactory
+
class CtsLab:
- """ A class that defines the Lab Environment for the Cluster Test System.
- It defines those things which are expected to change from test
- environment to test environment for the same cluster manager.
-
- This is where you define the set of nodes that are in your test lab,
- what kind of reset mechanism you use, etc. All this data is stored
- as key/value pairs in an Environment instance constructed from arguments
- passed to this class.
-
- The CTS code ignores names it doesn't know about or need. Individual
- tests have access to this information, and it is perfectly acceptable
- to provide hints, tweaks, fine-tuning directions, or other information
- to the tests through this mechanism.
"""
+ A class that defines the Lab Environment for the Cluster Test System.
- def __init__(self, args=None):
- """ Create a new CtsLab instance. This class can be treated kind
- of like a dictionary due to the presence of typical dict functions
- like __contains__, __getitem__, and __setitem__. However, it is not a
- dictionary so do not rely on standard dictionary behavior.
+ It defines those things which are expected to change from test
+ environment to test environment for the same cluster manager.
- Arguments:
+ This is where you define the set of nodes that are in your test lab,
+ what kind of reset mechanism you use, etc. All this data is stored
+ as key/value pairs in an Environment instance constructed from arguments
+ passed to this class.
+
+ The CTS code ignores names it doesn't know about or need. Individual
+ tests have access to this information, and it is perfectly acceptable
+ to provide hints, tweaks, fine-tuning directions, or other information
+ to the tests through this mechanism.
+ """
- args -- A list of command line parameters, minus the program name.
+ def __init__(self, args=None):
"""
+ Create a new CtsLab instance.
+
+ This class can be treated kind of like a dictionary due to the presence
+ of typical dict functions like __contains__, __getitem__, and __setitem__.
+ However, it is not a dictionary so do not rely on standard dictionary
+ behavior.
+ Arguments:
+ args -- A list of command line parameters, minus the program name.
+ """
self._env = EnvFactory().getInstance(args)
self._logger = LogFactory()
def dump(self):
- """ Print the current environment """
-
+ """Print the current environment."""
self._env.dump()
def __contains__(self, key):
- """ Does the given environment key exist? """
-
+ """Return True if the given environment key exists."""
# pylint gets confused because of EnvFactory here.
# pylint: disable=unsupported-membership-test
return key in self._env
def __getitem__(self, key):
- """ Return the given environment key, or raise KeyError if it does
- not exist
- """
-
+ """Return the given environment key, or raise KeyError if it does not exist."""
# Throughout this file, pylint has trouble understanding that EnvFactory
# and RemoteFactory are singleton instances that can be treated as callable
# and subscriptable objects. Various warnings are disabled because of this.
@@ -69,21 +68,16 @@ class CtsLab:
return self._env[key]
def __setitem__(self, key, value):
- """ Set the given environment key to the given value, overriding any
- previous value
- """
-
+ """Set the given environment key to the given value, overriding any previous value."""
# pylint: disable=unsupported-assignment-operation
self._env[key] = value
def run(self, scenario, iterations):
- """ Run the given scenario the given number of times.
-
- Returns:
-
- ExitStatus.OK on success, or ExitStatus.ERROR on error
"""
+ Run the given scenario the given number of times.
+ Returns ExitStatus.OK on success, or ExitStatus.ERROR on error.
+ """
if not scenario:
self._logger.log("No scenario was defined")
return ExitStatus.ERROR
@@ -101,7 +95,7 @@ class CtsLab:
# pylint: disable=bare-except
try:
scenario.run(iterations)
- except:
+ except: # noqa: E722
self._logger.log("Exception by %s" % sys.exc_info()[0])
self._logger.traceback(traceback)
@@ -123,43 +117,43 @@ class CtsLab:
class NodeStatus:
- """ A class for querying the status of cluster nodes - are nodes up? Do
- they respond to SSH connections?
"""
+ A class for querying the status of cluster nodes.
- def __init__(self, env):
- """ Create a new NodeStatus instance
+ Are nodes up? Do they respond to SSH connections?
+ """
- Arguments:
+ def __init__(self, env):
+ """
+ Create a new NodeStatus instance.
- env -- An Environment instance
+ Arguments:
+ env -- An Environment instance
"""
self._env = env
def _node_booted(self, node):
- """ Return True if the given node is booted (responds to pings) """
-
+ """Return True if the given node is booted (responds to pings)."""
# pylint: disable=not-callable
(rc, _) = RemoteFactory().getInstance()("localhost", "ping -nq -c1 -w1 %s" % node, verbose=0)
return rc == 0
def _sshd_up(self, node):
- """ Return true if sshd responds on the given node """
-
+ """Return true if sshd responds on the given node."""
# pylint: disable=not-callable
(rc, _) = RemoteFactory().getInstance()(node, "true", verbose=0)
return rc == 0
def wait_for_node(self, node, timeout=300):
- """ Wait for a node to become available. Should the timeout be reached,
- the user will be given a choice whether to continue or not. If not,
- ValueError will be raised.
+ """
+ Wait for a node to become available.
- Returns:
+ Should the timeout be reached, the user will be given a choice whether
+ to continue or not. If not, ValueError will be raised.
- True when the node is available, or False if the timeout is reached.
+ Returns True when the node is available, or False if the timeout is
+ reached.
"""
-
initial_timeout = timeout
anytimeouts = False
@@ -186,8 +180,7 @@ class NodeStatus:
return False
def wait_for_all_nodes(self, nodes, timeout=300):
- """ Return True when all nodes come up, or False if the timeout is reached """
-
+ """Return True when all nodes come up, or False if the timeout is reached."""
for node in nodes:
if not self.wait_for_node(node, timeout):
return False
@@ -196,24 +189,23 @@ class NodeStatus:
class Process:
- """ A class for managing a Pacemaker daemon """
+ """A class for managing a Pacemaker daemon."""
# pylint: disable=invalid-name
def __init__(self, cm, name, dc_only=False, pats=None, dc_pats=None,
badnews_ignore=None):
- """ Create a new Process instance.
-
- Arguments:
-
- cm -- A ClusterManager instance
- name -- The command being run
- dc_only -- Should this daemon be killed only on the DC?
- pats -- Regexes we expect to find in log files
- dc_pats -- Additional DC-specific regexes we expect to find
- in log files
- badnews_ignore -- Regexes for lines in the log that can be ignored
"""
-
+ Create a new Process instance.
+
+ Arguments:
+ cm -- A ClusterManager instance
+ name -- The command being run
+ dc_only -- Should this daemon be killed only on the DC?
+ pats -- Regexes we expect to find in log files
+ dc_pats -- Additional DC-specific regexes we expect to find
+ in log files
+ badnews_ignore -- Regexes for lines in the log that can be ignored
+ """
self._cm = cm
self.badnews_ignore = badnews_ignore
self.dc_only = dc_only
@@ -231,8 +223,7 @@ class Process:
self.pats = []
def kill(self, node):
- """ Kill the instance of this process running on the given node """
-
+ """Kill the instance of this process running on the given node."""
(rc, _) = self._cm.rsh(node, "killall -9 %s" % self.name)
if rc != 0:
diff --git a/python/pacemaker/_cts/__init__.py b/python/pacemaker/_cts/__init__.py
index dfc05ad..5b8dfab 100644
--- a/python/pacemaker/_cts/__init__.py
+++ b/python/pacemaker/_cts/__init__.py
@@ -1,6 +1,4 @@
-"""
-Internal Python API for the `pacemaker` package.
-"""
+"""Internal Python API for the `pacemaker` package."""
-__copyright__ = "Copyright 2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2023-2024 the Pacemaker project contributors"
__license__ = "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)"
diff --git a/python/pacemaker/_cts/audits.py b/python/pacemaker/_cts/audits.py
index dc66f96..74f8b18 100644
--- a/python/pacemaker/_cts/audits.py
+++ b/python/pacemaker/_cts/audits.py
@@ -1,7 +1,7 @@
-""" Auditing classes for Pacemaker's Cluster Test Suite (CTS) """
+"""Auditing classes for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["AuditConstraint", "AuditResource", "ClusterAudit", "audit_list"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import re
@@ -14,65 +14,67 @@ from pacemaker._cts.watcher import LogKind, LogWatcher
class ClusterAudit:
- """ The base class for various kinds of auditors. Specific audit implementations
- should be built on top of this one. Audits can do all kinds of checks on the
- system. The basic interface for callers is the `__call__` method, which
- returns True if the audit passes and False if it fails.
"""
+ The base class for various kinds of auditors.
- def __init__(self, cm):
- """ Create a new ClusterAudit instance
-
- Arguments:
+ Specific audit implementations should be built on top of this one. Audits
+ can do all kinds of checks on the system. The basic interface for callers
+ is the `__call__` method, which returns True if the audit passes and False
+ if it fails.
+ """
- cm -- A ClusterManager instance
+ def __init__(self, cm):
"""
+ Create a new ClusterAudit instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
# pylint: disable=invalid-name
self._cm = cm
self.name = None
def __call__(self):
+ """Perform the audit action."""
raise NotImplementedError
def is_applicable(self):
- """ Return True if this audit is applicable in the current test configuration.
- This method must be implemented by all subclasses.
"""
+ Return True if this audit is applicable in the current test configuration.
+ This method must be implemented by all subclasses.
+ """
raise NotImplementedError
def log(self, args):
- """ Log a message """
-
+ """Log a message."""
self._cm.log("audit: %s" % args)
def debug(self, args):
- """ Log a debug message """
-
+ """Log a debug message."""
self._cm.debug("audit: %s" % args)
class LogAudit(ClusterAudit):
- """ Audit each cluster node to verify that some logging system is usable.
- This is done by logging a unique test message and then verifying that
- we can read back that test message using logging tools.
"""
+ Audit each cluster node to verify that some logging system is usable.
- def __init__(self, cm):
- """ Create a new LogAudit instance
-
- Arguments:
+ This is done by logging a unique test message and then verifying that we
+ can read back that test message using logging tools.
+ """
- cm -- A ClusterManager instance
+ def __init__(self, cm):
"""
+ Create a new LogAudit instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
ClusterAudit.__init__(self, cm)
self.name = "LogAudit"
def _restart_cluster_logging(self, nodes=None):
- """ Restart logging on the given nodes, or all if none are given """
-
+ """Restart logging on the given nodes, or all if none are given."""
if not nodes:
nodes = self._cm.env["nodes"]
@@ -93,8 +95,7 @@ class LogAudit(ClusterAudit):
self._cm.log("ERROR: Cannot restart '%s' on %s" % (self._cm.env["syslogd"], node))
def _create_watcher(self, patterns, kind):
- """ Create a new LogWatcher instance for the given patterns """
-
+ """Create a new LogWatcher instance for the given patterns."""
watch = LogWatcher(self._cm.env["LogFileName"], patterns,
self._cm.env["nodes"], kind, "LogAudit", 5,
silent=True)
@@ -102,8 +103,7 @@ class LogAudit(ClusterAudit):
return watch
def _test_logging(self):
- """ Perform the log audit """
-
+ """Perform the log audit."""
patterns = []
prefix = "Test message from"
suffix = str(uuid.uuid4())
@@ -120,12 +120,13 @@ class LogAudit(ClusterAudit):
patterns.append("%s.*%s %s %s" % (simple, prefix, node, suffix))
- watch_pref = self._cm.env["LogWatcher"]
- if watch_pref == LogKind.ANY:
- kinds = [LogKind.FILE]
+ watch_pref = self._cm.env["log_kind"]
+ if watch_pref is None:
+ kinds = [LogKind.LOCAL_FILE]
if self._cm.env["have_systemd"]:
- kinds += [LogKind.JOURNAL]
- kinds += [LogKind.REMOTE_FILE]
+ kinds.append(LogKind.JOURNAL)
+ kinds.append(LogKind.REMOTE_FILE)
+
for k in kinds:
watch[k] = self._create_watcher(patterns, k)
self._cm.log("Logging test message with identifier %s" % suffix)
@@ -141,21 +142,22 @@ class LogAudit(ClusterAudit):
for k in list(watch.keys()):
w = watch[k]
- if watch_pref == LogKind.ANY:
+ if watch_pref is None:
self._cm.log("Checking for test message in %s logs" % k)
w.look_for_all(silent=True)
if w.unmatched:
for regex in w.unmatched:
self._cm.log("Test message [%s] not found in %s logs" % (regex, w.kind))
else:
- if watch_pref == LogKind.ANY:
+ if watch_pref is None:
self._cm.log("Found test message in %s logs" % k)
- self._cm.env["LogWatcher"] = k
+ self._cm.env["log_kind"] = k
return 1
return False
def __call__(self):
+ """Perform the audit action."""
max_attempts = 3
attempt = 0
@@ -163,7 +165,7 @@ class LogAudit(ClusterAudit):
while attempt <= max_attempts and not self._test_logging():
attempt += 1
self._restart_cluster_logging()
- time.sleep(60*attempt)
+ time.sleep(60 * attempt)
if attempt > max_attempts:
self._cm.log("ERROR: Cluster logging unrecoverable.")
@@ -172,8 +174,7 @@ class LogAudit(ClusterAudit):
return True
def is_applicable(self):
- """ Return True if this audit is applicable in the current test configuration. """
-
+ """Return True if this audit is applicable in the current test configuration."""
if self._cm.env["LogAuditDisabled"]:
return False
@@ -181,25 +182,28 @@ class LogAudit(ClusterAudit):
class DiskAudit(ClusterAudit):
- """ Audit disk usage on cluster nodes to verify that there is enough free
- space left on whichever mounted file system holds the logs.
-
- Warn on: less than 100 MB or 10% of free space
- Error on: less than 10 MB or 5% of free space
"""
+ Audit disk usage on cluster nodes.
- def __init__(self, cm):
- """ Create a new DiskAudit instance
+ Verify that there is enough free space left on whichever mounted file
+ system holds the logs.
- Arguments:
+ Warn on: less than 100 MB or 10% of free space
+ Error on: less than 10 MB or 5% of free space
+ """
- cm -- A ClusterManager instance
+ def __init__(self, cm):
"""
+ Create a new DiskAudit instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
ClusterAudit.__init__(self, cm)
self.name = "DiskspaceAudit"
def __call__(self):
+ """Perform the audit action."""
result = True
# @TODO Use directory of PCMK_logfile if set on host
@@ -236,31 +240,32 @@ class DiskAudit(ClusterAudit):
return result
def is_applicable(self):
- """ Return True if this audit is applicable in the current test configuration. """
-
+ """Return True if this audit is applicable in the current test configuration."""
return True
class FileAudit(ClusterAudit):
- """ Audit the filesystem looking for various failure conditions:
+ """
+ Audit the filesystem looking for various failure conditions.
- * The presence of core dumps from corosync or Pacemaker daemons
- * Stale IPC files
+ Check for:
+ * The presence of core dumps from corosync or Pacemaker daemons
+ * Stale IPC files
"""
def __init__(self, cm):
- """ Create a new FileAudit instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new FileAudit instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
ClusterAudit.__init__(self, cm)
self.known = []
self.name = "FileAudit"
def __call__(self):
+ """Perform the audit action."""
result = True
self._cm.ns.wait_for_all_nodes(self._cm.env["nodes"])
@@ -307,24 +312,22 @@ class FileAudit(ClusterAudit):
return result
def is_applicable(self):
- """ Return True if this audit is applicable in the current test configuration. """
-
+ """Return True if this audit is applicable in the current test configuration."""
return True
class AuditResource:
- """ A base class for storing information about a cluster resource """
+ """A base class for storing information about a cluster resource."""
def __init__(self, cm, line):
- """ Create a new AuditResource instance
-
- Arguments:
-
- cm -- A ClusterManager instance
- line -- One line of output from `crm_resource` describing a single
- resource
"""
+ Create a new AuditResource instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ line -- One line of output from `crm_resource` describing a single
+ resource
+ """
# pylint: disable=invalid-name
fields = line.split()
self._cm = cm
@@ -346,36 +349,32 @@ class AuditResource:
@property
def unique(self):
- """ Is this resource unique? """
-
+ """Return True if this resource is unique."""
return self.flags & 0x20
@property
def orphan(self):
- """ Is this resource an orphan? """
-
+ """Return True if this resource is an orphan."""
return self.flags & 0x01
@property
def managed(self):
- """ Is this resource managed by the cluster? """
-
+ """Return True if this resource is managed by the cluster."""
return self.flags & 0x02
class AuditConstraint:
- """ A base class for storing information about a cluster constraint """
+ """A base class for storing information about a cluster constraint."""
def __init__(self, cm, line):
- """ Create a new AuditConstraint instance
-
- Arguments:
-
- cm -- A ClusterManager instance
- line -- One line of output from `crm_resource` describing a single
- constraint
"""
+ Create a new AuditConstraint instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ line -- One line of output from `crm_resource` describing a single
+ constraint
+ """
# pylint: disable=invalid-name
fields = line.split()
self._cm = cm
@@ -396,19 +395,22 @@ class AuditConstraint:
class PrimitiveAudit(ClusterAudit):
- """ Audit primitive resources to verify a variety of conditions, including that
- they are active and managed only when expected; they are active on the
- expected clusted node; and that they are not orphaned.
"""
+ Audit primitive resources to verify a variety of conditions.
- def __init__(self, cm):
- """ Create a new PrimitiveAudit instance
-
- Arguments:
+ Check that:
+ * Resources are active and managed only when expected
+ * Resources are active on the expected cluster node
+ * Resources are not orphaned
+ """
- cm -- A ClusterManager instance
+ def __init__(self, cm):
"""
+ Create a new PrimitiveAudit instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
ClusterAudit.__init__(self, cm)
self.name = "PrimitiveAudit"
@@ -419,8 +421,7 @@ class PrimitiveAudit(ClusterAudit):
self._target = None
def _audit_resource(self, resource, quorum):
- """ Perform the audit of a single resource """
-
+ """Perform the audit of a single resource."""
rc = True
active = self._cm.resource_location(resource.id)
@@ -468,10 +469,11 @@ class PrimitiveAudit(ClusterAudit):
return rc
def _setup(self):
- """ Verify cluster nodes are active, and collect resource and colocation
- information used for performing the audit.
"""
+ Verify cluster nodes are active.
+ Collect resource and colocation information used for performing the audit.
+ """
for node in self._cm.env["nodes"]:
if self._cm.expected_status[node] == "up":
self._active_nodes.append(node)
@@ -501,6 +503,7 @@ class PrimitiveAudit(ClusterAudit):
return True
def __call__(self):
+ """Perform the audit action."""
result = True
if not self._setup():
@@ -514,30 +517,31 @@ class PrimitiveAudit(ClusterAudit):
return result
def is_applicable(self):
- """ Return True if this audit is applicable in the current test configuration. """
-
+ """Return True if this audit is applicable in the current test configuration."""
# @TODO Due to long-ago refactoring, this name test would never match,
# so this audit (and those derived from it) would never run.
# Uncommenting the next lines fixes the name test, but that then
# exposes pre-existing bugs that need to be fixed.
- #if self._cm["Name"] == "crm-corosync":
- # return True
+ # if self._cm["Name"] == "crm-corosync":
+ # return True
return False
class GroupAudit(PrimitiveAudit):
- """ Audit group resources to verify that each of its child primitive
- resources is active on the expected cluster node.
"""
+ Audit group resources.
- def __init__(self, cm):
- """ Create a new GroupAudit instance
-
- Arguments:
+ Check that:
+ * Each of its child primitive resources is active on the expected cluster node
+ """
- cm -- A ClusterManager instance
+ def __init__(self, cm):
"""
+ Create a new GroupAudit instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
PrimitiveAudit.__init__(self, cm)
self.name = "GroupAudit"
@@ -587,18 +591,19 @@ class GroupAudit(PrimitiveAudit):
class CloneAudit(PrimitiveAudit):
- """ Audit clone resources. NOTE: Currently, this class does not perform
- any actual audit functions.
"""
+ Audit clone resources.
- def __init__(self, cm):
- """ Create a new CloneAudit instance
-
- Arguments:
+ NOTE: Currently, this class does not perform any actual audit functions.
+ """
- cm -- A ClusterManager instance
+ def __init__(self, cm):
"""
+ Create a new CloneAudit instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
PrimitiveAudit.__init__(self, cm)
self.name = "CloneAudit"
@@ -624,24 +629,26 @@ class CloneAudit(PrimitiveAudit):
class ColocationAudit(PrimitiveAudit):
- """ Audit cluster resources to verify that those that should be colocated
- with each other actually are.
"""
+ Audit cluster resources.
- def __init__(self, cm):
- """ Create a new ColocationAudit instance
+ Check that:
- Arguments:
+ * Resources are colocated with the expected resource
+ """
- cm -- A ClusterManager instance
+ def __init__(self, cm):
"""
+ Create a new ColocationAudit instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
PrimitiveAudit.__init__(self, cm)
self.name = "ColocationAudit"
def _crm_location(self, resource):
- """ Return a list of cluster nodes where a given resource is running """
-
+ """Return a list of cluster nodes where a given resource is running."""
(rc, lines) = self._cm.rsh(self._target, "crm_resource -W -r %s -Q" % resource, verbose=1)
hosts = []
@@ -669,7 +676,7 @@ class ColocationAudit(PrimitiveAudit):
self.debug("Colocation audit (%s): %s not running" % (coloc.id, coloc.rsc))
else:
for node in source:
- if not node in target:
+ if node not in target:
result = False
self._cm.log("Colocation audit (%s): %s running on %s (not in %r)"
% (coloc.id, coloc.rsc, node, target))
@@ -681,18 +688,15 @@ class ColocationAudit(PrimitiveAudit):
class ControllerStateAudit(ClusterAudit):
- """ Audit cluster nodes to verify that those we expect to be active are
- active, and those that are expected to be inactive are inactive.
- """
+ """Verify active and inactive resources."""
def __init__(self, cm):
- """ Create a new ControllerStateAudit instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new ControllerStateAudit instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
ClusterAudit.__init__(self, cm)
self.name = "ControllerStateAudit"
@@ -734,28 +738,26 @@ class ControllerStateAudit(ClusterAudit):
return result
def is_applicable(self):
- """ Return True if this audit is applicable in the current test configuration. """
-
+ """Return True if this audit is applicable in the current test configuration."""
# @TODO Due to long-ago refactoring, this name test would never match,
# so this audit (and those derived from it) would never run.
# Uncommenting the next lines fixes the name test, but that then
# exposes pre-existing bugs that need to be fixed.
- #if self._cm["Name"] == "crm-corosync":
- # return True
+ # if self._cm["Name"] == "crm-corosync":
+ # return True
return False
class CIBAudit(ClusterAudit):
- """ Audit the CIB by verifying that it is identical across cluster nodes """
+ """Audit the CIB by verifying that it is identical across cluster nodes."""
def __init__(self, cm):
- """ Create a new CIBAudit instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new CIBAudit instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
ClusterAudit.__init__(self, cm)
self.name = "CibAudit"
@@ -776,8 +778,7 @@ class CIBAudit(ClusterAudit):
return result
def _audit_cib_contents(self, hostlist):
- """ Perform the CIB audit on the given hosts """
-
+ """Perform the CIB audit on the given hosts."""
passed = True
node0 = None
node0_xml = None
@@ -816,10 +817,11 @@ class CIBAudit(ClusterAudit):
return passed
def _store_remote_cib(self, node, target):
- """ Store a copy of the given node's CIB on the given target node. If
- no target is given, store the CIB on the given node.
"""
+ Store a copy of the given node's CIB on the given target node.
+ If no target is given, store the CIB on the given node.
+ """
filename = "/tmp/ctsaudit.%s.xml" % node
if not target:
@@ -841,36 +843,37 @@ class CIBAudit(ClusterAudit):
return filename
def is_applicable(self):
- """ Return True if this audit is applicable in the current test configuration. """
-
+ """Return True if this audit is applicable in the current test configuration."""
# @TODO Due to long-ago refactoring, this name test would never match,
# so this audit (and those derived from it) would never run.
# Uncommenting the next lines fixes the name test, but that then
# exposes pre-existing bugs that need to be fixed.
- #if self._cm["Name"] == "crm-corosync":
- # return True
+ # if self._cm["Name"] == "crm-corosync":
+ # return True
return False
class PartitionAudit(ClusterAudit):
- """ Audit each partition in a cluster to verify a variety of conditions:
-
- * The number of partitions and the nodes in each is as expected
- * Each node is active when it should be active and inactive when it
- should be inactive
- * The status and epoch of each node is as expected
- * A partition has quorum
- * A partition has a DC when expected
"""
+ Audit each partition in a cluster to verify a variety of conditions.
- def __init__(self, cm):
- """ Create a new PartitionAudit instance
+ Check that:
- Arguments:
+ * The number of partitions and the nodes in each is as expected
+ * Each node is active when it should be active and inactive when it
+ should be inactive
+ * The status and epoch of each node is as expected
+ * A partition has quorum
+ * A partition has a DC when expected
+ """
- cm -- A ClusterManager instance
+ def __init__(self, cm):
"""
+ Create a new PartitionAudit instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
ClusterAudit.__init__(self, cm)
self.name = "PartitionAudit"
@@ -901,8 +904,7 @@ class PartitionAudit(ClusterAudit):
return result
def _trim_string(self, avalue):
- """ Remove the last character from a multi-character string """
-
+ """Remove the last character from a multi-character string."""
if not avalue:
return None
@@ -912,10 +914,7 @@ class PartitionAudit(ClusterAudit):
return avalue
def _trim2int(self, avalue):
- """ Remove the last character from a multi-character string and convert
- the result to an int.
- """
-
+ """Remove the last character from a multi-character string and convert the result to an int."""
trimmed = self._trim_string(avalue)
if trimmed:
return int(trimmed)
@@ -923,8 +922,7 @@ class PartitionAudit(ClusterAudit):
return None
def _audit_partition(self, partition):
- """ Perform the audit of a single partition """
-
+ """Perform the audit of a single partition."""
passed = True
dc_found = []
dc_allowed_list = []
@@ -1000,23 +998,19 @@ class PartitionAudit(ClusterAudit):
return passed
def is_applicable(self):
- """ Return True if this audit is applicable in the current test configuration. """
-
+ """Return True if this audit is applicable in the current test configuration."""
# @TODO Due to long-ago refactoring, this name test would never match,
# so this audit (and those derived from it) would never run.
# Uncommenting the next lines fixes the name test, but that then
# exposes pre-existing bugs that need to be fixed.
- #if self._cm["Name"] == "crm-corosync":
- # return True
+ # if self._cm["Name"] == "crm-corosync":
+ # return True
return False
# pylint: disable=invalid-name
def audit_list(cm):
- """ Return a list of instances of applicable audits that can be performed
- for the given ClusterManager.
- """
-
+ """Return a list of instances of applicable audits that can be performed."""
result = []
for auditclass in [DiskAudit, FileAudit, LogAudit, ControllerStateAudit,
diff --git a/python/pacemaker/_cts/cib.py b/python/pacemaker/_cts/cib.py
index b8b5d5d..bb33077 100644
--- a/python/pacemaker/_cts/cib.py
+++ b/python/pacemaker/_cts/cib.py
@@ -1,7 +1,7 @@
-""" CIB generator for Pacemaker's Cluster Test Suite (CTS) """
+"""CIB generator for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["ConfigFactory"]
-__copyright__ = "Copyright 2008-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2008-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import warnings
@@ -13,21 +13,18 @@ from pacemaker._cts.network import next_ip
class CIB:
- """ A class for generating, representing, and installing a CIB file onto
- cluster nodes
- """
+ """A class for generating, representing, and installing a CIB file onto cluster nodes."""
def __init__(self, cm, version, factory, tmpfile=None):
- """ Create a new CIB instance
-
- Arguments:
-
- cm -- A ClusterManager instance
- version -- The schema syntax version
- factory -- A ConfigFactory instance
- tmpfile -- Where to store the CIB, or None to use a new tempfile
"""
+ Create a new CIB instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ version -- The schema syntax version
+ factory -- A ConfigFactory instance
+ tmpfile -- Where to store the CIB, or None to use a new tempfile
+ """
# pylint: disable=invalid-name
self._cib = None
self._cm = cm
@@ -50,8 +47,7 @@ class CIB:
self._factory.tmpfile = tmpfile
def _show(self):
- """ Query a cluster node for its generated CIB; log and return the result """
-
+ """Query a cluster node for its generated CIB; log and return the result."""
output = ""
(_, result) = self._factory.rsh(self._factory.target, "HOME=/root CIB_file=%s cibadmin -Ql" % self._factory.tmpfile, verbose=1)
@@ -62,10 +58,7 @@ class CIB:
return output
def new_ip(self, name=None):
- """ Generate an IP resource for the next available IP address, optionally
- specifying the resource's name.
- """
-
+ """Generate an IP resource for the next available IP address, optionally specifying the resource's name."""
if self._cm.env["IPagent"] == "IPaddr2":
ip = next_ip(self._cm.env["IPBase"])
if not name:
@@ -95,8 +88,7 @@ class CIB:
return r
def get_node_id(self, node_name):
- """ Check the cluster configuration for the node ID for the given node_name """
-
+ """Check the cluster configuration for the node ID for the given node_name."""
# We can't account for every possible configuration,
# so we only return a node ID if:
# * The node is specified in /etc/corosync/corosync.conf
@@ -125,8 +117,7 @@ class CIB:
return node_id
def install(self, target):
- """ Generate a CIB file and install it to the given cluster node """
-
+ """Generate a CIB file and install it to the given cluster node."""
old = self._factory.tmpfile
# Force a rebuild
@@ -139,8 +130,7 @@ class CIB:
self._factory.tmpfile = old
def contents(self, target):
- """ Generate a complete CIB file """
-
+ """Generate a complete CIB file."""
# fencing resource
if self._cib:
return self._cib
@@ -175,7 +165,7 @@ class CIB:
# For remote node tests, a cluster node is stopped and brought back up
# as a remote node with the name "remote-OLDNAME". To allow fencing
# devices to fence these nodes, create a list of all possible node names.
- all_node_names = [prefix+n for n in self._cm.env["nodes"] for prefix in ('', 'remote-')]
+ all_node_names = [prefix + n for n in self._cm.env["nodes"] for prefix in ('', 'remote-')]
# Add all parameters specified by user
entries = self._cm.env["stonith-params"].split(',')
@@ -313,8 +303,7 @@ class CIB:
return self._cib
def add_resources(self):
- """ Add various resources and their constraints to the CIB """
-
+ """Add various resources and their constraints to the CIB."""
# Per-node resources
for node in self._cm.env["nodes"]:
name = "rsc_%s" % node
@@ -392,16 +381,15 @@ class CIB:
class ConfigFactory:
- """ Singleton to generate a CIB file for the environment's schema version """
+ """Singleton to generate a CIB file for the environment's schema version."""
def __init__(self, cm):
- """ Create a new ConfigFactory instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new ConfigFactory instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
# pylint: disable=invalid-name
self._cm = cm
self.rsh = self._cm.rsh
@@ -410,16 +398,13 @@ class ConfigFactory:
self.tmpfile = None
def log(self, args):
- """ Log a message """
-
+ """Log a message."""
self._cm.log("cib: %s" % args)
def debug(self, args):
- """ Log a debug message """
-
+ """Log a debug message."""
self._cm.debug("cib: %s" % args)
def create_config(self, name="pacemaker-%s" % BuildOptions.CIB_SCHEMA_VERSION):
- """ Return a CIB object for the given schema version """
-
+ """Return a CIB object for the given schema version."""
return CIB(self._cm, name, self)
diff --git a/python/pacemaker/_cts/cibxml.py b/python/pacemaker/_cts/cibxml.py
index 52e3721..a5bc315 100644
--- a/python/pacemaker/_cts/cibxml.py
+++ b/python/pacemaker/_cts/cibxml.py
@@ -1,4 +1,4 @@
-""" CIB XML generator for Pacemaker's Cluster Test Suite (CTS) """
+"""CIB XML generator for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = [
"Alerts",
@@ -12,23 +12,22 @@ __all__ = [
"Resource",
"Rule",
]
-__copyright__ = "Copyright 2008-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2008-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
def key_val_string(**kwargs):
- """ Given keyword arguments as key=value pairs, construct a single string
- containing all those pairs separated by spaces. This is suitable for
- using in an XML element as a list of its attributes.
+ """
+ Construct a string from kwargs containing key=value pairs separated by spaces.
- Any pairs that have value=None will be skipped.
+ This is suitable for using in an XML element as a list of its attributes.
+ Any pairs that have value=None will be skipped.
- Note that a dictionary can be passed to this function instead of kwargs
- by using a construction like:
+ Note that a dictionary can be passed to this function instead of kwargs
+ by using a construction like:
key_val_string(**{"a": 1, "b": 2})
"""
-
retval = ""
for (k, v) in kwargs.items():
@@ -41,47 +40,46 @@ def key_val_string(**kwargs):
def element(element_name, **kwargs):
- """ Create an XML element string with the given element_name and attributes.
- This element does not support having any children, so it will be closed
- on the same line. The attributes are processed by key_val_string.
"""
+ Create an XML element string with the given element_name and attributes.
+ This element does not support having any children, so it will be closed
+ on the same line. The attributes are processed by key_val_string.
+ """
return "<%s %s/>" % (element_name, key_val_string(**kwargs))
def containing_element(element_name, inner, **kwargs):
- """ Like element, but surrounds some child text passed by the inner
- parameter.
- """
-
+ """Like element, but surrounds some child text passed by the inner parameter."""
attrs = key_val_string(**kwargs)
return "<%s %s>%s</%s>" % (element_name, attrs, inner, element_name)
class XmlBase:
- """ A base class for deriving all kinds of XML sections in the CIB. This
- class contains only the most basic operations common to all sections.
- It is up to subclasses to provide most behavior.
-
- Note that subclasses of this base class often have different sets of
- arguments to their __init__ methods. In general this is not a great
- practice, however it is so thoroughly used in these classes that trying
- to straighten it out is likely to cause more bugs than just leaving it
- alone for now.
"""
+ A base class for deriving all kinds of XML sections in the CIB.
- def __init__(self, factory, tag, _id, **kwargs):
- """ Create a new XmlBase instance
+ This class contains only the most basic operations common to all sections.
+ It is up to subclasses to provide most behavior.
- Arguments:
+ Note that subclasses of this base class often have different sets of
+ arguments to their __init__ methods. In general this is not a great
+ practice, however it is so thoroughly used in these classes that trying
+ to straighten it out is likely to cause more bugs than just leaving it
+ alone for now.
+ """
- factory -- A ConfigFactory instance
- tag -- The XML element's start and end tag
- _id -- A unique name for the element
- kwargs -- Any additional key/value pairs that should be added to
- this element as attributes
+ def __init__(self, factory, tag, _id, **kwargs):
+ """
+ Create a new XmlBase instance.
+
+ Arguments:
+ factory -- A ConfigFactory instance
+ tag -- The XML element's start and end tag
+ _id -- A unique name for the element
+ kwargs -- Any additional key/value pairs that should be added to
+ this element as attributes
"""
-
self._children = []
self._factory = factory
self._kwargs = kwargs
@@ -90,30 +88,27 @@ class XmlBase:
self.name = _id
def __repr__(self):
- """ Return a short string description of this XML section """
-
+ """Return a short string description of this XML section."""
return "%s-%s" % (self._tag, self.name)
def add_child(self, child):
- """ Add an XML section as a child of this one """
-
+ """Add an XML section as a child of this one."""
self._children.append(child)
def __setitem__(self, key, value):
- """ Add a key/value pair to this element, resulting in it becoming an
- XML attribute. If value is None, remove the key.
"""
+ Add a key/value pair to this element.
+ The resulting pair becomes an XML attribute. If value is None, remove
+ the key.
+ """
if value:
self._kwargs[key] = value
else:
self._kwargs.pop(key, None)
def show(self):
- """ Return a string representation of this XML section, including all
- of its children
- """
-
+ """Recursively return a string representation of this XML section."""
text = '''<%s''' % self._tag
if self.name:
text += ''' id="%s"''' % self.name
@@ -133,19 +128,17 @@ class XmlBase:
return text
def _run(self, operation, xml, section, options=""):
- """ Update the CIB on the cluster to include this XML section, including
- all of its children
-
- Arguments:
-
- operation -- Whether this update is a "create" or "modify" operation
- xml -- The XML to update the CIB with, typically the result
- of calling show
- section -- Which section of the CIB this update applies to (see
- the --scope argument to cibadmin for allowed values)
- options -- Extra options to pass to cibadmin
"""
-
+ Update the CIB on the cluster to include this XML section.
+
+ Arguments:
+ operation -- Whether this update is a "create" or "modify" operation
+ xml -- The XML to update the CIB with, typically the result
+ of calling show
+ section -- Which section of the CIB this update applies to (see
+ the --scope argument to cibadmin for allowed values)
+ options -- Extra options to pass to cibadmin
+ """
if self.name:
label = self.name
else:
@@ -162,20 +155,17 @@ class XmlBase:
class InstanceAttributes(XmlBase):
- """ A class that creates an <instance_attributes> XML section with
- key/value pairs
- """
+ """Create an <instance_attributes> XML section with key/value pairs."""
def __init__(self, factory, _id, attrs):
- """ Create a new InstanceAttributes instance
-
- Arguments:
-
- factory -- A ConfigFactory instance
- _id -- A unique name for the element
- attrs -- Key/value pairs to add as nvpair child elements
"""
+ Create a new InstanceAttributes instance.
+ Arguments:
+ factory -- A ConfigFactory instance
+ _id -- A unique name for the element
+ attrs -- Key/value pairs to add as nvpair child elements
+ """
XmlBase.__init__(self, factory, "instance_attributes", _id)
# Create an <nvpair> for each attribute
@@ -185,90 +175,79 @@ class InstanceAttributes(XmlBase):
class Node(XmlBase):
- """ A class that creates a <node> XML section for a single node, complete
- with node attributes
- """
+ """Create a <node> XML section for a single node, complete with node attributes."""
def __init__(self, factory, node_name, node_id, node_attrs):
- """ Create a new Node instance
-
- Arguments:
-
- factory -- A ConfigFactory instance
- node_name -- The value of the uname attribute for this node
- node_id -- A unique name for the element
- node_attrs -- Additional key/value pairs to set as instance
- attributes for this node
"""
-
+ Create a new Node instance.
+
+ Arguments:
+ factory -- A ConfigFactory instance
+ node_name -- The value of the uname attribute for this node
+ node_id -- A unique name for the element
+ node_attrs -- Additional key/value pairs to set as instance
+ attributes for this node
+ """
XmlBase.__init__(self, factory, "node", node_id, uname=node_name)
self.add_child(InstanceAttributes(factory, "%s-1" % node_name, node_attrs))
class Nodes(XmlBase):
- """ A class that creates a <nodes> XML section containing multiple Node
- instances as children
- """
+ """Create a <nodes> XML section containing multiple Node instances as children."""
def __init__(self, factory):
- """ Create a new Nodes instance
-
- Arguments:
-
- factory -- A ConfigFactory instance
"""
+ Create a new Nodes instance.
+ Arguments:
+ factory -- A ConfigFactory instance
+ """
XmlBase.__init__(self, factory, "nodes", None)
def add_node(self, node_name, node_id, node_attrs):
- """ Add a child node element
-
- Arguments:
-
- node_name -- The value of the uname attribute for this node
- node_id -- A unique name for the element
- node_attrs -- Additional key/value pairs to set as instance
- attributes for this node
"""
+ Add a child node element.
+ Arguments:
+ node_name -- The value of the uname attribute for this node
+ node_id -- A unique name for the element
+ node_attrs -- Additional key/value pairs to set as instance
+ attributes for this node
+ """
self.add_child(Node(self._factory, node_name, node_id, node_attrs))
def commit(self):
- """ Modify the CIB on the cluster to include this XML section """
-
+ """Modify the CIB on the cluster to include this XML section."""
self._run("modify", self.show(), "configuration", "--allow-create")
class FencingTopology(XmlBase):
- """ A class that creates a <fencing-topology> XML section describing how
- fencing is configured in the cluster
- """
+ """Create a <fencing-topology> XML section describing how fencing is configured in the cluster."""
def __init__(self, factory):
- """ Create a new FencingTopology instance
-
- Arguments:
-
- factory -- A ConfigFactory instance
"""
+ Create a new FencingTopology instance.
+ Arguments:
+ factory -- A ConfigFactory instance
+ """
XmlBase.__init__(self, factory, "fencing-topology", None)
def level(self, index, target, devices, target_attr=None, target_value=None):
- """ Generate a <fencing-level> XML element
-
- index -- The order in which to attempt fencing-levels
- (1 through 9). Levels are attempted in ascending
- order until one succeeds.
- target -- The name of a single node to which this level applies
- devices -- A list of devices that must all be tried for this
- level
- target_attr -- The name of a node attribute that is set for nodes
- to which this level applies
- target_value -- The value of a node attribute that is set for nodes
- to which this level applies
"""
-
+ Generate a <fencing-level> XML element.
+
+ index -- The order in which to attempt fencing-levels
+ (1 through 9). Levels are attempted in ascending
+ order until one succeeds.
+ target -- The name of a single node to which this level applies
+ devices -- A list of devices that must all be tried for this
+ level
+ target_attr -- The name of a node attribute that is set for nodes
+ to which this level applies
+ target_value -- The value of a node attribute that is set for nodes
+ to which this level applies
+ """
if target:
xml_id = "cts-%s.%d" % (target, index)
self.add_child(XmlBase(self._factory, "fencing-level", xml_id, target=target, index=index, devices=devices))
@@ -281,92 +260,77 @@ class FencingTopology(XmlBase):
self.add_child(child)
def commit(self):
- """ Create this XML section in the CIB """
-
+ """Create this XML section in the CIB."""
self._run("create", self.show(), "configuration", "--allow-create")
class Option(XmlBase):
- """ A class that creates a <cluster_property_set> XML section of key/value
- pairs for cluster-wide configuration settings
- """
+ """Create a <cluster_property_set> XML section of key/value pairs for cluster-wide configuration settings."""
def __init__(self, factory, _id="cib-bootstrap-options"):
- """ Create a new Option instance
-
- Arguments:
-
- factory -- A ConfigFactory instance
- _id -- A unique name for the element
"""
+ Create a new Option instance.
+ Arguments:
+ factory -- A ConfigFactory instance
+ _id -- A unique name for the element
+ """
XmlBase.__init__(self, factory, "cluster_property_set", _id)
def __setitem__(self, key, value):
- """ Add a child nvpair element containing the given key/value pair """
-
+ """Add a child nvpair element containing the given key/value pair."""
self.add_child(XmlBase(self._factory, "nvpair", "cts-%s" % key, name=key, value=value))
def commit(self):
- """ Modify the CIB on the cluster to include this XML section """
-
+ """Modify the CIB on the cluster to include this XML section."""
self._run("modify", self.show(), "crm_config", "--allow-create")
class OpDefaults(XmlBase):
- """ A class that creates a <cts-op_defaults-meta> XML section of key/value
- pairs for operation default settings
- """
+ """Create a <cts-op_defaults-meta> XML section of key/value pairs for operation default settings."""
def __init__(self, factory):
- """ Create a new OpDefaults instance
-
- Arguments:
-
- factory -- A ConfigFactory instance
"""
+ Create a new OpDefaults instance.
+ Arguments:
+ factory -- A ConfigFactory instance
+ """
XmlBase.__init__(self, factory, "op_defaults", None)
self.meta = XmlBase(self._factory, "meta_attributes", "cts-op_defaults-meta")
self.add_child(self.meta)
def __setitem__(self, key, value):
- """ Add a child nvpair meta_attribute element containing the given
- key/value pair
- """
-
+ """Add a child nvpair meta_attribute element containing the given key/value pair."""
self.meta.add_child(XmlBase(self._factory, "nvpair", "cts-op_defaults-%s" % key, name=key, value=value))
def commit(self):
- """ Modify the CIB on the cluster to include this XML section """
-
+ """Modify the CIB on the cluster to include this XML section."""
self._run("modify", self.show(), "configuration", "--allow-create")
class Alerts(XmlBase):
- """ A class that creates an <alerts> XML section """
+ """Create an <alerts> XML section."""
def __init__(self, factory):
- """ Create a new Alerts instance
-
- Arguments:
-
- factory -- A ConfigFactory instance
"""
+ Create a new Alerts instance.
+ Arguments:
+ factory -- A ConfigFactory instance
+ """
XmlBase.__init__(self, factory, "alerts", None)
self._alert_count = 0
def add_alert(self, path, recipient):
- """ Create a new alert as a child of this XML section
-
- Arguments:
-
- path -- The path to a script to be called when a cluster
- event occurs
- recipient -- An environment variable to be passed to the script
"""
+ Create a new alert as a child of this XML section.
+ Arguments:
+ path -- The path to a script to be called when a cluster
+ event occurs
+ recipient -- An environment variable to be passed to the script
+ """
self._alert_count += 1
alert = XmlBase(self._factory, "alert", "alert-%d" % self._alert_count,
path=path)
@@ -377,54 +341,47 @@ class Alerts(XmlBase):
self.add_child(alert)
def commit(self):
- """ Modify the CIB on the cluster to include this XML section """
-
+ """Modify the CIB on the cluster to include this XML section."""
self._run("modify", self.show(), "configuration", "--allow-create")
class Expression(XmlBase):
- """ A class that creates an <expression> XML element as part of some
- constraint rule
- """
+ """Create an <expression> XML element as part of some constraint rule."""
def __init__(self, factory, _id, attr, op, value=None):
- """ Create a new Expression instance
-
- Arguments:
-
- factory -- A ConfigFactory instance
- _id -- A unique name for the element
- attr -- The attribute to be tested
- op -- The comparison to perform ("lt", "eq", "defined", etc.)
- value -- Value for comparison (can be None for "defined" and
- "not_defined" operations)
"""
-
+ Create a new Expression instance.
+
+ Arguments:
+ factory -- A ConfigFactory instance
+ _id -- A unique name for the element
+ attr -- The attribute to be tested
+ op -- The comparison to perform ("lt", "eq", "defined", etc.)
+ value -- Value for comparison (can be None for "defined" and
+ "not_defined" operations)
+ """
XmlBase.__init__(self, factory, "expression", _id, attribute=attr, operation=op)
if value:
self["value"] = value
class Rule(XmlBase):
- """ A class that creates a <rule> XML section consisting of one or more
- expressions, as part of some constraint
- """
+ """Create a <rule> XML section consisting of one or more expressions, as part of some constraint."""
def __init__(self, factory, _id, score, op="and", expr=None):
- """ Create a new Rule instance
-
- Arguments:
-
- factory -- A ConfigFactory instance
- _id -- A unique name for the element
- score -- If this rule is used in a location constraint and
- evaluates to true, apply this score to the constraint
- op -- If this rule contains more than one expression, use this
- boolean op when evaluating
- expr -- An Expression instance that can be added to this Rule
- when it is created
"""
-
+ Create a new Rule instance.
+
+ Arguments:
+ factory -- A ConfigFactory instance
+ _id -- A unique name for the element
+ score -- If this rule is used in a location constraint and
+ evaluates to true, apply this score to the constraint
+ op -- If this rule contains more than one expression, use this
+ boolean op when evaluating
+ expr -- An Expression instance that can be added to this Rule
+ when it is created
+ """
XmlBase.__init__(self, factory, "rule", _id)
self["boolean-op"] = op
@@ -435,24 +392,25 @@ class Rule(XmlBase):
class Resource(XmlBase):
- """ A base class that creates all kinds of <resource> XML sections fully
- describing a single cluster resource. This defaults to primitive
- resources, but subclasses can create other types.
"""
+ A base class that creates all kinds of <resource> XML sections.
- def __init__(self, factory, _id, rtype, standard, provider=None):
- """ Create a new Resource instance
-
- Arguments:
+ These sections fully describe a single cluster resource. This defaults to
+ primitive resources, but subclasses can create other types.
+ """
- factory -- A ConfigFactory instance
- _id -- A unique name for the element
- rtype -- The name of the resource agent
- standard -- The standard the resource agent follows ("ocf",
- "systemd", etc.)
- provider -- The vendor providing the resource agent
+ def __init__(self, factory, _id, rtype, standard, provider=None):
+ """
+ Create a new Resource instance.
+
+ Arguments:
+ factory -- A ConfigFactory instance
+ _id -- A unique name for the element
+ rtype -- The name of the resource agent
+ standard -- The standard the resource agent follows ("ocf",
+ "systemd", etc.)
+ provider -- The vendor providing the resource agent
"""
-
XmlBase.__init__(self, factory, "native", _id)
self._provider = provider
@@ -473,52 +431,41 @@ class Resource(XmlBase):
self._provider = None
def __setitem__(self, key, value):
- """ Add a child nvpair element containing the given key/value pair as
- an instance attribute
- """
-
+ """Add a child nvpair element containing the given key/value pair as an instance attribute."""
self._add_param(key, value)
def add_op(self, _id, interval, **kwargs):
- """ Add an operation child XML element to this resource
-
- Arguments:
-
- _id -- A unique name for the element. Also, the action to
- perform ("monitor", "start", "stop", etc.)
- interval -- How frequently (in seconds) to perform the operation
- kwargs -- Any additional key/value pairs that should be added to
- this element as attributes
"""
-
+ Add an operation child XML element to this resource.
+
+ Arguments:
+ _id -- A unique name for the element. Also, the action to
+ perform ("monitor", "start", "stop", etc.)
+ interval -- How frequently (in seconds) to perform the operation
+ kwargs -- Any additional key/value pairs that should be added to
+ this element as attributes
+ """
self._op.append(XmlBase(self._factory, "op", "%s-%s" % (_id, interval),
name=_id, interval=interval, **kwargs))
def _add_param(self, name, value):
- """ Add a child nvpair element containing the given key/value pair as
- an instance attribute
- """
-
+ """Add a child nvpair element containing the given key/value pair as an instance attribute."""
self._param[name] = value
def add_meta(self, name, value):
- """ Add a child nvpair element containing the given key/value pair as
- a meta attribute
- """
-
+ """Add a child nvpair element containing the given key/value pair as a meta attribute."""
self._meta[name] = value
def prefer(self, node, score="INFINITY", rule=None):
- """ Add a location constraint where this resource prefers some node
-
- Arguments:
-
- node -- The name of the node to prefer
- score -- Apply this score to the location constraint
- rule -- A Rule instance to use in creating this constraint, instead
- of creating a new rule
"""
+ Add a location constraint where this resource prefers some node.
+ Arguments:
+ node -- The name of the node to prefer
+ score -- Apply this score to the location constraint
+ rule -- A Rule instance to use in creating this constraint, instead
+ of creating a new rule
+ """
if not rule:
rule = Rule(self._factory, "prefer-%s-r" % node, score,
expr=Expression(self._factory, "prefer-%s-e" % node, "#uname", "eq", node))
@@ -526,23 +473,22 @@ class Resource(XmlBase):
self._scores[node] = rule
def after(self, resource, kind="Mandatory", first="start", then="start", **kwargs):
- """ Create an ordering constraint between this resource and some other
-
- Arguments:
-
- resource -- The name of the dependent resource
- kind -- How to enforce the constraint ("mandatory", "optional",
- "serialize")
- first -- The action that this resource must complete before the
- then-action can be initiated for the dependent resource
- ("start", "stop", "promote", "demote")
- then -- The action that the dependent resource can execute only
- after the first-action has completed (same values as
- first)
- kwargs -- Any additional key/value pairs that should be added to
- this element as attributes
"""
-
+ Create an ordering constraint between this resource and some other.
+
+ Arguments:
+ resource -- The name of the dependent resource
+ kind -- How to enforce the constraint ("mandatory", "optional",
+ "serialize")
+ first -- The action that this resource must complete before the
+ then-action can be initiated for the dependent resource
+ ("start", "stop", "promote", "demote")
+ then -- The action that the dependent resource can execute only
+ after the first-action has completed (same values as
+ first)
+ kwargs -- Any additional key/value pairs that should be added to
+ this element as attributes
+ """
kargs = kwargs.copy()
kargs["kind"] = kind
@@ -556,21 +502,20 @@ class Resource(XmlBase):
self._needs[resource] = kargs
def colocate(self, resource, score="INFINITY", role=None, withrole=None, **kwargs):
- """ Create a colocation constraint between this resource and some other
-
- Arguments:
-
- resource -- The name of the resource that should be located relative
- this one
- score -- Apply this score to the colocation constraint
- role -- Apply this colocation constraint only to promotable clones
- in this role ("started", "promoted", "unpromoted")
- withrole -- Apply this colocation constraint only to with-rsc promotable
- clones in this role
- kwargs -- Any additional key/value pairs that should be added to
- this element as attributes
"""
-
+ Create a colocation constraint between this resource and some other.
+
+ Arguments:
+ resource -- The name of the resource that should be located relative
+ this one
+ score -- Apply this score to the colocation constraint
+ role -- Apply this colocation constraint only to promotable clones
+ in this role ("started", "promoted", "unpromoted")
+ withrole -- Apply this colocation constraint only to with-rsc promotable
+ clones in this role
+ kwargs -- Any additional key/value pairs that should be added to
+ this element as attributes
+ """
kargs = kwargs.copy()
kargs["score"] = score
@@ -583,10 +528,7 @@ class Resource(XmlBase):
self._coloc[resource] = kargs
def _constraints(self):
- """ Generate a <constraints> XML section containing all previously added
- ordering and colocation constraints
- """
-
+ """Generate a <constraints> XML section containing all previously added ordering and colocation constraints."""
text = "<constraints>"
for (k, v) in self._scores.items():
@@ -605,10 +547,7 @@ class Resource(XmlBase):
return text
def show(self):
- """ Return a string representation of this XML section, including all
- of its children
- """
-
+ """Recursively return a string representation of this XML section."""
text = '''<primitive id="%s" class="%s" type="%s"''' % (self.name, self._standard, self._rtype)
if self._provider:
@@ -649,38 +588,36 @@ class Resource(XmlBase):
return text
def commit(self):
- """ Modify the CIB on the cluster to include this XML section """
-
+ """Modify the CIB on the cluster to include this XML section."""
self._run("create", self.show(), "resources")
self._run("modify", self._constraints(), "constraints")
class Group(Resource):
- """ A specialized Resource subclass that creates a <group> XML section
- describing a single group resource consisting of multiple child
- primitive resources
"""
+ A specialized Resource subclass that creates a <group> XML section.
- def __init__(self, factory, _id):
- """ Create a new Group instance
-
- Arguments:
+ This section describes a single group resource consisting of multiple child
+ primitive resources.
+ """
- factory -- A ConfigFactory instance
- _id -- A unique name for the element
+ def __init__(self, factory, _id):
"""
+ Create a new Group instance.
+ Arguments:
+ factory -- A ConfigFactory instance
+ _id -- A unique name for the element
+ """
Resource.__init__(self, factory, _id, None, None)
self.tag = "group"
def __setitem__(self, key, value):
+ """Add a child nvpair element containing the given key/value pair as an instance attribute."""
self.add_meta(key, value)
def show(self):
- """ Return a string representation of this XML section, including all
- of its children
- """
-
+ """Recursively return a string representation of this XML section."""
text = '''<%s id="%s">''' % (self.tag, self.name)
if len(self._meta) > 0:
@@ -700,23 +637,24 @@ class Group(Resource):
class Clone(Group):
- """ A specialized Group subclass that creates a <clone> XML section
- describing a clone resource containing multiple instances of a
- single primitive resource
"""
+ A specialized Group subclass that creates a <clone> XML section.
- def __init__(self, factory, _id, child=None):
- """ Create a new Clone instance
-
- Arguments:
+ This section describes a clone resource containing multiple instances of a
+ single primitive resource.
+ """
- factory -- A ConfigFactory instance
- _id -- A unique name for the element
- child -- A Resource instance that can be added to this Clone
- when it is created. Alternately, use add_child later.
- Note that a Clone may only have one child.
+ def __init__(self, factory, _id, child=None):
+ """
+ Create a new Clone instance.
+
+ Arguments:
+ factory -- A ConfigFactory instance
+ _id -- A unique name for the element
+ child -- A Resource instance that can be added to this Clone
+ when it is created. Alternately, use add_child later.
+ Note that a Clone may only have one child.
"""
-
Group.__init__(self, factory, _id)
self.tag = "clone"
@@ -724,10 +662,11 @@ class Clone(Group):
self.add_child(child)
def add_child(self, child):
- """ Add the given resource as a child of this Clone. Note that a
- Clone resource only supports one child at a time.
"""
+ Add the given resource as a child of this Clone.
+ Note that a Clone resource only supports one child at a time.
+ """
if not self._children:
self._children.append(child)
else:
diff --git a/python/pacemaker/_cts/clustermanager.py b/python/pacemaker/_cts/clustermanager.py
index 652108f..0e9b1cf 100644
--- a/python/pacemaker/_cts/clustermanager.py
+++ b/python/pacemaker/_cts/clustermanager.py
@@ -1,7 +1,7 @@
-""" ClusterManager class for Pacemaker's Cluster Test Suite (CTS) """
+"""ClusterManager class for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["ClusterManager"]
-__copyright__ = """Copyright 2000-2023 the Pacemaker project contributors.
+__copyright__ = """Copyright 2000-2024 the Pacemaker project contributors.
Certain portions by Huang Zhen <zhenhltc@cn.ibm.com> are copyright 2004
International Business Machines. The version control history for this file
may have further details."""
@@ -34,31 +34,33 @@ from pacemaker._cts.watcher import LogWatcher
# ClusterManager has a lot of methods.
# pylint: disable=too-many-public-methods
+
class ClusterManager(UserDict):
- """ An abstract base class for managing the cluster. This class implements
- high-level operations on the cluster and/or its cluster managers.
- Actual cluster-specific management classes should be subclassed from this
- one.
+ """
+ An abstract base class for managing the cluster.
- Among other things, this class tracks the state every node is expected to
- be in.
+ This class implements high-level operations on the cluster and/or its cluster
+ managers. Actual cluster-specific management classes should be subclassed
+ from this one.
+
+ Among other things, this class tracks the state every node is expected to be in.
"""
def _final_conditions(self):
- """ Check all keys to make sure they have a non-None value """
-
+ """Check all keys to make sure they have a non-None value."""
for (key, val) in self._data.items():
if val is None:
raise ValueError("Improper derivation: self[%s] must be overridden by subclass." % key)
def __init__(self):
- """ Create a new ClusterManager instance. This class can be treated
- kind of like a dictionary due to the process of certain dict
- functions like __getitem__ and __setitem__. This is because it
- contains a lot of name/value pairs. However, it is not actually
- a dictionary so do not rely on standard dictionary behavior.
"""
+ Create a new ClusterManager instance.
+ This class can be treated kind of like a dictionary due to the presence
+ of certain dict functions like __getitem__ and __setitem__. This is
+ because it contains a lot of name/value pairs. However, it is not
+ actually a dictionary so do not rely on standard dictionary behavior.
+ """
# Eventually, ClusterManager should not be a UserDict subclass. Until
# that point...
# pylint: disable=super-init-not-called
@@ -85,6 +87,15 @@ class ClusterManager(UserDict):
self._cib_sync = {}
def __getitem__(self, key):
+ """
+ Return the given key, checking for it in several places.
+
+ If key is "Name", return the name of the cluster manager. If the key
+ was previously added to the dictionary via __setitem__, return that.
+ Otherwise, return the template pattern for the key.
+
+ This method should not be used and may be removed in the future.
+ """
if key == "Name":
return self.name
@@ -95,41 +106,38 @@ class ClusterManager(UserDict):
return self.templates.get_patterns(key)
def __setitem__(self, key, value):
+ """
+ Set the given key to the given value, overriding any previous value.
+
+ This method should not be used and may be removed in the future.
+ """
print("FIXME: Setting %s=%s on %r" % (key, value, self))
self._data[key] = value
def clear_instance_errors_to_ignore(self):
- """ Reset instance-specific errors to ignore on each iteration """
-
+ """Reset instance-specific errors to ignore on each iteration."""
self.__instance_errors_to_ignore = []
@property
def instance_errors_to_ignore(self):
- """ Return a list of known errors that should be ignored for a specific
- test instance
- """
-
+ """Return a list of known errors that should be ignored for a specific test instance."""
return self.__instance_errors_to_ignore
@property
def errors_to_ignore(self):
- """ Return a list of known error messages that should be ignored """
-
+ """Return a list of known error messages that should be ignored."""
return self.templates.get_patterns("BadNewsIgnore")
def log(self, args):
- """ Log a message """
-
+ """Log a message."""
self._logger.log(args)
def debug(self, args):
- """ Log a debug message """
-
+ """Log a debug message."""
self._logger.debug(args)
def upcount(self):
- """ How many nodes are up? """
-
+ """Return how many nodes are up."""
count = 0
for node in self.env["nodes"]:
@@ -139,16 +147,16 @@ class ClusterManager(UserDict):
return count
def install_support(self, command="install"):
- """ Install or uninstall the CTS support files - various init scripts and data,
- daemons, fencing agents, etc.
"""
+ Install or uninstall the CTS support files.
+ This includes various init scripts and data, daemons, fencing agents, etc.
+ """
for node in self.env["nodes"]:
self.rsh(node, "%s/cts-support %s" % (BuildOptions.DAEMON_DIR, command))
def prepare_fencing_watcher(self):
- """ Return a LogWatcher object that watches for fencing log messages """
-
+ """Return a LogWatcher object that watches for fencing log messages."""
# If we don't have quorum now but get it as a result of starting this node,
# then a bunch of nodes might get fenced
if self.has_quorum(None):
@@ -175,13 +183,12 @@ class ClusterManager(UserDict):
])
stonith = LogWatcher(self.env["LogFileName"], stonith_pats, self.env["nodes"],
- self.env["LogWatcher"], "StartupFencing", 0)
+ self.env["log_kind"], "StartupFencing", 0)
stonith.set_watch()
return stonith
def fencing_cleanup(self, node, stonith):
- """ Wait for a previously fenced node to return to the cluster """
-
+ """Wait for a previously fenced node to return to the cluster."""
peer_list = []
peer_state = {}
@@ -225,7 +232,7 @@ class ClusterManager(UserDict):
if not peer:
self._logger.log("ERROR: Unknown stonith match: %r" % shot)
- elif not peer in peer_list:
+ elif peer not in peer_list:
self.debug("Found peer: %s" % peer)
peer_list.append(peer)
@@ -263,14 +270,13 @@ class ClusterManager(UserDict):
return peer_list
def start_cm(self, node, verbose=False):
- """ Start up the cluster manager on a given node """
-
+ """Start up the cluster manager on a given node."""
if verbose:
self._logger.log("Starting %s on node %s" % (self.templates["Name"], node))
else:
self.debug("Starting %s on node %s" % (self.templates["Name"], node))
- if not node in self.expected_status:
+ if node not in self.expected_status:
self.expected_status[node] = "down"
if self.expected_status[node] != "down":
@@ -286,7 +292,8 @@ class ClusterManager(UserDict):
else:
patterns.append(self.templates["Pat:NonDC_started"] % node)
- watch = LogWatcher(self.env["LogFileName"], patterns, self.env["nodes"], self.env["LogWatcher"],
+ watch = LogWatcher(self.env["LogFileName"], patterns,
+ self.env["nodes"], self.env["log_kind"],
"StartaCM", self.env["StartTime"] + 10)
self.install_config(node)
@@ -325,8 +332,7 @@ class ClusterManager(UserDict):
return False
def start_cm_async(self, node, verbose=False):
- """ Start up the cluster manager on a given node without blocking """
-
+ """Start up the cluster manager on a given node without blocking."""
if verbose:
self._logger.log("Starting %s on node %s" % (self["Name"], node))
else:
@@ -337,8 +343,7 @@ class ClusterManager(UserDict):
self.expected_status[node] = "up"
def stop_cm(self, node, verbose=False, force=False):
- """ Stop the cluster manager on a given node """
-
+ """Stop the cluster manager on a given node."""
if verbose:
self._logger.log("Stopping %s on node %s" % (self["Name"], node))
else:
@@ -358,18 +363,14 @@ class ClusterManager(UserDict):
return False
def stop_cm_async(self, node):
- """ Stop the cluster manager on a given node without blocking """
-
+ """Stop the cluster manager on a given node without blocking."""
self.debug("Stopping %s on node %s" % (self["Name"], node))
self.rsh(node, self.templates["StopCmd"], synchronous=False)
self.expected_status[node] = "down"
def startall(self, nodelist=None, verbose=False, quick=False):
- """ Start the cluster manager on every node in the cluster, or on every
- node in nodelist if not None
- """
-
+ """Start the cluster manager on every node in the cluster, or on every node in nodelist."""
if not nodelist:
nodelist = self.env["nodes"]
@@ -395,7 +396,8 @@ class ClusterManager(UserDict):
# Start all the nodes - at about the same time...
watch = LogWatcher(self.env["LogFileName"], watchpats, self.env["nodes"],
- self.env["LogWatcher"], "fast-start", self.env["DeadTime"] + 10)
+ self.env["log_kind"], "fast-start",
+ self.env["DeadTime"] + 10)
watch.set_watch()
if not self.start_cm(nodelist[0], verbose=verbose):
@@ -416,10 +418,7 @@ class ClusterManager(UserDict):
return True
def stopall(self, nodelist=None, verbose=False, force=False):
- """ Stop the cluster manager on every node in the cluster, or on every
- node in nodelist if not None
- """
-
+ """Stop the cluster manager on every node in the cluster, or on every node in nodelist."""
ret = True
if not nodelist:
@@ -433,10 +432,7 @@ class ClusterManager(UserDict):
return ret
def statall(self, nodelist=None):
- """ Return the status of the cluster manager on every node in the cluster,
- or on every node in nodelist if not None
- """
-
+ """Return the status of the cluster manager on every node in the cluster, or on every node in nodelist."""
result = {}
if not nodelist:
@@ -451,10 +447,7 @@ class ClusterManager(UserDict):
return result
def isolate_node(self, target, nodes=None):
- """ Break communication between the target node and all other nodes in the
- cluster, or nodes if not None
- """
-
+ """Break communication between the target node and all other nodes in the cluster, or nodes."""
if not nodes:
nodes = self.env["nodes"]
@@ -472,10 +465,7 @@ class ClusterManager(UserDict):
return True
def unisolate_node(self, target, nodes=None):
- """ Re-establish communication between the target node and all other nodes
- in the cluster, or nodes if not None
- """
-
+ """Re-establish communication between the target node and all other nodes in the cluster, or nodes."""
if not nodes:
nodes = self.env["nodes"]
@@ -490,8 +480,7 @@ class ClusterManager(UserDict):
self.debug("Communication restored between %s and %s" % (target, node))
def oprofile_start(self, node=None):
- """ Start profiling on the given node, or all nodes in the cluster """
-
+ """Start profiling on the given node, or all nodes in the cluster."""
if not node:
for n in self.env["oprofile"]:
self.oprofile_start(n)
@@ -504,10 +493,7 @@ class ClusterManager(UserDict):
self.rsh(node, "opcontrol --reset")
def oprofile_save(self, test, node=None):
- """ Save profiling data and restart profiling on the given node, or all
- nodes in the cluster if None
- """
-
+ """Save profiling data and restart profiling on the given node, or all nodes in the cluster."""
if not node:
for n in self.env["oprofile"]:
self.oprofile_save(test, n)
@@ -520,10 +506,11 @@ class ClusterManager(UserDict):
self.oprofile_start(node)
def oprofile_stop(self, node=None):
- """ Start profiling on the given node, or all nodes in the cluster. This
- does not save profiling data, so call oprofile_save first if needed.
"""
+ Stop profiling on the given node, or all nodes in the cluster.
+ This does not save profiling data, so call oprofile_save first if needed.
+ """
if not node:
for n in self.env["oprofile"]:
self.oprofile_stop(n)
@@ -534,8 +521,7 @@ class ClusterManager(UserDict):
self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null")
def install_config(self, node):
- """ Remove and re-install the CIB on the first node in the cluster """
-
+ """Remove and re-install the CIB on the first node in the cluster."""
if not self.ns.wait_for_node(node):
self.log("Node %s is not up." % node)
return
@@ -566,10 +552,12 @@ class ClusterManager(UserDict):
self.rsh(node, "chown %s %s/cib.xml" % (BuildOptions.DAEMON_USER, BuildOptions.CIB_DIR))
def prepare(self):
- """ Finish initialization by clearing out the expected status and recording
- the current status of every node in the cluster
"""
+ Finish initialization.
+ Clear out the expected status and record the current status of every
+ node in the cluster.
+ """
self.partitions_expected = 1
for node in self.env["nodes"]:
self.expected_status[node] = ""
@@ -580,11 +568,12 @@ class ClusterManager(UserDict):
self.stat_cm(node)
def test_node_cm(self, node):
- """ Check the status of a given node. Returns 0 if the node is
- down, 1 if the node is up but unstable, and 2 if the node is
- up and stable
"""
+ Check the status of a given node.
+ Returns 0 if the node is down, 1 if the node is up but unstable, and 2
+ if the node is up and stable.
+ """
watchpats = [
"Current ping state: (S_IDLE|S_NOT_DC)",
self.templates["Pat:NonDC_started"] % node,
@@ -592,7 +581,7 @@ class ClusterManager(UserDict):
]
idle_watch = LogWatcher(self.env["LogFileName"], watchpats, [node],
- self.env["LogWatcher"], "ClusterIdle")
+ self.env["log_kind"], "ClusterIdle")
idle_watch.set_watch()
(_, out) = self.rsh(node, self.templates["StatusCmd"] % node, verbose=1)
@@ -637,14 +626,12 @@ class ClusterManager(UserDict):
return 2
def stat_cm(self, node):
- """ Report the status of the cluster manager on a given node """
-
+ """Report the status of the cluster manager on a given node."""
return self.test_node_cm(node) > 0
# Being up and being stable is not the same question...
def node_stable(self, node):
- """ Return whether or not the given node is stable """
-
+ """Return whether or not the given node is stable."""
if self.test_node_cm(node) == 2:
return True
@@ -652,8 +639,7 @@ class ClusterManager(UserDict):
return False
def partition_stable(self, nodes, timeout=None):
- """ Return whether or not all nodes in the given partition are stable """
-
+ """Return whether or not all nodes in the given partition are stable."""
watchpats = [
"Current ping state: S_IDLE",
self.templates["Pat:DC_IDLE"],
@@ -669,7 +655,7 @@ class ClusterManager(UserDict):
return True
idle_watch = LogWatcher(self.env["LogFileName"], watchpats, nodes.split(),
- self.env["LogWatcher"], "ClusterStable", timeout)
+ self.env["log_kind"], "ClusterStable", timeout)
idle_watch.set_watch()
for node in nodes.split():
@@ -691,8 +677,7 @@ class ClusterManager(UserDict):
return False
def cluster_stable(self, timeout=None, double_check=False):
- """ Return whether or not all nodes in the cluster are stable """
-
+ """Return whether or not all nodes in the cluster are stable."""
partitions = self.find_partitions()
for partition in partitions:
@@ -713,10 +698,11 @@ class ClusterManager(UserDict):
return True
def is_node_dc(self, node, status_line=None):
- """ Return whether or not the given node is the cluster DC by checking
- the given status_line, or by querying the cluster if None
"""
+ Return whether or not the given node is the cluster DC.
+ Check the given status_line, or query the cluster if None.
+ """
if not status_line:
(_, out) = self.rsh(node, self.templates["StatusCmd"] % node, verbose=1)
@@ -744,8 +730,7 @@ class ClusterManager(UserDict):
return False
def active_resources(self, node):
- """ Return a list of primitive resources active on the given node """
-
+ """Return a list of primitive resources active on the given node."""
(_, output) = self.rsh(node, "crm_resource -c", verbose=1)
resources = []
for line in output:
@@ -759,8 +744,7 @@ class ClusterManager(UserDict):
return resources
def resource_location(self, rid):
- """ Return a list of nodes on which the given resource is running """
-
+ """Return a list of nodes on which the given resource is running."""
resource_nodes = []
for node in self.env["nodes"]:
if self.expected_status[node] != "up":
@@ -780,10 +764,12 @@ class ClusterManager(UserDict):
return resource_nodes
def find_partitions(self):
- """ Return a list of all partitions in the cluster. Each element of the
- list is itself a list of all active nodes in that partition.
"""
+ Return a list of all partitions in the cluster.
+ Each element of the list is itself a list of all active nodes in that
+ partition.
+ """
ccm_partitions = []
for node in self.env["nodes"]:
@@ -822,8 +808,7 @@ class ClusterManager(UserDict):
return ccm_partitions
def has_quorum(self, node_list):
- """ Return whether or not the cluster has quorum """
-
+ """Return whether or not the cluster has quorum."""
# If we are auditing a partition, then one side will
# have quorum and the other not.
# So the caller needs to tell us which we are checking
@@ -850,15 +835,15 @@ class ClusterManager(UserDict):
@property
def components(self):
- """ A list of all patterns that should be ignored for the cluster's
- components. This must be provided by all subclasses.
"""
+ Return a list of all patterns that should be ignored for the cluster's components.
+ This must be provided by all subclasses.
+ """
raise NotImplementedError
def in_standby_mode(self, node):
- """ Return whether or not the node is in Standby """
-
+ """Return whether or not the node is in Standby."""
(_, out) = self.rsh(node, self.templates["StandbyQueryCmd"] % node, verbose=1)
if not out:
@@ -869,10 +854,11 @@ class ClusterManager(UserDict):
return out == "on"
def set_standby_mode(self, node, status):
- """ Set node to Standby if status is True, or Active if status is False.
- Return whether the node is now in the requested status.
"""
+ Set node to Standby if status is True, or Active if status is False.
+ Return whether the node is now in the requested status.
+ """
current_status = self.in_standby_mode(node)
if current_status == status:
@@ -887,8 +873,7 @@ class ClusterManager(UserDict):
return rc == 0
def add_dummy_rsc(self, node, rid):
- """ Add a dummy resource with the given ID to the given node """
-
+ """Add a dummy resource with the given ID to the given node."""
rsc_xml = """ '<resources>
<primitive class=\"ocf\" id=\"%s\" provider=\"pacemaker\" type=\"Dummy\">
<operations>
@@ -905,10 +890,7 @@ class ClusterManager(UserDict):
self.rsh(node, self.templates['CibAddXml'] % constraint_xml)
def remove_dummy_rsc(self, node, rid):
- """ Remove the previously added dummy resource given by rid on the
- given node
- """
-
+ """Remove the previously added dummy resource given by rid on the given node."""
constraint = "\"//rsc_location[@rsc='%s']\"" % rid
rsc = "\"//primitive[@id='%s']\"" % rid
diff --git a/python/pacemaker/_cts/cmcorosync.py b/python/pacemaker/_cts/cmcorosync.py
index cac059b..f64b811 100644
--- a/python/pacemaker/_cts/cmcorosync.py
+++ b/python/pacemaker/_cts/cmcorosync.py
@@ -1,7 +1,7 @@
-""" Corosync-specific class for Pacemaker's Cluster Test Suite (CTS) """
+"""Corosync-specific class for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["Corosync2"]
-__copyright__ = "Copyright 2007-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2007-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.CTS import Process
@@ -14,14 +14,12 @@ from pacemaker._cts.patterns import PatternSelector
# self._rsh in environment.py.
# pylint: disable=unsubscriptable-object
+
class Corosync2(ClusterManager):
- """ A subclass of ClusterManager specialized to handle corosync2 and later
- based clusters
- """
+ """A subclass of ClusterManager specialized to handle corosync2 and later based clusters."""
def __init__(self):
- """ Create a new Corosync2 instance """
-
+ """Create a new Corosync2 instance."""
ClusterManager.__init__(self)
self._fullcomplist = {}
@@ -29,10 +27,7 @@ class Corosync2(ClusterManager):
@property
def components(self):
- """ A list of all patterns that should be ignored for the cluster's
- components.
- """
-
+ """Return a list of patterns that should be ignored for the cluster's components."""
complist = []
if not self._fullcomplist:
diff --git a/python/pacemaker/_cts/corosync.py b/python/pacemaker/_cts/corosync.py
index aabaecd..af3b1de 100644
--- a/python/pacemaker/_cts/corosync.py
+++ b/python/pacemaker/_cts/corosync.py
@@ -1,7 +1,7 @@
-""" A module providing functions for manipulating corosync """
+"""A module providing functions for manipulating corosync."""
__all__ = ["Corosync", "localname"]
-__copyright__ = "Copyright 2009-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2009-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+)"
import os
@@ -40,14 +40,12 @@ logging {
def corosync_cfg_exists():
- """ Does the corosync config file exist? """
-
+ """Return whether the corosync config file exists."""
return os.path.exists(BuildOptions.COROSYNC_CONFIG_FILE)
def corosync_log_file(cfgfile):
- """ Where does corosync log to? """
-
+ """Return the path to the corosync log file, or None."""
with open(cfgfile, "r", encoding="utf-8") as f:
for line in f.readlines():
# "to_logfile:" could also be in the config file, so check for a
@@ -59,8 +57,7 @@ def corosync_log_file(cfgfile):
def generate_corosync_cfg(logdir, cluster_name, node_name):
- """ Generate the corosync config file, if it does not already exist """
-
+ """Generate the corosync config file, if it does not already exist."""
if corosync_cfg_exists():
return False
@@ -73,8 +70,7 @@ def generate_corosync_cfg(logdir, cluster_name, node_name):
def localname():
- """ Return the uname of the local host """
-
+ """Return the uname of the local host."""
our_uname = stdout_from_command(["uname", "-n"])
if our_uname:
our_uname = our_uname[0]
@@ -85,18 +81,17 @@ def localname():
class Corosync:
- """ A class for managing corosync processes and config files """
+ """A class for managing corosync processes and config files."""
def __init__(self, verbose, logdir, cluster_name):
- """ Create a new Corosync instance.
-
- Arguments:
-
- verbose -- Whether to print the corosync log file
- logdir -- The base directory under which to store log files
- cluster_name -- The name of the cluster
"""
+ Create a new Corosync instance.
+ Arguments:
+ verbose -- Whether to print the corosync log file
+ logdir -- The base directory under which to store log files
+ cluster_name -- The name of the cluster
+ """
self.verbose = verbose
self.logdir = logdir
self.cluster_name = cluster_name
@@ -104,8 +99,7 @@ class Corosync:
self._generated_cfg_file = False
def _ready(self, logfile, timeout=10):
- """ Is corosync ready to go? """
-
+ """Return whether corosync is ready."""
i = 0
while i < timeout:
@@ -123,16 +117,15 @@ class Corosync:
raise TimeoutError
def start(self, kill_first=False, timeout=10):
- """ Start the corosync process
-
- Arguments:
-
- kill_first -- Whether to kill any pre-existing corosync processes before
- starting a new one
- timeout -- If corosync does not start within this many seconds, raise
- TimeoutError
"""
+ Start the corosync process.
+ Arguments:
+ kill_first -- Whether to kill any pre-existing corosync processes before
+ starting a new one
+ timeout -- If corosync does not start within this many seconds, raise
+ TimeoutError
+ """
if kill_first:
killall(["corosync"])
@@ -150,8 +143,7 @@ class Corosync:
self._ready(logfile, timeout=timeout)
def stop(self):
- """ Stop the corosync process """
-
+ """Stop the corosync process."""
killall(["corosync"])
# If we did not write out the corosync config file, don't do anything else.
diff --git a/python/pacemaker/_cts/environment.py b/python/pacemaker/_cts/environment.py
index 732ab24..2407414 100644
--- a/python/pacemaker/_cts/environment.py
+++ b/python/pacemaker/_cts/environment.py
@@ -1,7 +1,7 @@
-""" Test environment classes for Pacemaker's Cluster Test Suite (CTS) """
+"""Test environment classes for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["EnvFactory"]
-__copyright__ = "Copyright 2014-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2014-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import argparse
@@ -16,9 +16,12 @@ from pacemaker._cts.logging import LogFactory
from pacemaker._cts.remote import RemoteFactory
from pacemaker._cts.watcher import LogKind
+
class Environment:
- """ A class for managing the CTS environment, consisting largely of processing
- and storing command line parameters
+ """
+ A class for managing the CTS environment.
+
+ This consists largely of processing and storing command line parameters.
"""
# pylint doesn't understand that self._rsh is callable (it stores the
@@ -30,17 +33,18 @@ class Environment:
# pylint: disable=not-callable
def __init__(self, args):
- """ Create a new Environment instance. This class can be treated kind
- of like a dictionary due to the presence of typical dict functions
- like __contains__, __getitem__, and __setitem__. However, it is not a
- dictionary so do not rely on standard dictionary behavior.
+ """
+ Create a new Environment instance.
- Arguments:
+ This class can be treated kind of like a dictionary due to the presence
+ of typical dict functions like __contains__, __getitem__, and __setitem__.
+ However, it is not a dictionary so do not rely on standard dictionary
+ behavior.
- args -- A list of command line parameters, minus the program name.
- If None, sys.argv will be used.
+ Arguments:
+ args -- A list of command line parameters, minus the program name.
+ If None, sys.argv will be used.
"""
-
self.data = {}
self._nodes = []
@@ -56,7 +60,7 @@ class Environment:
self["ClobberCIB"] = False
self["CIBfilename"] = None
self["CIBResource"] = False
- self["LogWatcher"] = LogKind.ANY
+ self["log_kind"] = None
self["node-limit"] = 0
self["scenario"] = "random"
@@ -74,10 +78,13 @@ class Environment:
self._discover()
def _seed_random(self, seed=None):
- """ Initialize the random number generator with the given seed, or use
- the current time if None
"""
+ Initialize the random number generator.
+ Arguments:
+ seed -- Use this to seed the random number generator, or use the
+ current time if None.
+ """
if not seed:
seed = int(time.time())
@@ -85,8 +92,7 @@ class Environment:
self.random_gen.seed(str(seed))
def dump(self):
- """ Print the current environment """
-
+ """Print the current environment."""
keys = []
for key in list(self.data.keys()):
keys.append(key)
@@ -97,21 +103,18 @@ class Environment:
self._logger.debug("{key:35}: {val}".format(key=s, val=str(self[key])))
def keys(self):
- """ Return a list of all environment keys stored in this instance """
-
+ """Return a list of all environment keys stored in this instance."""
return list(self.data.keys())
def __contains__(self, key):
- """ Does the given environment key exist? """
-
+ """Return True if the given key exists in the environment."""
if key == "nodes":
return True
return key in self.data
def __getitem__(self, key):
- """ Return the given environment key, or None if it does not exist """
-
+ """Return the given environment key, or None if it does not exist."""
if str(key) == "0":
raise ValueError("Bad call to 'foo in X', should reference 'foo in X.keys()' instead")
@@ -124,10 +127,7 @@ class Environment:
return self.data.get(key)
def __setitem__(self, key, value):
- """ Set the given environment key to the given value, overriding any
- previous value
- """
-
+ """Set the given environment key to the given value, overriding any previous value."""
if key == "Stack":
self._set_stack(value)
@@ -145,7 +145,7 @@ class Environment:
n = node.strip()
socket.gethostbyname_ex(n)
self._nodes.append(n)
- except:
+ except socket.herror:
self._logger.log("%s not found in DNS... aborting" % node)
raise
@@ -155,21 +155,18 @@ class Environment:
self.data[key] = value
def random_node(self):
- """ Choose a random node from the cluster """
-
+ """Choose a random node from the cluster."""
return self.random_gen.choice(self["nodes"])
def get(self, key, default=None):
- """ Return the value for key if key is in the environment, else default """
-
+ """Return the value for key if key is in the environment, else default."""
if key == "nodes":
return self._nodes
return self.data.get(key, default)
def _set_stack(self, name):
- """ Normalize the given cluster stack name """
-
+ """Normalize the given cluster stack name."""
if name in ["corosync", "cs", "mcp"]:
self.data["Stack"] = "corosync 2+"
@@ -177,8 +174,7 @@ class Environment:
raise ValueError("Unknown stack: %s" % name)
def _get_stack_short(self):
- """ Return the short name for the currently set cluster stack """
-
+ """Return the short name for the currently set cluster stack."""
if "Stack" not in self.data:
return "unknown"
@@ -189,15 +185,13 @@ class Environment:
raise ValueError("Unknown stack: %s" % self["stack"])
def _detect_systemd(self):
- """ Detect whether systemd is in use on the target node """
-
+ """Detect whether systemd is in use on the target node."""
if "have_systemd" not in self.data:
(rc, _) = self._rsh(self._target, "systemctl list-units", verbose=0)
self["have_systemd"] = rc == 0
def _detect_syslog(self):
- """ Detect the syslog variant in use on the target node """
-
+ """Detect the syslog variant in use on the target node."""
if "syslogd" not in self.data:
if self["have_systemd"]:
# Systemd
@@ -213,8 +207,7 @@ class Environment:
self["syslogd"] = "rsyslog"
def disable_service(self, node, service):
- """ Disable the given service on the given node """
-
+ """Disable the given service on the given node."""
if self["have_systemd"]:
# Systemd
(rc, _) = self._rsh(node, "systemctl disable %s" % service)
@@ -225,8 +218,7 @@ class Environment:
return rc
def enable_service(self, node, service):
- """ Enable the given service on the given node """
-
+ """Enable the given service on the given node."""
if self["have_systemd"]:
# Systemd
(rc, _) = self._rsh(node, "systemctl enable %s" % service)
@@ -237,8 +229,7 @@ class Environment:
return rc
def service_is_enabled(self, node, service):
- """ Is the given service enabled on the given node? """
-
+ """Return True if the given service is enabled on the given node."""
if self["have_systemd"]:
# Systemd
@@ -254,15 +245,13 @@ class Environment:
return rc == 0
def _detect_at_boot(self):
- """ Detect if the cluster starts at boot """
-
+ """Detect if the cluster starts at boot."""
if "at-boot" not in self.data:
self["at-boot"] = self.service_is_enabled(self._target, "corosync") \
- or self.service_is_enabled(self._target, "pacemaker")
+ or self.service_is_enabled(self._target, "pacemaker")
def _detect_ip_offset(self):
- """ Detect the offset for IPaddr resources """
-
+ """Detect the offset for IPaddr resources."""
if self["CIBResource"] and "IPBase" not in self.data:
(_, lines) = self._rsh(self._target, "ip addr | grep inet | grep -v -e link -e inet6 -e '/32' -e ' lo' | awk '{print $2}'", verbose=0)
network = lines[0].strip()
@@ -290,10 +279,12 @@ class Environment:
self._logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"])
def _filter_nodes(self):
- """ If --limit-nodes is given, keep that many nodes from the front of the
- list of cluster nodes and drop the rest
"""
+ Filter the list of cluster nodes.
+ If --limit-nodes is given, keep that many nodes from the front of the
+ list of cluster nodes and drop the rest.
+ """
if self["node-limit"] > 0:
if len(self["nodes"]) > self["node-limit"]:
# pylint thinks self["node-limit"] is a list even though we initialize
@@ -303,17 +294,15 @@ class Environment:
% (len(self["nodes"]), self["node-limit"]))
while len(self["nodes"]) > self["node-limit"]:
- self["nodes"].pop(len(self["nodes"])-1)
+ self["nodes"].pop(len(self["nodes"]) - 1)
def _validate(self):
- """ Were we given all the required command line parameters? """
-
+ """Check that we were given all required command line parameters."""
if not self["nodes"]:
raise ValueError("No nodes specified!")
def _discover(self):
- """ Probe cluster nodes to figure out how to log and manage services """
-
+ """Probe cluster nodes to figure out how to log and manage services."""
self._target = random.Random().choice(self["nodes"])
exerciser = socket.gethostname()
@@ -332,11 +321,12 @@ class Environment:
self._detect_ip_offset()
def _parse_args(self, argv):
- """ Parse and validate command line parameters, setting the appropriate
- values in the environment dictionary. If argv is None, use sys.argv
- instead.
"""
+ Parse and validate command line parameters.
+ Set the appropriate values in the environment dictionary. If argv is
+ None, use sys.argv instead.
+ """
if not argv:
argv = sys.argv[1:]
@@ -525,10 +515,10 @@ class Environment:
with open(dsh_file, "r", encoding="utf-8") as f:
for line in f:
- l = line.strip()
+ stripped = line.strip()
- if not l.startswith('#'):
- self["nodes"].append(l)
+ if not stripped.startswith('#'):
+ self["nodes"].append(stripped)
else:
print("Unknown DSH group: %s" % args.dsh_group)
@@ -597,7 +587,7 @@ class Environment:
if args.logfile:
self["LogAuditDisabled"] = True
self["LogFileName"] = args.logfile
- self["LogWatcher"] = LogKind.REMOTE_FILE
+ self["log_kind"] = LogKind.REMOTE_FILE
else:
# We can't set this as the default on the parser.add_argument call
# for this option because then args.logfile will be set, which means
@@ -629,17 +619,19 @@ class Environment:
self[name] = value
print("Setting %s = %s" % (name, value))
+
class EnvFactory:
- """ A class for constructing a singleton instance of an Environment object """
+ """A class for constructing a singleton instance of an Environment object."""
instance = None
# pylint: disable=invalid-name
def getInstance(self, args=None):
- """ Returns the previously created instance of Environment, or creates a
- new instance if one does not already exist.
"""
+ Return the previously created instance of Environment.
+ If no instance exists, create a new instance and return that.
+ """
if not EnvFactory.instance:
EnvFactory.instance = Environment(args)
diff --git a/python/pacemaker/_cts/errors.py b/python/pacemaker/_cts/errors.py
index 2e245e7..a731640 100644
--- a/python/pacemaker/_cts/errors.py
+++ b/python/pacemaker/_cts/errors.py
@@ -1,53 +1,61 @@
-""" A module providing custom exception classes used throughout the pacemaker library """
+"""A module providing custom exception classes used throughout the pacemaker library."""
__all__ = ["ExitCodeError", "OutputFoundError", "OutputNotFoundError", "XmlValidationError"]
-__copyright__ = "Copyright 2009-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2009-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+)"
class TestError(Exception):
- """ Base class for exceptions in this module """
+ """Base class for exceptions in this module."""
class ExitCodeError(TestError):
- """ Exception raised when command exit status is unexpected """
+ """Exception raised when command exit status is unexpected."""
def __init__(self, exit_code):
+ """Create a new ExitCodeError instance."""
TestError.__init__(self)
self.exit_code = exit_code
def __str__(self):
+ """Return a printable string for this exception."""
return repr(self.exit_code)
class OutputNotFoundError(TestError):
- """ Exception raised when command output does not contain wanted string """
+ """Exception raised when command output does not contain wanted string."""
def __init__(self, output):
+ """Create a new OutputNotFoundError instance."""
TestError.__init__(self)
self.output = output
def __str__(self):
+ """Return a printable string for this exception."""
return repr(self.output)
class OutputFoundError(TestError):
- """ Exception raised when command output contains unwanted string """
+ """Exception raised when command output contains unwanted string."""
def __init__(self, output):
+ """Create a new OutputFoundError instance."""
TestError.__init__(self)
self.output = output
def __str__(self):
+ """Return a printable string for this exception."""
return repr(self.output)
class XmlValidationError(TestError):
- """ Exception raised when xmllint fails """
+ """Exception raised when xmllint fails."""
def __init__(self, output):
+ """Create a new XmlValidationError instance."""
TestError.__init__(self)
self.output = output
def __str__(self):
+ """Return a printable string for this exception."""
return repr(self.output)
diff --git a/python/pacemaker/_cts/input.py b/python/pacemaker/_cts/input.py
index 7e734f6..739d371 100644
--- a/python/pacemaker/_cts/input.py
+++ b/python/pacemaker/_cts/input.py
@@ -1,12 +1,12 @@
-""" User input related utilities for CTS """
+"""User input related utilities for CTS."""
__all__ = ["should_continue"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
-def should_continue(env):
- """ On failure, prompt the user to see if we should continue """
+def should_continue(env):
+ """On failure, prompt the user to see if we should continue."""
if env["continue"]:
return True
diff --git a/python/pacemaker/_cts/logging.py b/python/pacemaker/_cts/logging.py
index 6c7bfb0..de1ed05 100644
--- a/python/pacemaker/_cts/logging.py
+++ b/python/pacemaker/_cts/logging.py
@@ -1,7 +1,7 @@
-""" Logging classes for Pacemaker's Cluster Test Suite (CTS) """
+"""Logging classes for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["LogFactory"]
-__copyright__ = "Copyright 2014-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2014-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import os
@@ -10,7 +10,7 @@ import time
class Logger:
- """ Abstract class to use as parent for CTS logging classes """
+ """Abstract class to use as parent for CTS logging classes."""
TimeFormat = "%b %d %H:%M:%S\t"
@@ -26,38 +26,33 @@ class Logger:
self._source = ""
def __call__(self, lines):
- """ Log specified messages """
-
+ """Log specified messages."""
raise ValueError("Abstract class member (__call__)")
def write(self, line):
- """ Log a single line excluding trailing whitespace """
-
+ """Log a single line excluding trailing whitespace."""
return self(line.rstrip())
def writelines(self, lines):
- """ Log a series of lines excluding trailing whitespace """
-
+ """Log a series of lines excluding trailing whitespace."""
for line in lines:
self.write(line)
@property
def is_debug_target(self):
- """ Return True if this logger should receive debug messages """
-
+ """Return True if this logger should receive debug messages."""
return self._debug_target
class StdErrLog(Logger):
- """ Class to log to standard error """
+ """Class to log to standard error."""
def __init__(self, filename, tag):
Logger.__init__(self, filename, tag)
self._debug_target = False
def __call__(self, lines):
- """ Log specified lines to stderr """
-
+ """Log specified lines to stderr."""
timestamp = time.strftime(Logger.TimeFormat,
time.localtime(time.time()))
if isinstance(lines, str):
@@ -70,15 +65,14 @@ class StdErrLog(Logger):
class FileLog(Logger):
- """ Class to log to a file """
+ """Class to log to a file."""
def __init__(self, filename, tag):
Logger.__init__(self, filename, tag)
self._hostname = os.uname()[1]
def __call__(self, lines):
- """ Log specified lines to the file """
-
+ """Log specified lines to the file."""
with open(self._logfile, "at", encoding="utf-8") as logf:
timestamp = time.strftime(Logger.TimeFormat,
time.localtime(time.time()))
@@ -92,39 +86,34 @@ class FileLog(Logger):
class LogFactory:
- """ Singleton to log messages to various destinations """
+ """Singleton to log messages to various destinations."""
log_methods = []
have_stderr = False
def add_file(self, filename, tag=None):
- """ When logging messages, log them to specified file """
-
+ """When logging messages, log them to specified file."""
if filename:
LogFactory.log_methods.append(FileLog(filename, tag))
def add_stderr(self):
- """ When logging messages, log them to standard error """
-
+ """When logging messages, log them to standard error."""
if not LogFactory.have_stderr:
LogFactory.have_stderr = True
LogFactory.log_methods.append(StdErrLog(None, None))
def log(self, args):
- """ Log a message (to all configured log destinations) """
-
+ """Log a message (to all configured log destinations)."""
for logfn in LogFactory.log_methods:
logfn(args.strip())
def debug(self, args):
- """ Log a debug message (to all configured log destinations) """
-
+ """Log a debug message (to all configured log destinations)."""
for logfn in LogFactory.log_methods:
if logfn.is_debug_target:
logfn("debug: %s" % args.strip())
def traceback(self, traceback):
- """ Log a stack trace (to all configured log destinations) """
-
+ """Log a stack trace (to all configured log destinations)."""
for logfn in LogFactory.log_methods:
traceback.print_exc(50, logfn)
diff --git a/python/pacemaker/_cts/network.py b/python/pacemaker/_cts/network.py
index 33e401f..6ba776c 100644
--- a/python/pacemaker/_cts/network.py
+++ b/python/pacemaker/_cts/network.py
@@ -1,29 +1,29 @@
-""" Network related utilities for CTS """
+"""Network related utilities for CTS."""
__all__ = ["next_ip"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
# pylint: disable=global-statement
CURRENT_IP = None
-def next_ip(ip_base=None, reset=False):
- """ Return the next available IP address.
-
- Arguments:
-
- ip_base -- The initial IP address to start from. The first call to next_ip
- will return the next IP address from this base. Each subsequent
- call will return the next address from the previous call, so you
- can just omit this argument for subsequent calls.
- reset -- Force next_ip to start from ip_base again. This requires also
- passing the ip_base argument. (Mostly useful for unit testing,
- but may be useful elsewhere).
- This function only increments the last portion of the IP address. Once it
- has hit the upper limit, ValueError will be raised.
+def next_ip(ip_base=None, reset=False):
+ """
+ Return the next available IP address.
+
+ This function only increments the last portion of the IP address. Once it
+ has hit the upper limit, ValueError will be raised.
+
+ Arguments:
+ ip_base -- The initial IP address to start from. The first call to next_ip
+ will return the next IP address from this base. Each subsequent
+ call will return the next address from the previous call, so you
+ can just omit this argument for subsequent calls.
+ reset -- Force next_ip to start from ip_base again. This requires also
+ passing the ip_base argument. (Mostly useful for unit testing,
+ but may be useful elsewhere).
"""
-
global CURRENT_IP
if CURRENT_IP is None or reset:
diff --git a/python/pacemaker/_cts/patterns.py b/python/pacemaker/_cts/patterns.py
index 0fb1c2b..333eac4 100644
--- a/python/pacemaker/_cts/patterns.py
+++ b/python/pacemaker/_cts/patterns.py
@@ -1,24 +1,23 @@
-""" Pattern-holding classes for Pacemaker's Cluster Test Suite (CTS) """
+"""Pattern-holding classes for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["PatternSelector"]
-__copyright__ = "Copyright 2008-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2008-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+)"
import argparse
from pacemaker.buildoptions import BuildOptions
+
class BasePatterns:
- """ The base class for holding a stack-specific set of command and log
- file/stdout patterns. Stack-specific classes need to be built on top
- of this one.
"""
+ The base class for holding a stack-specific set of command and log file/stdout patterns.
- def __init__(self):
- """ Create a new BasePatterns instance which holds a very minimal set of
- basic patterns.
- """
+ Stack-specific classes need to be built on top of this one.
+ """
+ def __init__(self):
+ """Create a new BasePatterns instance which holds a very minimal set of basic patterns."""
self._bad_news = []
self._components = {}
self._name = "crm-base"
@@ -32,60 +31,67 @@ class BasePatterns:
# pcs can log this when node is fenced, but fencing is OK in some
# tests (and we will catch it in pacemaker logs when not OK)
r"pcs.daemon:No response from: .* request: get_configs, error:",
+
+ # This is overbroad, but there's no way to say that only certain
+ # transition errors are acceptable. We have to rely on causes of a
+ # transition error logging their own error message, which should
+ # always be the case.
+ r"pacemaker-schedulerd.* Calculated transition .*/pe-error",
]
self._commands = {
- "StatusCmd" : "crmadmin -t 60 -S %s 2>/dev/null",
- "CibQuery" : "cibadmin -Ql",
- "CibAddXml" : "cibadmin --modify -c --xml-text %s",
- "CibDelXpath" : "cibadmin --delete --xpath %s",
- "RscRunning" : BuildOptions.DAEMON_DIR + "/cts-exec-helper -R -r %s",
- "CIBfile" : "%s:" + BuildOptions.CIB_DIR + "/cib.xml",
- "TmpDir" : "/tmp",
-
- "BreakCommCmd" : "iptables -A INPUT -s %s -j DROP >/dev/null 2>&1",
- "FixCommCmd" : "iptables -D INPUT -s %s -j DROP >/dev/null 2>&1",
-
- "MaintenanceModeOn" : "cibadmin --modify -c --xml-text '<cluster_property_set id=\"cib-bootstrap-options\"><nvpair id=\"cts-maintenance-mode-setting\" name=\"maintenance-mode\" value=\"true\"/></cluster_property_set>'",
- "MaintenanceModeOff" : "cibadmin --delete --xpath \"//nvpair[@name='maintenance-mode']\"",
-
- "StandbyCmd" : "crm_attribute -Vq -U %s -n standby -l forever -v %s 2>/dev/null",
- "StandbyQueryCmd" : "crm_attribute -qG -U %s -n standby -l forever -d off 2>/dev/null",
+ "StatusCmd": "crmadmin -t 60 -S %s 2>/dev/null",
+ "CibQuery": "cibadmin -Ql",
+ "CibAddXml": "cibadmin --modify -c --xml-text %s",
+ "CibDelXpath": "cibadmin --delete --xpath %s",
+ "RscRunning": BuildOptions.DAEMON_DIR + "/cts-exec-helper -R -r %s",
+ "CIBfile": "%s:" + BuildOptions.CIB_DIR + "/cib.xml",
+ "TmpDir": "/tmp",
+
+ "BreakCommCmd": "iptables -A INPUT -s %s -j DROP >/dev/null 2>&1",
+ "FixCommCmd": "iptables -D INPUT -s %s -j DROP >/dev/null 2>&1",
+
+ "MaintenanceModeOn": "cibadmin --modify -c --xml-text '<cluster_property_set id=\"cib-bootstrap-options\"><nvpair id=\"cts-maintenance-mode-setting\" name=\"maintenance-mode\" value=\"true\"/></cluster_property_set>'",
+ "MaintenanceModeOff": "cibadmin --delete --xpath \"//nvpair[@name='maintenance-mode']\"",
+
+ "StandbyCmd": "crm_attribute -Vq -U %s -n standby -l forever -v %s 2>/dev/null",
+ "StandbyQueryCmd": "crm_attribute -qG -U %s -n standby -l forever -d off 2>/dev/null",
}
self._search = {
- "Pat:DC_IDLE" : r"pacemaker-controld.*State transition.*-> S_IDLE",
+ "Pat:DC_IDLE": r"pacemaker-controld.*State transition.*-> S_IDLE",
# This won't work if we have multiple partitions
- "Pat:Local_started" : r"%s\W.*controller successfully started",
- "Pat:NonDC_started" : r"%s\W.*State transition.*-> S_NOT_DC",
- "Pat:DC_started" : r"%s\W.*State transition.*-> S_IDLE",
- "Pat:We_stopped" : r"%s\W.*OVERRIDE THIS PATTERN",
- "Pat:They_stopped" : r"%s\W.*LOST:.* %s ",
- "Pat:They_dead" : r"node %s.*: is dead",
- "Pat:They_up" : r"%s %s\W.*OVERRIDE THIS PATTERN",
- "Pat:TransitionComplete" : "Transition status: Complete: complete",
-
- "Pat:Fencing_start" : r"Requesting peer fencing .* targeting %s",
- "Pat:Fencing_ok" : r"pacemaker-fenced.*:\s*Operation .* targeting %s by .* for .*@.*: OK",
- "Pat:Fencing_recover" : r"pacemaker-schedulerd.*: Recover\s+%s",
- "Pat:Fencing_active" : r"stonith resource .* is active on 2 nodes (attempting recovery)",
- "Pat:Fencing_probe" : r"pacemaker-controld.* Result of probe operation for %s on .*: Error",
-
- "Pat:RscOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s.*: (0 \()?ok",
- "Pat:RscOpFail" : r"pacemaker-schedulerd.*:.*Unexpected result .* recorded for %s of %s ",
- "Pat:CloneOpFail" : r"pacemaker-schedulerd.*:.*Unexpected result .* recorded for %s of (%s|%s) ",
- "Pat:RscRemoteOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s on %s: (0 \()?ok",
- "Pat:NodeFenced" : r"pacemaker-controld.*:\s* Peer %s was terminated \(.*\) by .* on behalf of .*: OK",
+ "Pat:Local_started": r"%s\W.*controller successfully started",
+ "Pat:NonDC_started": r"%s\W.*State transition.*-> S_NOT_DC",
+ "Pat:DC_started": r"%s\W.*State transition.*-> S_IDLE",
+ "Pat:We_stopped": r"%s\W.*OVERRIDE THIS PATTERN",
+ "Pat:They_stopped": r"%s\W.*LOST:.* %s ",
+ "Pat:They_dead": r"node %s.*: is dead",
+ "Pat:They_up": r"%s %s\W.*OVERRIDE THIS PATTERN",
+ "Pat:TransitionComplete": "Transition status: Complete: complete",
+
+ "Pat:Fencing_start": r"Requesting peer fencing .* targeting %s",
+ "Pat:Fencing_ok": r"pacemaker-fenced.*:\s*Operation .* targeting %s by .* for .*@.*: OK",
+ "Pat:Fencing_recover": r"pacemaker-schedulerd.*: Recover\s+%s",
+ "Pat:Fencing_active": r"stonith resource .* is active on 2 nodes (attempting recovery)",
+ "Pat:Fencing_probe": r"pacemaker-controld.* Result of probe operation for %s on .*: Error",
+
+ "Pat:RscOpOK": r"pacemaker-controld.*:\s+Result of %s operation for %s.*: (0 \()?ok",
+ "Pat:RscOpFail": r"pacemaker-schedulerd.*:.*Unexpected result .* recorded for %s of %s ",
+ "Pat:CloneOpFail": r"pacemaker-schedulerd.*:.*Unexpected result .* recorded for %s of (%s|%s) ",
+ "Pat:RscRemoteOpOK": r"pacemaker-controld.*:\s+Result of %s operation for %s on %s: (0 \()?ok",
+ "Pat:NodeFenced": r"pacemaker-controld.*:\s* Peer %s was terminated \(.*\) by .* on behalf of .*: OK",
}
def get_component(self, key):
- """ Return the patterns for a single component as a list, given by key.
- This is typically the name of some subprogram (pacemaker-based,
- pacemaker-fenced, etc.) or various special purpose keys. If key is
- unknown, return an empty list.
"""
+ Return the patterns for a single component as a list, given by key.
+ This is typically the name of some subprogram (pacemaker-based,
+ pacemaker-fenced, etc.) or various special purpose keys. If key is
+ unknown, return an empty list.
+ """
if key in self._components:
return self._components[key]
@@ -93,11 +99,12 @@ class BasePatterns:
return []
def get_patterns(self, key):
- """ Return various patterns supported by this object, given by key.
- Depending on the key, this could either be a list or a hash. If key
- is unknown, return None.
"""
+ Return various patterns supported by this object, given by key.
+ Depending on the key, this could either be a list or a hash. If key is
+ unknown, return None.
+ """
if key == "BadNews":
return self._bad_news
if key == "BadNewsIgnore":
@@ -125,36 +132,36 @@ class BasePatterns:
class Corosync2Patterns(BasePatterns):
- """ Patterns for Corosync version 2 cluster manager class """
+ """Patterns for Corosync version 2 cluster manager class."""
def __init__(self):
BasePatterns.__init__(self)
self._name = "crm-corosync"
self._commands.update({
- "StartCmd" : "service corosync start && service pacemaker start",
- "StopCmd" : "service pacemaker stop; [ ! -e /usr/sbin/pacemaker-remoted ] || service pacemaker_remote stop; service corosync stop",
+ "StartCmd": "service corosync start && service pacemaker start",
+ "StopCmd": "service pacemaker stop; [ ! -e /usr/sbin/pacemaker-remoted ] || service pacemaker_remote stop; service corosync stop",
- "EpochCmd" : "crm_node -e",
- "QuorumCmd" : "crm_node -q",
- "PartitionCmd" : "crm_node -p",
+ "EpochCmd": "crm_node -e",
+ "QuorumCmd": "crm_node -q",
+ "PartitionCmd": "crm_node -p",
})
self._search.update({
# Close enough ... "Corosync Cluster Engine exiting normally" isn't
# printed reliably.
- "Pat:We_stopped" : r"%s\W.*Unloading all Corosync service engines",
- "Pat:They_stopped" : r"%s\W.*pacemaker-controld.*Node %s(\[|\s).*state is now lost",
- "Pat:They_dead" : r"pacemaker-controld.*Node %s(\[|\s).*state is now lost",
- "Pat:They_up" : r"\W%s\W.*pacemaker-controld.*Node %s state is now member",
+ "Pat:We_stopped": r"%s\W.*Unloading all Corosync service engines",
+ "Pat:They_stopped": r"%s\W.*pacemaker-controld.*Node %s(\[|\s).*state is now lost",
+ "Pat:They_dead": r"pacemaker-controld.*Node %s(\[|\s).*state is now lost",
+ "Pat:They_up": r"\W%s\W.*pacemaker-controld.*Node %s state is now member",
- "Pat:ChildExit" : r"\[[0-9]+\] exited with status [0-9]+ \(",
+ "Pat:ChildExit": r"\[[0-9]+\] exited with status [0-9]+ \(",
# "with signal 9" == pcmk_child_exit(), "$" == check_active_before_startup_processes()
- "Pat:ChildKilled" : r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated( with signal 9|$)",
- "Pat:ChildRespawn" : r"%s\W.*pacemakerd.*Respawning %s subdaemon after unexpected exit",
+ "Pat:ChildKilled": r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated( with signal 9|$)",
+ "Pat:ChildRespawn": r"%s\W.*pacemakerd.*Respawning %s subdaemon after unexpected exit",
- "Pat:InfraUp" : r"%s\W.*corosync.*Initializing transport",
- "Pat:PacemakerUp" : r"%s\W.*pacemakerd.*Starting Pacemaker",
+ "Pat:InfraUp": r"%s\W.*corosync.*Initializing transport",
+ "Pat:PacemakerUp": r"%s\W.*pacemakerd.*Starting Pacemaker",
})
self._ignore += [
@@ -239,13 +246,7 @@ class Corosync2Patterns(BasePatterns):
r"error:.*cib_(shm|rw) IPC provider disconnected while waiting",
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"error: Lost fencer connection",
- # This is overbroad, but we don't have a way to say that only
- # certain transition errors are acceptable (if the fencer respawns,
- # fence devices may appear multiply active). We have to rely on
- # other causes of a transition error logging their own error
- # message, which is the usual practice.
- r"pacemaker-schedulerd.* Calculated transition .*/pe-error",
- ]
+ ]
self._components["corosync"] = [
# We expect each daemon to lose its cluster connection.
@@ -281,12 +282,6 @@ class Corosync2Patterns(BasePatterns):
r"pacemaker-execd.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"pacemaker-controld.*:\s+Result of .* operation for Fencing.*Error \(Lost connection to fencer\)",
r"pacemaker-controld.*:Could not connect to attrd: Connection refused",
- # This is overbroad, but we don't have a way to say that only
- # certain transition errors are acceptable (if the fencer respawns,
- # fence devices may appear multiply active). We have to rely on
- # other causes of a transition error logging their own error
- # message, which is the usual practice.
- r"pacemaker-schedulerd.* Calculated transition .*/pe-error",
]
self._components["pacemaker-execd"] = [
@@ -338,12 +333,6 @@ class Corosync2Patterns(BasePatterns):
r"error:.*Lost fencer connection",
r"error:.*Fencer connection failed \(will retry\)",
r"pacemaker-controld.*:\s+Result of .* operation for Fencing.*Error \(Lost connection to fencer\)",
- # This is overbroad, but we don't have a way to say that only
- # certain transition errors are acceptable (if the fencer respawns,
- # fence devices may appear multiply active). We have to rely on
- # other causes of a transition error logging their own error
- # message, which is the usual practice.
- r"pacemaker-schedulerd.* Calculated transition .*/pe-error",
]
self._components["pacemaker-fenced-ignore"].extend(self._components["common-ignore"])
@@ -356,17 +345,16 @@ patternVariants = {
class PatternSelector:
- """ A class for choosing one of several Pattern objects and then extracting
- various pieces of information from that object
- """
+ """Choose from among several Pattern objects and return the information from that object."""
def __init__(self, name="crm-corosync"):
- """ Create a new PatternSelector object by instantiating whatever class
- is given by name. Defaults to Corosync2Patterns for "crm-corosync" or
- None. While other objects could be supported in the future, only this
- and the base object are supported at this time.
"""
+ Create a new PatternSelector object.
+ Instantiate whatever class is given by name. Defaults to Corosync2Patterns
+ for "crm-corosync" or None. While other objects could be supported in the
+ future, only this and the base object are supported at this time.
+ """
self._name = name
# If no name was given, use the default. Otherwise, look up the appropriate
@@ -377,23 +365,23 @@ class PatternSelector:
self._base = patternVariants[name]()
def get_patterns(self, kind):
- """ Call get_patterns on the previously instantiated pattern object """
-
+ """Call get_patterns on the previously instantiated pattern object."""
return self._base.get_patterns(kind)
def get_template(self, key):
- """ Return a single pattern from the previously instantiated pattern
- object as a string, or None if no pattern exists for the given key.
"""
+ Return a single pattern from the previously instantiated pattern object.
+ If no pattern exists for the given key, return None.
+ """
return self._base[key]
def get_component(self, kind):
- """ Call get_component on the previously instantiated pattern object """
-
+ """Call get_component on the previously instantiated pattern object."""
return self._base.get_component(kind)
def __getitem__(self, key):
+ """Return the pattern for the given key, or None if it does not exist."""
return self.get_template(key)
diff --git a/python/pacemaker/_cts/process.py b/python/pacemaker/_cts/process.py
index 757360c..c25ce33 100644
--- a/python/pacemaker/_cts/process.py
+++ b/python/pacemaker/_cts/process.py
@@ -1,4 +1,4 @@
-""" A module for managing and communicating with external processes """
+"""A module for managing and communicating with external processes."""
__all__ = ["killall", "exit_if_proc_running", "pipe_communicate", "stdout_from_command"]
__copyright__ = "Copyright 2009-2023 the Pacemaker project contributors"
@@ -11,9 +11,9 @@ import psutil
from pacemaker.exitstatus import ExitStatus
-def killall(process_names, terminate=False):
- """ Kill all instances of every process in a list """
+def killall(process_names, terminate=False):
+ """Kill all instances of every process in a list."""
if not process_names:
return
@@ -36,8 +36,7 @@ def killall(process_names, terminate=False):
def is_proc_running(process_name):
- """ Check whether a process with a given name is running """
-
+ """Check whether a process with a given name is running."""
for proc in psutil.process_iter(["name"]):
if proc.info["name"] == process_name:
return True
@@ -45,8 +44,7 @@ def is_proc_running(process_name):
def exit_if_proc_running(process_name):
- """ Exit with error if a given process is running """
-
+ """Exit with error if a given process is running."""
if is_proc_running(process_name):
print("Error: %s is already running!" % process_name)
print("Run %s only when the cluster is stopped." % sys.argv[0])
@@ -54,8 +52,7 @@ def exit_if_proc_running(process_name):
def pipe_communicate(pipes, check_stderr=False, stdin=None):
- """ Get text output from pipes """
-
+ """Get text output from pipes."""
if stdin is not None:
pipe_outputs = pipes.communicate(input=stdin.encode())
else:
@@ -69,8 +66,7 @@ def pipe_communicate(pipes, check_stderr=False, stdin=None):
def stdout_from_command(args):
- """ Execute command and return its standard output """
-
+ """Execute command and return its standard output."""
with subprocess.Popen(args, stdout=subprocess.PIPE) as p:
p.wait()
return pipe_communicate(p).split("\n")
diff --git a/python/pacemaker/_cts/remote.py b/python/pacemaker/_cts/remote.py
index 4b6b8f6..ba5b878 100644
--- a/python/pacemaker/_cts/remote.py
+++ b/python/pacemaker/_cts/remote.py
@@ -1,7 +1,7 @@
-""" Remote command runner for Pacemaker's Cluster Test Suite (CTS) """
+"""Remote command runner for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["RemoteExec", "RemoteFactory"]
-__copyright__ = "Copyright 2014-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2014-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import re
@@ -12,11 +12,14 @@ from threading import Thread
from pacemaker._cts.logging import LogFactory
+
def convert2string(lines):
- """ Convert a byte string to a UTF-8 string, and a list of byte strings to
- a list of UTF-8 strings. All other text formats are passed through.
"""
+ Convert byte strings to UTF-8 strings.
+ Lists of byte strings are converted to a list of UTF-8 strings. All other
+ text formats are passed through.
+ """
if isinstance(lines, bytes):
return lines.decode("utf-8")
@@ -32,23 +35,23 @@ def convert2string(lines):
return lines
+
class AsyncCmd(Thread):
- """ A class for doing the hard work of running a command on another machine """
+ """A class for doing the hard work of running a command on another machine."""
def __init__(self, node, command, proc=None, delegate=None):
- """ Create a new AsyncCmd instance
-
- Arguments:
-
- node -- The remote machine to run on
- command -- The ssh command string to use for remote execution
- proc -- If not None, a process object previously created with Popen.
- Instead of spawning a new process, we will then wait on
- this process to finish and handle its output.
- delegate -- When the command completes, call the async_complete method
- on this object
"""
-
+ Create a new AsyncCmd instance.
+
+ Arguments:
+ node -- The remote machine to run on
+ command -- The ssh command string to use for remote execution
+ proc -- If not None, a process object previously created with Popen.
+ Instead of spawning a new process, we will then wait on
+ this process to finish and handle its output.
+ delegate -- When the command completes, call the async_complete method
+ on this object
+ """
self._command = command
self._delegate = delegate
self._logger = LogFactory()
@@ -58,8 +61,7 @@ class AsyncCmd(Thread):
Thread.__init__(self)
def run(self):
- """ Run the previously instantiated AsyncCmd object """
-
+ """Run the previously instantiated AsyncCmd object."""
out = None
err = None
@@ -92,21 +94,23 @@ class AsyncCmd(Thread):
if self._delegate:
self._delegate.async_complete(self._proc.pid, self._proc.returncode, out, err)
+
class RemoteExec:
- """ An abstract class for remote execution. It runs a command on another
- machine using ssh and scp.
"""
+ An abstract class for remote execution.
- def __init__(self, command, cp_command, silent=False):
- """ Create a new RemoteExec instance
-
- Arguments:
+ It runs a command on another machine using ssh and scp.
+ """
- command -- The ssh command string to use for remote execution
- cp_command -- The scp command string to use for copying files
- silent -- Should we log command status?
+ def __init__(self, command, cp_command, silent=False):
"""
+ Create a new RemoteExec instance.
+ Arguments:
+ command -- The ssh command string to use for remote execution
+ cp_command -- The scp command string to use for copying files
+ silent -- Should we log command status?
+ """
self._command = command
self._cp_command = cp_command
self._logger = LogFactory()
@@ -114,15 +118,11 @@ class RemoteExec:
self._our_node = os.uname()[1].lower()
def _fixcmd(self, cmd):
- """ Perform shell escapes on certain characters in the input cmd string """
-
+ """Perform shell escapes on certain characters in the input cmd string."""
return re.sub("\'", "'\\''", cmd)
def _cmd(self, args):
- """ Given a list of arguments, return the string that will be run on the
- remote system
- """
-
+ """Given a list of arguments, return the string that will be run on the remote system."""
sysname = args[0]
command = args[1]
@@ -134,56 +134,48 @@ class RemoteExec:
return ret
def _log(self, args):
- """ Log a message """
-
+ """Log a message."""
if not self._silent:
self._logger.log(args)
def _debug(self, args):
- """ Log a message at the debug level """
-
+ """Log a message at the debug level."""
if not self._silent:
self._logger.debug(args)
def call_async(self, node, command, delegate=None):
- """ Run the given command on the given remote system and do not wait for
- it to complete.
-
- Arguments:
-
- node -- The remote machine to run on
- command -- The command to run, as a string
- delegate -- When the command completes, call the async_complete method
- on this object
+ """
+ Run the given command on the given remote system and do not wait for it to complete.
- Returns:
+ Arguments:
+ node -- The remote machine to run on
+ command -- The command to run, as a string
+ delegate -- When the command completes, call the async_complete method
+ on this object
- The running process object
+ Returns the running process object.
"""
-
aproc = AsyncCmd(node, self._cmd([node, command]), delegate=delegate)
aproc.start()
return aproc
def __call__(self, node, command, synchronous=True, verbose=2):
- """ Run the given command on the given remote system. If you call this class
- like a function, this is what gets called. It's approximately the same
- as a system() call on the remote machine.
-
- Arguments:
+ """
+ Run the given command on the given remote system.
- node -- The remote machine to run on
- command -- The command to run, as a string
- synchronous -- Should we wait for the command to complete?
- verbose -- If 0, do not lo:g anything. If 1, log the command and its
- return code but not its output. If 2, additionally log
- command output.
+ If you call this class like a function, this is what gets called. It's
+ approximately the same as a system() call on the remote machine.
- Returns:
+ Arguments:
+ node -- The remote machine to run on
+ command -- The command to run, as a string
+ synchronous -- Should we wait for the command to complete?
+ verbose -- If 0, do not log anything. If 1, log the command and its
+ return code but not its output. If 2, additionally log
+ command output.
- A tuple of (return code, command output)
+ Returns a tuple of (return code, command output).
"""
-
rc = 0
result = None
# pylint: disable=consider-using-with
@@ -222,14 +214,14 @@ class RemoteExec:
return (rc, result)
def copy(self, source, target, silent=False):
- """ Perform a copy of the source file to the remote target, using the
- cp_command provided when the RemoteExec object was created.
+ """
+ Perform a copy of the source file to the remote target.
- Returns:
+ This function uses the cp_command provided when the RemoteExec object
+ was created.
- The return code of the cp_command
+ Returns the return code of the cp_command.
"""
-
cmd = "%s '%s' '%s'" % (self._cp_command, source, target)
rc = os.system(cmd)
@@ -239,8 +231,7 @@ class RemoteExec:
return rc
def exists_on_all(self, filename, hosts):
- """ Return True if specified file exists on all specified hosts. """
-
+ """Return True if specified file exists on all specified hosts."""
for host in hosts:
rc = self(host, "test -r %s" % filename)
if rc != 0:
@@ -250,7 +241,7 @@ class RemoteExec:
class RemoteFactory:
- """ A class for constructing a singleton instance of a RemoteExec object """
+ """A class for constructing a singleton instance of a RemoteExec object."""
# Class variables
@@ -268,10 +259,11 @@ class RemoteFactory:
# pylint: disable=invalid-name
def getInstance(self):
- """ Returns the previously created instance of RemoteExec, or creates a
- new instance if one does not already exist.
"""
+ Return the previously created instance of RemoteExec.
+ If no instance exists, create one and then return that.
+ """
if not RemoteFactory.instance:
RemoteFactory.instance = RemoteExec(RemoteFactory.command,
RemoteFactory.cp_command,
@@ -279,8 +271,7 @@ class RemoteFactory:
return RemoteFactory.instance
def enable_qarsh(self):
- """ Enable the QA remote shell """
-
+ """Enable the QA remote shell."""
# http://nstraz.wordpress.com/2008/12/03/introducing-qarsh/
print("Using QARSH for connections to cluster nodes")
diff --git a/python/pacemaker/_cts/scenarios.py b/python/pacemaker/_cts/scenarios.py
index 769b2d0..81e9e40 100644
--- a/python/pacemaker/_cts/scenarios.py
+++ b/python/pacemaker/_cts/scenarios.py
@@ -1,4 +1,4 @@
-""" Test scenario classes for Pacemaker's Cluster Test Suite (CTS) """
+"""Test scenario classes for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = [
"AllOnce",
@@ -8,7 +8,7 @@ __all__ = [
"RandomTests",
"Sequence",
]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import re
@@ -19,66 +19,72 @@ from pacemaker._cts.input import should_continue
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.watcher import LogWatcher
+
class ScenarioComponent:
- """ The base class for all scenario components. A scenario component is
- one single step in a scenario. Each component is basically just a setup
- and teardown method.
"""
+ The base class for all scenario components.
- def __init__(self, cm, env):
- """ Create a new ScenarioComponent instance
-
- Arguments:
+ A scenario component is one single step in a scenario. Each component is
+ basically just a setup and teardown method.
+ """
- cm -- A ClusterManager instance
- env -- An Environment instance
+ def __init__(self, cm, env):
"""
+ Create a new ScenarioComponent instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ env -- An Environment instance
+ """
# pylint: disable=invalid-name
self._cm = cm
self._env = env
def is_applicable(self):
- """ Return True if this component is applicable in the given Environment.
- This method must be provided by all subclasses.
"""
+ Return True if this component is applicable in the given Environment.
+ This method must be provided by all subclasses.
+ """
raise NotImplementedError
def setup(self):
- """ Set up the component, returning True on success. This method must be
- provided by all subclasses.
"""
+ Set up the component, returning True on success.
+ This method must be provided by all subclasses.
+ """
raise NotImplementedError
def teardown(self):
- """ Tear down the given component. This method must be provided by all
- subclasses.
"""
+ Tear down the given component.
+ This method must be provided by all subclasses.
+ """
raise NotImplementedError
class Scenario:
- """ The base class for scenario. A scenario is an ordered list of
- ScenarioComponent objects. A scenario proceeds by setting up all its
- components in sequence, running a list of tests and audits, and then
- tearing down its components in reverse.
"""
+ The base class for scenarios.
- def __init__(self, cm, components, audits, tests):
- """ Create a new Scenario instance
-
- Arguments:
+ A scenario is an ordered list of ScenarioComponent objects. A scenario
+ proceeds by setting up all its components in sequence, running a list of
+ tests and audits, and then tearing down its components in reverse.
+ """
- cm -- A ClusterManager instance
- components -- A list of ScenarioComponents comprising this Scenario
- audits -- A list of ClusterAudits that will be performed as
- part of this Scenario
- tests -- A list of CTSTests that will be run
+ def __init__(self, cm, components, audits, tests):
+ """
+ Create a new Scenario instance.
+
+ Arguments:
+ cm -- A ClusterManager instance
+ components -- A list of ScenarioComponents comprising this Scenario
+ audits -- A list of ClusterAudits that will be performed as
+ part of this Scenario
+ tests -- A list of CTSTests that will be run
"""
-
# pylint: disable=invalid-name
self.stats = {
@@ -107,8 +113,7 @@ class Scenario:
raise ValueError("Init value must be a subclass of CTSTest")
def is_applicable(self):
- """ Return True if all ScenarioComponents are applicable """
-
+ """Return True if all ScenarioComponents are applicable."""
for comp in self._components:
if not comp.is_applicable():
return False
@@ -116,12 +121,14 @@ class Scenario:
return True
def setup(self):
- """ Set up the scenario, returning True on success. If setup fails at
- some point, tear down those components that did successfully set up.
"""
+ Set up the scenario, returning True on success.
+ If setup fails at some point, tear down those components that did
+ successfully set up.
+ """
self._cm.prepare()
- self.audit() # Also detects remote/local log config
+ self.audit() # Also detects remote/local log config
self._cm.ns.wait_for_all_nodes(self._cm.env["nodes"])
self.audit()
@@ -130,9 +137,9 @@ class Scenario:
self._bad_news = LogWatcher(self._cm.env["LogFileName"],
self._cm.templates.get_patterns("BadNews"),
self._cm.env["nodes"],
- self._cm.env["LogWatcher"],
+ self._cm.env["log_kind"],
"BadNews", 0)
- self._bad_news.set_watch() # Call after we've figured out what type of log watching to do in LogAudit
+ self._bad_news.set_watch() # Call after we've figured out what type of log watching to do in LogAudit
j = 0
while j < len(self._components):
@@ -149,12 +156,13 @@ class Scenario:
return True
def teardown(self, n_components=None):
- """ Tear down the scenario in the reverse order it was set up. If
- n_components is not None, only tear down that many components.
"""
+ Tear down the scenario in the reverse order it was set up.
+ If n_components is not None, only tear down that many components.
+ """
if not n_components:
- n_components = len(self._components)-1
+ n_components = len(self._components) - 1
j = n_components
@@ -166,37 +174,34 @@ class Scenario:
self._cm.install_support("uninstall")
def incr(self, name):
- """ Increment the given stats key """
-
- if not name in self.stats:
+ """Increment the given stats key."""
+ if name not in self.stats:
self.stats[name] = 0
self.stats[name] += 1
def run(self, iterations):
- """ Run all tests in the scenario the given number of times """
-
+ """Run all tests in the scenario the given number of times."""
self._cm.oprofile_start()
try:
self._run_loop(iterations)
self._cm.oprofile_stop()
- except:
+ except: # noqa: E722
self._cm.oprofile_stop()
raise
def _run_loop(self, iterations):
- """ Do the hard part of the run method - actually run all the tests the
- given number of times.
- """
-
+ """Run all the tests the given number of times."""
raise NotImplementedError
def run_test(self, test, testcount):
- """ Run the given test. testcount is the number of tests (including
- this one) that have been run across all iterations.
"""
+ Run the given test.
+ testcount is the number of tests (including this one) that have been
+ run across all iterations.
+ """
nodechoice = self._cm.env.random_node()
ret = True
@@ -254,8 +259,7 @@ class Scenario:
return did_run
def summarize(self):
- """ Output scenario results """
-
+ """Output scenario results."""
self._cm.log("****************")
self._cm.log("Overall Results:%r" % self.stats)
self._cm.log("****************")
@@ -283,11 +287,12 @@ class Scenario:
self._cm.log("<<<<<<<<<<<<<<<< TESTS COMPLETED")
def audit(self, local_ignore=None):
- """ Perform all scenario audits and log results. If there are too many
- failures, prompt the user to confirm that the scenario should continue
- running.
"""
+ Perform all scenario audits and log results.
+ If there are too many failures, prompt the user to confirm that the
+ scenario should continue running.
+ """
errcount = 0
ignorelist = ["CTS:"]
@@ -340,7 +345,7 @@ class Scenario:
class AllOnce(Scenario):
- """ Every Test Once """
+ """Every Test Once."""
def _run_loop(self, iterations):
testcount = 1
@@ -351,7 +356,7 @@ class AllOnce(Scenario):
class RandomTests(Scenario):
- """ Random Test Execution """
+ """Random Test Execution."""
def _run_loop(self, iterations):
testcount = 1
@@ -363,7 +368,7 @@ class RandomTests(Scenario):
class Sequence(Scenario):
- """ Named Tests in Sequence """
+ """Named Tests in Sequence."""
def _run_loop(self, iterations):
testcount = 1
@@ -375,26 +380,27 @@ class Sequence(Scenario):
class Boot(Scenario):
- """ Start the Cluster """
+ """Start the Cluster."""
def _run_loop(self, iterations):
return
class BootCluster(ScenarioComponent):
- """ The BootCluster component simply starts the cluster manager on all
- nodes, waiting for each to come up before starting given that a node
- might have been rebooted or crashed beforehand.
"""
+ Start the cluster manager on all nodes.
- def is_applicable(self):
- """ BootCluster is always applicable """
+ Wait for each to come up before starting in order to account for the
+ possibility that a given node might have been rebooted or crashed
+ beforehand.
+ """
+ def is_applicable(self):
+ """Return whether this scenario is applicable."""
return True
def setup(self):
- """ Set up the component, returning True on success """
-
+ """Set up the component, returning True on success."""
self._cm.prepare()
# Clear out the cobwebs ;-)
@@ -405,18 +411,14 @@ class BootCluster(ScenarioComponent):
return self._cm.startall(verbose=True, quick=True)
def teardown(self):
- """ Tear down the component """
-
+ """Tear down the component."""
self._cm.log("Stopping Cluster Manager on all nodes")
self._cm.stopall(verbose=True, force=False)
class LeaveBooted(BootCluster):
- """ The LeaveBooted component leaves all nodes up when the scenario
- is complete.
- """
+ """Leave all nodes up when the scenario is complete."""
def teardown(self):
- """ Tear down the component """
-
+ """Tear down the component."""
self._cm.log("Leaving Cluster running on all nodes")
diff --git a/python/pacemaker/_cts/test.py b/python/pacemaker/_cts/test.py
index 577ebb3..d67abf7 100644
--- a/python/pacemaker/_cts/test.py
+++ b/python/pacemaker/_cts/test.py
@@ -1,10 +1,13 @@
-""" A module providing base classes for defining regression tests and groups of
- regression tests. Everything exported here should be considered an abstract
- class that needs to be subclassed in order to do anything useful. Various
- functions will raise NotImplementedError if not overridden by a subclass.
"""
+A module providing base classes.
-__copyright__ = "Copyright 2009-2023 the Pacemaker project contributors"
+These classes are used for defining regression tests and groups of regression
+tests. Everything exported here should be considered an abstract class that
+needs to be subclassed in order to do anything useful. Various functions
+will raise NotImplementedError if not overridden by a subclass.
+"""
+
+__copyright__ = "Copyright 2009-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+)"
__all__ = ["Test", "Tests"]
@@ -23,11 +26,13 @@ from pacemaker._cts.process import pipe_communicate
from pacemaker.buildoptions import BuildOptions
from pacemaker.exitstatus import ExitStatus
+
def find_validator(rng_file):
- """ Return the command line used to validate XML output, or None if the validator
- is not installed.
"""
+ Return the command line used to validate XML output.
+ If no validator is found, return None.
+ """
if os.access("/usr/bin/xmllint", os.X_OK):
if rng_file is None:
return ["xmllint", "-"]
@@ -38,8 +43,7 @@ def find_validator(rng_file):
def rng_directory():
- """ Which directory contains the RNG schema files? """
-
+ """Return the directory containing RNG schema files."""
if "PCMK_schema_directory" in os.environ:
return os.environ["PCMK_schema_directory"]
@@ -50,18 +54,17 @@ def rng_directory():
class Pattern:
- """ A class for checking log files for a given pattern """
+ """A class for checking log files for a given pattern."""
def __init__(self, pat, negative=False, regex=False):
- """ Create a new Pattern instance
-
- Arguments:
-
- pat -- The string to search for
- negative -- If True, pat must not be found in any input
- regex -- If True, pat is a regex and not a substring
"""
+ Create a new Pattern instance.
+ Arguments:
+ pat -- The string to search for
+ negative -- If True, pat must not be found in any input
+ regex -- If True, pat is a regex and not a substring
+ """
self._pat = pat
self.negative = negative
self.regex = regex
@@ -70,8 +73,7 @@ class Pattern:
return self._pat
def match(self, line):
- """ Is this pattern found in the given line? """
-
+ """Return True if this pattern is found in the given line."""
if self.regex:
return re.search(self._pat, line) is not None
@@ -79,33 +81,36 @@ class Pattern:
class Test:
- """ The base class for a single regression test. A single regression test
- may still run multiple commands as part of its execution.
+ """
+ The base class for a single regression test.
+
+ A single regression test may still run multiple commands as part of its
+ execution.
"""
def __init__(self, name, description, **kwargs):
- """ Create a new Test instance. This method must be provided by all
- subclasses, which must call Test.__init__ first.
-
- Arguments:
-
- description -- A user-readable description of the test, helpful in
- identifying what test is running or has failed.
- name -- The name of the test. Command line tools use this
- attribute to allow running only tests with the exact
- name, or tests whose name matches a given pattern.
- This should be unique among all tests.
-
- Keyword arguments:
-
- force_wait --
- logdir -- The base directory under which to create a directory
- to store output and temporary data.
- timeout -- How long to wait for the test to complete.
- verbose -- Whether to print additional information, including
- verbose command output and daemon log files.
"""
-
+ Create a new Test instance.
+
+ This method must be provided by all subclasses, which must call
+ Test.__init__ first.
+
+ Arguments:
+ description -- A user-readable description of the test, helpful in
+ identifying what test is running or has failed.
+ name -- The name of the test. Command line tools use this
+ attribute to allow running only tests with the exact
+ name, or tests whose name matches a given pattern.
+ This should be unique among all tests.
+
+ Keyword arguments:
+ force_wait --
+ logdir -- The base directory under which to create a directory
+ to store output and temporary data.
+ timeout -- How long to wait for the test to complete.
+ verbose -- Whether to print additional information, including
+ verbose command output and daemon log files.
+ """
self.description = description
self.executed = False
self.name = name
@@ -125,15 +130,17 @@ class Test:
self._result_exitcode = ExitStatus.OK
self._result_txt = ""
- ###
- ### PROPERTIES
- ###
+ #
+ # PROPERTIES
+ #
@property
def exitcode(self):
- """ The final exitcode of the Test. If all commands pass, this property
- will be ExitStatus.OK. Otherwise, this property will be the exitcode
- of the first command to fail.
+ """
+ Return the final exitcode of the Test.
+
+ If all commands pass, this property will be ExitStatus.OK. Otherwise,
+ this property will be the exitcode of the first command to fail.
"""
return self._result_exitcode
@@ -143,24 +150,28 @@ class Test:
@property
def logpath(self):
- """ The path to the log for whatever daemon is being tested. Note that
- this requires all subclasses to set self._daemon_location before
- accessing this property or an exception will be raised.
+ """
+ Return the path to the log for whatever daemon is being tested.
+
+ Note that this requires all subclasses to set self._daemon_location
+ before accessing this property or an exception will be raised.
"""
return os.path.join(self.logdir, "%s.log" % self._daemon_location)
- ###
- ### PRIVATE METHODS
- ###
+ #
+ # PRIVATE METHODS
+ #
def _kill_daemons(self):
- """ Kill any running daemons in preparation for executing the test """
+ """Kill any running daemons in preparation for executing the test."""
raise NotImplementedError("_kill_daemons not provided by subclass")
def _match_log_patterns(self):
- """ Check test output for expected patterns, setting self.exitcode and
- self._result_txt as appropriate. Not all subclass will need to do
- this.
+ """
+ Check test output for expected patterns.
+
+ Set self.exitcode and self._result_txt as appropriate. Not all subclasses
+ will need to do this.
"""
if len(self._patterns) == 0:
return
@@ -195,41 +206,38 @@ class Test:
self._result_txt = msg % (self.name, n_failed_matches, len(self._patterns), n_negative_matches)
self.exitcode = ExitStatus.ERROR
-
def _new_cmd(self, cmd, args, exitcode, **kwargs):
- """ Add a command to be executed as part of this test.
-
- Arguments:
-
- cmd -- The program to run.
- args -- Commands line arguments to pass to cmd, as a string.
- exitcode -- The expected exit code of cmd. This can be used to
- run a command that is expected to fail.
-
- Keyword arguments:
-
- stdout_match -- If not None, a string that is expected to be
- present in the stdout of cmd. This can be a
- regular expression.
- no_wait -- Do not wait for cmd to complete.
- stdout_negative_match -- If not None, a string that is expected to be
- missing in the stdout of cmd. This can be a
- regualr expression.
- kill -- A command to be run after cmd, typically in
- order to kill a failed process. This should be
- the entire command line including arguments as
- a single string.
- validate -- If True, the output of cmd will be passed to
- xmllint for validation. If validation fails,
- XmlValidationError will be raised.
- check_rng -- If True and validate is True, command output
- will additionally be checked against the
- api-result.rng file.
- check_stderr -- If True, the stderr of cmd will be included in
- output.
- env -- If not None, variables to set in the environment
"""
-
+ Add a command to be executed as part of this test.
+
+ Arguments:
+ cmd -- The program to run.
+ args -- Command line arguments to pass to cmd, as a string.
+ exitcode -- The expected exit code of cmd. This can be used to
+ run a command that is expected to fail.
+
+ Keyword arguments:
+ stdout_match -- If not None, a string that is expected to be
+ present in the stdout of cmd. This can be a
+ regular expression.
+ no_wait -- Do not wait for cmd to complete.
+ stdout_negative_match -- If not None, a string that is expected to be
+ missing in the stdout of cmd. This can be a
+ regular expression.
+ kill -- A command to be run after cmd, typically in
+ order to kill a failed process. This should be
+ the entire command line including arguments as
+ a single string.
+ validate -- If True, the output of cmd will be passed to
+ xmllint for validation. If validation fails,
+ XmlValidationError will be raised.
+ check_rng -- If True and validate is True, command output
+ will additionally be checked against the
+ api-result.rng file.
+ check_stderr -- If True, the stderr of cmd will be included in
+ output.
+ env -- If not None, variables to set in the environment
+ """
self._cmds.append(
{
"args": args,
@@ -247,60 +255,52 @@ class Test:
)
def _start_daemons(self):
- """ Start any necessary daemons in preparation for executing the test """
+ """Start any necessary daemons in preparation for executing the test."""
raise NotImplementedError("_start_daemons not provided by subclass")
- ###
- ### PUBLIC METHODS
- ###
+ #
+ # PUBLIC METHODS
+ #
def add_cmd(self, cmd, args, validate=True, check_rng=True, check_stderr=True,
env=None):
- """ Add a simple command to be executed as part of this test """
-
+ """Add a simple command to be executed as part of this test."""
self._new_cmd(cmd, args, ExitStatus.OK, validate=validate, check_rng=check_rng,
check_stderr=check_stderr, env=env)
def add_cmd_and_kill(self, cmd, args, kill_proc):
- """ Add a command and system command to be executed as part of this test """
-
+ """Add a command and system command to be executed as part of this test."""
self._new_cmd(cmd, args, ExitStatus.OK, kill=kill_proc)
def add_cmd_check_stdout(self, cmd, args, match, no_match=None, env=None):
- """ Add a simple command with expected output to be executed as part of this test """
-
+ """Add a simple command with expected output to be executed as part of this test."""
self._new_cmd(cmd, args, ExitStatus.OK, stdout_match=match,
stdout_negative_match=no_match, env=env)
def add_cmd_expected_fail(self, cmd, args, exitcode=ExitStatus.ERROR):
- """ Add a command that is expected to fail to be executed as part of this test """
-
+ """Add a command that is expected to fail to be executed as part of this test."""
self._new_cmd(cmd, args, exitcode)
def add_cmd_no_wait(self, cmd, args):
- """ Add a simple command to be executed (without waiting) as part of this test """
-
+ """Add a simple command to be executed (without waiting) as part of this test."""
self._new_cmd(cmd, args, ExitStatus.OK, no_wait=True)
def add_log_pattern(self, pattern, negative=False, regex=False):
- """ Add a pattern that should appear in the test's logs """
-
+ """Add a pattern that should appear in the test's logs."""
self._patterns.append(Pattern(pattern, negative=negative, regex=regex))
def _signal_dict(self):
- """ Return a dictionary mapping signal numbers to their names """
-
+ """Return a dictionary mapping signal numbers to their names."""
# FIXME: When we support python >= 3.5, this function can be replaced with:
# signal.Signals(self.daemon_process.returncode).name
return {
getattr(signal, _signame): _signame
- for _signame in dir(signal)
- if _signame.startswith("SIG") and not _signame.startswith("SIG_")
+ for _signame in dir(signal)
+ if _signame.startswith("SIG") and not _signame.startswith("SIG_")
}
def clean_environment(self):
- """ Clean up the host after executing a test """
-
+ """Clean up the host after executing a test."""
if self._daemon_process:
if self._daemon_process.poll() is None:
self._daemon_process.terminate()
@@ -330,13 +330,11 @@ class Test:
print("Daemon Output End")
def print_result(self, filler):
- """ Print the result of the last test execution """
-
+ """Print the result of the last test execution."""
print("%s%s" % (filler, self._result_txt))
def run(self):
- """ Execute this test """
-
+ """Execute this test."""
i = 1
self.start_environment()
@@ -384,8 +382,7 @@ class Test:
self.executed = True
def run_cmd(self, args):
- """ Execute a command as part of this test """
-
+ """Execute a command as part of this test."""
cmd = shlex.split(args['args'])
cmd.insert(0, args['cmd'])
@@ -406,10 +403,10 @@ class Test:
if self.verbose:
print("Also running: %s" % args['kill'])
- ### Typically, the kill argument is used to detect some sort of
- ### failure. Without yielding for a few seconds here, the process
- ### launched earlier that is listening for the failure may not have
- ### time to connect to pacemaker-execd.
+ # Typically, the kill argument is used to detect some sort of
+ # failure. Without yielding for a few seconds here, the process
+ # launched earlier that is listening for the failure may not have
+ # time to connect to pacemaker-execd.
time.sleep(2)
subprocess.Popen(shlex.split(args['kill']))
@@ -459,15 +456,13 @@ class Test:
return ExitStatus.OK
def set_error(self, step, cmd):
- """ Record failure of this test """
-
+ """Record failure of this test."""
msg = "FAILURE - '%s' failed at step %d. Command: %s %s"
self._result_txt = msg % (self.name, step, cmd['cmd'], cmd['args'])
self.exitcode = ExitStatus.ERROR
def start_environment(self):
- """ Prepare the host for executing a test """
-
+ """Prepare the host for executing a test."""
if os.path.exists(self.logpath):
os.remove(self.logpath)
@@ -500,32 +495,34 @@ class Test:
if not self.force_wait:
print("\tDaemon %s doesn't seem to have been initialized within %fs."
"\n\tConsider specifying a longer '--timeout' value."
- %(self._daemon_location, self.timeout))
+ % (self._daemon_location, self.timeout))
return
if self.verbose and (now - update_time) >= 5:
print("Waiting for %s to be initialized: %fs ..."
- %(self._daemon_location, now - init_time))
+ % (self._daemon_location, now - init_time))
update_time = now
class Tests:
- """ The base class for a collection of regression tests """
+ """The base class for a collection of regression tests."""
def __init__(self, **kwargs):
- """ Create a new Tests instance. This method must be provided by all
- subclasses, which must call Tests.__init__ first.
+ """
+ Create a new Tests instance.
- Keywork arguments:
+ This method must be provided by all subclasses, which must call
+ Tests.__init__ first.
- force_wait --
- logdir -- The base directory under which to create a directory
- to store output and temporary data.
- timeout -- How long to wait for the test to complete.
- verbose -- Whether to print additional information, including
- verbose command output and daemon log files.
- """
+ Keyword arguments:
+ force_wait --
+ logdir -- The base directory under which to create a directory
+ to store output and temporary data.
+ timeout -- How long to wait for the test to complete.
+ verbose -- Whether to print additional information, including
+ verbose command output and daemon log files.
+ """
self.force_wait = kwargs.get("force_wait", False)
self.logdir = kwargs.get("logdir", "/tmp")
self.timeout = kwargs.get("timeout", 2)
@@ -534,8 +531,7 @@ class Tests:
self._tests = []
def exit(self):
- """ Exit (with error status code if any test failed) """
-
+ """Exit (with error status code if any test failed)."""
for test in self._tests:
if not test.executed:
continue
@@ -546,8 +542,7 @@ class Tests:
sys.exit(ExitStatus.OK)
def print_list(self):
- """ List all registered tests """
-
+ """List all registered tests."""
print("\n==== %d TESTS FOUND ====" % len(self._tests))
print("%35s - %s" % ("TEST NAME", "TEST DESCRIPTION"))
print("%35s - %s" % ("--------------------", "--------------------"))
@@ -558,8 +553,7 @@ class Tests:
print("==== END OF LIST ====\n")
def print_results(self):
- """ Print summary of results of executed tests """
-
+ """Print summary of results of executed tests."""
failures = 0
success = 0
@@ -582,22 +576,19 @@ class Tests:
print("\n--- TOTALS\n Pass:%d\n Fail:%d\n" % (success, failures))
def run_single(self, name):
- """ Run a single named test """
-
+ """Run a single named test."""
for test in self._tests:
if test.name == name:
test.run()
break
def run_tests(self):
- """ Run all tests """
-
+ """Run all tests."""
for test in self._tests:
test.run()
def run_tests_matching(self, pattern):
- """ Run all tests whose name matches a pattern """
-
+ """Run all tests whose name matches a pattern."""
for test in self._tests:
if test.name.count(pattern) != 0:
test.run()
diff --git a/python/pacemaker/_cts/tests/__init__.py b/python/pacemaker/_cts/tests/__init__.py
index 63b34aa..27bef12 100644
--- a/python/pacemaker/_cts/tests/__init__.py
+++ b/python/pacemaker/_cts/tests/__init__.py
@@ -1,8 +1,6 @@
-"""
-Test classes for the `pacemaker._cts` package.
-"""
+"""Test classes for the `pacemaker._cts` package."""
-__copyright__ = "Copyright 2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2023-2024 the Pacemaker project contributors"
__license__ = "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)"
from pacemaker._cts.tests.componentfail import ComponentFail
@@ -33,12 +31,14 @@ from pacemaker._cts.tests.stonithdtest import StonithdTest
from pacemaker._cts.tests.stoponebyone import StopOnebyOne
from pacemaker._cts.tests.stoptest import StopTest
+
def test_list(cm, audits):
- """ Return a list of test class objects that are enabled and whose
- is_applicable methods return True. These are the tests that
- should be run.
"""
+ Return a list of runnable test class objects.
+ These are objects that are enabled and whose is_applicable methods return
+ True.
+ """
# cm is a reasonable name here.
# pylint: disable=invalid-name
diff --git a/python/pacemaker/_cts/tests/componentfail.py b/python/pacemaker/_cts/tests/componentfail.py
index f3d3622..0832407 100644
--- a/python/pacemaker/_cts/tests/componentfail.py
+++ b/python/pacemaker/_cts/tests/componentfail.py
@@ -1,7 +1,7 @@
-""" Kill a pacemaker daemon and test how the cluster recovers """
+"""Kill a pacemaker daemon and test how the cluster recovers."""
__all__ = ["ComponentFail"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import re
@@ -22,18 +22,15 @@ from pacemaker._cts.tests.simulstartlite import SimulStartLite
class ComponentFail(CTSTest):
- """ A concrete test that kills a random pacemaker daemon and waits for the
- cluster to recover
- """
+ """Kill a random pacemaker daemon and wait for the cluster to recover."""
def __init__(self, cm):
- """ Create a new ComponentFail instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new ComponentFail instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.is_unsafe = True
@@ -45,8 +42,7 @@ class ComponentFail(CTSTest):
self._startall = SimulStartLite(cm)
def __call__(self, node):
- """ Perform this test """
-
+ """Perform this test."""
self.incr("calls")
self._patterns = []
self._okerrpatterns = []
@@ -159,8 +155,7 @@ class ComponentFail(CTSTest):
@property
def errors_to_ignore(self):
- """ Return list of errors which should be ignored """
-
+ """Return a list of errors which should be ignored."""
# Note that okerrpatterns refers to the last time we ran this test
# The good news is that this works fine for us...
self._okerrpatterns.extend(self._patterns)
diff --git a/python/pacemaker/_cts/tests/ctstest.py b/python/pacemaker/_cts/tests/ctstest.py
index 8669e48..3ed4931 100644
--- a/python/pacemaker/_cts/tests/ctstest.py
+++ b/python/pacemaker/_cts/tests/ctstest.py
@@ -1,7 +1,7 @@
-""" Base classes for CTS tests """
+"""Base classes for CTS tests."""
__all__ = ["CTSTest"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import re
@@ -18,25 +18,23 @@ from pacemaker._cts.watcher import LogWatcher
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
-# pylint doesn't understand that self._rsh is callable.
-# pylint: disable=not-callable
-
class CTSTest:
- """ The base class for all cluster tests. This implements a basic set of
- properties and behaviors like setup, tear down, time keeping, and
- statistics tracking. It is up to specific tests to implement their own
- specialized behavior on top of this class.
"""
+ The base class for all cluster tests.
- def __init__(self, cm):
- """ Create a new CTSTest instance
-
- Arguments:
+ This implements a basic set of properties and behaviors like setup, tear
+ down, time keeping, and statistics tracking. It is up to specific tests
+ to implement their own specialized behavior on top of this class.
+ """
- cm -- A ClusterManager instance
+ def __init__(self, cm):
"""
+ Create a new CTSTest instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
# pylint: disable=invalid-name
self.audits = []
@@ -68,28 +66,22 @@ class CTSTest:
self.passed = True
def log(self, args):
- """ Log a message """
-
+ """Log a message."""
self._logger.log(args)
def debug(self, args):
- """ Log a debug message """
-
+ """Log a debug message."""
self._logger.debug(args)
def get_timer(self, key="test"):
- """ Get the start time of the given timer """
-
+ """Get the start time of the given timer."""
try:
return self._timers[key].start_time
except KeyError:
return 0
def set_timer(self, key="test"):
- """ Set the start time of the given timer to now, and return
- that time
- """
-
+ """Set the start time of the given timer to now, and return that time."""
if key not in self._timers:
self._timers[key] = Timer(self._logger, self.name, key)
@@ -97,8 +89,7 @@ class CTSTest:
return self._timers[key].start_time
def log_timer(self, key="test"):
- """ Log the elapsed time of the given timer """
-
+ """Log the elapsed time of the given timer."""
if key not in self._timers:
return
@@ -107,8 +98,7 @@ class CTSTest:
del self._timers[key]
def incr(self, name):
- """ Increment the given stats key """
-
+ """Increment the given stats key."""
if name not in self.stats:
self.stats[name] = 0
@@ -119,8 +109,7 @@ class CTSTest:
self.passed = True
def failure(self, reason="none"):
- """ Increment the failure count, with an optional failure reason """
-
+ """Increment the failure count, with an optional failure reason."""
self.passed = False
self.incr("failure")
self._logger.log(("Test %s" % self.name).ljust(35) + " FAILED: %s" % reason)
@@ -128,27 +117,21 @@ class CTSTest:
return False
def success(self):
- """ Increment the success count """
-
+ """Increment the success count."""
self.incr("success")
return True
def skipped(self):
- """ Increment the skipped count """
-
+ """Increment the skipped count."""
self.incr("skipped")
return True
def __call__(self, node):
- """ Perform this test """
-
+ """Perform this test."""
raise NotImplementedError
def audit(self):
- """ Perform all the relevant audits (see ClusterAudit), returning
- whether or not they all passed.
- """
-
+ """Perform all the relevant audits (see ClusterAudit), returning whether or not they all passed."""
passed = True
for audit in self.audits:
@@ -160,39 +143,52 @@ class CTSTest:
return passed
def setup(self, node):
- """ Setup this test """
-
+ """Set up this test."""
# node is used in subclasses
# pylint: disable=unused-argument
return self.success()
def teardown(self, node):
- """ Tear down this test """
-
+ """Tear down this test."""
# node is used in subclasses
# pylint: disable=unused-argument
return self.success()
def create_watch(self, patterns, timeout, name=None):
- """ Create a new LogWatcher object with the given patterns, timeout,
- and optional name. This object can be used to search log files
- for matching patterns during this test's run.
+ """
+ Create a new LogWatcher object.
+
+ This object can be used to search log files for matching patterns
+ during this test's run.
+
+ Arguments:
+ patterns -- A list of regular expressions to match against the log
+ timeout -- Default number of seconds to watch a log file at a time;
+ this can be overridden by the timeout= parameter to
+ self.look on an as-needed basis
+ name -- A unique name to use when logging about this watch
"""
if not name:
name = self.name
- return LogWatcher(self._env["LogFileName"], patterns, self._env["nodes"], self._env["LogWatcher"], name, timeout)
+ return LogWatcher(self._env["LogFileName"], patterns,
+ self._env["nodes"], self._env["log_kind"], name,
+ timeout)
def local_badnews(self, prefix, watch, local_ignore=None):
- """ Use the given watch object to search through log files for messages
- starting with the given prefix. If no prefix is given, use
- "LocalBadNews:" by default. The optional local_ignore list should
- be a list of regexes that, if found in a line, will cause that line
- to be ignored.
+ """
+ Search through log files for messages.
- Return the number of matches found.
+ Arguments:
+ prefix -- The string to look for at the beginning of lines,
+ or "LocalBadNews:" if None.
+ watch -- The LogWatcher object to use for searching.
+ local_ignore -- A list of regexes that, if found in a line, will
+ cause that line to be ignored.
+
+ Return the number of matches found.
"""
errcount = 0
if not prefix:
@@ -224,10 +220,11 @@ class CTSTest:
return errcount
def is_applicable(self):
- """ Return True if this test is applicable in the current test configuration.
- This method must be implemented by all subclasses.
"""
+ Return True if this test is applicable in the current test configuration.
+ This method must be implemented by all subclasses.
+ """
if self.is_loop and not self._env["loop-tests"]:
return False
@@ -247,6 +244,5 @@ class CTSTest:
@property
def errors_to_ignore(self):
- """ Return list of errors which should be ignored """
-
+ """Return a list of errors which should be ignored."""
return []
diff --git a/python/pacemaker/_cts/tests/fliptest.py b/python/pacemaker/_cts/tests/fliptest.py
index 5e77936..52692f4 100644
--- a/python/pacemaker/_cts/tests/fliptest.py
+++ b/python/pacemaker/_cts/tests/fliptest.py
@@ -1,7 +1,7 @@
-""" Stop running nodes, and start stopped nodes """
+"""Stop running nodes, and start stopped nodes."""
__all__ = ["FlipTest"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import time
@@ -20,16 +20,15 @@ from pacemaker._cts.tests.stoptest import StopTest
class FlipTest(CTSTest):
- """ A concrete test that stops running nodes and starts stopped nodes """
+ """Stop running nodes and start stopped nodes."""
def __init__(self, cm):
- """ Create a new FlipTest instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new FlipTest instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.name = "Flip"
@@ -37,8 +36,7 @@ class FlipTest(CTSTest):
self._stop = StopTest(cm)
def __call__(self, node):
- """ Perform this test """
-
+ """Perform this test."""
self.incr("calls")
if self._cm.expected_status[node] == "up":
diff --git a/python/pacemaker/_cts/tests/maintenancemode.py b/python/pacemaker/_cts/tests/maintenancemode.py
index 3c57c07..7ec061a 100644
--- a/python/pacemaker/_cts/tests/maintenancemode.py
+++ b/python/pacemaker/_cts/tests/maintenancemode.py
@@ -1,7 +1,7 @@
-""" Toggle nodes in and out of maintenance mode """
+"""Toggle nodes in and out of maintenance mode."""
__all__ = ["MaintenanceMode"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import re
@@ -22,16 +22,15 @@ from pacemaker._cts.timer import Timer
class MaintenanceMode(CTSTest):
- """ A concrete test that toggles nodes in and out of maintenance mode """
+ """Toggle nodes in and out of maintenance mode."""
def __init__(self, cm):
- """ Create a new MaintenanceMode instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new MaintenanceMode instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.benchmark = True
@@ -43,8 +42,7 @@ class MaintenanceMode(CTSTest):
self._startall = SimulStartLite(cm)
def _toggle_maintenance_mode(self, node, enabled):
- """ Toggle maintenance mode on the given node """
-
+ """Toggle maintenance mode on the given node."""
pats = [
self.templates["Pat:DC_IDLE"]
]
@@ -83,8 +81,7 @@ class MaintenanceMode(CTSTest):
return ""
def _insert_maintenance_dummy(self, node):
- """ Create a dummy resource on the given node """
-
+ """Create a dummy resource on the given node."""
pats = [
("%s.*" % node) + (self.templates["Pat:RscOpOK"] % ("start", self._rid))
]
@@ -104,8 +101,7 @@ class MaintenanceMode(CTSTest):
return ""
def _remove_maintenance_dummy(self, node):
- """ Remove the previously created dummy resource on the given node """
-
+ """Remove the previously created dummy resource on the given node."""
pats = [
self.templates["Pat:RscOpOK"] % ("stop", self._rid)
]
@@ -124,8 +120,7 @@ class MaintenanceMode(CTSTest):
return ""
def _managed_rscs(self, node):
- """ Return a list of all resources managed by the cluster """
-
+ """Return a list of all resources managed by the cluster."""
rscs = []
(_, lines) = self._rsh(node, "crm_resource -c", verbose=1)
@@ -139,10 +134,7 @@ class MaintenanceMode(CTSTest):
return rscs
def _verify_resources(self, node, rscs, managed):
- """ Verify that all resources in rscList are managed if they are expected
- to be, or unmanaged if they are expected to be.
- """
-
+ """Verify that all resources are managed or unmanaged as expected."""
managed_rscs = rscs
managed_str = "managed"
@@ -171,8 +163,7 @@ class MaintenanceMode(CTSTest):
return False
def __call__(self, node):
- """ Perform this test """
-
+ """Perform this test."""
self.incr("calls")
verify_managed = False
verify_unmanaged = False
@@ -227,8 +218,7 @@ class MaintenanceMode(CTSTest):
@property
def errors_to_ignore(self):
- """ Return list of errors which should be ignored """
-
+ """Return a list of errors which should be ignored."""
return [
r"Updating failcount for %s" % self._rid,
r"schedulerd.*: Recover\s+%s\s+\(.*\)" % self._rid,
diff --git a/python/pacemaker/_cts/tests/nearquorumpointtest.py b/python/pacemaker/_cts/tests/nearquorumpointtest.py
index c5b70b7..343cc6e 100644
--- a/python/pacemaker/_cts/tests/nearquorumpointtest.py
+++ b/python/pacemaker/_cts/tests/nearquorumpointtest.py
@@ -1,7 +1,7 @@
-""" Randomly start and stop nodes to bring the cluster close to the quorum point """
+"""Randomly start and stop nodes to bring the cluster close to the quorum point."""
__all__ = ["NearQuorumPointTest"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
@@ -18,31 +18,27 @@ from pacemaker._cts.tests.ctstest import CTSTest
class NearQuorumPointTest(CTSTest):
- """ A concrete test that randomly starts and stops nodes to bring the
- cluster close to the quorum point
- """
+ """Randomly start and stop nodes to bring the cluster close to the quorum point."""
def __init__(self, cm):
- """ Create a new NearQuorumPointTest instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new NearQuorumPointTest instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.name = "NearQuorumPoint"
def __call__(self, dummy):
- """ Perform this test """
-
+ """Perform this test."""
self.incr("calls")
startset = []
stopset = []
stonith = self._cm.prepare_fencing_watcher()
- #decide what to do with each node
+ # decide what to do with each node
for node in self._env["nodes"]:
action = self._env.random_gen.choice(["start", "stop"])
@@ -54,7 +50,7 @@ class NearQuorumPointTest(CTSTest):
self.debug("start nodes:%r" % startset)
self.debug("stop nodes:%r" % stopset)
- #add search patterns
+ # add search patterns
watchpats = []
for node in stopset:
if self._cm.expected_status[node] == "up":
@@ -78,7 +74,7 @@ class NearQuorumPointTest(CTSTest):
watch.set_watch()
- #begin actions
+ # begin actions
for node in stopset:
if self._cm.expected_status[node] == "up":
self._cm.stop_cm_async(node)
@@ -87,7 +83,7 @@ class NearQuorumPointTest(CTSTest):
if self._cm.expected_status[node] == "down":
self._cm.start_cm_async(node)
- #get the result
+ # get the result
if watch.look_for_all():
self._cm.cluster_stable()
self._cm.fencing_cleanup("NearQuorumPoint", stonith)
@@ -95,7 +91,7 @@ class NearQuorumPointTest(CTSTest):
self._logger.log("Warn: Patterns not found: %r" % watch.unmatched)
- #get the "bad" nodes
+ # get the "bad" nodes
upnodes = []
for node in stopset:
if self._cm.stat_cm(node):
diff --git a/python/pacemaker/_cts/tests/partialstart.py b/python/pacemaker/_cts/tests/partialstart.py
index 1b074e6..0cee4f3 100644
--- a/python/pacemaker/_cts/tests/partialstart.py
+++ b/python/pacemaker/_cts/tests/partialstart.py
@@ -1,7 +1,7 @@
-""" Start a node and then tell it to stop before it is fully running """
+"""Start a node and then tell it to stop before it is fully running."""
__all__ = ["PartialStart"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
@@ -19,16 +19,15 @@ from pacemaker._cts.tests.stoptest import StopTest
class PartialStart(CTSTest):
- """ A concrete test that interrupts a node before it's finished starting up """
+ """Interrupt a node before it's finished starting up."""
def __init__(self, cm):
- """ Create a new PartialStart instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new PartialStart instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.name = "PartialStart"
@@ -38,8 +37,7 @@ class PartialStart(CTSTest):
self._stopall = SimulStopLite(cm)
def __call__(self, node):
- """ Perform this test """
-
+ """Perform this test."""
self.incr("calls")
ret = self._stopall(None)
@@ -47,7 +45,7 @@ class PartialStart(CTSTest):
return self.failure("Setup failed")
watchpats = [
- "pacemaker-controld.*Connecting to .* cluster infrastructure"
+ "pacemaker-controld.*Connecting to .* cluster layer"
]
watch = self.create_watch(watchpats, self._env["DeadTime"] + 10)
watch.set_watch()
@@ -66,8 +64,7 @@ class PartialStart(CTSTest):
@property
def errors_to_ignore(self):
- """ Return list of errors which should be ignored """
-
+ """Return a list of errors which should be ignored."""
# We might do some fencing in the 2-node case if we make it up far enough
return [
r"Executing reboot fencing operation",
diff --git a/python/pacemaker/_cts/tests/reattach.py b/python/pacemaker/_cts/tests/reattach.py
index 4452bc0..ca3b541 100644
--- a/python/pacemaker/_cts/tests/reattach.py
+++ b/python/pacemaker/_cts/tests/reattach.py
@@ -1,7 +1,7 @@
-""" Restart the cluster and verify resources remain running """
+"""Restart the cluster and verify resources remain running."""
__all__ = ["Reattach"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import re
@@ -24,18 +24,15 @@ from pacemaker._cts.tests.starttest import StartTest
class Reattach(CTSTest):
- """ A concrete test that restarts the cluster and verifies that resources
- remain running throughout
- """
+ """Restart the cluster and verify that resources remain running throughout."""
def __init__(self, cm):
- """ Create a new Reattach instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new Reattach instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.name = "Reattach"
@@ -44,26 +41,24 @@ class Reattach(CTSTest):
self._stopall = SimulStopLite(cm)
def _is_managed(self, node):
- """ Are resources managed by the cluster? """
-
+ """Return whether resources are managed by the cluster."""
(_, is_managed) = self._rsh(node, "crm_attribute -t rsc_defaults -n is-managed -q -G -d true", verbose=1)
is_managed = is_managed[0].strip()
return is_managed == "true"
def _set_unmanaged(self, node):
- """ Disable resource management """
-
+ """Disable resource management."""
self.debug("Disable resource management")
self._rsh(node, "crm_attribute -t rsc_defaults -n is-managed -v false")
def _set_managed(self, node):
- """ Enable resource management """
-
+ """Enable resource management."""
self.debug("Re-enable resource management")
self._rsh(node, "crm_attribute -t rsc_defaults -n is-managed -D")
def _disable_incompatible_rscs(self, node):
- """ Disable resources that are incompatible with this test
+ """
+ Disable resources that are incompatible with this test.
Starts and stops of stonith-class resources are implemented internally
by Pacemaker, which means that they must stop when Pacemaker is
@@ -74,7 +69,6 @@ class Reattach(CTSTest):
Set target-role to "Stopped" for any of these resources in the CIB.
"""
-
self.debug("Disable incompatible (stonith/OCFS2) resources")
xml = """'<meta_attributes id="cts-lab-Reattach-meta">
<nvpair id="cts-lab-Reattach-target-role" name="target-role" value="Stopped"/>
@@ -86,26 +80,24 @@ class Reattach(CTSTest):
return self._rsh(node, self._cm.templates['CibAddXml'] % xml)
def _enable_incompatible_rscs(self, node):
- """ Re-enable resources that were incompatible with this test """
-
+ """Re-enable resources that were incompatible with this test."""
self.debug("Re-enable incompatible (stonith/OCFS2) resources")
xml = """<meta_attributes id="cts-lab-Reattach-meta">"""
return self._rsh(node, """cibadmin --delete --xml-text '%s'""" % xml)
def _reprobe(self, node):
- """ Reprobe all resources
+ """
+ Reprobe all resources.
The placement of some resources (such as promotable-1 in the
lab-generated CIB) is affected by constraints using node-attribute-based
rules. An earlier test may have erased the relevant node attribute, so
do a reprobe, which should add the attribute back.
"""
-
return self._rsh(node, """crm_resource --refresh""")
def setup(self, node):
- """ Setup this test """
-
+ """Set up this test."""
if not self._startall(None):
return self.failure("Startall failed")
@@ -123,8 +115,7 @@ class Reattach(CTSTest):
return self.success()
def teardown(self, node):
- """ Tear down this test """
-
+ """Tear down this test."""
# Make sure 'node' is up
start = StartTest(self._cm)
start(node)
@@ -144,8 +135,7 @@ class Reattach(CTSTest):
return self.success()
def __call__(self, node):
- """ Perform this test """
-
+ """Perform this test."""
self.incr("calls")
# Conveniently, the scheduler will display this message when disabling
@@ -178,7 +168,7 @@ class Reattach(CTSTest):
self.debug("Bringing the cluster back up")
ret = self._startall(None)
- time.sleep(5) # allow ping to update the CIB
+ time.sleep(5) # allow ping to update the CIB
if not ret:
self._set_managed(node)
return self.failure("Couldn't restart the cluster")
@@ -214,8 +204,7 @@ class Reattach(CTSTest):
@property
def errors_to_ignore(self):
- """ Return list of errors which should be ignored """
-
+ """Return a list of errors which should be ignored."""
return [
r"resource( was|s were) active at shutdown"
]
diff --git a/python/pacemaker/_cts/tests/remotebasic.py b/python/pacemaker/_cts/tests/remotebasic.py
index 2f25aaf..cfe9661 100644
--- a/python/pacemaker/_cts/tests/remotebasic.py
+++ b/python/pacemaker/_cts/tests/remotebasic.py
@@ -1,30 +1,28 @@
-""" Start and stop a remote node """
+"""Start and stop a remote node."""
__all__ = ["RemoteBasic"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.remotedriver import RemoteDriver
class RemoteBasic(RemoteDriver):
- """ A concrete test that starts and stops a remote node """
+ """Start and stop a remote node."""
def __init__(self, cm):
- """ Create a new RemoteBasic instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new RemoteBasic instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
RemoteDriver.__init__(self, cm)
self.name = "RemoteBasic"
def __call__(self, node):
- """ Perform this test """
-
+ """Perform this test."""
if not self.start_new_test(node):
return self.failure(self.fail_string)
diff --git a/python/pacemaker/_cts/tests/remotedriver.py b/python/pacemaker/_cts/tests/remotedriver.py
index c5b0292..c24fe7f 100644
--- a/python/pacemaker/_cts/tests/remotedriver.py
+++ b/python/pacemaker/_cts/tests/remotedriver.py
@@ -1,7 +1,7 @@
-""" Base classes for CTS tests """
+"""Base classes for CTS tests."""
__all__ = ["RemoteDriver"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import os
@@ -25,21 +25,22 @@ from pacemaker._cts.timer import Timer
class RemoteDriver(CTSTest):
- """ A specialized base class for cluster tests that run on Pacemaker
- Remote nodes. This builds on top of CTSTest to provide methods
- for starting and stopping services and resources, and managing
- remote nodes. This is still just an abstract class -- specific
- tests need to implement their own specialized behavior.
"""
+ A specialized base class for cluster tests that run on Pacemaker Remote nodes.
- def __init__(self, cm):
- """ Create a new RemoteDriver instance
-
- Arguments:
+ This builds on top of CTSTest to provide methods for starting and stopping
+ services and resources, and managing remote nodes. This is still just an
+ abstract class -- specific tests need to implement their own specialized
+ behavior.
+ """
- cm -- A ClusterManager instance
+ def __init__(self, cm):
"""
+ Create a new RemoteDriver instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.name = "RemoteDriver"
@@ -54,10 +55,7 @@ class RemoteDriver(CTSTest):
self.reset()
def reset(self):
- """ Reset the state of this test back to what it was before the test
- was run
- """
-
+ """Reset the state of this test back to what it was before the test was run."""
self.failed = False
self.fail_string = ""
@@ -67,8 +65,7 @@ class RemoteDriver(CTSTest):
self._remote_use_reconnect_interval = self._env.random_gen.choice([True, False])
def fail(self, msg):
- """ Mark test as failed """
-
+ """Mark test as failed."""
self.failed = True
# Always log the failure.
@@ -79,11 +76,12 @@ class RemoteDriver(CTSTest):
self.fail_string = msg
def _get_other_node(self, node):
- """ Get the first cluster node out of the environment that is not the
- given node. Typically, this is used to find some node that will
- still be active that we can run cluster commands on.
"""
+ Get the first cluster node out of the environment that is not the given node.
+ Typically, this is used to find some node that will still be active that
+ we can run cluster commands on.
+ """
for othernode in self._env["nodes"]:
if othernode == node:
# we don't want to try and use the cib that we just shutdown.
@@ -93,58 +91,60 @@ class RemoteDriver(CTSTest):
return othernode
def _del_rsc(self, node, rsc):
- """ Delete the given named resource from the cluster. The given `node`
- is the cluster node on which we should *not* run the delete command.
"""
+ Delete the given named resource from the cluster.
+ The given `node` is the cluster node on which we should *not* run the
+ delete command.
+ """
othernode = self._get_other_node(node)
(rc, _) = self._rsh(othernode, "crm_resource -D -r %s -t primitive" % rsc)
if rc != 0:
self.fail("Removal of resource '%s' failed" % rsc)
def _add_rsc(self, node, rsc_xml):
- """ Add a resource given in XML format to the cluster. The given `node`
- is the cluster node on which we should *not* run the add command.
"""
+ Add a resource given in XML format to the cluster.
+ The given `node` is the cluster node on which we should *not* run the
+ add command.
+ """
othernode = self._get_other_node(node)
(rc, _) = self._rsh(othernode, "cibadmin -C -o resources -X '%s'" % rsc_xml)
if rc != 0:
self.fail("resource creation failed")
def _add_primitive_rsc(self, node):
- """ Add a primitive heartbeat resource for the remote node to the
- cluster. The given `node` is the cluster node on which we should
- *not* run the add command.
"""
+ Add a primitive heartbeat resource for the remote node to the cluster.
+ The given `node` is the cluster node on which we should *not* run the
+ add command.
+ """
rsc_xml = """
<primitive class="ocf" id="%(node)s" provider="heartbeat" type="Dummy">
<meta_attributes id="%(node)s-meta_attributes"/>
<operations>
<op id="%(node)s-monitor-interval-20s" interval="20s" name="monitor"/>
</operations>
-</primitive>""" % {
- "node": self._remote_rsc
-}
+</primitive>""" % {"node": self._remote_rsc}
self._add_rsc(node, rsc_xml)
if not self.failed:
self._remote_rsc_added = True
def _add_connection_rsc(self, node):
- """ Add a primitive connection resource for the remote node to the
- cluster. The given `node` is teh cluster node on which we should
- *not* run the add command.
"""
+ Add a primitive connection resource for the remote node to the cluster.
+ The given `node` is the cluster node on which we should *not* run the
+ add command.
+ """
rsc_xml = """
<primitive class="ocf" id="%(node)s" provider="pacemaker" type="remote">
<instance_attributes id="%(node)s-instance_attributes">
<nvpair id="%(node)s-instance_attributes-server" name="server" value="%(server)s"/>
-""" % {
- "node": self._remote_node, "server": node
-}
+""" % {"node": self._remote_node, "server": node}
if self._remote_use_reconnect_interval:
# Set reconnect interval on resource
@@ -159,17 +159,14 @@ class RemoteDriver(CTSTest):
<op id="%(node)s-monitor-20s" name="monitor" interval="20s" timeout="45s"/>
</operations>
</primitive>
-""" % {
- "node": self._remote_node
-}
+""" % {"node": self._remote_node}
self._add_rsc(node, rsc_xml)
if not self.failed:
self._remote_node_added = True
def _disable_services(self, node):
- """ Disable the corosync and pacemaker services on the given node """
-
+ """Disable the corosync and pacemaker services on the given node."""
self._corosync_enabled = self._env.service_is_enabled(node, "corosync")
if self._corosync_enabled:
self._env.disable_service(node, "corosync")
@@ -179,8 +176,7 @@ class RemoteDriver(CTSTest):
self._env.disable_service(node, "pacemaker")
def _enable_services(self, node):
- """ Enable the corosync and pacemaker services on the given node """
-
+ """Enable the corosync and pacemaker services on the given node."""
if self._corosync_enabled:
self._env.enable_service(node, "corosync")
@@ -188,8 +184,7 @@ class RemoteDriver(CTSTest):
self._env.enable_service(node, "pacemaker")
def _stop_pcmk_remote(self, node):
- """ Stop the Pacemaker Remote service on the given node """
-
+ """Stop the Pacemaker Remote service on the given node."""
for _ in range(10):
(rc, _) = self._rsh(node, "service pacemaker_remote stop")
if rc != 0:
@@ -198,8 +193,7 @@ class RemoteDriver(CTSTest):
break
def _start_pcmk_remote(self, node):
- """ Start the Pacemaker Remote service on the given node """
-
+ """Start the Pacemaker Remote service on the given node."""
for _ in range(10):
(rc, _) = self._rsh(node, "service pacemaker_remote start")
if rc != 0:
@@ -209,21 +203,20 @@ class RemoteDriver(CTSTest):
break
def _freeze_pcmk_remote(self, node):
- """ Simulate a Pacemaker Remote daemon failure """
-
+ """Simulate a Pacemaker Remote daemon failure."""
self._rsh(node, "killall -STOP pacemaker-remoted")
def _resume_pcmk_remote(self, node):
- """ Simulate the Pacemaker Remote daemon recovering """
-
+ """Simulate the Pacemaker Remote daemon recovering."""
self._rsh(node, "killall -CONT pacemaker-remoted")
def _start_metal(self, node):
- """ Setup a Pacemaker Remote configuration. Remove any existing
- connection resources or nodes. Start the pacemaker_remote service.
- Create a connection resource.
"""
+ Set up a Pacemaker Remote configuration.
+ Remove any existing connection resources or nodes. Start the
+ pacemaker_remote service. Create a connection resource.
+ """
# Cluster nodes are reused as remote nodes in remote tests. If cluster
# services were enabled at boot, in case the remote node got fenced, the
# cluster node would join instead of the expected remote one. Meanwhile
@@ -266,10 +259,7 @@ class RemoteDriver(CTSTest):
self.fail("Unmatched patterns: %s" % watch.unmatched)
def migrate_connection(self, node):
- """ Move the remote connection resource from the node it's currently
- running on to any other available node
- """
-
+ """Move the remote connection resource to any other available node."""
if self.failed:
return
@@ -294,10 +284,11 @@ class RemoteDriver(CTSTest):
self.fail("Unmatched patterns: %s" % watch.unmatched)
def fail_rsc(self, node):
- """ Cause the dummy resource running on a Pacemaker Remote node to fail
- and verify that the failure is logged correctly
"""
+ Cause the dummy resource running on a Pacemaker Remote node to fail.
+ Verify that the failure is logged correctly.
+ """
if self.failed:
return
@@ -321,11 +312,12 @@ class RemoteDriver(CTSTest):
self.fail("Unmatched patterns during rsc fail: %s" % watch.unmatched)
def fail_connection(self, node):
- """ Cause the remote connection resource to fail and verify that the
- node is fenced and the connection resource is restarted on another
- node.
"""
+ Cause the remote connection resource to fail.
+ Verify that the node is fenced and the connection resource is restarted
+ on another node.
+ """
if self.failed:
return
@@ -378,8 +370,7 @@ class RemoteDriver(CTSTest):
self.fail("Unmatched patterns: %s" % watch.unmatched)
def _add_dummy_rsc(self, node):
- """ Add a dummy resource that runs on the Pacemaker Remote node """
-
+ """Add a dummy resource that runs on the Pacemaker Remote node."""
if self.failed:
return
@@ -409,8 +400,7 @@ class RemoteDriver(CTSTest):
self.fail("Unmatched patterns: %s" % watch.unmatched)
def test_attributes(self, node):
- """ Verify that attributes can be set on the Pacemaker Remote node """
-
+ """Verify that attributes can be set on the Pacemaker Remote node."""
if self.failed:
return
@@ -431,11 +421,12 @@ class RemoteDriver(CTSTest):
self.fail("Failed to delete remote-node attribute")
def cleanup_metal(self, node):
- """ Clean up the Pacemaker Remote node configuration previously created by
- _setup_metal. Stop and remove dummy resources and connection resources.
- Stop the pacemaker_remote service. Remove the remote node itself.
"""
+ Clean up the Pacemaker Remote node configuration previously created by _setup_metal.
+ Stop and remove dummy resources and connection resources. Stop the
+ pacemaker_remote service. Remove the remote node itself.
+ """
self._enable_services(node)
if not self._pcmk_started:
@@ -483,10 +474,11 @@ class RemoteDriver(CTSTest):
self._rsh(self._get_other_node(node), "crm_node --force --remove %s" % self._remote_node)
def _setup_env(self, node):
- """ Setup the environment to allow Pacemaker Remote to function. This
- involves generating a key and copying it to all nodes in the cluster.
"""
+ Set up the environment to allow Pacemaker Remote to function.
+ This involves generating a key and copying it to all nodes in the cluster.
+ """
self._remote_node = "remote-%s" % node
# we are assuming if all nodes have a key, that it is
@@ -511,8 +503,7 @@ class RemoteDriver(CTSTest):
os.unlink(keyfile)
def is_applicable(self):
- """ Return True if this test is applicable in the current test configuration. """
-
+ """Return True if this test is applicable in the current test configuration."""
if not CTSTest.is_applicable(self):
return False
@@ -524,10 +515,7 @@ class RemoteDriver(CTSTest):
return True
def start_new_test(self, node):
- """ Prepare a remote test for running by setting up its environment
- and resources
- """
-
+ """Prepare a remote test for running by setting up its environment and resources."""
self.incr("calls")
self.reset()
@@ -541,14 +529,12 @@ class RemoteDriver(CTSTest):
return True
def __call__(self, node):
- """ Perform this test """
-
+ """Perform this test."""
raise NotImplementedError
@property
def errors_to_ignore(self):
- """ Return list of errors which should be ignored """
-
+ """Return list of errors which should be ignored."""
return [
r"""is running on remote.*which isn't allowed""",
r"""Connection terminated""",
diff --git a/python/pacemaker/_cts/tests/remotemigrate.py b/python/pacemaker/_cts/tests/remotemigrate.py
index e22e98f..e65dc70 100644
--- a/python/pacemaker/_cts/tests/remotemigrate.py
+++ b/python/pacemaker/_cts/tests/remotemigrate.py
@@ -1,7 +1,7 @@
-""" Move a connection resource from one node to another """
+"""Move a connection resource from one node to another."""
__all__ = ["RemoteMigrate"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.remotedriver import RemoteDriver
@@ -16,23 +16,21 @@ from pacemaker._cts.tests.remotedriver import RemoteDriver
class RemoteMigrate(RemoteDriver):
- """ A concrete test that moves a connection resource from one node to another """
+ """Move a connection resource from one node to another."""
def __init__(self, cm):
- """ Create a new RemoteMigrate instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new RemoteMigrate instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
RemoteDriver.__init__(self, cm)
self.name = "RemoteMigrate"
def __call__(self, node):
- """ Perform this test """
-
+ """Perform this test."""
# This code is very similar to __call__ in remotestonithd.py, but I don't think
# it's worth turning into a library function nor making one a subclass of the
# other. I think that's more confusing than leaving the duplication.
@@ -52,8 +50,7 @@ class RemoteMigrate(RemoteDriver):
return self.success()
def is_applicable(self):
- """ Return True if this test is applicable in the current test configuration. """
-
+ """Return True if this test is applicable in the current test configuration."""
if not RemoteDriver.is_applicable(self):
return False
diff --git a/python/pacemaker/_cts/tests/remoterscfailure.py b/python/pacemaker/_cts/tests/remoterscfailure.py
index 6f221de..46e6b58 100644
--- a/python/pacemaker/_cts/tests/remoterscfailure.py
+++ b/python/pacemaker/_cts/tests/remoterscfailure.py
@@ -1,7 +1,7 @@
-""" Cause the Pacemaker Remote connection resource to fail """
+"""Cause the Pacemaker Remote connection resource to fail."""
__all__ = ["RemoteRscFailure"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.remotedriver import RemoteDriver
@@ -16,24 +16,20 @@ from pacemaker._cts.tests.remotedriver import RemoteDriver
class RemoteRscFailure(RemoteDriver):
- """ A concrete test that causes the Pacemaker Remote connection resource
- to fail
- """
+ """Cause the Pacemaker Remote connection resource to fail."""
def __init__(self, cm):
- """ Create a new RemoteRscFailure instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new RemoteRscFailure instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
RemoteDriver.__init__(self, cm)
self.name = "RemoteRscFailure"
def __call__(self, node):
- """ Perform this test """
-
+ """Perform this test."""
if not self.start_new_test(node):
return self.failure(self.fail_string)
@@ -54,16 +50,14 @@ class RemoteRscFailure(RemoteDriver):
@property
def errors_to_ignore(self):
- """ Return list of errors which should be ignored """
-
+ """Return list of errors which should be ignored."""
return [
r"schedulerd.*: Recover\s+remote-rsc\s+\(.*\)",
r"Dummy.*: No process state file found"
] + super().errors_to_ignore
def is_applicable(self):
- """ Return True if this test is applicable in the current test configuration. """
-
+ """Return True if this test is applicable in the current test configuration."""
if not RemoteDriver.is_applicable(self):
return False
diff --git a/python/pacemaker/_cts/tests/remotestonithd.py b/python/pacemaker/_cts/tests/remotestonithd.py
index f684992..bb55318 100644
--- a/python/pacemaker/_cts/tests/remotestonithd.py
+++ b/python/pacemaker/_cts/tests/remotestonithd.py
@@ -1,32 +1,28 @@
-""" Fail the connection resource and fence the remote node """
+"""Fail the connection resource and fence the remote node."""
__all__ = ["RemoteStonithd"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.remotedriver import RemoteDriver
class RemoteStonithd(RemoteDriver):
- """ A concrete test that fails the connection resource and fences the
- remote node
- """
+ """Fail the connection resource and fence the remote node."""
def __init__(self, cm):
- """ Create a new RemoteStonithd instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new RemoteStonithd instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
RemoteDriver.__init__(self, cm)
self.name = "RemoteStonithd"
def __call__(self, node):
- """ Perform this test """
-
+ """Perform this test."""
if not self.start_new_test(node):
return self.failure(self.fail_string)
@@ -41,8 +37,7 @@ class RemoteStonithd(RemoteDriver):
return self.success()
def is_applicable(self):
- """ Return True if this test is applicable in the current test configuration. """
-
+ """Return True if this test is applicable in the current test configuration."""
if not RemoteDriver.is_applicable(self):
return False
@@ -50,8 +45,7 @@ class RemoteStonithd(RemoteDriver):
@property
def errors_to_ignore(self):
- """ Return list of errors which should be ignored """
-
+ """Return list of errors which should be ignored."""
return [
r"Lost connection to Pacemaker Remote node",
r"Software caused connection abort",
diff --git a/python/pacemaker/_cts/tests/resourcerecover.py b/python/pacemaker/_cts/tests/resourcerecover.py
index 252eb1f..e4c1336 100644
--- a/python/pacemaker/_cts/tests/resourcerecover.py
+++ b/python/pacemaker/_cts/tests/resourcerecover.py
@@ -1,6 +1,6 @@
-""" Fail a random resource and verify its fail count increases """
+"""Fail a random resource and verify its fail count increases."""
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.audits import AuditResource
@@ -19,16 +19,15 @@ from pacemaker._cts.timer import Timer
class ResourceRecover(CTSTest):
- """ A concrete test that fails a random resource """
+ """Fail a random resource."""
def __init__(self, cm):
- """ Create a new ResourceRecover instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new ResourceRecover instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.benchmark = True
@@ -42,8 +41,7 @@ class ResourceRecover(CTSTest):
self._startall = SimulStartLite(cm)
def __call__(self, node):
- """ Perform this test """
-
+ """Perform this test."""
self.incr("calls")
if not self._startall(None):
@@ -90,8 +88,7 @@ class ResourceRecover(CTSTest):
return self.success()
def _choose_resource(self, node, resourcelist):
- """ Choose a random resource to target """
-
+ """Choose a random resource to target."""
self._rid = self._env.random_gen.choice(resourcelist)
self._rid_alt = self._rid
(_, lines) = self._rsh(node, "crm_resource -c", verbose=1)
@@ -108,14 +105,13 @@ class ResourceRecover(CTSTest):
return None
def _get_failcount(self, node):
- """ Check the fail count of targeted resource on given node """
-
+ """Check the fail count of targeted resource on given node."""
cmd = "crm_failcount --quiet --query --resource %s --operation %s --interval %d --node %s"
(rc, lines) = self._rsh(node, cmd % (self._rid, self._action, self._interval, node),
verbose=1)
if rc != 0 or len(lines) != 1:
- lines = [l.strip() for l in lines]
+ lines = [line.strip() for line in lines]
self._logger.log("crm_failcount on %s failed (%d): %s" % (node, rc, " // ".join(lines)))
return -1
@@ -128,8 +124,7 @@ class ResourceRecover(CTSTest):
return failcount
def _fail_resource(self, rsc, node, pats):
- """ Fail the targeted resource, and verify as expected """
-
+ """Fail the targeted resource, and verify as expected."""
orig_failcount = self._get_failcount(node)
watch = self.create_watch(pats, 60)
@@ -160,12 +155,12 @@ class ResourceRecover(CTSTest):
return self.failure("%s fail count is %d not %d"
% (self._rid, new_failcount, orig_failcount + 1))
- return 0 # Anything but None is success
+ # Anything but None is success
+ return 0
@property
def errors_to_ignore(self):
- """ Return list of errors which should be ignored """
-
+ """Return a list of errors which should be ignored."""
return [
r"Updating failcount for %s" % self._rid,
r"schedulerd.*: Recover\s+(%s|%s)\s+\(.*\)" % (self._rid, self._rid_alt),
diff --git a/python/pacemaker/_cts/tests/restartonebyone.py b/python/pacemaker/_cts/tests/restartonebyone.py
index 23b3a68..953a4f0 100644
--- a/python/pacemaker/_cts/tests/restartonebyone.py
+++ b/python/pacemaker/_cts/tests/restartonebyone.py
@@ -1,7 +1,7 @@
-""" Restart all nodes in order """
+"""Restart all nodes in order."""
__all__ = ["RestartOnebyOne"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
@@ -18,16 +18,15 @@ from pacemaker._cts.tests.simulstartlite import SimulStartLite
class RestartOnebyOne(CTSTest):
- """ A concrete test that restarts all nodes in order """
+ """Restart all nodes in order."""
def __init__(self, cm):
- """ Create a new RestartOnebyOne instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new RestartOnebyOne instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.name = "RestartOnebyOne"
@@ -36,8 +35,7 @@ class RestartOnebyOne(CTSTest):
self._startall = SimulStartLite(cm)
def __call__(self, dummy):
- """ Perform the test """
-
+ """Perform the test."""
self.incr("calls")
ret = self._startall(None)
diff --git a/python/pacemaker/_cts/tests/restarttest.py b/python/pacemaker/_cts/tests/restarttest.py
index 3b628ce..be8c2ac 100644
--- a/python/pacemaker/_cts/tests/restarttest.py
+++ b/python/pacemaker/_cts/tests/restarttest.py
@@ -1,7 +1,7 @@
-""" Stop and restart a node """
+"""Stop and restart a node."""
__all__ = ["RestartTest"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
@@ -10,16 +10,15 @@ from pacemaker._cts.tests.stoptest import StopTest
class RestartTest(CTSTest):
- """ A concrete test that stops and restarts a node """
+ """Stop and restart a node."""
def __init__(self, cm):
- """ Create a new RestartTest instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new RestartTest instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.benchmark = True
self.name = "Restart"
@@ -28,8 +27,7 @@ class RestartTest(CTSTest):
self._stop = StopTest(cm)
def __call__(self, node):
- """ Perform this test """
-
+ """Perform this test."""
self.incr("calls")
self.incr("node:%s" % node)
diff --git a/python/pacemaker/_cts/tests/resynccib.py b/python/pacemaker/_cts/tests/resynccib.py
index fe634d6..e9fa0e0 100644
--- a/python/pacemaker/_cts/tests/resynccib.py
+++ b/python/pacemaker/_cts/tests/resynccib.py
@@ -1,7 +1,7 @@
-""" Start the cluster without a CIB and verify it gets copied from another node """
+"""Start the cluster without a CIB and verify it gets copied from another node."""
__all__ = ["ResyncCIB"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker import BuildOptions
@@ -20,18 +20,15 @@ from pacemaker._cts.tests.simulstoplite import SimulStopLite
class ResyncCIB(CTSTest):
- """ A concrete test that starts the cluster on one node without a CIB and
- verifies the CIB is copied over when the remaining nodes join
- """
+ """Start the cluster on a node without a CIB and verify the CIB is copied over later."""
def __init__(self, cm):
- """ Create a new ResyncCIB instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new ResyncCIB instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.name = "ResyncCIB"
@@ -41,8 +38,7 @@ class ResyncCIB(CTSTest):
self._stopall = SimulStopLite(cm)
def __call__(self, node):
- """ Perform this test """
-
+ """Perform this test."""
self.incr("calls")
# Shut down all the nodes...
@@ -64,8 +60,7 @@ class ResyncCIB(CTSTest):
@property
def errors_to_ignore(self):
- """ Return list of errors which should be ignored """
-
+ """Return a list of errors which should be ignored."""
# Errors that occur as a result of the CIB being wiped
return [
r"error.*: v1 patchset error, patch failed to apply: Application of an update diff failed",
diff --git a/python/pacemaker/_cts/tests/simulstart.py b/python/pacemaker/_cts/tests/simulstart.py
index 88a7f2f..7dcf526 100644
--- a/python/pacemaker/_cts/tests/simulstart.py
+++ b/python/pacemaker/_cts/tests/simulstart.py
@@ -1,7 +1,7 @@
-""" Start all stopped nodes simultaneously """
+"""Start all stopped nodes simultaneously."""
__all__ = ["SimulStart"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
@@ -10,16 +10,15 @@ from pacemaker._cts.tests.simulstoplite import SimulStopLite
class SimulStart(CTSTest):
- """ A concrete test that starts all stopped nodes simultaneously """
+ """Start all stopped nodes simultaneously."""
def __init__(self, cm):
- """ Create a new SimulStart instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new SimulStart instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.name = "SimulStart"
@@ -28,8 +27,7 @@ class SimulStart(CTSTest):
self._stopall = SimulStopLite(cm)
def __call__(self, dummy):
- """ Perform this test """
-
+ """Perform this test."""
self.incr("calls")
ret = self._stopall(None)
diff --git a/python/pacemaker/_cts/tests/simulstartlite.py b/python/pacemaker/_cts/tests/simulstartlite.py
index c5c51e1..666b3a1 100644
--- a/python/pacemaker/_cts/tests/simulstartlite.py
+++ b/python/pacemaker/_cts/tests/simulstartlite.py
@@ -1,7 +1,7 @@
-""" Simultaneously start stopped nodes """
+"""Simultaneously start stopped nodes."""
__all__ = ["SimulStartLite"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
@@ -16,29 +16,25 @@ from pacemaker._cts.tests.ctstest import CTSTest
class SimulStartLite(CTSTest):
- """ A pseudo-test that is only used to set up conditions before running
- some other test. This class starts any stopped nodes more or less
- simultaneously.
+ """
+ A pseudo-test that sets up conditions before running some other test.
- Other test classes should not use this one as a superclass.
+ This class starts any stopped nodes more or less simultaneously. Other test
+ classes should not use this one as a superclass.
"""
def __init__(self, cm):
- """ Create a new SimulStartLite instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new SimulStartLite instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.name = "SimulStartLite"
def __call__(self, dummy):
- """ Start all stopped nodes more or less simultaneously, returning
- whether this succeeded or not.
- """
-
+ """Return whether starting all stopped nodes more or less simultaneously succeeds."""
self.incr("calls")
self.debug("Setup: %s" % self.name)
@@ -65,7 +61,7 @@ class SimulStartLite(CTSTest):
self.templates["Pat:PacemakerUp"] % node])
# Start all the nodes - at about the same time...
- watch = self.create_watch(watchpats, self._env["DeadTime"]+10)
+ watch = self.create_watch(watchpats, self._env["DeadTime"] + 10)
watch.set_watch()
stonith = self._cm.prepare_fencing_watcher()
@@ -128,6 +124,5 @@ class SimulStartLite(CTSTest):
return self.success()
def is_applicable(self):
- """ SimulStartLite is a setup test and never applicable """
-
+ """Return True if this test is applicable in the current test configuration."""
return False
diff --git a/python/pacemaker/_cts/tests/simulstop.py b/python/pacemaker/_cts/tests/simulstop.py
index 174c533..2ce85e3 100644
--- a/python/pacemaker/_cts/tests/simulstop.py
+++ b/python/pacemaker/_cts/tests/simulstop.py
@@ -1,7 +1,7 @@
-""" Stop all running nodes simultaneously """
+"""Stop all running nodes simultaneously."""
__all__ = ["SimulStop"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
@@ -10,16 +10,15 @@ from pacemaker._cts.tests.simulstoplite import SimulStopLite
class SimulStop(CTSTest):
- """ A concrete test that stops all running nodes simultaneously """
+ """Stop all running nodes simultaneously."""
def __init__(self, cm):
- """ Create a new SimulStop instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new SimulStop instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.name = "SimulStop"
@@ -28,8 +27,7 @@ class SimulStop(CTSTest):
self._stopall = SimulStopLite(cm)
def __call__(self, dummy):
- """ Perform this test """
-
+ """Perform this test."""
self.incr("calls")
ret = self._startall(None)
diff --git a/python/pacemaker/_cts/tests/simulstoplite.py b/python/pacemaker/_cts/tests/simulstoplite.py
index d2e687e..69f1b9c 100644
--- a/python/pacemaker/_cts/tests/simulstoplite.py
+++ b/python/pacemaker/_cts/tests/simulstoplite.py
@@ -1,7 +1,7 @@
-""" Simultaneously stop running nodes """
+"""Simultaneously stop running nodes."""
__all__ = ["SimulStopLite"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
@@ -18,30 +18,26 @@ from pacemaker._cts.tests.ctstest import CTSTest
class SimulStopLite(CTSTest):
- """ A pseudo-test that is only used to set up conditions before running
- some other test. This class stops any running nodes more or less
- simultaneously. It can be used both to set up a test or to clean up
- a test.
+ """
+ A pseudo-test that sets up conditions before running some other test.
- Other test classes should not use this one as a superclass.
+ This class stops any running nodes more or less simultaneously. It can be
+ used both to set up a test or to clean up a test. Other test classes
+ should not use this one as a superclass.
"""
def __init__(self, cm):
- """ Create a new SimulStopLite instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new SimulStopLite instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.name = "SimulStopLite"
def __call__(self, dummy):
- """ Stop all running nodes more or less simultaneously, returning
- whether this succeeded or not.
- """
-
+ """Return whether stopping all running nodes more or less simultaneously succeeds."""
self.incr("calls")
self.debug("Setup: %s" % self.name)
@@ -57,7 +53,7 @@ class SimulStopLite(CTSTest):
return self.success()
# Stop all the nodes - at about the same time...
- watch = self.create_watch(watchpats, self._env["DeadTime"]+10)
+ watch = self.create_watch(watchpats, self._env["DeadTime"] + 10)
watch.set_watch()
self.set_timer()
@@ -86,6 +82,5 @@ class SimulStopLite(CTSTest):
return self.failure("Missing log message: %s " % watch.unmatched)
def is_applicable(self):
- """ SimulStopLite is a setup test and never applicable """
-
+ """Return True if this test is applicable in the current test configuration."""
return False
diff --git a/python/pacemaker/_cts/tests/splitbraintest.py b/python/pacemaker/_cts/tests/splitbraintest.py
index 09d5f55..711e6da 100644
--- a/python/pacemaker/_cts/tests/splitbraintest.py
+++ b/python/pacemaker/_cts/tests/splitbraintest.py
@@ -1,7 +1,7 @@
-""" Create a split brain cluster and verify a resource is multiply managed """
+"""Create a split brain cluster and verify a resource is multiply managed."""
__all__ = ["SplitBrainTest"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import time
@@ -23,19 +23,20 @@ from pacemaker._cts.tests.starttest import StartTest
class SplitBrainTest(CTSTest):
- """ A concrete test that creates a split brain cluster and verifies that
- one node in each partition takes over the resource, resulting in two
- nodes running the same resource.
"""
+ Create a split brain cluster.
- def __init__(self, cm):
- """ Create a new SplitBrainTest instance
-
- Arguments:
+ This test verifies that one node in each partition takes over the
+ resource, resulting in two nodes running the same resource.
+ """
- cm -- A ClusterManager instance
+ def __init__(self, cm):
"""
+ Create a new SplitBrainTest instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.is_experimental = True
@@ -45,8 +46,7 @@ class SplitBrainTest(CTSTest):
self._startall = SimulStartLite(cm)
def _isolate_partition(self, partition):
- """ Create a new partition containing the given nodes """
-
+ """Create a new partition containing the given nodes."""
other_nodes = self._env["nodes"].copy()
for node in partition:
@@ -67,8 +67,7 @@ class SplitBrainTest(CTSTest):
return
def _heal_partition(self, partition):
- """ Move the given nodes out of their own partition back into the cluster """
-
+ """Move the given nodes out of their own partition back into the cluster."""
other_nodes = self._env["nodes"].copy()
for node in partition:
@@ -87,8 +86,7 @@ class SplitBrainTest(CTSTest):
self._cm.unisolate_node(node, other_nodes)
def __call__(self, node):
- """ Perform this test """
-
+ """Perform this test."""
self.incr("calls")
self.passed = True
partitions = {}
@@ -197,8 +195,7 @@ class SplitBrainTest(CTSTest):
@property
def errors_to_ignore(self):
- """ Return list of errors which should be ignored """
-
+ """Return a list of errors which should be ignored."""
return [
r"Another DC detected:",
r"(ERROR|error).*: .*Application of an update diff failed",
@@ -207,8 +204,7 @@ class SplitBrainTest(CTSTest):
]
def is_applicable(self):
- """ Return True if this test is applicable in the current test configuration. """
-
+ """Return True if this test is applicable in the current test configuration."""
if not CTSTest.is_applicable(self):
return False
diff --git a/python/pacemaker/_cts/tests/standbytest.py b/python/pacemaker/_cts/tests/standbytest.py
index a9ce8ec..a3e1734 100644
--- a/python/pacemaker/_cts/tests/standbytest.py
+++ b/python/pacemaker/_cts/tests/standbytest.py
@@ -1,7 +1,7 @@
-""" Put a node into standby mode and check that resources migrate """
+"""Put a node into standby mode and check that resources migrate."""
__all__ = ["StandbyTest"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
@@ -18,18 +18,15 @@ from pacemaker._cts.tests.starttest import StartTest
class StandbyTest(CTSTest):
- """ A concrete tests that puts a node into standby and checks that resources
- migrate away from the node
- """
+ """Put a node into standby and check that resources migrate away from it."""
def __init__(self, cm):
- """ Create a new StandbyTest instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new StandbyTest instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.benchmark = True
@@ -45,8 +42,7 @@ class StandbyTest(CTSTest):
# check resources, resources should have been migrated back (SHOULD THEY?)
def __call__(self, node):
- """ Perform this test """
-
+ """Perform this test."""
self.incr("calls")
ret = self._startall(None)
if not ret:
@@ -65,7 +61,7 @@ class StandbyTest(CTSTest):
watchpats = [
r"State transition .* -> S_POLICY_ENGINE",
]
- watch = self.create_watch(watchpats, self._env["DeadTime"]+10)
+ watch = self.create_watch(watchpats, self._env["DeadTime"] + 10)
watch.set_watch()
self.debug("Setting node %s to standby mode" % node)
diff --git a/python/pacemaker/_cts/tests/startonebyone.py b/python/pacemaker/_cts/tests/startonebyone.py
index 6a01097..550dacd 100644
--- a/python/pacemaker/_cts/tests/startonebyone.py
+++ b/python/pacemaker/_cts/tests/startonebyone.py
@@ -1,7 +1,7 @@
-""" Start all stopped nodes serially """
+"""Start all stopped nodes serially."""
__all__ = ["StartOnebyOne"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
@@ -18,16 +18,15 @@ from pacemaker._cts.tests.starttest import StartTest
class StartOnebyOne(CTSTest):
- """ A concrete test that starts all stopped nodes serially """
+ """Start all stopped nodes serially."""
def __init__(self, cm):
- """ Create a new StartOnebyOne instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new StartOnebyOne instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.name = "StartOnebyOne"
@@ -35,8 +34,7 @@ class StartOnebyOne(CTSTest):
self._stopall = SimulStopLite(cm)
def __call__(self, dummy):
- """ Perform this test """
-
+ """Perform this test."""
self.incr("calls")
ret = self._stopall(None)
diff --git a/python/pacemaker/_cts/tests/starttest.py b/python/pacemaker/_cts/tests/starttest.py
index 6387511..b8103d4 100644
--- a/python/pacemaker/_cts/tests/starttest.py
+++ b/python/pacemaker/_cts/tests/starttest.py
@@ -1,7 +1,7 @@
-""" Start the cluster manager on a given node """
+"""Start the cluster manager on a given node."""
__all__ = ["StartTest"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
@@ -16,27 +16,25 @@ from pacemaker._cts.tests.ctstest import CTSTest
class StartTest(CTSTest):
- """ A pseudo-test that is only used to set up conditions before running
- some other test. This class starts the cluster manager on a given
- node.
+ """
+ A pseudo-test that sets up conditions before running some other test.
- Other test classes should not use this one as a superclass.
+ This class starts the cluster manager on a given node. Other test classes
+ should not use this one as a superclass.
"""
def __init__(self, cm):
- """ Create a new StartTest instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new StartTest instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.name = "Start"
def __call__(self, node):
- """ Start the given node, returning whether this succeeded or not """
-
+ """Start the given node, returning whether this succeeded or not."""
self.incr("calls")
if self._cm.upcount() == 0:
diff --git a/python/pacemaker/_cts/tests/stonithdtest.py b/python/pacemaker/_cts/tests/stonithdtest.py
index 0dce291..7c65459 100644
--- a/python/pacemaker/_cts/tests/stonithdtest.py
+++ b/python/pacemaker/_cts/tests/stonithdtest.py
@@ -1,7 +1,7 @@
-""" Fence a running node and wait for it to restart """
+"""Fence a running node and wait for it to restart."""
__all__ = ["StonithdTest"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker.exitstatus import ExitStatus
@@ -21,16 +21,15 @@ from pacemaker._cts.timer import Timer
class StonithdTest(CTSTest):
- """ A concrete test that fences a running node and waits for it to restart """
+ """Fence a running node and wait for it to restart."""
def __init__(self, cm):
- """ Create a new StonithdTest instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new StonithdTest instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.benchmark = True
self.name = "Stonithd"
@@ -38,8 +37,7 @@ class StonithdTest(CTSTest):
self._startall = SimulStartLite(cm)
def __call__(self, node):
- """ Perform this test """
-
+ """Perform this test."""
self.incr("calls")
if len(self._env["nodes"]) < 2:
return self.skipped()
@@ -122,8 +120,7 @@ class StonithdTest(CTSTest):
@property
def errors_to_ignore(self):
- """ Return list of errors which should be ignored """
-
+ """Return a list of errors which should be ignored."""
return [
self.templates["Pat:Fencing_start"] % ".*",
self.templates["Pat:Fencing_ok"] % ".*",
@@ -132,8 +129,7 @@ class StonithdTest(CTSTest):
]
def is_applicable(self):
- """ Return True if this test is applicable in the current test configuration. """
-
+ """Return True if this test is applicable in the current test configuration."""
if not CTSTest.is_applicable(self):
return False
diff --git a/python/pacemaker/_cts/tests/stoponebyone.py b/python/pacemaker/_cts/tests/stoponebyone.py
index d75d282..4fdfe5c 100644
--- a/python/pacemaker/_cts/tests/stoponebyone.py
+++ b/python/pacemaker/_cts/tests/stoponebyone.py
@@ -1,7 +1,7 @@
-""" Stop all running nodes serially """
+"""Stop all running nodes serially."""
__all__ = ["StopOnebyOne"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
@@ -18,16 +18,15 @@ from pacemaker._cts.tests.stoptest import StopTest
class StopOnebyOne(CTSTest):
- """ A concrete test that stops all running nodes serially """
+ """Stop all running nodes serially."""
def __init__(self, cm):
- """ Create a new StartOnebyOne instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+        Create a new StopOnebyOne instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.name = "StopOnebyOne"
@@ -36,8 +35,7 @@ class StopOnebyOne(CTSTest):
self._stop = StopTest(cm)
def __call__(self, dummy):
- """ Perform this test """
-
+ """Perform this test."""
self.incr("calls")
ret = self._startall(None)
diff --git a/python/pacemaker/_cts/tests/stoptest.py b/python/pacemaker/_cts/tests/stoptest.py
index 8f496d3..48ef73a 100644
--- a/python/pacemaker/_cts/tests/stoptest.py
+++ b/python/pacemaker/_cts/tests/stoptest.py
@@ -1,7 +1,7 @@
-""" Stop the cluster manager on a given node """
+"""Stop the cluster manager on a given node."""
__all__ = ["StopTest"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
@@ -18,27 +18,25 @@ from pacemaker._cts.tests.ctstest import CTSTest
class StopTest(CTSTest):
- """ A pseudo-test that is only used to set up conditions before running
- some other test. This class stops the cluster manager on a given
- node.
+ """
+ A pseudo-test that sets up conditions before running some other test.
- Other test classes should not use this one as a superclass.
+ This class stops the cluster manager on a given node. Other test classes
+ should not use this one as a superclass.
"""
def __init__(self, cm):
- """ Create a new StopTest instance
-
- Arguments:
-
- cm -- A ClusterManager instance
"""
+ Create a new StopTest instance.
+ Arguments:
+ cm -- A ClusterManager instance
+ """
CTSTest.__init__(self, cm)
self.name = "Stop"
def __call__(self, node):
- """ Stop the given node, returning whether this succeeded or not """
-
+ """Stop the given node, returning whether this succeeded or not."""
self.incr("calls")
if self._cm.expected_status[node] != "up":
return self.skipped()
@@ -52,7 +50,7 @@ class StopTest(CTSTest):
# (note that this won't work if we have multiple partitions)
for other in self._env["nodes"]:
if self._cm.expected_status[other] == "up" and other != node:
- patterns.append(self.templates["Pat:They_stopped"] %(other, node))
+ patterns.append(self.templates["Pat:They_stopped"] % (other, node))
watch = self.create_watch(patterns, self._env["DeadTime"])
watch.set_watch()
diff --git a/python/pacemaker/_cts/timer.py b/python/pacemaker/_cts/timer.py
index 122b70b..e31f18b 100644
--- a/python/pacemaker/_cts/timer.py
+++ b/python/pacemaker/_cts/timer.py
@@ -1,63 +1,63 @@
-""" Timer-related utilities for CTS """
+"""Timer-related utilities for CTS."""
__all__ = ["Timer"]
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import time
+
class Timer:
- """ A class for measuring the runtime of some task. A Timer may be used
- manually or as a context manager, like so:
+ """
+ A class for measuring the runtime of some task.
+
+ A Timer may be used manually or as a context manager, like so:
with Timer(logger, "SomeTest", "SomeTimer"):
...
- A Timer runs from when start() is called until the timer is deleted
- or reset() is called. There is no explicit stop method.
+ A Timer runs from when start() is called until the timer is deleted or
+ reset() is called. There is no explicit stop method.
"""
def __init__(self, logger, test_name, timer_name):
- """ Create a new Timer instance.
-
- Arguments:
-
- logger -- A Logger instance that can be used to record when
- the timer stopped
- test_name -- The name of the test this timer is being run for
- timer_name -- The name of this timer
"""
+ Create a new Timer instance.
+ Arguments:
+ logger -- A Logger instance that can be used to record when
+ the timer stopped
+ test_name -- The name of the test this timer is being run for
+ timer_name -- The name of this timer
+ """
self._logger = logger
self._start_time = None
self._test_name = test_name
self._timer_name = timer_name
def __enter__(self):
+ """When used as a context manager, start the timer."""
self.start()
return self
def __exit__(self, *args):
+ """When used as a context manager, log the elapsed time."""
self._logger.debug("%s:%s runtime: %.2f" % (self._test_name, self._timer_name, self.elapsed))
def reset(self):
- """ Restart the timer """
-
+ """Restart the timer."""
self.start()
def start(self):
- """ Start the timer """
-
+ """Start the timer."""
self._start_time = time.time()
@property
def start_time(self):
- """ When did the timer start? """
-
+ """Return when the timer started."""
return self._start_time
@property
def elapsed(self):
- """ How long has the timer been running for? """
-
+ """Return how long the timer has been running for."""
return time.time() - self._start_time
diff --git a/python/pacemaker/_cts/watcher.py b/python/pacemaker/_cts/watcher.py
index 3e6d702..7870a9f 100644
--- a/python/pacemaker/_cts/watcher.py
+++ b/python/pacemaker/_cts/watcher.py
@@ -1,55 +1,53 @@
-""" Log searching classes for Pacemaker's Cluster Test Suite (CTS) """
+"""Log searching classes for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["LogKind", "LogWatcher"]
-__copyright__ = "Copyright 2014-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2014-2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
-from enum import Enum, unique
+from enum import Enum, auto, unique
import re
import time
import threading
+from dateutil.parser import isoparser
+
from pacemaker.buildoptions import BuildOptions
+from pacemaker._cts.errors import OutputNotFoundError
from pacemaker._cts.logging import LogFactory
from pacemaker._cts.remote import RemoteFactory
LOG_WATCHER_BIN = "%s/cts-log-watcher" % BuildOptions.DAEMON_DIR
+
@unique
class LogKind(Enum):
- """ The various kinds of log files that can be watched """
+ """The various kinds of log files that can be watched."""
- ANY = 0
- FILE = 1
- REMOTE_FILE = 2
- JOURNAL = 3
+ LOCAL_FILE = auto() # From a local aggregation file on the exerciser
+ REMOTE_FILE = auto() # From a file on each cluster node
+ JOURNAL = auto() # From the systemd journal on each cluster node
def __str__(self):
- if self.value == 0:
- return "any"
- if self.value == 1:
- return "combined syslog"
- if self.value == 2:
- return "remote"
+ """Return a printable string for a LogKind value."""
+ return self.name.lower().replace('_', ' ')
- return "journal"
class SearchObj:
- """ The base class for various kinds of log watchers. Log-specific watchers
- need to be built on top of this one.
"""
+ The base class for various kinds of log watchers.
- def __init__(self, filename, host=None, name=None):
- """ Create a new SearchObj instance
-
- Arguments:
+ Log-specific watchers need to be built on top of this one.
+ """
- filename -- The log to watch
- host -- The cluster node on which to watch the log
- name -- A unique name to use when logging about this watch
+ def __init__(self, filename, host=None, name=None):
"""
+ Create a new SearchObj instance.
- self.cache = []
+ Arguments:
+ filename -- The log to watch
+ host -- The cluster node on which to watch the log
+ name -- A unique name to use when logging about this watch
+ """
self.filename = filename
self.limit = None
self.logger = LogFactory()
@@ -62,6 +60,12 @@ class SearchObj:
else:
self.host = "localhost"
+ self._cache = []
+ self._delegate = None
+
+ async_task = self.harvest_async()
+ async_task.join()
+
def __str__(self):
if self.host:
return "%s:%s" % (self.host, self.filename)
@@ -69,71 +73,72 @@ class SearchObj:
return self.filename
def log(self, args):
- """ Log a message """
-
+ """Log a message."""
message = "lw: %s: %s" % (self, args)
self.logger.log(message)
def debug(self, args):
- """ Log a debug message """
-
+ """Log a debug message."""
message = "lw: %s: %s" % (self, args)
self.logger.debug(message)
- def harvest(self, delegate=None):
- """ Collect lines from a log, optionally calling delegate when complete """
-
- async_task = self.harvest_async(delegate)
- async_task.join()
-
def harvest_async(self, delegate=None):
- """ Collect lines from a log asynchronously, optionally calling delegate
- when complete. This method must be implemented by all subclasses.
"""
+ Collect lines from a log asynchronously.
+ Optionally, also call delegate when complete. This method must be
+ implemented by all subclasses.
+ """
+ raise NotImplementedError
+
+ def harvest_cached(self):
+ """
+ Return cached logs from before the limit timestamp.
+ """
raise NotImplementedError
def end(self):
- """ Mark that a log is done being watched, resetting internal data structures
- to the beginning of the file. Subsequent watches will therefore start
- from the beginning again.
"""
+ Mark that a log is done being watched.
- self.debug("Unsetting the limit")
+ This function also resets internal data structures to the beginning
+ of the file. Subsequent watches will therefore start from the
+ beginning again.
+ """
+ self.debug("Clearing cache and unsetting limit")
+ self._cache = []
self.limit = None
+
class FileObj(SearchObj):
- """ A specialized SearchObj subclass for watching log files """
+ """A specialized SearchObj subclass for watching log files."""
def __init__(self, filename, host=None, name=None):
- """ Create a new FileObj instance
-
- Arguments:
-
- filename -- The file to watch
- host -- The cluster node on which to watch the file
- name -- A unique name to use when logging about this watch
"""
+ Create a new FileObj instance.
+ Arguments:
+ filename -- The file to watch
+ host -- The cluster node on which to watch the file
+ name -- A unique name to use when logging about this watch
+ """
SearchObj.__init__(self, filename, host, name)
- self._delegate = None
-
- self.harvest()
def async_complete(self, pid, returncode, out, err):
- """ Called when an asynchronous log file read is complete. This function
- saves the output from that read for look()/look_for_all() to process
- and records the current position in the journal. Future reads will
- pick back up from that spot.
+ """
+ Handle completion of an asynchronous log file read.
- Arguments:
+ This function saves the output from that read for look()/look_for_all()
+ to process and records the current position in the journal. Future
+ reads will pick back up from that spot.
- pid -- The ID of the process that did the read
- returncode -- The return code of the process that did the read
- out -- stdout from the file read
- err -- stderr from the file read
+ Arguments:
+ pid -- The ID of the process that did the read
+ returncode -- The return code of the process that did the read
+ out -- stdout from the file read
+ err -- stderr from the file read
"""
-
+ messages = []
for line in out:
match = re.search(r"^CTSwatcher:Last read: (\d+)", line)
@@ -145,20 +150,20 @@ class FileObj(SearchObj):
elif re.search(r"^CTSwatcher:", line):
self.debug("Got control line: %s" % line)
else:
- self.cache.append(line)
+ messages.append(line)
if self._delegate:
- self._delegate.async_complete(pid, returncode, self.cache, err)
+ self._delegate.async_complete(pid, returncode, messages, err)
def harvest_async(self, delegate=None):
- """ Collect lines from the log file on a single host asynchronously,
- optionally calling delegate when complete. This can be called
- repeatedly, reading a chunk each time or until the end of the log
- file is hit.
"""
+ Collect lines from the log file on a single host asynchronously.
+ Optionally, call delegate when complete. This can be called
+ repeatedly, reading a chunk each time or until the end of the
+ log file is hit.
+ """
self._delegate = delegate
- self.cache = []
if self.limit and (self.offset == "EOF" or int(self.offset) > self.limit):
if self._delegate:
@@ -166,24 +171,34 @@ class FileObj(SearchObj):
return None
- return self.rsh.call_async(self.host,
- "%s -t %s -p CTSwatcher: -l 200 -f %s -o %s" % (LOG_WATCHER_BIN, self.name, self.filename, self.offset),
- delegate=self)
+ cmd = ("%s -p CTSwatcher: -l 200 -f %s -o %s"
+ % (LOG_WATCHER_BIN, self.filename, self.offset))
+
+ return self.rsh.call_async(self.host, cmd, delegate=self)
+
+ def harvest_cached(self):
+ """
+ Return cached logs from before the limit timestamp.
+ """
+ # cts-log-watcher script renders caching unnecessary for FileObj.
+ # @TODO Caching might be slightly more efficient, if not too complex.
+ return []
def set_end(self):
- """ Internally record where we expect to find the end of a log file,
- which is just the number of lines in the file. Calls to harvest
- from the log file will not go any farther than what this function
- records.
"""
+ Internally record where we expect to find the end of a log file.
+ Calls to harvest from the log file will not go any farther than
+ what this function records.
+ """
if self.limit:
return
+ cmd = ("%s -p CTSwatcher: -l 2 -f %s -o EOF"
+ % (LOG_WATCHER_BIN, self.filename))
+
# pylint: disable=not-callable
- (_, lines) = self.rsh(self.host,
- "%s -t %s -p CTSwatcher: -l 2 -f %s -o %s" % (LOG_WATCHER_BIN, self.name, self.filename, "EOF"),
- verbose=0)
+ (_, lines) = self.rsh(self.host, cmd, verbose=0)
for line in lines:
match = re.search(r"^CTSwatcher:Last read: (\d+)", line)
@@ -191,138 +206,179 @@ class FileObj(SearchObj):
self.limit = int(match.group(1))
self.debug("Set limit to: %d" % self.limit)
+
class JournalObj(SearchObj):
- """ A specialized SearchObj subclass for watching systemd journals """
+ """A specialized SearchObj subclass for watching systemd journals."""
def __init__(self, host=None, name=None):
- """ Create a new JournalObj instance
+ """
+ Create a new JournalObj instance.
- Arguments:
+ Arguments:
+ host -- The cluster node on which to watch the journal
+ name -- A unique name to use when logging about this watch
+ """
+ SearchObj.__init__(self, name, host, name)
+ self._parser = isoparser()
- host -- The cluster node on which to watch the journal
- name -- A unique name to use when logging about this watch
+ def _msg_after_limit(self, msg):
"""
+ Check whether a message was logged after the limit timestamp.
- SearchObj.__init__(self, name, host, name)
- self._delegate = None
- self._hit_limit = False
+ Arguments:
+ msg -- Message to check
- self.harvest()
+ Returns `True` if `msg` was logged after `self.limit`, or `False`
+ otherwise.
+ """
+ if not self.limit:
+ return False
- def async_complete(self, pid, returncode, out, err):
- """ Called when an asynchronous journal read is complete. This function
- saves the output from that read for look()/look_for_all() to process
- and records the current position in the journal. Future reads will
- pick back up from that spot.
+ match = re.search(r"^\S+", msg)
+ if not match:
+ return False
- Arguments:
+ msg_timestamp = match.group(0)
+ msg_dt = self._parser.isoparse(msg_timestamp)
+ return msg_dt > self.limit
- pid -- The ID of the process that did the journal read
- returncode -- The return code of the process that did the journal read
- out -- stdout from the journal read
- err -- stderr from the journal read
+ def _split_msgs_by_limit(self, msgs):
"""
+ Split a sorted list of messages relative to the limit timestamp.
- found_cursor = False
- for line in out:
- match = re.search(r"^-- cursor: ([^.]+)", line)
+ Arguments:
+ msgs -- List of messages to split
- if match:
- found_cursor = True
- self.offset = match.group(1).strip()
- self.debug("Got %d lines, new cursor: %s" % (len(out), self.offset))
- else:
- self.cache.append(line)
+ Returns a tuple:
+ (list of messages logged on or before limit timestamp,
+ list of messages logged after limit timestamp).
+ """
+        # If the last message was logged before the limit, then all of them were
+ if msgs and self._msg_after_limit(msgs[-1]):
+
+ # Else find index of first message logged after limit
+ for idx, msg in enumerate(msgs):
+ if self._msg_after_limit(msg):
+ self.debug("Got %d lines before passing limit timestamp"
+ % idx)
+ return msgs[:idx], msgs[idx:]
+
+ self.debug("Got %s lines" % len(msgs))
+ return msgs, []
+
+ def async_complete(self, pid, returncode, out, err):
+ """
+ Handle completion of an asynchronous journal read.
+
+ This function saves the output from that read for look()/look_for_all()
+ to process and records the current position in the journal. Future
+ reads will pick back up from that spot.
+
+ Arguments:
+ pid -- The ID of the process that did the journal read
+ returncode -- The return code of the process that did the journal read
+ out -- stdout from the journal read
+ err -- stderr from the journal read
+ """
+ if out:
+ # Cursor should always be last line of journalctl output
+ out, cursor_line = out[:-1], out[-1]
+ match = re.search(r"^-- cursor: ([^.]+)", cursor_line)
+ if not match:
+ raise OutputNotFoundError('Cursor not found at end of output:'
+ + '\n%s' % out)
- if self.limit and not found_cursor:
- self._hit_limit = True
- self.debug("Got %d lines but no cursor: %s" % (len(out), self.offset))
+ self.offset = match.group(1).strip()
+ self.debug("Got new cursor: %s" % self.offset)
- # Get the current cursor
- # pylint: disable=not-callable
- (_, out) = self.rsh(self.host, "journalctl -q -n 0 --show-cursor", verbose=0)
- for line in out:
- match = re.search(r"^-- cursor: ([^.]+)", line)
+ before, after = self._split_msgs_by_limit(out)
- if match:
- self.offset = match.group(1).strip()
- self.debug("Got %d lines, new cursor: %s" % (len(out), self.offset))
- else:
- self.log("Not a new cursor: %s" % line)
- self.cache.append(line)
+ # Save remaining messages after limit for later processing
+ self._cache.extend(after)
if self._delegate:
- self._delegate.async_complete(pid, returncode, self.cache, err)
+ self._delegate.async_complete(pid, returncode, before, err)
def harvest_async(self, delegate=None):
- """ Collect lines from the journal on a single host asynchronously,
- optionally calling delegate when complete. This can be called
- repeatedly, reading a chunk each time or until the end of the
- journal is hit.
"""
+ Collect lines from the journal on a single host asynchronously.
+ Optionally, call delegate when complete. This can be called
+ repeatedly, reading a chunk each time or until the end of the journal
+ is hit.
+ """
self._delegate = delegate
- self.cache = []
-
- # Use --lines to prevent journalctl from overflowing the Popen input buffer
- if self.limit and self._hit_limit:
- return None
+ # Use --lines to prevent journalctl from overflowing the Popen input
+ # buffer
+ command = "journalctl --quiet --output=short-iso --show-cursor"
if self.offset == "EOF":
- command = "journalctl -q -n 0 --show-cursor"
- elif self.limit:
- command = "journalctl -q --after-cursor='%s' --until '%s' --lines=200 --show-cursor" % (self.offset, self.limit)
+ command += " --lines 0"
else:
- command = "journalctl -q --after-cursor='%s' --lines=200 --show-cursor" % (self.offset)
+ command += " --after-cursor='%s' --lines=200" % self.offset
return self.rsh.call_async(self.host, command, delegate=self)
+ def harvest_cached(self):
+ """
+ Return cached logs from before the limit timestamp.
+ """
+ before, self._cache = self._split_msgs_by_limit(self._cache)
+ return before
+
def set_end(self):
- """ Internally record where we expect to find the end of a host's journal,
- which is just the current time. Calls to harvest from the journal will
- not go any farther than what this function records.
"""
+ Internally record where we expect to find the end of a host's journal.
+ Calls to harvest from the journal will not go any farther than what
+ this function records.
+ """
if self.limit:
return
- self._hit_limit = False
+ # --iso-8601=seconds yields YYYY-MM-DDTHH:MM:SSZ, where Z is timezone
+ # as offset from UTC
+
# pylint: disable=not-callable
- (rc, lines) = self.rsh(self.host, "date +'%Y-%m-%d %H:%M:%S'", verbose=0)
+ (rc, lines) = self.rsh(self.host, "date --iso-8601=seconds", verbose=0)
if rc == 0 and len(lines) == 1:
- self.limit = lines[0].strip()
+ self.limit = self._parser.isoparse(lines[0].strip())
self.debug("Set limit to: %s" % self.limit)
else:
self.debug("Unable to set limit for %s because date returned %d lines with status %d"
% (self.host, len(lines), rc))
-class LogWatcher:
- """ A class for watching a single log file or journal across multiple hosts,
- looking for lines that match given regular expressions.
- The way you use this class is as follows:
- - Construct a LogWatcher object
- - Call set_watch() when you want to start watching the log
- - Call look() to scan the log looking for the patterns
+class LogWatcher:
"""
+ Watch a single log file or journal across multiple hosts.
- def __init__(self, log, regexes, hosts, kind=LogKind.ANY, name="Anon", timeout=10, silent=False):
- """ Create a new LogWatcher instance.
+ Instances of this class look for lines that match given regular
+ expressions.
- Arguments:
+ The way you use this class is as follows:
+ - Construct a LogWatcher object
+ - Call set_watch() when you want to start watching the log
+ - Call look() to scan the log looking for the patterns
+ """
- log -- The log file to watch
- regexes -- A list of regular expressions to match against the log
- hosts -- A list of cluster nodes on which to watch the log
- kind -- What type of log is this object watching?
- name -- A unique name to use when logging about this watch
- timeout -- Default number of seconds to watch a log file at a time;
- this can be overridden by the timeout= parameter to
- self.look on an as-needed basis
- silent -- If False, log extra information
+ def __init__(self, log, regexes, hosts, kind, name="Anon", timeout=10,
+ silent=False):
+ """
+ Create a new LogWatcher instance.
+
+ Arguments:
+ log -- The log file to watch
+ regexes -- A list of regular expressions to match against the log
+ hosts -- A list of cluster nodes on which to watch the log
+ kind -- What type of log is this object watching?
+ name -- A unique name to use when logging about this watch
+ timeout -- Default number of seconds to watch a log file at a time;
+ this can be overridden by the timeout= parameter to
+ self.look on an as-needed basis
+ silent -- If False, log extra information
"""
-
self.filename = log
self.hosts = hosts
self.kind = kind
@@ -352,15 +408,16 @@ class LogWatcher:
self._debug("Looking for regex: %s" % regex)
def _debug(self, args):
- """ Log a debug message """
-
+ """Log a debug message."""
message = "lw: %s: %s" % (self.name, args)
self._logger.debug(message)
def set_watch(self):
- """ Mark the place to start watching the log from """
+ """Mark the place to start watching the log from."""
+ if self.kind == LogKind.LOCAL_FILE:
+ self._file_list.append(FileObj(self.filename))
- if self.kind == LogKind.REMOTE_FILE:
+ elif self.kind == LogKind.REMOTE_FILE:
for node in self.hosts:
self._file_list.append(FileObj(self.filename, node, self.name))
@@ -368,26 +425,21 @@ class LogWatcher:
for node in self.hosts:
self._file_list.append(JournalObj(node, self.name))
- else:
- self._file_list.append(FileObj(self.filename))
-
def async_complete(self, pid, returncode, out, err):
- """ Called when an asynchronous log file read is complete. This function
- saves the output from that read for look()/look_for_all() to process
- and records the current position. Future reads will pick back up
- from that spot.
+ """
+ Handle completion of an asynchronous log file read.
- Arguments:
+ This function saves the output from that read for look()/look_for_all()
+ to process and records the current position. Future reads will pick
+ back up from that spot.
- pid -- The ID of the process that did the read
- returncode -- The return code of the process that did the read
- out -- stdout from the file read
- err -- stderr from the file read
+ Arguments:
+ pid -- The ID of the process that did the read
+ returncode -- The return code of the process that did the read
+ out -- stdout from the file read
+ err -- stderr from the file read
"""
-
- # It's not clear to me whether this function ever gets called as
- # delegate somewhere, which is what would pass returncode and err
- # as parameters. Just disable the warning for now.
+ # Called as delegate through {File,Journal}Obj.async_complete()
# pylint: disable=unused-argument
# TODO: Probably need a lock for updating self._line_cache
@@ -398,17 +450,23 @@ class LogWatcher:
self._line_cache.extend(out)
def __get_lines(self):
- """ Iterate over all watched log files and collect new lines from each """
-
+ """Iterate over all watched log files and collect new lines from each."""
if not self._file_list:
raise ValueError("No sources to read from")
pending = []
for f in self._file_list:
- t = f.harvest_async(self)
- if t:
- pending.append(t)
+ cached = f.harvest_cached()
+ if cached:
+ self._debug("Got %d lines from %s cache (total %d)"
+ % (len(cached), f.name, len(self._line_cache)))
+ with self._cache_lock:
+ self._line_cache.extend(cached)
+ else:
+ t = f.harvest_async(self)
+ if t:
+ pending.append(t)
for t in pending:
t.join(60.0)
@@ -417,31 +475,30 @@ class LogWatcher:
return
def end(self):
- """ Mark that a log is done being watched, resetting internal data structures
- to the beginning of the file. Subsequent watches will therefore start
- from the beginning again.
"""
+ Mark that a log is done being watched.
+ This function also resets internal data structures to the beginning
+ of the file. Subsequent watches will therefore start from the
+ beginning again.
+ """
for f in self._file_list:
f.end()
def look(self, timeout=None):
- """ Examine the log looking for the regexes that were given when this
- object was created. It starts looking from the place marked by
- set_watch(), continuing through the file in the fashion of
- `tail -f`. It properly recovers from log file truncation but not
- from removing and recreating the log.
-
- Arguments:
+ """
+ Examine the log looking for the regexes in this object.
- timeout -- Number of seconds to watch the log file; defaults to
- seconds argument passed when this object was created
+ It starts looking from the place marked by set_watch(), continuing
+ through the file in the fashion of `tail -f`. It properly recovers
+ from log file truncation but not from removing and recreating the log.
- Returns:
+ Arguments:
+ timeout -- Number of seconds to watch the log file; defaults to
+ seconds argument passed when this object was created
- The first line which matches any regex
+ Returns the first line which matches any regex
"""
-
if not timeout:
timeout = self._timeout
@@ -495,29 +552,23 @@ class LogWatcher:
self._debug("Waiting: start=%d, end=%d, now=%d, lines=%d" % (begin, end, time.time(), len(self._line_cache)))
time.sleep(1)
- self._debug("How did we get here")
- return None
-
def look_for_all(self, allow_multiple_matches=False, silent=False):
- """ Like look(), but looks for matches for multiple regexes. This function
- returns when the timeout is reached or all regexes were matched. As a
- side effect, self.unmatched will contain regexes that were not matched.
- This can be inspected by the caller.
-
- Arguments:
+ """
+ Like look(), but looks for matches for multiple regexes.
- allow_multiple_matches -- If True, allow each regex to match more than
- once. If False (the default), once a regex
- matches a line, it will no longer be searched
- for.
- silent -- If False, log extra information
+ This function returns when the timeout is reached or all regexes were
+ matched. As a side effect, self.unmatched will contain regexes that
+ were not matched. This can be inspected by the caller.
- Returns:
+ Arguments:
+ allow_multiple_matches -- If True, allow each regex to match more than
+ once. If False (the default), once a regex
+ matches a line, it will no longer be searched
+ for.
+ silent -- If False, log extra information
- If all regexes are matched, return the matching lines. Otherwise, return
- None.
+ Returns the matching lines if all regexes are matched, or None.
"""
-
save_regexes = self.regexes
result = []
diff --git a/python/pacemaker/buildoptions.py.in b/python/pacemaker/buildoptions.py.in
index 17fe981..a97640c 100644
--- a/python/pacemaker/buildoptions.py.in
+++ b/python/pacemaker/buildoptions.py.in
@@ -1,60 +1,65 @@
-""" A module providing information on build-time configuration of pacemaker """
+"""A module providing information on build-time configuration of pacemaker."""
__all__ = ["BuildOptions"]
-__copyright__ = "Copyright 2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2023-2024 the Pacemaker project contributors"
__license__ = "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)"
+
class BuildOptions:
- """ Variables generated as part of the ./configure && make process. These
- affect how pacemaker was configured and where its various parts get
- installed.
+ """
+ Variables generated as part of the ./configure && make process.
+
+ These affect how pacemaker was configured and where its various parts
+ get installed.
"""
BASH_PATH = "@BASH_PATH@"
- """ Path to the bash shell """
+ """Path to the bash shell."""
_BUILD_DIR = "@abs_top_builddir@"
- """ Top-level build directory
- NOTE: This is not especially useful on installed systems, but is useful for
- running various programs from a source checkout
+ """
+ Top-level build directory.
+
+ NOTE: This is not especially useful on installed systems, but is useful for
+ running various programs from a source checkout
"""
CIB_DIR = "@CRM_CONFIG_DIR@"
- """ Where CIB files are stored """
+ """Where CIB files are stored."""
CIB_SCHEMA_VERSION = "@CIB_VERSION@"
- """ Latest supported CIB schema version number """
+ """Latest supported CIB schema version number."""
COROSYNC_CONFIG_FILE = "@PCMK__COROSYNC_CONF@"
- """ Path to the corosync config file """
+ """Path to the corosync config file."""
DAEMON_DIR = "@CRM_DAEMON_DIR@"
- """ Where Pacemaker daemons are installed """
+ """Where Pacemaker daemons are installed."""
DAEMON_USER = "@CRM_DAEMON_USER@"
- """ User to run Pacemaker daemons as """
+ """User to run Pacemaker daemons as."""
LOCAL_STATE_DIR = "@localstatedir@"
- """ Where miscellaneous temporary state files are stored """
+ """Where miscellaneous temporary state files are stored."""
LOG_DIR = "@CRM_LOG_DIR@"
- """ Where Pacemaker log files are stored """
+ """Where Pacemaker log files are stored."""
OCF_RA_INSTALL_DIR = "@OCF_RA_INSTALL_DIR@"
- """ Where resource agents are installed """
+ """Where resource agents are installed."""
OCF_ROOT_DIR = "@OCF_ROOT_DIR@"
- """ Root directory for OCF resource agents and libraries """
+ """Root directory for OCF resource agents and libraries."""
RSC_TMP_DIR = "@CRM_RSCTMP_DIR@"
- """ Where resource agents should keep state files """
+ """Where resource agents should keep state files."""
# pylint: disable=comparison-of-constants
REMOTE_ENABLED = "@PC_NAME_GNUTLS@" != ""
- """ Was Pacemaker Remote support built? """
+ """True if Pacemaker Remote support is enabled."""
SBIN_DIR = "@sbindir@"
- """ Where administrative programs are installed """
+ """Where administrative programs are installed."""
SCHEMA_DIR = "@CRM_SCHEMA_DIRECTORY@"
- """ Where Relax-NG schema files are stored """
+ """Where Relax-NG schema files are stored."""
diff --git a/python/pacemaker/exitstatus.py b/python/pacemaker/exitstatus.py
index f74f9ec..7294d51 100644
--- a/python/pacemaker/exitstatus.py
+++ b/python/pacemaker/exitstatus.py
@@ -1,59 +1,62 @@
-""" A module providing constants relating to why a process or function exited """
+"""A module providing constants relating to why a process or function exited."""
__all__ = ["ExitStatus"]
-__copyright__ = "Copyright 2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2023-2024 the Pacemaker project contributors"
__license__ = "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)"
from enum import IntEnum, unique
+
# These values must be kept in sync with include/crm/common/results.h
@unique
class ExitStatus(IntEnum):
- """ Why did a function or process exit? These constants describe both success
- and failure conditions.
+ """
+ Exit status codes for a function or process.
+
+ These constants describe both success and failure conditions.
"""
- OK = 0
- ERROR = 1
- INVALID_PARAM = 2
- UNIMPLEMENT_FEATURE = 3
- INSUFFICIENT_PRIV = 4
- NOT_INSTALLED = 5
- NOT_CONFIGURED = 6
- NOT_RUNNING = 7
- PROMOTED = 8
- FAILED_PROMOTED = 9
- USAGE = 64
- DATAERR = 65
- NOINPUT = 66
- NOUSER = 67
- NOHOST = 68
- UNAVAILABLE = 69
- SOFTWARE = 70
- OSERR = 71
- OSFILE = 72
- CANTCREAT = 73
- IOERR = 74
- TEMPFAIL = 75
- PROTOCOL = 76
- NOPERM = 77
- CONFIG = 78
- FATAL = 100
- PANIC = 101
- DISCONNECT = 102
- OLD = 103
- DIGEST = 104
- NOSUCH = 105
- QUORUM = 106
- UNSAFE = 107
- EXISTS = 108
- MULTIPLE = 109
- EXPIRED = 110
- NOT_YET_IN_EFFECT = 111
- INDETERMINATE = 112
- UNSATISFIED = 113
- TIMEOUT = 124
- DEGRADED = 190
- DEGRADED_PROMOTED = 191
- NONE = 193
- MAX = 255
+ OK = 0
+ ERROR = 1
+ INVALID_PARAM = 2
+ UNIMPLEMENT_FEATURE = 3
+ INSUFFICIENT_PRIV = 4
+ NOT_INSTALLED = 5
+ NOT_CONFIGURED = 6
+ NOT_RUNNING = 7
+ PROMOTED = 8
+ FAILED_PROMOTED = 9
+ USAGE = 64
+ DATAERR = 65
+ NOINPUT = 66
+ NOUSER = 67
+ NOHOST = 68
+ UNAVAILABLE = 69
+ SOFTWARE = 70
+ OSERR = 71
+ OSFILE = 72
+ CANTCREAT = 73
+ IOERR = 74
+ TEMPFAIL = 75
+ PROTOCOL = 76
+ NOPERM = 77
+ CONFIG = 78
+ FATAL = 100
+ PANIC = 101
+ DISCONNECT = 102
+ OLD = 103
+ DIGEST = 104
+ NOSUCH = 105
+ QUORUM = 106
+ UNSAFE = 107
+ EXISTS = 108
+ MULTIPLE = 109
+ EXPIRED = 110
+ NOT_YET_IN_EFFECT = 111
+ INDETERMINATE = 112
+ UNSATISFIED = 113
+ TIMEOUT = 124
+ DEGRADED = 190
+ DEGRADED_PROMOTED = 191
+ NONE = 193
+ MAX = 255
diff --git a/python/tests/test_cts_network.py b/python/tests/test_cts_network.py
index 4aea8b9..3dec000 100644
--- a/python/tests/test_cts_network.py
+++ b/python/tests/test_cts_network.py
@@ -8,6 +8,7 @@ import unittest
from pacemaker._cts.network import next_ip
+
# next_ip makes a bunch of assumptions that we are not going to test here:
#
# * The env argument actually contains an "IPBase" key with a string in it
diff --git a/python/tests/test_exitstatus.py b/python/tests/test_exitstatus.py
index 571f6b4..b8543aa 100644
--- a/python/tests/test_exitstatus.py
+++ b/python/tests/test_exitstatus.py
@@ -8,6 +8,7 @@ import unittest
from pacemaker.exitstatus import ExitStatus
+
class ExitStatusTestCase(unittest.TestCase):
def test_min_max(self):
self.assertEqual(ExitStatus.OK, 0)