Diffstat (limited to 'src/cephadm/tests')
-rw-r--r--  src/cephadm/tests/__init__.py                   0
-rw-r--r--  src/cephadm/tests/fixtures.py                 162
-rw-r--r--  src/cephadm/tests/test_agent.py               800
-rw-r--r--  src/cephadm/tests/test_cephadm.py            2708
-rw-r--r--  src/cephadm/tests/test_container_engine.py     54
-rw-r--r--  src/cephadm/tests/test_enclosure.py            72
-rw-r--r--  src/cephadm/tests/test_ingress.py             350
-rw-r--r--  src/cephadm/tests/test_networks.py            233
-rw-r--r--  src/cephadm/tests/test_nfs.py                 239
-rw-r--r--  src/cephadm/tests/test_util_funcs.py          808
10 files changed, 5426 insertions, 0 deletions
diff --git a/src/cephadm/tests/__init__.py b/src/cephadm/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/cephadm/tests/__init__.py
diff --git a/src/cephadm/tests/fixtures.py b/src/cephadm/tests/fixtures.py
new file mode 100644
index 000000000..76ac0b44c
--- /dev/null
+++ b/src/cephadm/tests/fixtures.py
@@ -0,0 +1,162 @@
+import mock
+import os
+import pytest
+import time
+
+from contextlib import contextmanager
+from pyfakefs import fake_filesystem
+
+from typing import Dict, List, Optional
+
+
+def import_cephadm():
+ """Import cephadm as a module."""
+ import cephadm as _cephadm
+
+ return _cephadm
+
+
+def mock_docker():
+ _cephadm = import_cephadm()
+ docker = mock.Mock(_cephadm.Docker)
+ docker.path = '/usr/bin/docker'
+ return docker
+
+
+def mock_podman():
+ _cephadm = import_cephadm()
+ podman = mock.Mock(_cephadm.Podman)
+ podman.path = '/usr/bin/podman'
+ podman.version = (2, 1, 0)
+ return podman
+
+
+def _daemon_path():
+ return os.getcwd()
+
+
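+# Used (via mock.patch) in place of cephadm.Firewalld so that any use of the
+# firewall helper raises; tests can then assert whether firewall interaction
+# was skipped or attempted.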
+def mock_bad_firewalld():
+ def raise_bad_firewalld():
+ raise Exception('Called bad firewalld')
+
+ _cephadm = import_cephadm()
+ f = mock.Mock(_cephadm.Firewalld)
+ f.enable_service_for = lambda _: raise_bad_firewalld()
+ f.apply_rules = lambda: raise_bad_firewalld()
+    f.open_ports = lambda _: raise_bad_firewalld()
+    return f
+
+
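+# Helpers for exercising threaded scrape handling: _mock_scrape_host feeds a
+# raised ValueError into obj._handle_thread_exception, while _mock_run starts
+# the host-facts thread and marks its health inactive if the thread dies.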
+def _mock_scrape_host(obj, interval):
+ try:
+ raise ValueError("wah")
+ except Exception as e:
+ obj._handle_thread_exception(e, 'host')
+
+
+def _mock_run(obj):
+ t = obj._create_thread(obj._scrape_host_facts, 'host', 5)
+ time.sleep(1)
+ if not t.is_alive():
+ obj.cephadm_cache.update_health('host', "inactive", "host thread stopped")
+
+
+@pytest.fixture()
+def cephadm_fs(
+ fs: fake_filesystem.FakeFilesystem,
+):
+ """
+ use pyfakefs to stub filesystem calls
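+
+    A sketch of typical use (illustrative only; the directory is made up)::
+
+        def test_something(cephadm_fs):
+            cephadm_fs.create_dir('/var/lib/ceph/example')
+            ...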
+ """
+ uid = os.getuid()
+ gid = os.getgid()
+
+ def fchown(fd, _uid, _gid):
+ """pyfakefs doesn't provide a working fchown or fchmod.
+ In order to get permissions working generally across renames
+        we need to provide our own implementation.
+ """
+ file_obj = fs.get_open_file(fd).get_object()
+ file_obj.st_uid = _uid
+ file_obj.st_gid = _gid
+
+ _cephadm = import_cephadm()
+ with mock.patch('os.fchown', side_effect=fchown), \
+ mock.patch('os.fchmod'), \
+ mock.patch('platform.processor', return_value='x86_64'), \
+ mock.patch('cephadm.extract_uid_gid', return_value=(uid, gid)):
+
+ try:
+ if not fake_filesystem.is_root():
+ fake_filesystem.set_uid(0)
+ except AttributeError:
+ pass
+
+ fs.create_dir(_cephadm.DATA_DIR)
+ fs.create_dir(_cephadm.LOG_DIR)
+ fs.create_dir(_cephadm.LOCK_DIR)
+ fs.create_dir(_cephadm.LOGROTATE_DIR)
+ fs.create_dir(_cephadm.UNIT_DIR)
+ fs.create_dir('/sys/block')
+
+ yield fs
+
+
+@pytest.fixture()
+def host_sysfs(fs: fake_filesystem.FakeFilesystem):
+ """Create a fake filesystem to represent sysfs"""
+ enc_path = '/sys/class/scsi_generic/sg2/device/enclosure/0:0:1:0'
+ dev_path = '/sys/class/scsi_generic/sg2/device'
+ slot_count = 12
+ fs.create_dir(dev_path)
+ fs.create_file(os.path.join(dev_path, 'vendor'), contents="EnclosuresInc")
+ fs.create_file(os.path.join(dev_path, 'model'), contents="D12")
+ fs.create_file(os.path.join(enc_path, 'id'), contents='1')
+ fs.create_file(os.path.join(enc_path, 'components'), contents=str(slot_count))
+ for slot_num in range(slot_count):
+ slot_dir = os.path.join(enc_path, str(slot_num))
+ fs.create_file(os.path.join(slot_dir, 'locate'), contents='0')
+ fs.create_file(os.path.join(slot_dir, 'fault'), contents='0')
+ fs.create_file(os.path.join(slot_dir, 'slot'), contents=str(slot_num))
+ if slot_num < 6:
+ fs.create_file(os.path.join(slot_dir, 'status'), contents='Ok')
+ slot_dev = os.path.join(slot_dir, 'device')
+ fs.create_dir(slot_dev)
+ fs.create_file(os.path.join(slot_dev, 'vpd_pg80'), contents=f'fake{slot_num:0>3}')
+ else:
+ fs.create_file(os.path.join(slot_dir, 'status'), contents='not installed')
+
+ yield fs
+
+
+@contextmanager
+def with_cephadm_ctx(
+ cmd: List[str],
+ list_networks: Optional[Dict[str, Dict[str, List[str]]]] = None,
+ hostname: Optional[str] = None,
+):
+ """
+ :param cmd: cephadm command argv
+ :param list_networks: mock 'list-networks' return
+ :param hostname: mock 'socket.gethostname' return
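+
+    A sketch of typical use (illustrative only; the fsid value is just an example)::
+
+        with with_cephadm_ctx([]) as ctx:
+            ctx.fsid = '00000000-0000-0000-0000-0000deadbeef'
+            ...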
+ """
+ if not hostname:
+ hostname = 'host1'
+
+ _cephadm = import_cephadm()
+ with mock.patch('cephadm.attempt_bind'), \
+ mock.patch('cephadm.call', return_value=('', '', 0)), \
+ mock.patch('cephadm.call_timeout', return_value=0), \
+ mock.patch('cephadm.find_executable', return_value='foo'), \
+ mock.patch('cephadm.get_container_info', return_value=None), \
+ mock.patch('cephadm.is_available', return_value=True), \
+ mock.patch('cephadm.json_loads_retry', return_value={'epoch' : 1}), \
+ mock.patch('cephadm.logger'), \
+ mock.patch('socket.gethostname', return_value=hostname):
+ ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx(cmd)
+ ctx.container_engine = mock_podman()
+ if list_networks is not None:
+ with mock.patch('cephadm.list_networks', return_value=list_networks):
+ yield ctx
+ else:
+ yield ctx
+
diff --git a/src/cephadm/tests/test_agent.py b/src/cephadm/tests/test_agent.py
new file mode 100644
index 000000000..f9cf201e2
--- /dev/null
+++ b/src/cephadm/tests/test_agent.py
@@ -0,0 +1,800 @@
+from unittest import mock
+import copy, datetime, json, os, socket, threading
+
+import pytest
+
+from tests.fixtures import with_cephadm_ctx, cephadm_fs, import_cephadm
+
+from typing import Dict, List, Optional, Set
+
+_cephadm = import_cephadm()
+
+
+FSID = "beefbeef-beef-beef-1234-beefbeefbeef"
+AGENT_ID = 'host1'
+AGENT_DIR = f'/var/lib/ceph/{FSID}/agent.{AGENT_ID}'
+
+
+def test_agent_validate():
+ required_files = _cephadm.CephadmAgent.required_files
+ with with_cephadm_ctx([]) as ctx:
+ agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID)
+ for i in range(len(required_files)):
+ incomplete_files = {s: 'text' for s in [f for j, f in enumerate(required_files) if j != i]}
+ with pytest.raises(_cephadm.Error, match=f'required file missing from config: {required_files[i]}'):
+ agent.validate(incomplete_files)
+ all_files = {s: 'text' for s in required_files}
+ agent.validate(all_files)
+
+
+def _check_file(path, content):
+ assert os.path.exists(path)
+ with open(path) as f:
+ fcontent = f.read()
+ assert fcontent == content
+
+
+@mock.patch('cephadm.call_throws')
+def test_agent_deploy_daemon_unit(_call_throws, cephadm_fs):
+ _call_throws.return_value = ('', '', 0)
+ agent_id = AGENT_ID
+
+ with with_cephadm_ctx([]) as ctx:
+ ctx.meta_json = json.dumps({'meta': 'data'})
+ agent = _cephadm.CephadmAgent(ctx, FSID, agent_id)
+ cephadm_fs.create_dir(AGENT_DIR)
+
+ with pytest.raises(_cephadm.Error, match='Agent needs a config'):
+ agent.deploy_daemon_unit()
+
+ config = {s: f'text for {s}' for s in _cephadm.CephadmAgent.required_files}
+ config['not-required-file.txt'] = 'don\'t write me'
+
+ agent.deploy_daemon_unit(config)
+
+        # check required config files were all created
+ for fname in _cephadm.CephadmAgent.required_files:
+ _check_file(f'{AGENT_DIR}/{fname}', f'text for {fname}')
+
+ # assert non-required file was not written
+ assert not os.path.exists(f'{AGENT_DIR}/not-required-file.txt')
+
+ # check unit.run file was created correctly
+ _check_file(f'{AGENT_DIR}/unit.run', agent.unit_run())
+
+        # check unit.meta file was created correctly
+ _check_file(f'{AGENT_DIR}/unit.meta', json.dumps({'meta': 'data'}, indent=4) + '\n')
+
+ # check unit file was created correctly
+ _check_file(f'{ctx.unit_dir}/{agent.unit_name()}', agent.unit_file())
+
+ expected_call_throws_calls = [
+ mock.call(ctx, ['systemctl', 'daemon-reload']),
+ mock.call(ctx, ['systemctl', 'enable', '--now', agent.unit_name()]),
+ ]
+ _call_throws.assert_has_calls(expected_call_throws_calls)
+
+ expected_call_calls = [
+ mock.call(ctx, ['systemctl', 'stop', agent.unit_name()], verbosity=_cephadm.CallVerbosity.DEBUG),
+ mock.call(ctx, ['systemctl', 'reset-failed', agent.unit_name()], verbosity=_cephadm.CallVerbosity.DEBUG),
+ ]
+ _cephadm.call.assert_has_calls(expected_call_calls)
+
+
+@mock.patch('threading.Thread.is_alive')
+def test_agent_shutdown(_is_alive):
+ with with_cephadm_ctx([]) as ctx:
+ agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID)
+ _is_alive.return_value = True
+ assert agent.stop == False
+ assert agent.mgr_listener.stop == False
+ assert agent.ls_gatherer.stop == False
+ assert agent.volume_gatherer.stop == False
+ agent.shutdown()
+ assert agent.stop == True
+ assert agent.mgr_listener.stop == True
+ assert agent.ls_gatherer.stop == True
+ assert agent.volume_gatherer.stop == True
+
+
+def test_agent_wakeup():
+ with with_cephadm_ctx([]) as ctx:
+ agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID)
+ assert agent.event.is_set() == False
+ agent.wakeup()
+ assert agent.event.is_set() == True
+
+
+@mock.patch("cephadm.CephadmAgent.shutdown")
+@mock.patch("cephadm.AgentGatherer.update_func")
+def test_pull_conf_settings(_update_func, _shutdown, cephadm_fs):
+ target_ip = '192.168.0.0'
+ target_port = 9876
+ refresh_period = 20
+ listener_port = 5678
+ host = AGENT_ID
+ device_enhanced_scan = 'True'
+ with with_cephadm_ctx([]) as ctx:
+ agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID)
+ full_config = {
+ 'target_ip': target_ip,
+ 'target_port': target_port,
+ 'refresh_period': refresh_period,
+ 'listener_port': listener_port,
+ 'host': host,
+ 'device_enhanced_scan': device_enhanced_scan
+ }
+ cephadm_fs.create_dir(AGENT_DIR)
+ with open(agent.config_path, 'w') as f:
+ f.write(json.dumps(full_config))
+
+ with pytest.raises(_cephadm.Error, match="Failed to get agent keyring:"):
+ agent.pull_conf_settings()
+ _shutdown.assert_called()
+ with open(agent.keyring_path, 'w') as f:
+ f.write('keyring')
+
+ assert agent.device_enhanced_scan == False
+ agent.pull_conf_settings()
+ assert agent.host == host
+ assert agent.target_ip == target_ip
+ assert agent.target_port == target_port
+ assert agent.loop_interval == refresh_period
+ assert agent.starting_port == listener_port
+ assert agent.device_enhanced_scan == True
+ assert agent.keyring == 'keyring'
+ _update_func.assert_called()
+
+ full_config.pop('target_ip')
+ with open(agent.config_path, 'w') as f:
+ f.write(json.dumps(full_config))
+ with pytest.raises(_cephadm.Error, match="Failed to get agent target ip and port from config:"):
+ agent.pull_conf_settings()
+
+
+@mock.patch("cephadm.command_ceph_volume")
+def test_agent_ceph_volume(_ceph_volume):
+
+ def _ceph_volume_outputter(_):
+ print("ceph-volume output")
+
+ def _ceph_volume_empty(_):
+ pass
+
+ with with_cephadm_ctx([]) as ctx:
+ agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID)
+
+ _ceph_volume.side_effect = _ceph_volume_outputter
+ out, _ = agent._ceph_volume(False)
+ assert ctx.command == ['inventory', '--format=json']
+ assert out == "ceph-volume output\n"
+
+ out, _ = agent._ceph_volume(True)
+ assert ctx.command == ['inventory', '--format=json', '--with-lsm']
+ assert out == "ceph-volume output\n"
+
+ _ceph_volume.side_effect = _ceph_volume_empty
+ with pytest.raises(Exception, match='ceph-volume returned empty value'):
+ out, _ = agent._ceph_volume(False)
+
+
+def test_agent_daemon_ls_subset(cephadm_fs):
+ # Basing part of this test on some actual sample output
+
+ # Some sample "podman stats --format '{{.ID}},{{.MemUsage}}' --no-stream" output
+ # 3f2b31d19ecd,456.4MB / 41.96GB
+ # 5aca2499e0f8,7.082MB / 41.96GB
+ # fe0cef07d5f7,35.91MB / 41.96GB
+
+ # Sample "podman ps --format '{{.ID}},{{.Names}}' --no-trunc" output with the same containers
+ # fe0cef07d5f71c5c604f7d1b4a4ac2e27873c96089d015014524e803361b4a30,ceph-4434fa7c-5602-11ed-b719-5254006ef86b-mon-host1
+ # 3f2b31d19ecdd586640cc9c6ef7c0fe62157a3f7a71fcb60c91e70660340cd1f,ceph-4434fa7c-5602-11ed-b719-5254006ef86b-mgr-host1-pntmho
+ # 5aca2499e0f8fb903788ff90eb03fe6ed58c7ed177caf278fed199936aff7b4a,ceph-4434fa7c-5602-11ed-b719-5254006ef86b-crash-host1
+
+ # Some of the components from that output
+ mgr_cid = '3f2b31d19ecdd586640cc9c6ef7c0fe62157a3f7a71fcb60c91e70660340cd1f'
+ mon_cid = 'fe0cef07d5f71c5c604f7d1b4a4ac2e27873c96089d015014524e803361b4a30'
+ crash_cid = '5aca2499e0f8fb903788ff90eb03fe6ed58c7ed177caf278fed199936aff7b4a'
+ mgr_short_cid = mgr_cid[0:12]
+ mon_short_cid = mon_cid[0:12]
+ crash_short_cid = crash_cid[0:12]
+
+    # Rebuild the output with our testing FSID and components (so it can be altered later if needed)
+ mem_out = f"""{mgr_short_cid},456.4MB / 41.96GB
+{crash_short_cid},7.082MB / 41.96GB
+{mon_short_cid},35.91MB / 41.96GB"""
+
+ ps_out = f"""{mon_cid},ceph-{FSID}-mon-host1
+{mgr_cid},ceph-{FSID}-mgr-host1-pntmho
+{crash_cid},ceph-{FSID}-crash-host1"""
+
+ def _fake_call(ctx, cmd, desc=None, verbosity=_cephadm.CallVerbosity.VERBOSE_ON_FAILURE, timeout=_cephadm.DEFAULT_TIMEOUT, **kwargs):
+ if 'stats' in cmd:
+ return (mem_out, '', 0)
+ elif 'ps' in cmd:
+ return (ps_out, '', 0)
+ return ('out', 'err', 0)
+
+ cephadm_fs.create_dir(AGENT_DIR)
+ cephadm_fs.create_dir(f'/var/lib/ceph/mon/ceph-host1') # legacy daemon
+ cephadm_fs.create_dir(f'/var/lib/ceph/osd/nothing') # improper directory, should be skipped
+ cephadm_fs.create_dir(f'/var/lib/ceph/{FSID}/mgr.host1.pntmho') # cephadm daemon
+ cephadm_fs.create_dir(f'/var/lib/ceph/{FSID}/crash.host1') # cephadm daemon
+
+ with with_cephadm_ctx([]) as ctx:
+ ctx.fsid = FSID
+ agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID)
+ _cephadm.call.side_effect = _fake_call
+ daemons = agent._daemon_ls_subset()
+
+ assert 'agent.host1' in daemons
+ assert 'mgr.host1.pntmho' in daemons
+ assert 'crash.host1' in daemons
+ assert 'mon.host1' in daemons
+
+ assert daemons['mon.host1']['style'] == 'legacy'
+ assert daemons['mgr.host1.pntmho']['style'] == 'cephadm:v1'
+ assert daemons['crash.host1']['style'] == 'cephadm:v1'
+ assert daemons['agent.host1']['style'] == 'cephadm:v1'
+
+ assert daemons['mgr.host1.pntmho']['systemd_unit'] == f'ceph-{FSID}@mgr.host1.pntmho'
+ assert daemons['agent.host1']['systemd_unit'] == f'ceph-{FSID}@agent.host1'
+ assert daemons['crash.host1']['systemd_unit'] == f'ceph-{FSID}@crash.host1'
+
+ assert daemons['mgr.host1.pntmho']['container_id'] == mgr_cid
+ assert daemons['crash.host1']['container_id'] == crash_cid
+
+ assert daemons['mgr.host1.pntmho']['memory_usage'] == 478570086 # 456.4 MB
+ assert daemons['crash.host1']['memory_usage'] == 7426015 # 7.082 MB
+
+
+@mock.patch("cephadm.list_daemons")
+@mock.patch("cephadm.CephadmAgent._daemon_ls_subset")
+def test_agent_get_ls(_ls_subset, _ls, cephadm_fs):
+ ls_out = [{
+ "style": "cephadm:v1",
+ "name": "mgr.host1.pntmho",
+ "fsid": FSID,
+ "systemd_unit": f"ceph-{FSID}@mgr.host1.pntmho",
+ "enabled": True,
+ "state": "running",
+ "service_name": "mgr",
+ "memory_request": None,
+ "memory_limit": None,
+ "ports": [
+ 9283,
+ 8765
+ ],
+ "container_id": "3f2b31d19ecdd586640cc9c6ef7c0fe62157a3f7a71fcb60c91e70660340cd1f",
+ "container_image_name": "quay.io/ceph/ceph:testing",
+ "container_image_id": "3300e39269f0c13ae45026cf233d8b3fff1303d52f2598a69c7fba0bb8405164",
+ "container_image_digests": [
+ "quay.io/ceph/ceph@sha256:d4f3522528ee79904f9e530bdce438acac30a039e9a0b3cf31d8b614f9f96a30"
+ ],
+ "memory_usage": 507510784,
+ "cpu_percentage": "5.95%",
+ "version": "18.0.0-556-gb4d1a199",
+ "started": "2022-10-27T14:19:36.086664Z",
+ "created": "2022-10-27T14:19:36.282281Z",
+ "deployed": "2022-10-27T14:19:35.377275Z",
+ "configured": "2022-10-27T14:22:40.316912Z"
+ },{
+ "style": "cephadm:v1",
+ "name": "agent.host1",
+ "fsid": FSID,
+ "systemd_unit": f"ceph-{FSID}@agent.host1",
+ "enabled": True,
+ "state": "running",
+ "service_name": "agent",
+ "ports": [],
+ "ip": None,
+ "deployed_by": [
+ "quay.io/ceph/ceph@sha256:d4f3522528ee79904f9e530bdce438acac30a039e9a0b3cf31d8b614f9f96a30"
+ ],
+ "rank": None,
+ "rank_generation": None,
+ "extra_container_args": None,
+ "container_id": None,
+ "container_image_name": None,
+ "container_image_id": None,
+ "container_image_digests": None,
+ "version": None,
+ "started": None,
+ "created": "2022-10-27T19:46:49.751594Z",
+ "deployed": None,
+ "configured": "2022-10-27T19:46:49.751594Z"
+ }, {
+ "style": "legacy",
+ "name": "mon.host1",
+ "fsid": FSID,
+ "systemd_unit": "ceph-mon@host1",
+ "enabled": False,
+ "state": "stopped",
+ "host_version": None
+ }]
+
+ ls_subset_out = {
+ 'mgr.host1.pntmho': {
+ "style": "cephadm:v1",
+ "fsid": FSID,
+ "systemd_unit": f"ceph-{FSID}@mgr.host1.pntmho",
+ "enabled": True,
+ "state": "running",
+ "container_id": "3f2b31d19ecdd586640cc9c6ef7c0fe62157a3f7a71fcb60c91e70660340cd1f",
+ "memory_usage": 507510784,
+ },
+ 'agent.host1': {
+ "style": "cephadm:v1",
+ "fsid": FSID,
+ "systemd_unit": f"ceph-{FSID}@agent.host1",
+ "enabled": True,
+ "state": "running",
+ "container_id": None
+ }, 'mon.host1': {
+ "style": "legacy",
+ "name": "mon.host1",
+ "fsid": FSID,
+ "systemd_unit": "ceph-mon@host1",
+ "enabled": False,
+ "state": "stopped",
+ "host_version": None
+ }}
+
+ _ls.return_value = ls_out
+ _ls_subset.return_value = ls_subset_out
+
+ with with_cephadm_ctx([]) as ctx:
+ ctx.fsid = FSID
+ agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID)
+
+ # first pass, no cached daemon metadata
+ daemons, changed = agent._get_ls()
+ assert daemons == ls_out
+ assert changed
+
+ # second pass, should recognize that daemons have not changed and just keep cached values
+ daemons, changed = agent._get_ls()
+        assert daemons == ls_out
+ assert not changed
+
+ # change a container id so it needs to get more info
+ ls_subset_out2 = copy.deepcopy(ls_subset_out)
+ ls_out2 = copy.deepcopy(ls_out)
+ ls_subset_out2['mgr.host1.pntmho']['container_id'] = '3f2b31d19ecdd586640cc9c6ef7c0fe62157a3f7a71fcb60c91e7066034aaaaa'
+ ls_out2[0]['container_id'] = '3f2b31d19ecdd586640cc9c6ef7c0fe62157a3f7a71fcb60c91e7066034aaaaa'
+ _ls.return_value = ls_out2
+ _ls_subset.return_value = ls_subset_out2
+ assert agent.cached_ls_values['mgr.host1.pntmho']['container_id'] == "3f2b31d19ecdd586640cc9c6ef7c0fe62157a3f7a71fcb60c91e70660340cd1f"
+ daemons, changed = agent._get_ls()
+ assert daemons == ls_out2
+ assert changed
+
+ # run again with the same data so it should use cached values
+ daemons, changed = agent._get_ls()
+ assert daemons == ls_out2
+ assert not changed
+
+ # change the state of a container so new daemon metadata is needed
+ ls_subset_out3 = copy.deepcopy(ls_subset_out2)
+ ls_out3 = copy.deepcopy(ls_out2)
+ ls_subset_out3['mgr.host1.pntmho']['enabled'] = False
+ ls_out3[0]['enabled'] = False
+ _ls.return_value = ls_out3
+ _ls_subset.return_value = ls_subset_out3
+ assert agent.cached_ls_values['mgr.host1.pntmho']['enabled'] == True
+ daemons, changed = agent._get_ls()
+ assert daemons == ls_out3
+ assert changed
+
+ # run again with the same data so it should use cached values
+ daemons, changed = agent._get_ls()
+ assert daemons == ls_out3
+ assert not changed
+
+        # remove a daemon so new metadata is needed
+ ls_subset_out4 = copy.deepcopy(ls_subset_out3)
+ ls_out4 = copy.deepcopy(ls_out3)
+ ls_subset_out4.pop('mon.host1')
+ ls_out4.pop()
+ _ls.return_value = ls_out4
+ _ls_subset.return_value = ls_subset_out4
+ assert 'mon.host1' in agent.cached_ls_values
+ daemons, changed = agent._get_ls()
+ assert daemons == ls_out4
+ assert changed
+
+ # run again with the same data so it should use cached values
+ daemons, changed = agent._get_ls()
+ assert daemons == ls_out4
+ assert not changed
+
+
+@mock.patch("threading.Event.clear")
+@mock.patch("threading.Event.wait")
+@mock.patch("urllib.request.Request.__init__")
+@mock.patch("cephadm.urlopen")
+@mock.patch("cephadm.list_networks")
+@mock.patch("cephadm.HostFacts.dump")
+@mock.patch("cephadm.HostFacts.__init__", lambda _, __: None)
+@mock.patch("ssl.SSLContext.load_verify_locations")
+@mock.patch("threading.Thread.is_alive")
+@mock.patch("cephadm.MgrListener.start")
+@mock.patch("cephadm.AgentGatherer.start")
+@mock.patch("cephadm.port_in_use")
+@mock.patch("cephadm.CephadmAgent.pull_conf_settings")
+def test_agent_run(_pull_conf_settings, _port_in_use, _gatherer_start,
+ _listener_start, _is_alive, _load_verify_locations,
+ _HF_dump, _list_networks, _urlopen, _RQ_init, _wait, _clear):
+ target_ip = '192.168.0.0'
+ target_port = '9999'
+ refresh_period = 20
+ listener_port = 7770
+ open_listener_port = 7777
+ host = AGENT_ID
+ device_enhanced_scan = False
+
+ def _fake_port_in_use(ctx, endpoint):
+ if endpoint.port == open_listener_port:
+ return False
+ return True
+
+ network_data: Dict[str, Dict[str, Set[str]]] = {
+ "10.2.1.0/24": {
+ "eth1": set(["10.2.1.122"])
+ },
+ "192.168.122.0/24": {
+ "eth0": set(["192.168.122.221"])
+ },
+ "fe80::/64": {
+ "eth0": set(["fe80::5054:ff:fe3f:d94e"]),
+ "eth1": set(["fe80::5054:ff:fe3f:aa4a"]),
+ }
+ }
+
+ # the json serializable version of the networks data
+ # we expect the agent to actually send
+ network_data_no_sets: Dict[str, Dict[str, List[str]]] = {
+ "10.2.1.0/24": {
+ "eth1": ["10.2.1.122"]
+ },
+ "192.168.122.0/24": {
+ "eth0": ["192.168.122.221"]
+ },
+ "fe80::/64": {
+ "eth0": ["fe80::5054:ff:fe3f:d94e"],
+ "eth1": ["fe80::5054:ff:fe3f:aa4a"],
+ }
+ }
+
+ class FakeHTTPResponse():
+ def __init__(self):
+ pass
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, tb):
+ pass
+
+ def read(self):
+ return json.dumps({'valid': 'output', 'result': '400'})
+
+ _port_in_use.side_effect = _fake_port_in_use
+ _is_alive.return_value = False
+ _HF_dump.return_value = 'Host Facts'
+ _list_networks.return_value = network_data
+ _urlopen.side_effect = lambda *args, **kwargs: FakeHTTPResponse()
+ _RQ_init.side_effect = lambda *args, **kwargs: None
+ with with_cephadm_ctx([]) as ctx:
+ ctx.fsid = FSID
+ agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID)
+ agent.keyring = 'agent keyring'
+ agent.ack = 7
+ agent.volume_gatherer.ack = 7
+ agent.volume_gatherer.data = 'ceph-volume inventory data'
+ agent.ls_gatherer.ack = 7
+ agent.ls_gatherer.data = [{'valid_daemon': 'valid_metadata'}]
+
+ def _set_conf():
+ agent.target_ip = target_ip
+ agent.target_port = target_port
+ agent.loop_interval = refresh_period
+ agent.starting_port = listener_port
+ agent.host = host
+ agent.device_enhanced_scan = device_enhanced_scan
+ _pull_conf_settings.side_effect = _set_conf
+
+        # technically the run function loops forever unless the agent
+        # is told to stop. To get around that we give the patched
+        # threading.Event.clear() (called at the end of the loop) a side
+        # effect that raises a special exception type. If we catch this
+        # exception we can consider the run a "success"
+ class EventCleared(Exception):
+ pass
+
+ _clear.side_effect = EventCleared('SUCCESS')
+ with pytest.raises(EventCleared, match='SUCCESS'):
+ agent.run()
+
+ expected_data = {
+ 'host': host,
+ 'ls': [{'valid_daemon': 'valid_metadata'}],
+ 'networks': network_data_no_sets,
+ 'facts': 'Host Facts',
+ 'volume': 'ceph-volume inventory data',
+ 'ack': str(7),
+ 'keyring': 'agent keyring',
+ 'port': str(open_listener_port)
+ }
+ _RQ_init.assert_called_with(
+ f'https://{target_ip}:{target_port}/data/',
+ json.dumps(expected_data).encode('ascii'),
+ {'Content-Type': 'application/json'}
+ )
+ _listener_start.assert_called()
+ _gatherer_start.assert_called()
+ _urlopen.assert_called()
+
+ # agent should not go down if connections fail
+ _urlopen.side_effect = Exception()
+ with pytest.raises(EventCleared, match='SUCCESS'):
+ agent.run()
+
+ # should fail if no ports are open for listener
+ _port_in_use.side_effect = lambda _, __: True
+ agent.listener_port = None
+ with pytest.raises(Exception, match='Failed to pick port for agent to listen on: All 1000 ports starting at 7770 taken.'):
+ agent.run()
+
+
+@mock.patch("cephadm.CephadmAgent.pull_conf_settings")
+@mock.patch("cephadm.CephadmAgent.wakeup")
+def test_mgr_listener_handle_json_payload(_agent_wakeup, _pull_conf_settings, cephadm_fs):
+ with with_cephadm_ctx([]) as ctx:
+ ctx.fsid = FSID
+ agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID)
+ cephadm_fs.create_dir(AGENT_DIR)
+
+ data_no_config = {
+ 'counter': 7
+ }
+ agent.mgr_listener.handle_json_payload(data_no_config)
+ _agent_wakeup.assert_not_called()
+ _pull_conf_settings.assert_not_called()
+ assert not any(os.path.exists(os.path.join(AGENT_DIR, s)) for s in agent.required_files)
+
+ data_with_config = {
+ 'counter': 7,
+ 'config': {
+ 'unrequired-file': 'unrequired-text'
+ }
+ }
+ data_with_config['config'].update({s: f'{s} text' for s in agent.required_files if s != agent.required_files[2]})
+ agent.mgr_listener.handle_json_payload(data_with_config)
+ _agent_wakeup.assert_called()
+ _pull_conf_settings.assert_called()
+ assert all(os.path.exists(os.path.join(AGENT_DIR, s)) for s in agent.required_files if s != agent.required_files[2])
+ assert not os.path.exists(os.path.join(AGENT_DIR, agent.required_files[2]))
+ assert not os.path.exists(os.path.join(AGENT_DIR, 'unrequired-file'))
+
+
+@mock.patch("socket.socket")
+@mock.patch("ssl.SSLContext.wrap_socket")
+@mock.patch("cephadm.MgrListener.handle_json_payload")
+@mock.patch("ssl.SSLContext.load_verify_locations")
+@mock.patch("ssl.SSLContext.load_cert_chain")
+def test_mgr_listener_run(_load_cert_chain, _load_verify_locations, _handle_json_payload,
+ _wrap_context, _socket, cephadm_fs):
+
+ with with_cephadm_ctx([]) as ctx:
+ ctx.fsid = FSID
+ agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID)
+ cephadm_fs.create_dir(AGENT_DIR)
+
+ payload = json.dumps({'counter': 3,
+ 'config': {s: f'{s} text' for s in agent.required_files if s != agent.required_files[1]}})
+
+ class FakeSocket:
+
+ def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, fileno=None):
+ self.family = family
+ self.type = type
+
+ def bind(*args, **kwargs):
+ return
+
+ def settimeout(*args, **kwargs):
+ return
+
+ def listen(*args, **kwargs):
+ return
+
+ class FakeSecureSocket:
+
+ def __init__(self, pload):
+ self.payload = pload
+ self._conn = FakeConn(self.payload)
+ self.accepted = False
+
+ def accept(self):
+                # to make the mgr listener's run loop stop, set its stop
+                # flag after accepting a "connection" on our fake socket
+                # so only one iteration of the loop actually happens
+ agent.mgr_listener.stop = True
+                self.accepted = True
+ return self._conn, None
+
+ def load_cert_chain(*args, **kwargs):
+ return
+
+ def load_verify_locations(*args, **kwargs):
+ return
+
+ class FakeConn:
+
+ def __init__(self, payload: str = ''):
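+                # mirror the listener's framing: a zero-padded, 10-character
+                # length field followed by the json payload itself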
+ payload_len_str = str(len(payload.encode('utf-8')))
+ while len(payload_len_str.encode('utf-8')) < 10:
+ payload_len_str = '0' + payload_len_str
+ self.payload = (payload_len_str + payload).encode('utf-8')
+ self.buffer_len = len(self.payload)
+
+ def recv(self, len: Optional[int] = None):
+ if not len or len >= self.buffer_len:
+ ret = self.payload
+ self.payload = b''
+ self.buffer_len = 0
+ return ret
+ else:
+ ret = self.payload[:len]
+ self.payload = self.payload[len:]
+ self.buffer_len = self.buffer_len - len
+ return ret
+
+ FSS_good_data = FakeSecureSocket(payload)
+ FSS_bad_json = FakeSecureSocket('bad json')
+ _socket = FakeSocket
+ agent.listener_port = 7777
+
+ # first run, should successfully receive properly structured json payload
+ _wrap_context.side_effect = [FSS_good_data]
+ agent.mgr_listener.stop = False
+ FakeConn.send = mock.Mock(return_value=None)
+ agent.mgr_listener.run()
+
+ # verify payload was correctly extracted
+        _handle_json_payload.assert_called_with(json.loads(payload))
+ FakeConn.send.assert_called_once_with(b'ACK')
+
+ # second run, with bad json data received
+ _wrap_context.side_effect = [FSS_bad_json]
+ agent.mgr_listener.stop = False
+ FakeConn.send = mock.Mock(return_value=None)
+ agent.mgr_listener.run()
+ FakeConn.send.assert_called_once_with(b'Failed to extract json payload from message: Expecting value: line 1 column 1 (char 0)')
+
+        # third run, no proper length at the beginning of the payload
+ FSS_no_length = FakeSecureSocket(payload)
+ FSS_no_length.payload = FSS_no_length.payload[10:]
+ FSS_no_length._conn.payload = FSS_no_length._conn.payload[10:]
+ FSS_no_length._conn.buffer_len -= 10
+ _wrap_context.side_effect = [FSS_no_length]
+ agent.mgr_listener.stop = False
+ FakeConn.send = mock.Mock(return_value=None)
+ agent.mgr_listener.run()
+ FakeConn.send.assert_called_once_with(b'Failed to extract length of payload from message: invalid literal for int() with base 10: \'{"counter"\'')
+
+ # some exception handling for full coverage
+ FSS_exc_testing = FakeSecureSocket(payload)
+ FSS_exc_testing.accept = mock.MagicMock()
+
+ def _accept(*args, **kwargs):
+ if not FSS_exc_testing.accepted:
+ FSS_exc_testing.accepted = True
+ raise socket.timeout()
+ else:
+ agent.mgr_listener.stop = True
+ raise Exception()
+
+ FSS_exc_testing.accept.side_effect = _accept
+ _wrap_context.side_effect = [FSS_exc_testing]
+ agent.mgr_listener.stop = False
+ FakeConn.send = mock.Mock(return_value=None)
+ agent.mgr_listener.run()
+ FakeConn.send.assert_not_called()
+ FSS_exc_testing.accept.call_count == 3
+
+
+@mock.patch("cephadm.CephadmAgent._get_ls")
+def test_gatherer_update_func(_get_ls, cephadm_fs):
+ with with_cephadm_ctx([]) as ctx:
+ ctx.fsid = FSID
+ agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID)
+ cephadm_fs.create_dir(AGENT_DIR)
+
+ def _sample_func():
+ return 7
+
+ agent.ls_gatherer.func()
+ _get_ls.assert_called()
+
+        _get_ls.reset_mock()
+ agent.ls_gatherer.update_func(_sample_func)
+ out = agent.ls_gatherer.func()
+ assert out == 7
+ _get_ls.assert_not_called()
+
+
+@mock.patch("cephadm.CephadmAgent.wakeup")
+@mock.patch("time.monotonic")
+@mock.patch("threading.Event.wait")
+def test_gatherer_run(_wait, _time, _agent_wakeup, cephadm_fs):
+ with with_cephadm_ctx([]) as ctx:
+ ctx.fsid = FSID
+ agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID)
+ cephadm_fs.create_dir(AGENT_DIR)
+ agent.loop_interval = 30
+ agent.ack = 23
+
+ _sample_func = lambda *args, **kwargs: ('sample out', True)
+ agent.ls_gatherer.update_func(_sample_func)
+ agent.ls_gatherer.ack = 20
+ agent.ls_gatherer.stop = False
+
+ def _fake_clear(*args, **kwargs):
+ agent.ls_gatherer.stop = True
+
+ _time.side_effect = [0, 20, 0, 20, 0, 20] # start at time 0, complete at time 20
+ _wait.return_value = None
+
+ with mock.patch("threading.Event.clear") as _clear:
+ _clear.side_effect = _fake_clear
+ agent.ls_gatherer.run()
+
+ _wait.assert_called_with(10) # agent loop_interval - run time
+ assert agent.ls_gatherer.data == 'sample out'
+ assert agent.ls_gatherer.ack == 23
+ _agent_wakeup.assert_called_once()
+ _clear.assert_called_once()
+
+ _exc_func = lambda *args, **kwargs: Exception()
+ agent.ls_gatherer.update_func(_exc_func)
+ agent.ls_gatherer.ack = 20
+ agent.ls_gatherer.stop = False
+
+ with mock.patch("threading.Event.clear") as _clear:
+ _clear.side_effect = _fake_clear
+ agent.ls_gatherer.run()
+ assert agent.ls_gatherer.data is None
+ assert agent.ls_gatherer.ack == agent.ack
+ # should have run full loop despite exception
+ _clear.assert_called_once()
+
+ # test general exception for full coverage
+ _agent_wakeup.side_effect = [Exception()]
+ agent.ls_gatherer.update_func(_sample_func)
+ agent.ls_gatherer.stop = False
+ # just to force only one iteration
+ _time.side_effect = _fake_clear
+ with mock.patch("threading.Event.clear") as _clear:
+ _clear.side_effect = Exception()
+ agent.ls_gatherer.run()
+ assert agent.ls_gatherer.data == 'sample out'
+ assert agent.ls_gatherer.ack == agent.ack
+ # should not have gotten to end of loop
+ _clear.assert_not_called()
+
+
+@mock.patch("cephadm.CephadmAgent.run")
+def test_command_agent(_agent_run, cephadm_fs):
+ with with_cephadm_ctx([]) as ctx:
+ ctx.fsid = FSID
+ ctx.daemon_id = AGENT_ID
+
+ with pytest.raises(Exception, match=f"Agent daemon directory {AGENT_DIR} does not exist. Perhaps agent was never deployed?"):
+ _cephadm.command_agent(ctx)
+
+ cephadm_fs.create_dir(AGENT_DIR)
+ _cephadm.command_agent(ctx)
+ _agent_run.assert_called()
diff --git a/src/cephadm/tests/test_cephadm.py b/src/cephadm/tests/test_cephadm.py
new file mode 100644
index 000000000..d310215f6
--- /dev/null
+++ b/src/cephadm/tests/test_cephadm.py
@@ -0,0 +1,2708 @@
+# type: ignore
+
+import errno
+import json
+import mock
+import os
+import pytest
+import socket
+import unittest
+from textwrap import dedent
+
+from .fixtures import (
+ cephadm_fs,
+ mock_docker,
+ mock_podman,
+ with_cephadm_ctx,
+ mock_bad_firewalld,
+ import_cephadm,
+)
+
+from pyfakefs import fake_filesystem
+from pyfakefs import fake_filesystem_unittest
+
+_cephadm = import_cephadm()
+
+
+def get_ceph_conf(
+ fsid='00000000-0000-0000-0000-0000deadbeef',
+ mon_host='[v2:192.168.1.1:3300/0,v1:192.168.1.1:6789/0]'):
+ return f'''
+# minimal ceph.conf for {fsid}
+[global]
+ fsid = {fsid}
+ mon_host = {mon_host}
+'''
+
+class TestCephAdm(object):
+
+ def test_docker_unit_file(self):
+ ctx = _cephadm.CephadmContext()
+ ctx.container_engine = mock_docker()
+ r = _cephadm.get_unit_file(ctx, '9b9d7609-f4d5-4aba-94c8-effa764d96c9')
+ assert 'Requires=docker.service' in r
+ ctx.container_engine = mock_podman()
+ r = _cephadm.get_unit_file(ctx, '9b9d7609-f4d5-4aba-94c8-effa764d96c9')
+ assert 'Requires=docker.service' not in r
+
+ @mock.patch('cephadm.logger')
+ def test_attempt_bind(self, _logger):
+ ctx = None
+ address = None
+ port = 0
+
+ def os_error(errno):
+ _os_error = OSError()
+ _os_error.errno = errno
+ return _os_error
+
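+        # each pair is (socket.bind side effect, exception attempt_bind is
+        # expected to surface); (None, None) means the bind succeeds cleanly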
+ for side_effect, expected_exception in (
+ (os_error(errno.EADDRINUSE), _cephadm.PortOccupiedError),
+ (os_error(errno.EAFNOSUPPORT), OSError),
+ (os_error(errno.EADDRNOTAVAIL), OSError),
+ (None, None),
+ ):
+ _socket = mock.Mock()
+ _socket.bind.side_effect = side_effect
+ try:
+ _cephadm.attempt_bind(ctx, _socket, address, port)
+ except Exception as e:
+ assert isinstance(e, expected_exception)
+ else:
+ if expected_exception is not None:
+ assert False
+
+ @mock.patch('cephadm.attempt_bind')
+ @mock.patch('cephadm.logger')
+ def test_port_in_use(self, _logger, _attempt_bind):
+ empty_ctx = None
+
+ assert _cephadm.port_in_use(empty_ctx, _cephadm.EndPoint('0.0.0.0', 9100)) == False
+
+ _attempt_bind.side_effect = _cephadm.PortOccupiedError('msg')
+ assert _cephadm.port_in_use(empty_ctx, _cephadm.EndPoint('0.0.0.0', 9100)) == True
+
+ os_error = OSError()
+ os_error.errno = errno.EADDRNOTAVAIL
+ _attempt_bind.side_effect = os_error
+ assert _cephadm.port_in_use(empty_ctx, _cephadm.EndPoint('0.0.0.0', 9100)) == False
+
+ os_error = OSError()
+ os_error.errno = errno.EAFNOSUPPORT
+ _attempt_bind.side_effect = os_error
+ assert _cephadm.port_in_use(empty_ctx, _cephadm.EndPoint('0.0.0.0', 9100)) == False
+
+ @mock.patch('cephadm.socket.socket.bind')
+ @mock.patch('cephadm.logger')
+ def test_port_in_use_special_cases(self, _logger, _bind):
+ # port_in_use has special handling for
+ # EAFNOSUPPORT and EADDRNOTAVAIL errno OSErrors.
+ # If we get those specific errors when attempting
+ # to bind to the ip:port we should not say the
+ # port is in use
+
+ def os_error(errno):
+ _os_error = OSError()
+ _os_error.errno = errno
+ return _os_error
+
+ _bind.side_effect = os_error(errno.EADDRNOTAVAIL)
+ in_use = _cephadm.port_in_use(None, _cephadm.EndPoint('1.2.3.4', 10000))
+ assert in_use == False
+
+ _bind.side_effect = os_error(errno.EAFNOSUPPORT)
+ in_use = _cephadm.port_in_use(None, _cephadm.EndPoint('1.2.3.4', 10000))
+ assert in_use == False
+
+ # this time, have it raise the actual port taken error
+ # so it should report the port is in use
+ _bind.side_effect = os_error(errno.EADDRINUSE)
+ in_use = _cephadm.port_in_use(None, _cephadm.EndPoint('1.2.3.4', 10000))
+ assert in_use == True
+
+ @mock.patch('cephadm.attempt_bind')
+ @mock.patch('cephadm.logger')
+ def test_port_in_use_with_specific_ips(self, _logger, _attempt_bind):
+ empty_ctx = None
+
+ def _fake_attempt_bind(ctx, s: socket.socket, addr: str, port: int) -> None:
+ occupied_error = _cephadm.PortOccupiedError('msg')
+ if addr.startswith('200'):
+ raise occupied_error
+ if addr.startswith('100'):
+ if port == 4567:
+ raise occupied_error
+
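+        # the fake treats every 200.x address as occupied and 100.x addresses
+        # as occupied only on port 4567; everything else binds fine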
+ _attempt_bind.side_effect = _fake_attempt_bind
+
+ assert _cephadm.port_in_use(empty_ctx, _cephadm.EndPoint('200.0.0.0', 9100)) == True
+ assert _cephadm.port_in_use(empty_ctx, _cephadm.EndPoint('100.0.0.0', 9100)) == False
+ assert _cephadm.port_in_use(empty_ctx, _cephadm.EndPoint('100.0.0.0', 4567)) == True
+ assert _cephadm.port_in_use(empty_ctx, _cephadm.EndPoint('155.0.0.0', 4567)) == False
+
+ @mock.patch('socket.socket')
+ @mock.patch('cephadm.logger')
+ def test_check_ip_port_success(self, _logger, _socket):
+ ctx = _cephadm.CephadmContext()
+ ctx.skip_ping_check = False # enables executing port check with `check_ip_port`
+
+ for address, address_family in (
+ ('0.0.0.0', socket.AF_INET),
+ ('::', socket.AF_INET6),
+ ):
+ try:
+ _cephadm.check_ip_port(ctx, _cephadm.EndPoint(address, 9100))
+ except:
+ assert False
+ else:
+ assert _socket.call_args == mock.call(address_family, socket.SOCK_STREAM)
+
+ @mock.patch('socket.socket')
+ @mock.patch('cephadm.logger')
+ def test_check_ip_port_failure(self, _logger, _socket):
+ ctx = _cephadm.CephadmContext()
+ ctx.skip_ping_check = False # enables executing port check with `check_ip_port`
+
+ def os_error(errno):
+ _os_error = OSError()
+ _os_error.errno = errno
+ return _os_error
+
+ for address, address_family in (
+ ('0.0.0.0', socket.AF_INET),
+ ('::', socket.AF_INET6),
+ ):
+ for side_effect, expected_exception in (
+ (os_error(errno.EADDRINUSE), _cephadm.PortOccupiedError),
+ (os_error(errno.EADDRNOTAVAIL), OSError),
+ (os_error(errno.EAFNOSUPPORT), OSError),
+ (None, None),
+ ):
+ mock_socket_obj = mock.Mock()
+ mock_socket_obj.bind.side_effect = side_effect
+ _socket.return_value = mock_socket_obj
+ try:
+ _cephadm.check_ip_port(ctx, _cephadm.EndPoint(address, 9100))
+ except Exception as e:
+ assert isinstance(e, expected_exception)
+ else:
+ if side_effect is not None:
+ assert False
+
+
+ def test_is_not_fsid(self):
+ assert not _cephadm.is_fsid('no-uuid')
+
+ def test_is_fsid(self):
+ assert _cephadm.is_fsid('e863154d-33c7-4350-bca5-921e0467e55b')
+
+ def test__get_parser_image(self):
+ args = _cephadm._parse_args(['--image', 'foo', 'version'])
+ assert args.image == 'foo'
+
+ def test_check_required_global_args(self):
+ ctx = _cephadm.CephadmContext()
+ mock_fn = mock.Mock()
+ mock_fn.return_value = 0
+ require_image = _cephadm.require_image(mock_fn)
+
+ with pytest.raises(_cephadm.Error, match='This command requires the global --image option to be set'):
+ require_image(ctx)
+
+ ctx.image = 'sample-image'
+ require_image(ctx)
+
+ @mock.patch('cephadm.logger')
+ def test_parse_mem_usage(self, _logger):
+ len, summary = _cephadm._parse_mem_usage(0, 'c6290e3f1489,-- / --')
+ assert summary == {}
+
+ def test_CustomValidation(self):
+ assert _cephadm._parse_args(['deploy', '--name', 'mon.a', '--fsid', 'fsid'])
+
+ with pytest.raises(SystemExit):
+ _cephadm._parse_args(['deploy', '--name', 'wrong', '--fsid', 'fsid'])
+
+ @pytest.mark.parametrize("test_input, expected", [
+ ("1.6.2", (1,6,2)),
+ ("1.6.2-stable2", (1,6,2)),
+ ])
+ def test_parse_podman_version(self, test_input, expected):
+ assert _cephadm._parse_podman_version(test_input) == expected
+
+ def test_parse_podman_version_invalid(self):
+ with pytest.raises(ValueError) as res:
+ _cephadm._parse_podman_version('inval.id')
+ assert 'inval' in str(res.value)
+
+ @mock.patch('cephadm.logger')
+ def test_is_ipv6(self, _logger):
+ for good in ("[::1]", "::1",
+ "fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"):
+ assert _cephadm.is_ipv6(good)
+ for bad in ("127.0.0.1",
+ "ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffg",
+ "1:2:3:4:5:6:7:8:9", "fd00::1::1", "[fg::1]"):
+ assert not _cephadm.is_ipv6(bad)
+
+ def test_unwrap_ipv6(self):
+ def unwrap_test(address, expected):
+ assert _cephadm.unwrap_ipv6(address) == expected
+
+ tests = [
+ ('::1', '::1'), ('[::1]', '::1'),
+ ('[fde4:8dba:82e1:0:5054:ff:fe6a:357]', 'fde4:8dba:82e1:0:5054:ff:fe6a:357'),
+ ('can actually be any string', 'can actually be any string'),
+ ('[but needs to be stripped] ', '[but needs to be stripped] ')]
+ for address, expected in tests:
+ unwrap_test(address, expected)
+
+ def test_wrap_ipv6(self):
+ def wrap_test(address, expected):
+ assert _cephadm.wrap_ipv6(address) == expected
+
+ tests = [
+ ('::1', '[::1]'), ('[::1]', '[::1]'),
+ ('fde4:8dba:82e1:0:5054:ff:fe6a:357',
+ '[fde4:8dba:82e1:0:5054:ff:fe6a:357]'),
+ ('myhost.example.com', 'myhost.example.com'),
+ ('192.168.0.1', '192.168.0.1'),
+ ('', ''), ('fd00::1::1', 'fd00::1::1')]
+ for address, expected in tests:
+ wrap_test(address, expected)
+
+ @mock.patch('cephadm.Firewalld', mock_bad_firewalld)
+ @mock.patch('cephadm.logger')
+ def test_skip_firewalld(self, _logger, cephadm_fs):
+ """
+ test --skip-firewalld actually skips changing firewall
+ """
+
+ ctx = _cephadm.CephadmContext()
+ with pytest.raises(Exception):
+ _cephadm.update_firewalld(ctx, 'mon')
+
+ ctx.skip_firewalld = True
+ _cephadm.update_firewalld(ctx, 'mon')
+
+ ctx.skip_firewalld = False
+ with pytest.raises(Exception):
+ _cephadm.update_firewalld(ctx, 'mon')
+
+ ctx = _cephadm.CephadmContext()
+ ctx.ssl_dashboard_port = 8888
+ ctx.dashboard_key = None
+ ctx.dashboard_password_noupdate = True
+ ctx.initial_dashboard_password = 'password'
+ ctx.initial_dashboard_user = 'User'
+ with pytest.raises(Exception):
+ _cephadm.prepare_dashboard(ctx, 0, 0, lambda _, extra_mounts=None, ___=None : '5', lambda : None)
+
+ ctx.skip_firewalld = True
+ _cephadm.prepare_dashboard(ctx, 0, 0, lambda _, extra_mounts=None, ___=None : '5', lambda : None)
+
+ ctx.skip_firewalld = False
+ with pytest.raises(Exception):
+ _cephadm.prepare_dashboard(ctx, 0, 0, lambda _, extra_mounts=None, ___=None : '5', lambda : None)
+
+ @mock.patch('cephadm.logger')
+ @mock.patch('cephadm.fetch_custom_config_files')
+ @mock.patch('cephadm.get_container')
+ def test_get_deployment_container(self, _get_container, _get_config, _logger):
+ """
+ test get_deployment_container properly makes use of extra container args and custom conf files
+ """
+
+ ctx = _cephadm.CephadmContext()
+ ctx.config_json = '-'
+ ctx.extra_container_args = [
+ '--pids-limit=12345',
+ '--something',
+ ]
+ ctx.data_dir = 'data'
+ _get_config.return_value = [
+ {
+ 'mount_path': '/etc/testing.str',
+ 'content': 'this\nis\na\nstring',
+ }
+ ]
+ _get_container.return_value = _cephadm.CephContainer.for_daemon(
+ ctx,
+ fsid='9b9d7609-f4d5-4aba-94c8-effa764d96c9',
+ daemon_type='grafana',
+ daemon_id='host1',
+ entrypoint='',
+ args=[],
+ container_args=[],
+ volume_mounts={},
+ bind_mounts=[],
+ envs=[],
+ privileged=False,
+ ptrace=False,
+ host_network=True,
+ )
+ c = _cephadm.get_deployment_container(ctx,
+ '9b9d7609-f4d5-4aba-94c8-effa764d96c9',
+ 'grafana',
+ 'host1',)
+
+ assert '--pids-limit=12345' in c.container_args
+ assert '--something' in c.container_args
+ assert os.path.join('data', '9b9d7609-f4d5-4aba-94c8-effa764d96c9', 'custom_config_files', 'grafana.host1', 'testing.str') in c.volume_mounts
+ assert c.volume_mounts[os.path.join('data', '9b9d7609-f4d5-4aba-94c8-effa764d96c9', 'custom_config_files', 'grafana.host1', 'testing.str')] == '/etc/testing.str'
+
+ @mock.patch('cephadm.logger')
+ @mock.patch('cephadm.FileLock')
+ @mock.patch('cephadm.deploy_daemon')
+ @mock.patch('cephadm.fetch_configs')
+ @mock.patch('cephadm.make_var_run')
+ @mock.patch('cephadm.migrate_sysctl_dir')
+ @mock.patch('cephadm.check_unit', lambda *args, **kwargs: (None, 'running', None))
+ @mock.patch('cephadm.get_unit_name', lambda *args, **kwargs: 'mon-unit-name')
+ @mock.patch('cephadm.get_deployment_container')
+ @mock.patch('cephadm.read_configuration_source', lambda c: {})
+ @mock.patch('cephadm.apply_deploy_config_to_ctx', lambda d, c: None)
+ @mock.patch('cephadm.extract_uid_gid', lambda *args, **kwargs: ('ceph', 'ceph'))
+ def test_mon_crush_location(self, _get_deployment_container, _migrate_sysctl, _make_var_run, _fetch_configs, _deploy_daemon, _file_lock, _logger):
+ """
+ test that crush location for mon is set if it is included in config_json
+ """
+
+ ctx = _cephadm.CephadmContext()
+ ctx.name = 'mon.test'
+ ctx.fsid = '9b9d7609-f4d5-4aba-94c8-effa764d96c9'
+ ctx.reconfig = False
+ ctx.container_engine = mock_docker()
+ ctx.allow_ptrace = True
+ ctx.config_json = '-'
+ ctx.osd_fsid = '0'
+ ctx.tcp_ports = '3300 6789'
+ _fetch_configs.return_value = {
+ 'crush_location': 'database=a'
+ }
+
+ _get_deployment_container.return_value = _cephadm.CephContainer.for_daemon(
+ ctx,
+ fsid='9b9d7609-f4d5-4aba-94c8-effa764d96c9',
+ daemon_type='mon',
+ daemon_id='test',
+ entrypoint='',
+ args=[],
+ container_args=[],
+ volume_mounts={},
+ bind_mounts=[],
+ envs=[],
+ privileged=False,
+ ptrace=False,
+ host_network=True,
+ )
+
+ def _crush_location_checker(ctx, fsid, daemon_type, daemon_id, container, uid, gid, **kwargs):
+ print(container.args)
+ raise Exception(' '.join(container.args))
+
+ _deploy_daemon.side_effect = _crush_location_checker
+
+ with pytest.raises(Exception, match='--set-crush-location database=a'):
+ _cephadm.command_deploy_from(ctx)
+
+ @mock.patch('cephadm.logger')
+ @mock.patch('cephadm.fetch_custom_config_files')
+ def test_write_custom_conf_files(self, _get_config, _logger, cephadm_fs):
+ """
+ test _write_custom_conf_files writes the conf files correctly
+ """
+
+ ctx = _cephadm.CephadmContext()
+ ctx.config_json = '-'
+ ctx.data_dir = _cephadm.DATA_DIR
+ _get_config.return_value = [
+ {
+ 'mount_path': '/etc/testing.str',
+ 'content': 'this\nis\na\nstring',
+ },
+ {
+ 'mount_path': '/etc/testing.conf',
+ 'content': 'very_cool_conf_setting: very_cool_conf_value\nx: y',
+ },
+ {
+ 'mount_path': '/etc/no-content.conf',
+ },
+ ]
+ _cephadm._write_custom_conf_files(ctx, 'mon', 'host1', 'fsid', 0, 0)
+ with open(os.path.join(_cephadm.DATA_DIR, 'fsid', 'custom_config_files', 'mon.host1', 'testing.str'), 'r') as f:
+ assert 'this\nis\na\nstring' == f.read()
+ with open(os.path.join(_cephadm.DATA_DIR, 'fsid', 'custom_config_files', 'mon.host1', 'testing.conf'), 'r') as f:
+ assert 'very_cool_conf_setting: very_cool_conf_value\nx: y' == f.read()
+ with pytest.raises(FileNotFoundError):
+ open(os.path.join(_cephadm.DATA_DIR, 'fsid', 'custom_config_files', 'mon.host1', 'no-content.conf'), 'r')
+
+ @mock.patch('cephadm.call_throws')
+ @mock.patch('cephadm.get_parm')
+ @mock.patch('cephadm.logger')
+ def test_registry_login(self, _logger, _get_parm, _call_throws):
+ # test normal valid login with url, username and password specified
+ _call_throws.return_value = '', '', 0
+ ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx(
+ ['registry-login', '--registry-url', 'sample-url',
+ '--registry-username', 'sample-user', '--registry-password',
+ 'sample-pass'])
+ ctx.container_engine = mock_docker()
+ retval = _cephadm.command_registry_login(ctx)
+ assert retval == 0
+
+ # test bad login attempt with invalid arguments given
+ ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx(
+ ['registry-login', '--registry-url', 'bad-args-url'])
+ with pytest.raises(Exception) as e:
+ assert _cephadm.command_registry_login(ctx)
+ assert str(e.value) == ('Invalid custom registry arguments received. To login to a custom registry include '
+ '--registry-url, --registry-username and --registry-password options or --registry-json option')
+
+ # test normal valid login with json file
+ _get_parm.return_value = {"url": "sample-url", "username": "sample-username", "password": "sample-password"}
+ ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx(
+ ['registry-login', '--registry-json', 'sample-json'])
+ ctx.container_engine = mock_docker()
+ retval = _cephadm.command_registry_login(ctx)
+ assert retval == 0
+
+ # test bad login attempt with bad json file
+ _get_parm.return_value = {"bad-json": "bad-json"}
+ ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx(
+ ['registry-login', '--registry-json', 'sample-json'])
+ with pytest.raises(Exception) as e:
+ assert _cephadm.command_registry_login(ctx)
+ assert str(e.value) == ("json provided for custom registry login did not include all necessary fields. "
+ "Please setup json file as\n"
+ "{\n"
+ " \"url\": \"REGISTRY_URL\",\n"
+ " \"username\": \"REGISTRY_USERNAME\",\n"
+ " \"password\": \"REGISTRY_PASSWORD\"\n"
+ "}\n")
+
+ # test login attempt with valid arguments where login command fails
+ _call_throws.side_effect = Exception
+ ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx(
+ ['registry-login', '--registry-url', 'sample-url',
+ '--registry-username', 'sample-user', '--registry-password',
+ 'sample-pass'])
+ with pytest.raises(Exception) as e:
+ _cephadm.command_registry_login(ctx)
+ assert str(e.value) == "Failed to login to custom registry @ sample-url as sample-user with given password"
+
+ def test_get_image_info_from_inspect(self):
+ # podman
+ out = """204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1,[docker.io/ceph/ceph@sha256:1cc9b824e1b076cdff52a9aa3f0cc8557d879fb2fbbba0cafed970aca59a3992]"""
+ r = _cephadm.get_image_info_from_inspect(out, 'registry/ceph/ceph:latest')
+ print(r)
+ assert r == {
+ 'image_id': '204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1',
+ 'repo_digests': ['docker.io/ceph/ceph@sha256:1cc9b824e1b076cdff52a9aa3f0cc8557d879fb2fbbba0cafed970aca59a3992']
+ }
+
+ # docker
+ out = """sha256:16f4549cf7a8f112bbebf7946749e961fbbd1b0838627fe619aab16bc17ce552,[quay.ceph.io/ceph-ci/ceph@sha256:4e13da36c1bd6780b312a985410ae678984c37e6a9493a74c87e4a50b9bda41f]"""
+ r = _cephadm.get_image_info_from_inspect(out, 'registry/ceph/ceph:latest')
+ assert r == {
+ 'image_id': '16f4549cf7a8f112bbebf7946749e961fbbd1b0838627fe619aab16bc17ce552',
+ 'repo_digests': ['quay.ceph.io/ceph-ci/ceph@sha256:4e13da36c1bd6780b312a985410ae678984c37e6a9493a74c87e4a50b9bda41f']
+ }
+
+ # multiple digests (podman)
+ out = """e935122ab143a64d92ed1fbb27d030cf6e2f0258207be1baf1b509c466aeeb42,[docker.io/prom/prometheus@sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4 docker.io/prom/prometheus@sha256:efd99a6be65885c07c559679a0df4ec709604bcdd8cd83f0d00a1a683b28fb6a]"""
+ r = _cephadm.get_image_info_from_inspect(out, 'registry/prom/prometheus:latest')
+ assert r == {
+ 'image_id': 'e935122ab143a64d92ed1fbb27d030cf6e2f0258207be1baf1b509c466aeeb42',
+ 'repo_digests': [
+ 'docker.io/prom/prometheus@sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4',
+ 'docker.io/prom/prometheus@sha256:efd99a6be65885c07c559679a0df4ec709604bcdd8cd83f0d00a1a683b28fb6a',
+ ]
+ }
+
+
+ def test_dict_get(self):
+ result = _cephadm.dict_get({'a': 1}, 'a', require=True)
+ assert result == 1
+ result = _cephadm.dict_get({'a': 1}, 'b')
+ assert result is None
+ result = _cephadm.dict_get({'a': 1}, 'b', default=2)
+ assert result == 2
+
+ def test_dict_get_error(self):
+ with pytest.raises(_cephadm.Error):
+ _cephadm.dict_get({'a': 1}, 'b', require=True)
+
+ def test_dict_get_join(self):
+ result = _cephadm.dict_get_join({'foo': ['a', 'b']}, 'foo')
+ assert result == 'a\nb'
+ result = _cephadm.dict_get_join({'foo': [1, 2]}, 'foo')
+ assert result == '1\n2'
+ result = _cephadm.dict_get_join({'bar': 'a'}, 'bar')
+ assert result == 'a'
+ result = _cephadm.dict_get_join({'a': 1}, 'a')
+ assert result == 1
+
+ @mock.patch('os.listdir', return_value=[])
+ @mock.patch('cephadm.logger')
+ def test_infer_local_ceph_image(self, _logger, _listdir):
+ ctx = _cephadm.CephadmContext()
+ ctx.fsid = '00000000-0000-0000-0000-0000deadbeez'
+ ctx.container_engine = mock_podman()
+
+ # make sure the right image is selected when container is found
+ cinfo = _cephadm.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
+ 'registry.hub.docker.com/rkachach/ceph:custom-v0.5',
+ '514e6a882f6e74806a5856468489eeff8d7106095557578da96935e4d0ba4d9d',
+ '2022-04-19 13:45:20.97146228 +0000 UTC',
+ '')
+ out = '''quay.ceph.io/ceph-ci/ceph@sha256:87f200536bb887b36b959e887d5984dd7a3f008a23aa1f283ab55d48b22c6185|dad864ee21e9|main|2022-03-23 16:29:19 +0000 UTC
+ quay.ceph.io/ceph-ci/ceph@sha256:b50b130fcda2a19f8507ddde3435bb4722266956e1858ac395c838bc1dcf1c0e|514e6a882f6e|pacific|2022-03-23 15:58:34 +0000 UTC
+ docker.io/ceph/ceph@sha256:939a46c06b334e094901560c8346de33c00309e3e3968a2db240eb4897c6a508|666bbfa87e8d|v15.2.5|2020-09-16 14:15:15 +0000 UTC'''
+ with mock.patch('cephadm.call_throws', return_value=(out, '', '')):
+ with mock.patch('cephadm.get_container_info', return_value=cinfo):
+ image = _cephadm.infer_local_ceph_image(ctx, ctx.container_engine)
+ assert image == 'quay.ceph.io/ceph-ci/ceph@sha256:b50b130fcda2a19f8507ddde3435bb4722266956e1858ac395c838bc1dcf1c0e'
+
+ # make sure first valid image is used when no container_info is found
+ out = '''quay.ceph.io/ceph-ci/ceph@sha256:87f200536bb887b36b959e887d5984dd7a3f008a23aa1f283ab55d48b22c6185|dad864ee21e9|main|2022-03-23 16:29:19 +0000 UTC
+ quay.ceph.io/ceph-ci/ceph@sha256:b50b130fcda2a19f8507ddde3435bb4722266956e1858ac395c838bc1dcf1c0e|514e6a882f6e|pacific|2022-03-23 15:58:34 +0000 UTC
+ docker.io/ceph/ceph@sha256:939a46c06b334e094901560c8346de33c00309e3e3968a2db240eb4897c6a508|666bbfa87e8d|v15.2.5|2020-09-16 14:15:15 +0000 UTC'''
+ with mock.patch('cephadm.call_throws', return_value=(out, '', '')):
+ with mock.patch('cephadm.get_container_info', return_value=None):
+ image = _cephadm.infer_local_ceph_image(ctx, ctx.container_engine)
+ assert image == 'quay.ceph.io/ceph-ci/ceph@sha256:87f200536bb887b36b959e887d5984dd7a3f008a23aa1f283ab55d48b22c6185'
+
+ # make sure images without digest are discarded (no container_info is found)
+ out = '''quay.ceph.io/ceph-ci/ceph@|||
+ docker.io/ceph/ceph@|||
+ docker.io/ceph/ceph@sha256:939a46c06b334e094901560c8346de33c00309e3e3968a2db240eb4897c6a508|666bbfa87e8d|v15.2.5|2020-09-16 14:15:15 +0000 UTC'''
+ with mock.patch('cephadm.call_throws', return_value=(out, '', '')):
+ with mock.patch('cephadm.get_container_info', return_value=None):
+ image = _cephadm.infer_local_ceph_image(ctx, ctx.container_engine)
+ assert image == 'docker.io/ceph/ceph@sha256:939a46c06b334e094901560c8346de33c00309e3e3968a2db240eb4897c6a508'
+
+
+
+ @pytest.mark.parametrize('daemon_filter, by_name, daemon_list, container_stats, output',
+ [
+ # get container info by type ('mon')
+ (
+ 'mon',
+ False,
+ [
+ {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
+ {'name': 'mgr.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
+ ],
+ ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,",
+ "",
+ 0),
+ _cephadm.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
+ 'registry.hub.docker.com/rkachach/ceph:custom-v0.5',
+ '666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4',
+ '2022-04-19 13:45:20.97146228 +0000 UTC',
+ '')
+ ),
+ # get container info by name ('mon.ceph-node-0')
+ (
+ 'mon.ceph-node-0',
+ True,
+ [
+ {'name': 'mgr.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
+ {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
+ ],
+ ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,",
+ "",
+ 0),
+ _cephadm.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
+ 'registry.hub.docker.com/rkachach/ceph:custom-v0.5',
+ '666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4',
+ '2022-04-19 13:45:20.97146228 +0000 UTC',
+ '')
+ ),
+ # get container info by name (same daemon but two different fsids)
+ (
+ 'mon.ceph-node-0',
+ True,
+ [
+ {'name': 'mon.ceph-node-0', 'fsid': '10000000-0000-0000-0000-0000deadbeef'},
+ {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
+ ],
+ ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,",
+ "",
+ 0),
+ _cephadm.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
+ 'registry.hub.docker.com/rkachach/ceph:custom-v0.5',
+ '666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4',
+ '2022-04-19 13:45:20.97146228 +0000 UTC',
+ '')
+ ),
+ # get container info by type (bad container stats: 127 code)
+ (
+ 'mon',
+ False,
+ [
+ {'name': 'mon.ceph-node-0', 'fsid': '00000000-FFFF-0000-0000-0000deadbeef'},
+ {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
+ ],
+ ("",
+ "",
+ 127),
+ None
+ ),
+ # get container info by name (bad container stats: 127 code)
+ (
+ 'mon.ceph-node-0',
+ True,
+ [
+ {'name': 'mgr.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
+ {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
+ ],
+ ("",
+ "",
+ 127),
+ None
+ ),
+ # get container info by invalid name (doesn't contain '.')
+ (
+ 'mon-ceph-node-0',
+ True,
+ [
+ {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
+ {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
+ ],
+ ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,",
+ "",
+ 0),
+ None
+ ),
+ # get container info by invalid name (empty)
+ (
+ '',
+ True,
+ [
+ {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
+ {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
+ ],
+ ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,",
+ "",
+ 0),
+ None
+ ),
+ # get container info by invalid type (empty)
+ (
+ '',
+ False,
+ [
+ {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
+ {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'},
+ ],
+ ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,",
+ "",
+ 0),
+ None
+ ),
+ # get container info by type: no match (fsid mismatch)
+ (
+ 'mon',
+ False,
+ [
+ {'name': 'mon.ceph-node-0', 'fsid': '00000000-1111-0000-0000-0000deadbeef'},
+ {'name': 'mon.ceph-node-0', 'fsid': '00000000-2222-0000-0000-0000deadbeef'},
+ ],
+ ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,",
+ "",
+ 0),
+ None
+ ),
+ # get container info by name: no match
+ (
+ 'mon.ceph-node-0',
+ True,
+ [],
+ None,
+ None
+ ),
+ # get container info by type: no match
+ (
+ 'mgr',
+ False,
+ [],
+ None,
+ None
+ ),
+ ])
+ @mock.patch('cephadm.logger')
+ def test_get_container_info(self, _logger, daemon_filter, by_name, daemon_list, container_stats, output):
+ ctx = _cephadm.CephadmContext()
+ ctx.fsid = '00000000-0000-0000-0000-0000deadbeef'
+ ctx.container_engine = mock_podman()
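+ # container_stats stands in for the (out, err, returncode) tuple returned by the mocked get_container_stats;
+ # on success, out is a comma-separated record: container id, image name, image id, created timestamp, version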
+ with mock.patch('cephadm.list_daemons', return_value=daemon_list):
+ with mock.patch('cephadm.get_container_stats', return_value=container_stats):
+ assert _cephadm.get_container_info(ctx, daemon_filter, by_name) == output
+
+ def test_should_log_to_journald(self):
+ ctx = _cephadm.CephadmContext()
+ # explicit
+ ctx.log_to_journald = True
+ assert _cephadm.should_log_to_journald(ctx)
+
+ ctx.log_to_journald = None
+ # enabled when podman supports --cgroups=split
+ ctx.container_engine = mock_podman()
+ ctx.container_engine.version = (2, 1, 0)
+ assert _cephadm.should_log_to_journald(ctx)
+
+ # disable on old podman
+ ctx.container_engine.version = (2, 0, 0)
+ assert not _cephadm.should_log_to_journald(ctx)
+
+ # disable on docker
+ ctx.container_engine = mock_docker()
+ assert not _cephadm.should_log_to_journald(ctx)
+
+ def test_normalize_image_digest(self):
+ s = 'myhostname:5000/ceph/ceph@sha256:753886ad9049004395ae990fbb9b096923b5a518b819283141ee8716ddf55ad1'
+ assert _cephadm.normalize_image_digest(s) == s
+
+ s = 'ceph/ceph:latest'
+ assert _cephadm.normalize_image_digest(s) == f'{_cephadm.DEFAULT_REGISTRY}/{s}'
+
+ @pytest.mark.parametrize('fsid, ceph_conf, list_daemons, result, err, ',
+ [
+ (
+ None,
+ None,
+ [],
+ None,
+ None,
+ ),
+ (
+ '00000000-0000-0000-0000-0000deadbeef',
+ None,
+ [],
+ '00000000-0000-0000-0000-0000deadbeef',
+ None,
+ ),
+ (
+ '00000000-0000-0000-0000-0000deadbeef',
+ None,
+ [
+ {'fsid': '10000000-0000-0000-0000-0000deadbeef'},
+ {'fsid': '20000000-0000-0000-0000-0000deadbeef'},
+ ],
+ '00000000-0000-0000-0000-0000deadbeef',
+ None,
+ ),
+ (
+ None,
+ None,
+ [
+ {'fsid': '00000000-0000-0000-0000-0000deadbeef'},
+ ],
+ '00000000-0000-0000-0000-0000deadbeef',
+ None,
+ ),
+ (
+ None,
+ None,
+ [
+ {'fsid': '10000000-0000-0000-0000-0000deadbeef'},
+ {'fsid': '20000000-0000-0000-0000-0000deadbeef'},
+ ],
+ None,
+ r'Cannot infer an fsid',
+ ),
+ (
+ None,
+ get_ceph_conf(fsid='00000000-0000-0000-0000-0000deadbeef'),
+ [],
+ '00000000-0000-0000-0000-0000deadbeef',
+ None,
+ ),
+ (
+ None,
+ get_ceph_conf(fsid='00000000-0000-0000-0000-0000deadbeef'),
+ [
+ {'fsid': '00000000-0000-0000-0000-0000deadbeef'},
+ ],
+ '00000000-0000-0000-0000-0000deadbeef',
+ None,
+ ),
+ (
+ None,
+ get_ceph_conf(fsid='00000000-0000-0000-0000-0000deadbeef'),
+ [
+ {'fsid': '10000000-0000-0000-0000-0000deadbeef'},
+ {'fsid': '20000000-0000-0000-0000-0000deadbeef'},
+ ],
+ None,
+ r'Cannot infer an fsid',
+ ),
+ ])
+ @mock.patch('cephadm.call')
+ @mock.patch('cephadm.logger')
+ def test_infer_fsid(self, _logger, _call, fsid, ceph_conf, list_daemons, result, err, cephadm_fs):
+ # build the context
+ ctx = _cephadm.CephadmContext()
+ ctx.fsid = fsid
+
+ # mock the decorator
+ mock_fn = mock.Mock()
+ mock_fn.return_value = 0
+ infer_fsid = _cephadm.infer_fsid(mock_fn)
+
+ # mock the ceph.conf file content
+ if ceph_conf:
+ f = cephadm_fs.create_file('ceph.conf', contents=ceph_conf)
+ ctx.config = f.path
+
+ # test
+ with mock.patch('cephadm.list_daemons', return_value=list_daemons):
+ if err:
+ with pytest.raises(_cephadm.Error, match=err):
+ infer_fsid(ctx)
+ else:
+ infer_fsid(ctx)
+ assert ctx.fsid == result
+
+ @pytest.mark.parametrize('fsid, other_conf_files, config, name, list_daemons, result, ',
+ [
+ # per cluster conf has more precedence than default conf
+ (
+ '00000000-0000-0000-0000-0000deadbeef',
+ [_cephadm.CEPH_DEFAULT_CONF],
+ None,
+ None,
+ [],
+ '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/config/ceph.conf',
+ ),
+ # mon daemon conf has more precedence than cluster conf and default conf
+ (
+ '00000000-0000-0000-0000-0000deadbeef',
+ ['/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/config/ceph.conf',
+ _cephadm.CEPH_DEFAULT_CONF],
+ None,
+ None,
+ [{'name': 'mon.a', 'fsid': '00000000-0000-0000-0000-0000deadbeef', 'style': 'cephadm:v1'}],
+ '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/mon.a/config',
+ ),
+ # daemon conf (--name option) has more precedence than cluster, default and mon conf
+ (
+ '00000000-0000-0000-0000-0000deadbeef',
+ ['/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/config/ceph.conf',
+ '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/mon.a/config',
+ _cephadm.CEPH_DEFAULT_CONF],
+ None,
+ 'osd.0',
+ [{'name': 'mon.a', 'fsid': '00000000-0000-0000-0000-0000deadbeef', 'style': 'cephadm:v1'},
+ {'name': 'osd.0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'}],
+ '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/osd.0/config',
+ ),
+ # user provided conf ('/foo/ceph.conf') more precedence than any other conf
+ (
+ '00000000-0000-0000-0000-0000deadbeef',
+ ['/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/config/ceph.conf',
+ _cephadm.CEPH_DEFAULT_CONF,
+ '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/mon.a/config'],
+ '/foo/ceph.conf',
+ None,
+ [{'name': 'mon.a', 'fsid': '00000000-0000-0000-0000-0000deadbeef', 'style': 'cephadm:v1'}],
+ '/foo/ceph.conf',
+ ),
+ ])
+ @mock.patch('cephadm.call')
+ @mock.patch('cephadm.logger')
+ def test_infer_config_precedence(self, _logger, _call, other_conf_files, fsid, config, name, list_daemons, result, cephadm_fs):
+ # build the context
+ ctx = _cephadm.CephadmContext()
+ ctx.fsid = fsid
+ ctx.config = config
+ ctx.name = name
+
+ # mock the decorator
+ mock_fn = mock.Mock()
+ mock_fn.return_value = 0
+ infer_config = _cephadm.infer_config(mock_fn)
+
+ # mock the config file
+ cephadm_fs.create_file(result)
+
+ # mock other potential config files
+ for f in other_conf_files:
+ cephadm_fs.create_file(f)
+
+ # test
+ with mock.patch('cephadm.list_daemons', return_value=list_daemons):
+ infer_config(ctx)
+ assert ctx.config == result
+
+ @pytest.mark.parametrize('fsid, config, name, list_daemons, result, ',
+ [
+ (
+ None,
+ '/foo/bar.conf',
+ None,
+ [],
+ '/foo/bar.conf',
+ ),
+ (
+ '00000000-0000-0000-0000-0000deadbeef',
+ None,
+ None,
+ [],
+ _cephadm.CEPH_DEFAULT_CONF,
+ ),
+ (
+ '00000000-0000-0000-0000-0000deadbeef',
+ None,
+ None,
+ [],
+ '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/config/ceph.conf',
+ ),
+ (
+ '00000000-0000-0000-0000-0000deadbeef',
+ None,
+ None,
+ [{'name': 'mon.a', 'fsid': '00000000-0000-0000-0000-0000deadbeef', 'style': 'cephadm:v1'}],
+ '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/mon.a/config',
+ ),
+ (
+ '00000000-0000-0000-0000-0000deadbeef',
+ None,
+ None,
+ [{'name': 'mon.a', 'fsid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'style': 'cephadm:v1'}],
+ _cephadm.CEPH_DEFAULT_CONF,
+ ),
+ (
+ '00000000-0000-0000-0000-0000deadbeef',
+ None,
+ None,
+ [{'name': 'mon.a', 'fsid': '00000000-0000-0000-0000-0000deadbeef', 'style': 'legacy'}],
+ _cephadm.CEPH_DEFAULT_CONF,
+ ),
+ (
+ '00000000-0000-0000-0000-0000deadbeef',
+ None,
+ None,
+ [{'name': 'osd.0'}],
+ _cephadm.CEPH_DEFAULT_CONF,
+ ),
+ (
+ '00000000-0000-0000-0000-0000deadbeef',
+ '/foo/bar.conf',
+ 'mon.a',
+ [{'name': 'mon.a', 'style': 'cephadm:v1'}],
+ '/foo/bar.conf',
+ ),
+ (
+ '00000000-0000-0000-0000-0000deadbeef',
+ None,
+ 'mon.a',
+ [],
+ '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/mon.a/config',
+ ),
+ (
+ '00000000-0000-0000-0000-0000deadbeef',
+ None,
+ 'osd.0',
+ [],
+ '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/osd.0/config',
+ ),
+ (
+ None,
+ None,
+ None,
+ [],
+ _cephadm.CEPH_DEFAULT_CONF,
+ ),
+ ])
+ @mock.patch('cephadm.call')
+ @mock.patch('cephadm.logger')
+ def test_infer_config(self, _logger, _call, fsid, config, name, list_daemons, result, cephadm_fs):
+ # build the context
+ ctx = _cephadm.CephadmContext()
+ ctx.fsid = fsid
+ ctx.config = config
+ ctx.name = name
+
+ # mock the decorator
+ mock_fn = mock.Mock()
+ mock_fn.return_value = 0
+ infer_config = _cephadm.infer_config(mock_fn)
+
+ # mock the config file
+ cephadm_fs.create_file(result)
+
+ # test
+ with mock.patch('cephadm.list_daemons', return_value=list_daemons):
+ infer_config(ctx)
+ assert ctx.config == result
+
+ @mock.patch('cephadm.call')
+ def test_extract_uid_gid_fail(self, _call):
+ err = """Error: container_linux.go:370: starting container process caused: process_linux.go:459: container init caused: process_linux.go:422: setting cgroup config for procHooks process caused: Unit libpod-056038e1126191fba41d8a037275136f2d7aeec9710b9ee
+ff792c06d8544b983.scope not found.: OCI runtime error"""
+ _call.return_value = ('', err, 127)
+ ctx = _cephadm.CephadmContext()
+ ctx.container_engine = mock_podman()
+ with pytest.raises(_cephadm.Error, match='OCI'):
+ _cephadm.extract_uid_gid(ctx)
+
+ @pytest.mark.parametrize('test_input, expected', [
+ ([_cephadm.make_fsid(), _cephadm.make_fsid(), _cephadm.make_fsid()], 3),
+ ([_cephadm.make_fsid(), 'invalid-fsid', _cephadm.make_fsid(), '0b87e50c-8e77-11ec-b890-'], 2),
+ (['f6860ec2-8e76-11ec-', '0b87e50c-8e77-11ec-b890-', ''], 0),
+ ([], 0),
+ ])
+ def test_get_ceph_cluster_count(self, test_input, expected):
+ ctx = _cephadm.CephadmContext()
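+ # os.listdir stands in for the cluster data dir listing; only entries that parse as valid fsids (UUIDs) should be counted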
+ with mock.patch('os.listdir', return_value=test_input):
+ assert _cephadm.get_ceph_cluster_count(ctx) == expected
+
+ def test_set_image_minimize_config(self):
+ def throw_cmd(cmd):
+ raise _cephadm.Error(' '.join(cmd))
+ ctx = _cephadm.CephadmContext()
+ ctx.image = 'test_image'
+ ctx.no_minimize_config = True
+ fake_cli = lambda cmd, __=None, ___=None: throw_cmd(cmd)
+ with pytest.raises(_cephadm.Error, match='config set global container_image test_image'):
+ _cephadm.finish_bootstrap_config(
+ ctx=ctx,
+ fsid=_cephadm.make_fsid(),
+ config='',
+ mon_id='a', mon_dir='mon_dir',
+ mon_network=None, ipv6=False,
+ cli=fake_cli,
+ cluster_network=None,
+ ipv6_cluster_network=False
+ )
+
+
+class TestCustomContainer(unittest.TestCase):
+ cc: _cephadm.CustomContainer
+
+ def setUp(self):
+ self.cc = _cephadm.CustomContainer(
+ 'e863154d-33c7-4350-bca5-921e0467e55b',
+ 'container',
+ config_json={
+ 'entrypoint': 'bash',
+ 'gid': 1000,
+ 'args': [
+ '--no-healthcheck',
+ '-p 6800:6800'
+ ],
+ 'envs': ['SECRET=password'],
+ 'ports': [8080, 8443],
+ 'volume_mounts': {
+ '/CONFIG_DIR': '/foo/conf',
+ 'bar/config': '/bar:ro'
+ },
+ 'bind_mounts': [
+ [
+ 'type=bind',
+ 'source=/CONFIG_DIR',
+ 'destination=/foo/conf',
+ ''
+ ],
+ [
+ 'type=bind',
+ 'source=bar/config',
+ 'destination=/bar:ro',
+ 'ro=true'
+ ]
+ ]
+ },
+ image='docker.io/library/hello-world:latest'
+ )
+
+ def test_entrypoint(self):
+ self.assertEqual(self.cc.entrypoint, 'bash')
+
+ def test_uid_gid(self):
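+ # config_json above sets only 'gid', so uid should fall back to the 65534 (nobody) default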
+ self.assertEqual(self.cc.uid, 65534)
+ self.assertEqual(self.cc.gid, 1000)
+
+ def test_ports(self):
+ self.assertEqual(self.cc.ports, [8080, 8443])
+
+ def test_get_container_args(self):
+ result = self.cc.get_container_args()
+ self.assertEqual(result, [
+ '--no-healthcheck',
+ '-p 6800:6800'
+ ])
+
+ def test_get_container_envs(self):
+ result = self.cc.get_container_envs()
+ self.assertEqual(result, ['SECRET=password'])
+
+ def test_get_container_mounts(self):
+ result = self.cc.get_container_mounts('/xyz')
+ self.assertDictEqual(result, {
+ '/CONFIG_DIR': '/foo/conf',
+ '/xyz/bar/config': '/bar:ro'
+ })
+
+ def test_get_container_binds(self):
+ result = self.cc.get_container_binds('/xyz')
+ self.assertEqual(result, [
+ [
+ 'type=bind',
+ 'source=/CONFIG_DIR',
+ 'destination=/foo/conf',
+ ''
+ ],
+ [
+ 'type=bind',
+ 'source=/xyz/bar/config',
+ 'destination=/bar:ro',
+ 'ro=true'
+ ]
+ ])
+
+
+class TestMaintenance:
+ systemd_target = "ceph.00000000-0000-0000-0000-000000c0ffee.target"
+ fsid = '0ea8cdd0-1bbf-11ec-a9c7-5254002763fa'
+
+ def test_systemd_target_OK(self, tmp_path):
+ base = tmp_path
+ wants = base / "ceph.target.wants"
+ wants.mkdir()
+ target = wants / TestMaintenance.systemd_target
+ target.touch()
+ ctx = _cephadm.CephadmContext()
+ ctx.unit_dir = str(base)
+
+ assert _cephadm.systemd_target_state(ctx, target.name)
+
+ def test_systemd_target_NOTOK(self, tmp_path):
+ base = tmp_path
+ ctx = _cephadm.CephadmContext()
+ ctx.unit_dir = str(base)
+ assert not _cephadm.systemd_target_state(ctx, TestMaintenance.systemd_target)
+
+ def test_parser_OK(self):
+ args = _cephadm._parse_args(['host-maintenance', 'enter'])
+ assert args.maintenance_action == 'enter'
+
+ def test_parser_BAD(self):
+ with pytest.raises(SystemExit):
+ _cephadm._parse_args(['host-maintenance', 'wah'])
+
+ @mock.patch('os.listdir', return_value=[])
+ @mock.patch('cephadm.call')
+ @mock.patch('cephadm.logger')
+ @mock.patch('cephadm.systemd_target_state')
+ def test_enter_failure_1(self, _target_state, _logger, _call, _listdir):
+ _call.return_value = '', '', 999
+ _target_state.return_value = True
+ ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx(
+ ['host-maintenance', 'enter', '--fsid', TestMaintenance.fsid])
+ ctx.container_engine = mock_podman()
+ retval = _cephadm.command_maintenance(ctx)
+ assert retval.startswith('failed')
+
+ @mock.patch('os.listdir', return_value=[])
+ @mock.patch('cephadm.call')
+ @mock.patch('cephadm.logger')
+ @mock.patch('cephadm.systemd_target_state')
+ def test_enter_failure_2(self, _target_state, _logger, _call, _listdir):
+ _call.side_effect = [('', '', 0), ('', '', 999), ('', '', 0), ('', '', 999)]
+ _target_state.return_value = True
+ ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx(
+ ['host-maintenance', 'enter', '--fsid', TestMaintenance.fsid])
+ ctx.container_engine = mock_podman()
+ retval = _cephadm.command_maintenance(ctx)
+ assert retval.startswith('failed')
+
+ @mock.patch('os.listdir', return_value=[])
+ @mock.patch('cephadm.call')
+ @mock.patch('cephadm.logger')
+ @mock.patch('cephadm.systemd_target_state')
+ @mock.patch('cephadm.target_exists')
+ def test_exit_failure_1(self, _target_exists, _target_state, _logger, _call, _listdir):
+ _call.return_value = '', '', 999
+ _target_state.return_value = False
+ _target_exists.return_value = True
+ ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx(
+ ['host-maintenance', 'exit', '--fsid', TestMaintenance.fsid])
+ ctx.container_engine = mock_podman()
+ retval = _cephadm.command_maintenance(ctx)
+ assert retval.startswith('failed')
+
+ @mock.patch('os.listdir', return_value=[])
+ @mock.patch('cephadm.call')
+ @mock.patch('cephadm.logger')
+ @mock.patch('cephadm.systemd_target_state')
+ @mock.patch('cephadm.target_exists')
+ def test_exit_failure_2(self, _target_exists, _target_state, _logger, _call, _listdir):
+ _call.side_effect = [('', '', 0), ('', '', 999), ('', '', 0), ('', '', 999)]
+ _target_state.return_value = False
+ _target_exists.return_value = True
+ ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx(
+ ['host-maintenance', 'exit', '--fsid', TestMaintenance.fsid])
+ ctx.container_engine = mock_podman()
+ retval = _cephadm.command_maintenance(ctx)
+ assert retval.startswith('failed')
+
+
+class TestMonitoring(object):
+ @mock.patch('cephadm.call')
+ def test_get_version_alertmanager(self, _call):
+ ctx = _cephadm.CephadmContext()
+ ctx.container_engine = mock_podman()
+ daemon_type = 'alertmanager'
+
+ # binary `alertmanager`
+ _call.return_value = '', '{}, version 0.16.1'.format(daemon_type), 0
+ version = _cephadm.Monitoring.get_version(ctx, 'container_id', daemon_type)
+ assert version == '0.16.1'
+
+ # binary `prometheus-alertmanager`
+ _call.side_effect = (
+ ('', '', 1),
+ ('', '{}, version 0.16.1'.format(daemon_type), 0),
+ )
+ version = _cephadm.Monitoring.get_version(ctx, 'container_id', daemon_type)
+ assert version == '0.16.1'
+
+ @mock.patch('cephadm.call')
+ def test_get_version_prometheus(self, _call):
+ ctx = _cephadm.CephadmContext()
+ ctx.container_engine = mock_podman()
+ daemon_type = 'prometheus'
+ _call.return_value = '', '{}, version 0.16.1'.format(daemon_type), 0
+ version = _cephadm.Monitoring.get_version(ctx, 'container_id', daemon_type)
+ assert version == '0.16.1'
+
+ def test_prometheus_external_url(self):
+ ctx = _cephadm.CephadmContext()
+ ctx.config_json = json.dumps({'files': {}, 'retention_time': '15d'})
+ daemon_type = 'prometheus'
+ daemon_id = 'home'
+ fsid = 'aaf5a720-13fe-4a3b-82b9-2d99b7fd9704'
+ args = _cephadm.get_daemon_args(ctx, fsid, daemon_type, daemon_id)
+ assert any([x.startswith('--web.external-url=http://') for x in args])
+
+ @mock.patch('cephadm.call')
+ def test_get_version_node_exporter(self, _call):
+ ctx = _cephadm.CephadmContext()
+ ctx.container_engine = mock_podman()
+ daemon_type = 'node-exporter'
+ _call.return_value = '', '{}, version 0.16.1'.format(daemon_type.replace('-', '_')), 0
+ version = _cephadm.Monitoring.get_version(ctx, 'container_id', daemon_type)
+ assert version == '0.16.1'
+
+ def test_create_daemon_dirs_prometheus(self, cephadm_fs):
+ """
+ Ensures the required and optional files given in the configuration are
+ created and mapped correctly inside the container. Tests absolute and
+ relative file paths given in the configuration.
+ """
+
+ fsid = 'aaf5a720-13fe-4a3b-82b9-2d99b7fd9704'
+ daemon_type = 'prometheus'
+ uid, gid = 50, 50
+ daemon_id = 'home'
+ ctx = _cephadm.CephadmContext()
+ ctx.data_dir = '/somedir'
+ ctx.config_json = json.dumps({
+ 'files': {
+ 'prometheus.yml': 'foo',
+ '/etc/prometheus/alerting/ceph_alerts.yml': 'bar'
+ }
+ })
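+ # per 'expected' below: the relative 'prometheus.yml' should land under etc/prometheus/ in the daemon dir,
+ # while the absolute path is re-rooted there with its leading '/' dropped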
+
+ _cephadm.create_daemon_dirs(ctx,
+ fsid,
+ daemon_type,
+ daemon_id,
+ uid,
+ gid,
+ config=None,
+ keyring=None)
+
+ prefix = '{data_dir}/{fsid}/{daemon_type}.{daemon_id}'.format(
+ data_dir=ctx.data_dir,
+ fsid=fsid,
+ daemon_type=daemon_type,
+ daemon_id=daemon_id
+ )
+
+ expected = {
+ 'etc/prometheus/prometheus.yml': 'foo',
+ 'etc/prometheus/alerting/ceph_alerts.yml': 'bar',
+ }
+
+ for file, content in expected.items():
+ file = os.path.join(prefix, file)
+ assert os.path.exists(file)
+ with open(file) as f:
+ assert f.read() == content
+
+ # assert uid/gid after redeploy
+ new_uid = uid+1
+ new_gid = gid+1
+ _cephadm.create_daemon_dirs(ctx,
+ fsid,
+ daemon_type,
+ daemon_id,
+ new_uid,
+ new_gid,
+ config=None,
+ keyring=None)
+ for file, content in expected.items():
+ file = os.path.join(prefix, file)
+ assert os.stat(file).st_uid == new_uid
+ assert os.stat(file).st_gid == new_gid
+
+
+class TestBootstrap(object):
+
+ @staticmethod
+ def _get_cmd(*args):
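+ # options common to every bootstrap invocation in these tests: tolerate a mismatched release and skip host preparation and dashboard setup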
+ return [
+ 'bootstrap',
+ '--allow-mismatched-release',
+ '--skip-prepare-host',
+ '--skip-dashboard',
+ *args,
+ ]
+
+
+###############################################
+
+ def test_config(self, cephadm_fs):
+ conf_file = 'foo'
+ cmd = self._get_cmd(
+ '--mon-ip', '192.168.1.1',
+ '--skip-mon-network',
+ '--config', conf_file,
+ )
+
+ with with_cephadm_ctx(cmd) as ctx:
+ msg = r'No such file or directory'
+ with pytest.raises(_cephadm.Error, match=msg):
+ _cephadm.command_bootstrap(ctx)
+
+ cephadm_fs.create_file(conf_file)
+ with with_cephadm_ctx(cmd) as ctx:
+ retval = _cephadm.command_bootstrap(ctx)
+ assert retval == 0
+
+ def test_no_mon_addr(self, cephadm_fs):
+ cmd = self._get_cmd()
+ with with_cephadm_ctx(cmd) as ctx:
+ msg = r'must specify --mon-ip or --mon-addrv'
+ with pytest.raises(_cephadm.Error, match=msg):
+ _cephadm.command_bootstrap(ctx)
+
+ def test_skip_mon_network(self, cephadm_fs):
+ cmd = self._get_cmd('--mon-ip', '192.168.1.1')
+
+ with with_cephadm_ctx(cmd, list_networks={}) as ctx:
+ msg = r'--skip-mon-network'
+ with pytest.raises(_cephadm.Error, match=msg):
+ _cephadm.command_bootstrap(ctx)
+
+ cmd += ['--skip-mon-network']
+ with with_cephadm_ctx(cmd, list_networks={}) as ctx:
+ retval = _cephadm.command_bootstrap(ctx)
+ assert retval == 0
+
+ @pytest.mark.parametrize('mon_ip, list_networks, result',
+ [
+ # IPv4
+ (
+ 'eth0',
+ {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
+ False,
+ ),
+ (
+ '0.0.0.0',
+ {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
+ False,
+ ),
+ (
+ '192.168.1.0',
+ {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
+ False,
+ ),
+ (
+ '192.168.1.1',
+ {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
+ True,
+ ),
+ (
+ '192.168.1.1:1234',
+ {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
+ True,
+ ),
+ (
+ '192.168.1.1:0123',
+ {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
+ True,
+ ),
+ # IPv6
+ (
+ '::',
+ {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
+ False,
+ ),
+ (
+ '::ffff:192.168.1.0',
+ {"ffff::/64": {"eth0": ["::ffff:c0a8:101"]}},
+ False,
+ ),
+ (
+ '::ffff:192.168.1.1',
+ {"ffff::/64": {"eth0": ["::ffff:c0a8:101"]}},
+ True,
+ ),
+ (
+ '::ffff:c0a8:101',
+ {"ffff::/64": {"eth0": ["::ffff:c0a8:101"]}},
+ True,
+ ),
+ (
+ '[::ffff:c0a8:101]:1234',
+ {"ffff::/64": {"eth0": ["::ffff:c0a8:101"]}},
+ True,
+ ),
+ (
+ '[::ffff:c0a8:101]:0123',
+ {"ffff::/64": {"eth0": ["::ffff:c0a8:101"]}},
+ True,
+ ),
+ (
+ '0000:0000:0000:0000:0000:FFFF:C0A8:0101',
+ {"ffff::/64": {"eth0": ["::ffff:c0a8:101"]}},
+ True,
+ ),
+ ])
+ def test_mon_ip(self, mon_ip, list_networks, result, cephadm_fs):
+ cmd = self._get_cmd('--mon-ip', mon_ip)
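+ # mon_ip values that do not match a local address in list_networks (an interface name, the wildcard, or the network address itself) are expected to fail with a hint to pass --skip-mon-network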
+ if not result:
+ with with_cephadm_ctx(cmd, list_networks=list_networks) as ctx:
+ msg = r'--skip-mon-network'
+ with pytest.raises(_cephadm.Error, match=msg):
+ _cephadm.command_bootstrap(ctx)
+ else:
+ with with_cephadm_ctx(cmd, list_networks=list_networks) as ctx:
+ retval = _cephadm.command_bootstrap(ctx)
+ assert retval == 0
+
+ @pytest.mark.parametrize('mon_addrv, list_networks, err',
+ [
+ # IPv4
+ (
+ '192.168.1.1',
+ {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
+ r'must use square brackets',
+ ),
+ (
+ '[192.168.1.1]',
+ {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
+ r'must include port number',
+ ),
+ (
+ '[192.168.1.1:1234]',
+ {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
+ None,
+ ),
+ (
+ '[192.168.1.1:0123]',
+ {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
+ None,
+ ),
+ (
+ '[v2:192.168.1.1:3300,v1:192.168.1.1:6789]',
+ {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
+ None,
+ ),
+ # IPv6
+ (
+ '[::ffff:192.168.1.1:1234]',
+ {'ffff::/64': {'eth0': ['::ffff:c0a8:101']}},
+ None,
+ ),
+ (
+ '[::ffff:192.168.1.1:0123]',
+ {'ffff::/64': {'eth0': ['::ffff:c0a8:101']}},
+ None,
+ ),
+ (
+ '[0000:0000:0000:0000:0000:FFFF:C0A8:0101:1234]',
+ {'ffff::/64': {'eth0': ['::ffff:c0a8:101']}},
+ None,
+ ),
+ (
+ '[v2:0000:0000:0000:0000:0000:FFFF:C0A8:0101:3300,v1:0000:0000:0000:0000:0000:FFFF:C0A8:0101:6789]',
+ {'ffff::/64': {'eth0': ['::ffff:c0a8:101']}},
+ None,
+ ),
+ ])
+ def test_mon_addrv(self, mon_addrv, list_networks, err, cephadm_fs):
+ cmd = self._get_cmd('--mon-addrv', mon_addrv)
+ if err:
+ with with_cephadm_ctx(cmd, list_networks=list_networks) as ctx:
+ with pytest.raises(_cephadm.Error, match=err):
+ _cephadm.command_bootstrap(ctx)
+ else:
+ with with_cephadm_ctx(cmd, list_networks=list_networks) as ctx:
+ retval = _cephadm.command_bootstrap(ctx)
+ assert retval == 0
+
+ def test_allow_fqdn_hostname(self, cephadm_fs):
+ hostname = 'foo.bar'
+ cmd = self._get_cmd(
+ '--mon-ip', '192.168.1.1',
+ '--skip-mon-network',
+ )
+
+ with with_cephadm_ctx(cmd, hostname=hostname) as ctx:
+ msg = r'--allow-fqdn-hostname'
+ with pytest.raises(_cephadm.Error, match=msg):
+ _cephadm.command_bootstrap(ctx)
+
+ cmd += ['--allow-fqdn-hostname']
+ with with_cephadm_ctx(cmd, hostname=hostname) as ctx:
+ retval = _cephadm.command_bootstrap(ctx)
+ assert retval == 0
+
+ @pytest.mark.parametrize('fsid, err',
+ [
+ ('', None),
+ ('00000000-0000-0000-0000-0000deadbeef', None),
+ ('00000000-0000-0000-0000-0000deadbeez', 'not an fsid'),
+ ])
+ def test_fsid(self, fsid, err, cephadm_fs):
+ cmd = self._get_cmd(
+ '--mon-ip', '192.168.1.1',
+ '--skip-mon-network',
+ '--fsid', fsid,
+ )
+
+ with with_cephadm_ctx(cmd) as ctx:
+ if err:
+ with pytest.raises(_cephadm.Error, match=err):
+ _cephadm.command_bootstrap(ctx)
+ else:
+ retval = _cephadm.command_bootstrap(ctx)
+ assert retval == 0
+
+
+class TestShell(object):
+
+ def test_fsid(self, cephadm_fs):
+ fsid = '00000000-0000-0000-0000-0000deadbeef'
+
+ cmd = ['shell', '--fsid', fsid]
+ with with_cephadm_ctx(cmd) as ctx:
+ retval = _cephadm.command_shell(ctx)
+ assert retval == 0
+ assert ctx.fsid == fsid
+
+ cmd = ['shell', '--fsid', '00000000-0000-0000-0000-0000deadbeez']
+ with with_cephadm_ctx(cmd) as ctx:
+ err = 'not an fsid'
+ with pytest.raises(_cephadm.Error, match=err):
+ retval = _cephadm.command_shell(ctx)
+ assert retval == 1
+ assert ctx.fsid == None
+
+ s = get_ceph_conf(fsid=fsid)
+ f = cephadm_fs.create_file('ceph.conf', contents=s)
+
+ cmd = ['shell', '--fsid', fsid, '--config', f.path]
+ with with_cephadm_ctx(cmd) as ctx:
+ retval = _cephadm.command_shell(ctx)
+ assert retval == 0
+ assert ctx.fsid == fsid
+
+ cmd = ['shell', '--fsid', '10000000-0000-0000-0000-0000deadbeef', '--config', f.path]
+ with with_cephadm_ctx(cmd) as ctx:
+ err = 'fsid does not match ceph.conf'
+ with pytest.raises(_cephadm.Error, match=err):
+ retval = _cephadm.command_shell(ctx)
+ assert retval == 1
+ assert ctx.fsid == None
+
+ def test_name(self, cephadm_fs):
+ cmd = ['shell', '--name', 'foo']
+ with with_cephadm_ctx(cmd) as ctx:
+ retval = _cephadm.command_shell(ctx)
+ assert retval == 0
+
+ cmd = ['shell', '--name', 'foo.bar']
+ with with_cephadm_ctx(cmd) as ctx:
+ err = r'must pass --fsid'
+ with pytest.raises(_cephadm.Error, match=err):
+ retval = _cephadm.command_shell(ctx)
+ assert retval == 1
+
+ fsid = '00000000-0000-0000-0000-0000deadbeef'
+ cmd = ['shell', '--name', 'foo.bar', '--fsid', fsid]
+ with with_cephadm_ctx(cmd) as ctx:
+ retval = _cephadm.command_shell(ctx)
+ assert retval == 0
+
+ def test_config(self, cephadm_fs):
+ cmd = ['shell']
+ with with_cephadm_ctx(cmd) as ctx:
+ retval = _cephadm.command_shell(ctx)
+ assert retval == 0
+ assert ctx.config == None
+
+ cephadm_fs.create_file(_cephadm.CEPH_DEFAULT_CONF)
+ with with_cephadm_ctx(cmd) as ctx:
+ retval = _cephadm.command_shell(ctx)
+ assert retval == 0
+ assert ctx.config == _cephadm.CEPH_DEFAULT_CONF
+
+ cmd = ['shell', '--config', 'foo']
+ with with_cephadm_ctx(cmd) as ctx:
+ retval = _cephadm.command_shell(ctx)
+ assert retval == 0
+ assert ctx.config == 'foo'
+
+ def test_keyring(self, cephadm_fs):
+ cmd = ['shell']
+ with with_cephadm_ctx(cmd) as ctx:
+ retval = _cephadm.command_shell(ctx)
+ assert retval == 0
+ assert ctx.keyring == None
+
+ cephadm_fs.create_file(_cephadm.CEPH_DEFAULT_KEYRING)
+ with with_cephadm_ctx(cmd) as ctx:
+ retval = _cephadm.command_shell(ctx)
+ assert retval == 0
+ assert ctx.keyring == _cephadm.CEPH_DEFAULT_KEYRING
+
+ cmd = ['shell', '--keyring', 'foo']
+ with with_cephadm_ctx(cmd) as ctx:
+ retval = _cephadm.command_shell(ctx)
+ assert retval == 0
+ assert ctx.keyring == 'foo'
+
+ @mock.patch('cephadm.CephContainer')
+ def test_mount_no_dst(self, _ceph_container, cephadm_fs):
+ cmd = ['shell', '--mount', '/etc/foo']
+ with with_cephadm_ctx(cmd) as ctx:
+ retval = _cephadm.command_shell(ctx)
+ assert retval == 0
+ assert _ceph_container.call_args.kwargs['volume_mounts']['/etc/foo'] == '/mnt/foo'
+
+ @mock.patch('cephadm.CephContainer')
+ def test_mount_with_dst_no_opt(self, _ceph_container, cephadm_fs):
+ cmd = ['shell', '--mount', '/etc/foo:/opt/foo/bar']
+ with with_cephadm_ctx(cmd) as ctx:
+ retval = _cephadm.command_shell(ctx)
+ assert retval == 0
+ assert _ceph_container.call_args.kwargs['volume_mounts']['/etc/foo'] == '/opt/foo/bar'
+
+ @mock.patch('cephadm.CephContainer')
+ def test_mount_with_dst_and_opt(self, _ceph_container, cephadm_fs):
+ cmd = ['shell', '--mount', '/etc/foo:/opt/foo/bar:Z']
+ with with_cephadm_ctx(cmd) as ctx:
+ retval = _cephadm.command_shell(ctx)
+ assert retval == 0
+ assert _ceph_container.call_args.kwargs['volume_mounts']['/etc/foo'] == '/opt/foo/bar:Z'
+
+
+class TestCephVolume(object):
+
+ @staticmethod
+ def _get_cmd(*args):
+ return [
+ 'ceph-volume',
+ *args,
+ '--', 'inventory', '--format', 'json'
+ ]
+
+ def test_noop(self, cephadm_fs):
+ cmd = self._get_cmd()
+ with with_cephadm_ctx(cmd) as ctx:
+ _cephadm.command_ceph_volume(ctx)
+ assert ctx.fsid == None
+ assert ctx.config == None
+ assert ctx.keyring == None
+ assert ctx.config_json == None
+
+ def test_fsid(self, cephadm_fs):
+ fsid = '00000000-0000-0000-0000-0000deadbeef'
+
+ cmd = self._get_cmd('--fsid', fsid)
+ with with_cephadm_ctx(cmd) as ctx:
+ _cephadm.command_ceph_volume(ctx)
+ assert ctx.fsid == fsid
+
+ cmd = self._get_cmd('--fsid', '00000000-0000-0000-0000-0000deadbeez')
+ with with_cephadm_ctx(cmd) as ctx:
+ err = 'not an fsid'
+ with pytest.raises(_cephadm.Error, match=err):
+ retval = _cephadm.command_shell(ctx)
+ assert retval == 1
+ assert ctx.fsid == None
+
+ s = get_ceph_conf(fsid=fsid)
+ f = cephadm_fs.create_file('ceph.conf', contents=s)
+
+ cmd = self._get_cmd('--fsid', fsid, '--config', f.path)
+ with with_cephadm_ctx(cmd) as ctx:
+ _cephadm.command_ceph_volume(ctx)
+ assert ctx.fsid == fsid
+
+ cmd = self._get_cmd('--fsid', '10000000-0000-0000-0000-0000deadbeef', '--config', f.path)
+ with with_cephadm_ctx(cmd) as ctx:
+ err = 'fsid does not match ceph.conf'
+ with pytest.raises(_cephadm.Error, match=err):
+ _cephadm.command_ceph_volume(ctx)
+ assert ctx.fsid == None
+
+ def test_config(self, cephadm_fs):
+ cmd = self._get_cmd('--config', 'foo')
+ with with_cephadm_ctx(cmd) as ctx:
+ err = r'No such file or directory'
+ with pytest.raises(_cephadm.Error, match=err):
+ _cephadm.command_ceph_volume(ctx)
+
+ cephadm_fs.create_file('bar')
+ cmd = self._get_cmd('--config', 'bar')
+ with with_cephadm_ctx(cmd) as ctx:
+ _cephadm.command_ceph_volume(ctx)
+ assert ctx.config == 'bar'
+
+ def test_keyring(self, cephadm_fs):
+ cmd = self._get_cmd('--keyring', 'foo')
+ with with_cephadm_ctx(cmd) as ctx:
+ err = r'No such file or directory'
+ with pytest.raises(_cephadm.Error, match=err):
+ _cephadm.command_ceph_volume(ctx)
+
+ cephadm_fs.create_file('bar')
+ cmd = self._get_cmd('--keyring', 'bar')
+ with with_cephadm_ctx(cmd) as ctx:
+ _cephadm.command_ceph_volume(ctx)
+ assert ctx.keyring == 'bar'
+
+
+class TestIscsi:
+ def test_unit_run(self, cephadm_fs):
+ fsid = '9b9d7609-f4d5-4aba-94c8-effa764d96c9'
+ config_json = {
+ 'files': {'iscsi-gateway.cfg': ''}
+ }
+ with with_cephadm_ctx(['--image=ceph/ceph'], list_networks={}) as ctx:
+ import json
+ ctx.container_engine = mock_docker()
+ ctx.config_json = json.dumps(config_json)
+ ctx.fsid = fsid
+ _cephadm.get_parm.return_value = config_json
+ c = _cephadm.get_container(ctx, fsid, 'iscsi', 'daemon_id')
+
+ _cephadm.make_data_dir(ctx, fsid, 'iscsi', 'daemon_id')
+ _cephadm.deploy_daemon_units(
+ ctx,
+ fsid,
+ 0, 0,
+ 'iscsi',
+ 'daemon_id',
+ c,
+ True, True
+ )
+
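+ # the generated unit.run should launch a backgrounded tcmu-runner container followed by the rbd-target-api container, as reflected in the expected content below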
+ with open('/var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/unit.run') as f:
+ assert f.read() == """set -e
+if ! grep -qs /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs /proc/mounts; then mount -t configfs none /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs; fi
+# iscsi tcmu-runner container
+! /usr/bin/docker rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi.daemon_id-tcmu 2> /dev/null
+! /usr/bin/docker rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id-tcmu 2> /dev/null
+/usr/bin/docker run --rm --ipc=host --stop-signal=SIGTERM --ulimit nofile=1048576 --net=host --entrypoint /usr/local/scripts/tcmu-runner-entrypoint.sh --privileged --group-add=disk --init --name ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id-tcmu --pids-limit=0 -e CONTAINER_IMAGE=ceph/ceph -e NODE_NAME=host1 -e CEPH_USE_RANDOM_NONCE=1 -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/config:/etc/ceph/ceph.conf:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/keyring:/etc/ceph/keyring:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/iscsi-gateway.cfg:/etc/ceph/iscsi-gateway.cfg:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs:/sys/kernel/config -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/tcmu-runner-entrypoint.sh:/usr/local/scripts/tcmu-runner-entrypoint.sh -v /var/log/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9:/var/log:z -v /dev:/dev --mount type=bind,source=/lib/modules,destination=/lib/modules,ro=true ceph/ceph &
+# iscsi.daemon_id
+! /usr/bin/docker rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi.daemon_id 2> /dev/null
+! /usr/bin/docker rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id 2> /dev/null
+/usr/bin/docker run --rm --ipc=host --stop-signal=SIGTERM --ulimit nofile=1048576 --net=host --entrypoint /usr/bin/rbd-target-api --privileged --group-add=disk --init --name ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id --pids-limit=0 -e CONTAINER_IMAGE=ceph/ceph -e NODE_NAME=host1 -e CEPH_USE_RANDOM_NONCE=1 -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/config:/etc/ceph/ceph.conf:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/keyring:/etc/ceph/keyring:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/iscsi-gateway.cfg:/etc/ceph/iscsi-gateway.cfg:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs:/sys/kernel/config -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/tcmu-runner-entrypoint.sh:/usr/local/scripts/tcmu-runner-entrypoint.sh -v /var/log/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9:/var/log:z -v /dev:/dev --mount type=bind,source=/lib/modules,destination=/lib/modules,ro=true ceph/ceph
+"""
+
+ def test_get_container(self):
+ """
+ Due to a combination of socket.getfqdn() and podman's behavior of
+ adding the container name to the /etc/hosts file, we cannot use periods
+ in container names, but we still need to be able to detect old existing
+ containers. Assert this behaviour. This can likely be removed in Ceph 'R'.
+ """
+ fsid = '9b9d7609-f4d5-4aba-94c8-effa764d96c9'
+ with with_cephadm_ctx(['--image=ceph/ceph'], list_networks={}) as ctx:
+ ctx.fsid = fsid
+ c = _cephadm.get_container(ctx, fsid, 'iscsi', 'something')
+ assert c.cname == 'ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-something'
+ assert c.old_cname == 'ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi.something'
+
+
+class TestCheckHost:
+
+ @mock.patch('cephadm.find_executable', return_value='foo')
+ @mock.patch('cephadm.check_time_sync', return_value=True)
+ @mock.patch('cephadm.logger')
+ def test_container_engine(self, _logger, _check_time_sync, _find_executable):
+ ctx = _cephadm.CephadmContext()
+
+ ctx.container_engine = None
+ err = r'No container engine binary found'
+ with pytest.raises(_cephadm.Error, match=err):
+ _cephadm.command_check_host(ctx)
+
+ ctx.container_engine = mock_podman()
+ _cephadm.command_check_host(ctx)
+
+ ctx.container_engine = mock_docker()
+ _cephadm.command_check_host(ctx)
+
+
+class TestRmRepo:
+
+ @pytest.mark.parametrize('os_release',
+ [
+ # Apt
+ dedent("""
+ NAME="Ubuntu"
+ VERSION="20.04 LTS (Focal Fossa)"
+ ID=ubuntu
+ ID_LIKE=debian
+ PRETTY_NAME="Ubuntu 20.04 LTS"
+ VERSION_ID="20.04"
+ HOME_URL="https://www.ubuntu.com/"
+ SUPPORT_URL="https://help.ubuntu.com/"
+ BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
+ PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
+ VERSION_CODENAME=focal
+ UBUNTU_CODENAME=focal
+ """),
+
+ # YumDnf
+ dedent("""
+ NAME="CentOS Linux"
+ VERSION="8 (Core)"
+ ID="centos"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="8"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="CentOS Linux 8 (Core)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:centos:centos:8"
+ HOME_URL="https://www.centos.org/"
+ BUG_REPORT_URL="https://bugs.centos.org/"
+
+ CENTOS_MANTISBT_PROJECT="CentOS-8"
+ CENTOS_MANTISBT_PROJECT_VERSION="8"
+ REDHAT_SUPPORT_PRODUCT="centos"
+ REDHAT_SUPPORT_PRODUCT_VERSION="8"
+ """),
+
+ # Zypper
+ dedent("""
+ NAME="openSUSE Tumbleweed"
+ # VERSION="20210810"
+ ID="opensuse-tumbleweed"
+ ID_LIKE="opensuse suse"
+ VERSION_ID="20210810"
+ PRETTY_NAME="openSUSE Tumbleweed"
+ ANSI_COLOR="0;32"
+ CPE_NAME="cpe:/o:opensuse:tumbleweed:20210810"
+ BUG_REPORT_URL="https://bugs.opensuse.org"
+ HOME_URL="https://www.opensuse.org/"
+ DOCUMENTATION_URL="https://en.opensuse.org/Portal:Tumbleweed"
+ LOGO="distributor-logo"
+ """),
+ ])
+ @mock.patch('cephadm.find_executable', return_value='foo')
+ def test_container_engine(self, _find_executable, os_release, cephadm_fs):
+ cephadm_fs.create_file('/etc/os-release', contents=os_release)
+ ctx = _cephadm.CephadmContext()
+
+ ctx.container_engine = None
+ _cephadm.command_rm_repo(ctx)
+
+ ctx.container_engine = mock_podman()
+ _cephadm.command_rm_repo(ctx)
+
+ ctx.container_engine = mock_docker()
+ _cephadm.command_rm_repo(ctx)
+
+
+class TestValidateRepo:
+
+ @pytest.mark.parametrize('values',
+ [
+ # Apt - no checks
+ dict(
+ version="",
+ release="pacific",
+ err_text="",
+ os_release=dedent("""
+ NAME="Ubuntu"
+ VERSION="20.04 LTS (Focal Fossa)"
+ ID=ubuntu
+ ID_LIKE=debian
+ PRETTY_NAME="Ubuntu 20.04 LTS"
+ VERSION_ID="20.04"
+ HOME_URL="https://www.ubuntu.com/"
+ SUPPORT_URL="https://help.ubuntu.com/"
+ BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
+ PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
+ VERSION_CODENAME=focal
+ UBUNTU_CODENAME=focal
+ """)),
+
+ # YumDnf on Centos8 - OK
+ dict(
+ version="",
+ release="pacific",
+ err_text="",
+ os_release=dedent("""
+ NAME="CentOS Linux"
+ VERSION="8 (Core)"
+ ID="centos"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="8"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="CentOS Linux 8 (Core)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:centos:centos:8"
+ HOME_URL="https://www.centos.org/"
+ BUG_REPORT_URL="https://bugs.centos.org/"
+
+ CENTOS_MANTISBT_PROJECT="CentOS-8"
+ CENTOS_MANTISBT_PROJECT_VERSION="8"
+ REDHAT_SUPPORT_PRODUCT="centos"
+ REDHAT_SUPPORT_PRODUCT_VERSION="8"
+ """)),
+
+ # YumDnf on Fedora - Fedora not supported
+ dict(
+ version="",
+ release="pacific",
+ err_text="does not build Fedora",
+ os_release=dedent("""
+ NAME="Fedora Linux"
+ VERSION="35 (Cloud Edition)"
+ ID=fedora
+ VERSION_ID=35
+ VERSION_CODENAME=""
+ PLATFORM_ID="platform:f35"
+ PRETTY_NAME="Fedora Linux 35 (Cloud Edition)"
+ ANSI_COLOR="0;38;2;60;110;180"
+ LOGO=fedora-logo-icon
+ CPE_NAME="cpe:/o:fedoraproject:fedora:35"
+ HOME_URL="https://fedoraproject.org/"
+ DOCUMENTATION_URL="https://docs.fedoraproject.org/en-US/fedora/f35/system-administrators-guide/"
+ SUPPORT_URL="https://ask.fedoraproject.org/"
+ BUG_REPORT_URL="https://bugzilla.redhat.com/"
+ REDHAT_BUGZILLA_PRODUCT="Fedora"
+ REDHAT_BUGZILLA_PRODUCT_VERSION=35
+ REDHAT_SUPPORT_PRODUCT="Fedora"
+ REDHAT_SUPPORT_PRODUCT_VERSION=35
+ PRIVACY_POLICY_URL="https://fedoraproject.org/wiki/Legal:PrivacyPolicy"
+ VARIANT="Cloud Edition"
+ VARIANT_ID=cloud
+ """)),
+
+ # YumDnf on Centos 7 - no pacific
+ dict(
+ version="",
+ release="pacific",
+ err_text="does not support pacific",
+ os_release=dedent("""
+ NAME="CentOS Linux"
+ VERSION="7 (Core)"
+ ID="centos"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="7"
+ PRETTY_NAME="CentOS Linux 7 (Core)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:centos:centos:7"
+ HOME_URL="https://www.centos.org/"
+ BUG_REPORT_URL="https://bugs.centos.org/"
+
+ CENTOS_MANTISBT_PROJECT="CentOS-7"
+ CENTOS_MANTISBT_PROJECT_VERSION="7"
+ REDHAT_SUPPORT_PRODUCT="centos"
+ REDHAT_SUPPORT_PRODUCT_VERSION="7"
+ """)),
+
+ # YumDnf on Centos 7 - nothing after pacific
+ dict(
+ version="",
+ release="zillions",
+ err_text="does not support pacific",
+ os_release=dedent("""
+ NAME="CentOS Linux"
+ VERSION="7 (Core)"
+ ID="centos"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="7"
+ PRETTY_NAME="CentOS Linux 7 (Core)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:centos:centos:7"
+ HOME_URL="https://www.centos.org/"
+ BUG_REPORT_URL="https://bugs.centos.org/"
+
+ CENTOS_MANTISBT_PROJECT="CentOS-7"
+ CENTOS_MANTISBT_PROJECT_VERSION="7"
+ REDHAT_SUPPORT_PRODUCT="centos"
+ REDHAT_SUPPORT_PRODUCT_VERSION="7"
+ """)),
+
+ # YumDnf on Centos 7 - nothing v16 or higher
+ dict(
+ version="v16.1.3",
+ release="",
+ err_text="does not support",
+ os_release=dedent("""
+ NAME="CentOS Linux"
+ VERSION="7 (Core)"
+ ID="centos"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="7"
+ PRETTY_NAME="CentOS Linux 7 (Core)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:centos:centos:7"
+ HOME_URL="https://www.centos.org/"
+ BUG_REPORT_URL="https://bugs.centos.org/"
+
+ CENTOS_MANTISBT_PROJECT="CentOS-7"
+ CENTOS_MANTISBT_PROJECT_VERSION="7"
+ REDHAT_SUPPORT_PRODUCT="centos"
+ REDHAT_SUPPORT_PRODUCT_VERSION="7"
+ """)),
+ ])
+ @mock.patch('cephadm.find_executable', return_value='foo')
+ def test_distro_validation(self, _find_executable, values, cephadm_fs):
+ os_release = values['os_release']
+ release = values['release']
+ version = values['version']
+ err_text = values['err_text']
+
+ cephadm_fs.create_file('/etc/os-release', contents=os_release)
+ ctx = _cephadm.CephadmContext()
+ ctx.repo_url = 'http://localhost'
+ pkg = _cephadm.create_packager(ctx, stable=release, version=version)
+
+ if err_text:
+ with pytest.raises(_cephadm.Error, match=err_text):
+ pkg.validate()
+ else:
+ with mock.patch('cephadm.urlopen', return_value=None):
+ pkg.validate()
+
+ @pytest.mark.parametrize('values',
+ [
+ # Apt - not checked
+ dict(
+ version="",
+ release="pacific",
+ err_text="",
+ os_release=dedent("""
+ NAME="Ubuntu"
+ VERSION="20.04 LTS (Focal Fossa)"
+ ID=ubuntu
+ ID_LIKE=debian
+ PRETTY_NAME="Ubuntu 20.04 LTS"
+ VERSION_ID="20.04"
+ HOME_URL="https://www.ubuntu.com/"
+ SUPPORT_URL="https://help.ubuntu.com/"
+ BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
+ PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
+ VERSION_CODENAME=focal
+ UBUNTU_CODENAME=focal
+ """)),
+
+ # YumDnf on Centos8 - force failure
+ dict(
+ version="",
+ release="foobar",
+ err_text="failed to fetch repository metadata",
+ os_release=dedent("""
+ NAME="CentOS Linux"
+ VERSION="8 (Core)"
+ ID="centos"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="8"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="CentOS Linux 8 (Core)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:centos:centos:8"
+ HOME_URL="https://www.centos.org/"
+ BUG_REPORT_URL="https://bugs.centos.org/"
+
+ CENTOS_MANTISBT_PROJECT="CentOS-8"
+ CENTOS_MANTISBT_PROJECT_VERSION="8"
+ REDHAT_SUPPORT_PRODUCT="centos"
+ REDHAT_SUPPORT_PRODUCT_VERSION="8"
+ """)),
+ ])
+ @mock.patch('cephadm.find_executable', return_value='foo')
+ @mock.patch('cephadm.logger')
+ def test_http_validation(self, _logger, _find_executable, values, cephadm_fs):
+ from urllib.error import HTTPError
+
+ os_release = values['os_release']
+ release = values['release']
+ version = values['version']
+ err_text = values['err_text']
+
+ cephadm_fs.create_file('/etc/os-release', contents=os_release)
+ ctx = _cephadm.CephadmContext()
+ ctx.repo_url = 'http://localhost'
+ pkg = _cephadm.create_packager(ctx, stable=release, version=version)
+
+ with mock.patch('cephadm.urlopen') as _urlopen:
+ _urlopen.side_effect = HTTPError(ctx.repo_url, 404, "not found", None, fp=None)
+ if err_text:
+ with pytest.raises(_cephadm.Error, match=err_text):
+ pkg.validate()
+ else:
+ pkg.validate()
+
+
+class TestPull:
+
+ @mock.patch('time.sleep')
+ @mock.patch('cephadm.call', return_value=('', '', 0))
+ @mock.patch('cephadm.get_image_info_from_inspect', return_value={})
+ @mock.patch('cephadm.logger')
+ def test_error(self, _logger, _get_image_info_from_inspect, _call, _sleep):
+ ctx = _cephadm.CephadmContext()
+ ctx.container_engine = mock_podman()
+ ctx.insecure = False
+
+ _call.return_value = ('', '', 0)
+ retval = _cephadm.command_pull(ctx)
+ assert retval == 0
+
+ err = 'maximum retries reached'
+
+ _call.return_value = ('', 'foobar', 1)
+ with pytest.raises(_cephadm.Error) as e:
+ _cephadm.command_pull(ctx)
+ assert err not in str(e.value)
+
+ _call.return_value = ('', 'net/http: TLS handshake timeout', 1)
+ with pytest.raises(_cephadm.Error) as e:
+ _cephadm.command_pull(ctx)
+ assert err in str(e.value)
+
+ @mock.patch('cephadm.get_image_info_from_inspect', return_value={})
+ @mock.patch('cephadm.infer_local_ceph_image', return_value='last_local_ceph_image')
+ def test_image(self, _infer_local_ceph_image, _get_image_info_from_inspect):
+ cmd = ['pull']
+ with with_cephadm_ctx(cmd) as ctx:
+ retval = _cephadm.command_pull(ctx)
+ assert retval == 0
+ assert ctx.image == _cephadm.DEFAULT_IMAGE
+
+ with mock.patch.dict(os.environ, {"CEPHADM_IMAGE": 'cephadm_image_environ'}):
+ cmd = ['pull']
+ with with_cephadm_ctx(cmd) as ctx:
+ retval = _cephadm.command_pull(ctx)
+ assert retval == 0
+ assert ctx.image == 'cephadm_image_environ'
+
+ cmd = ['--image', 'cephadm_image_param', 'pull']
+ with with_cephadm_ctx(cmd) as ctx:
+ retval = _cephadm.command_pull(ctx)
+ assert retval == 0
+ assert ctx.image == 'cephadm_image_param'
+
+
+class TestApplySpec:
+
+ def test_extract_host_info_from_applied_spec(self, cephadm_fs):
+ yaml = '''---
+service_type: host
+hostname: vm-00
+addr: 192.168.122.44
+labels:
+ - example1
+ - example2
+---
+service_type: host
+hostname: vm-01
+addr: 192.168.122.247
+labels:
+ - grafana
+---
+service_type: host
+hostname: vm-02
+---
+---
+service_type: rgw
+service_id: myrgw
+spec:
+ rgw_frontend_ssl_certificate: |
+ -----BEGIN PRIVATE KEY-----
+ V2VyIGRhcyBsaWVzdCBpc3QgZG9vZi4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFt
+ ZXQsIGNvbnNldGV0dXIgc2FkaXBzY2luZyBlbGl0ciwgc2VkIGRpYW0gbm9udW15
+ IGVpcm1vZCB0ZW1wb3IgaW52aWR1bnQgdXQgbGFib3JlIGV0IGRvbG9yZSBtYWdu
+ YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg
+ ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=
+ -----END PRIVATE KEY-----
+ -----BEGIN CERTIFICATE-----
+ V2VyIGRhcyBsaWVzdCBpc3QgZG9vZi4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFt
+ ZXQsIGNvbnNldGV0dXIgc2FkaXBzY2luZyBlbGl0ciwgc2VkIGRpYW0gbm9udW15
+ IGVpcm1vZCB0ZW1wb3IgaW52aWR1bnQgdXQgbGFib3JlIGV0IGRvbG9yZSBtYWdu
+ YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg
+ ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=
+ -----END CERTIFICATE-----
+ ssl: true
+---
+'''
+
+ cephadm_fs.create_file('spec.yml', contents=yaml)
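+ # only the three 'host' documents should be extracted; the rgw service spec and the empty document are ignored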
+ retdic = [{'hostname': 'vm-00', 'addr': '192.168.122.44'},
+ {'hostname': 'vm-01', 'addr': '192.168.122.247'},
+ {'hostname': 'vm-02',}]
+
+ with open('spec.yml') as f:
+ dic = _cephadm._extract_host_info_from_applied_spec(f)
+ assert dic == retdic
+
+ @mock.patch('cephadm.call', return_value=('', '', 0))
+ @mock.patch('cephadm.logger')
+ def test_distribute_ssh_keys(self, _logger, _call):
+ ctx = _cephadm.CephadmContext()
+ ctx.ssh_public_key = None
+ ctx.ssh_user = 'root'
+
+ host_spec = {'service_type': 'host', 'hostname': 'vm-02', 'addr': '192.168.122.165'}
+
+ retval = _cephadm._distribute_ssh_keys(ctx, host_spec, 'bootstrap_hostname')
+
+ assert retval == 0
+
+ _call.return_value = ('', '', 1)
+
+ retval = _cephadm._distribute_ssh_keys(ctx, host_spec, 'bootstrap_hostname')
+
+ assert retval == 1
+
+
+class TestSNMPGateway:
+ V2c_config = {
+ 'snmp_community': 'public',
+ 'destination': '192.168.1.10:162',
+ 'snmp_version': 'V2c',
+ }
+ V3_no_priv_config = {
+ 'destination': '192.168.1.10:162',
+ 'snmp_version': 'V3',
+ 'snmp_v3_auth_username': 'myuser',
+ 'snmp_v3_auth_password': 'mypassword',
+ 'snmp_v3_auth_protocol': 'SHA',
+ 'snmp_v3_engine_id': '8000C53F00000000',
+ }
+ V3_priv_config = {
+ 'destination': '192.168.1.10:162',
+ 'snmp_version': 'V3',
+ 'snmp_v3_auth_username': 'myuser',
+ 'snmp_v3_auth_password': 'mypassword',
+ 'snmp_v3_auth_protocol': 'SHA',
+ 'snmp_v3_priv_protocol': 'DES',
+ 'snmp_v3_priv_password': 'mysecret',
+ 'snmp_v3_engine_id': '8000C53F00000000',
+ }
+ no_destination_config = {
+ 'snmp_version': 'V3',
+ 'snmp_v3_auth_username': 'myuser',
+ 'snmp_v3_auth_password': 'mypassword',
+ 'snmp_v3_auth_protocol': 'SHA',
+ 'snmp_v3_priv_protocol': 'DES',
+ 'snmp_v3_priv_password': 'mysecret',
+ 'snmp_v3_engine_id': '8000C53F00000000',
+ }
+ bad_version_config = {
+ 'snmp_community': 'public',
+ 'destination': '192.168.1.10:162',
+ 'snmp_version': 'V1',
+ }
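+ # each dict above is fed to the daemon via config_json/get_parm in the tests below: V2c only needs a community string, while V3 adds auth (and optionally priv) credentials that end up in snmp-gateway.conf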
+
+ def test_unit_run_V2c(self, cephadm_fs):
+ fsid = 'ca734440-3dc6-11ec-9b98-5254002537a6'
+ with with_cephadm_ctx(['--image=docker.io/maxwo/snmp-notifier:v1.2.1'], list_networks={}) as ctx:
+ import json
+ ctx.config_json = json.dumps(self.V2c_config)
+ ctx.fsid = fsid
+ ctx.tcp_ports = '9464'
+ _cephadm.get_parm.return_value = self.V2c_config
+ c = _cephadm.get_container(ctx, fsid, 'snmp-gateway', 'daemon_id')
+
+ _cephadm.make_data_dir(ctx, fsid, 'snmp-gateway', 'daemon_id')
+
+ _cephadm.create_daemon_dirs(ctx, fsid, 'snmp-gateway', 'daemon_id', 0, 0)
+ with open(f'/var/lib/ceph/{fsid}/snmp-gateway.daemon_id/snmp-gateway.conf', 'r') as f:
+ conf = f.read().rstrip()
+ assert conf == 'SNMP_NOTIFIER_COMMUNITY=public'
+
+ _cephadm.deploy_daemon_units(
+ ctx,
+ fsid,
+ 0, 0,
+ 'snmp-gateway',
+ 'daemon_id',
+ c,
+ True, True
+ )
+ with open(f'/var/lib/ceph/{fsid}/snmp-gateway.daemon_id/unit.run', 'r') as f:
+ run_cmd = f.readlines()[-1].rstrip()
+ assert run_cmd.endswith('docker.io/maxwo/snmp-notifier:v1.2.1 --web.listen-address=:9464 --snmp.destination=192.168.1.10:162 --snmp.version=V2c --log.level=info --snmp.trap-description-template=/etc/snmp_notifier/description-template.tpl')
+
+ def test_unit_run_V3_noPriv(self, cephadm_fs):
+ fsid = 'ca734440-3dc6-11ec-9b98-5254002537a6'
+ with with_cephadm_ctx(['--image=docker.io/maxwo/snmp-notifier:v1.2.1'], list_networks={}) as ctx:
+ import json
+ ctx.config_json = json.dumps(self.V3_no_priv_config)
+ ctx.fsid = fsid
+ ctx.tcp_ports = '9465'
+ _cephadm.get_parm.return_value = self.V3_no_priv_config
+ c = _cephadm.get_container(ctx, fsid, 'snmp-gateway', 'daemon_id')
+
+ _cephadm.make_data_dir(ctx, fsid, 'snmp-gateway', 'daemon_id')
+
+ _cephadm.create_daemon_dirs(ctx, fsid, 'snmp-gateway', 'daemon_id', 0, 0)
+ with open(f'/var/lib/ceph/{fsid}/snmp-gateway.daemon_id/snmp-gateway.conf', 'r') as f:
+ conf = f.read()
+ assert conf == 'SNMP_NOTIFIER_AUTH_USERNAME=myuser\nSNMP_NOTIFIER_AUTH_PASSWORD=mypassword\n'
+
+ _cephadm.deploy_daemon_units(
+ ctx,
+ fsid,
+ 0, 0,
+ 'snmp-gateway',
+ 'daemon_id',
+ c,
+ True, True
+ )
+ with open(f'/var/lib/ceph/{fsid}/snmp-gateway.daemon_id/unit.run', 'r') as f:
+ run_cmd = f.readlines()[-1].rstrip()
+ assert run_cmd.endswith('docker.io/maxwo/snmp-notifier:v1.2.1 --web.listen-address=:9465 --snmp.destination=192.168.1.10:162 --snmp.version=V3 --log.level=info --snmp.trap-description-template=/etc/snmp_notifier/description-template.tpl --snmp.authentication-enabled --snmp.authentication-protocol=SHA --snmp.security-engine-id=8000C53F00000000')
+
+ def test_unit_run_V3_Priv(self, cephadm_fs):
+ fsid = 'ca734440-3dc6-11ec-9b98-5254002537a6'
+ with with_cephadm_ctx(['--image=docker.io/maxwo/snmp-notifier:v1.2.1'], list_networks={}) as ctx:
+ import json
+ ctx.config_json = json.dumps(self.V3_priv_config)
+ ctx.fsid = fsid
+ ctx.tcp_ports = '9464'
+ _cephadm.get_parm.return_value = self.V3_priv_config
+ c = _cephadm.get_container(ctx, fsid, 'snmp-gateway', 'daemon_id')
+
+ _cephadm.make_data_dir(ctx, fsid, 'snmp-gateway', 'daemon_id')
+
+ _cephadm.create_daemon_dirs(ctx, fsid, 'snmp-gateway', 'daemon_id', 0, 0)
+ with open(f'/var/lib/ceph/{fsid}/snmp-gateway.daemon_id/snmp-gateway.conf', 'r') as f:
+ conf = f.read()
+ assert conf == 'SNMP_NOTIFIER_AUTH_USERNAME=myuser\nSNMP_NOTIFIER_AUTH_PASSWORD=mypassword\nSNMP_NOTIFIER_PRIV_PASSWORD=mysecret\n'
+
+ _cephadm.deploy_daemon_units(
+ ctx,
+ fsid,
+ 0, 0,
+ 'snmp-gateway',
+ 'daemon_id',
+ c,
+ True, True
+ )
+ with open(f'/var/lib/ceph/{fsid}/snmp-gateway.daemon_id/unit.run', 'r') as f:
+ run_cmd = f.readlines()[-1].rstrip()
+ assert run_cmd.endswith('docker.io/maxwo/snmp-notifier:v1.2.1 --web.listen-address=:9464 --snmp.destination=192.168.1.10:162 --snmp.version=V3 --log.level=info --snmp.trap-description-template=/etc/snmp_notifier/description-template.tpl --snmp.authentication-enabled --snmp.authentication-protocol=SHA --snmp.security-engine-id=8000C53F00000000 --snmp.private-enabled --snmp.private-protocol=DES')
+
+ def test_unit_run_no_dest(self, cephadm_fs):
+ fsid = 'ca734440-3dc6-11ec-9b98-5254002537a6'
+ with with_cephadm_ctx(['--image=docker.io/maxwo/snmp-notifier:v1.2.1'], list_networks={}) as ctx:
+ import json
+ ctx.config_json = json.dumps(self.no_destination_config)
+ ctx.fsid = fsid
+ ctx.tcp_ports = '9464'
+ _cephadm.get_parm.return_value = self.no_destination_config
+
+ with pytest.raises(Exception) as e:
+ c = _cephadm.get_container(ctx, fsid, 'snmp-gateway', 'daemon_id')
+ assert str(e.value) == "config is missing destination attribute(<ip>:<port>) of the target SNMP listener"
+
+ def test_unit_run_bad_version(self, cephadm_fs):
+ fsid = 'ca734440-3dc6-11ec-9b98-5254002537a6'
+ with with_cephadm_ctx(['--image=docker.io/maxwo/snmp-notifier:v1.2.1'], list_networks={}) as ctx:
+ import json
+ ctx.config_json = json.dumps(self.bad_version_config)
+ ctx.fsid = fsid
+ ctx.tcp_ports = '9464'
+ _cephadm.get_parm.return_value = self.bad_version_config
+
+ with pytest.raises(Exception) as e:
+ c = _cephadm.get_container(ctx, fsid, 'snmp-gateway', 'daemon_id')
+ assert str(e.value) == 'not a valid snmp version: V1'
+
+
+class TestNetworkValidation:
+
+ def test_ipv4_subnet(self):
+ rc, v, msg = _cephadm.check_subnet('192.168.1.0/24')
+ assert rc == 0 and v[0] == 4
+
+ def test_ipv4_subnet_list(self):
+ rc, v, msg = _cephadm.check_subnet('192.168.1.0/24,10.90.90.0/24')
+ assert rc == 0 and not msg
+
+ def test_ipv4_subnet_list_with_spaces(self):
+ rc, v, msg = _cephadm.check_subnet('192.168.1.0/24, 10.90.90.0/24 ')
+ assert rc == 0 and not msg
+
+ def test_ipv4_subnet_badlist(self):
+ rc, v, msg = _cephadm.check_subnet('192.168.1.0/24,192.168.1.1')
+ assert rc == 1 and msg
+
+ def test_ipv4_subnet_mixed(self):
+ rc, v, msg = _cephadm.check_subnet('192.168.100.0/24,fe80::/64')
+ assert rc == 0 and v == [4,6]
+
+ def test_ipv6_subnet(self):
+ rc, v, msg = _cephadm.check_subnet('fe80::/64')
+ assert rc == 0 and v[0] == 6
+
+ def test_subnet_mask_missing(self):
+ rc, v, msg = _cephadm.check_subnet('192.168.1.58')
+ assert rc == 1 and msg
+
+ def test_subnet_mask_junk(self):
+ rc, v, msg = _cephadm.check_subnet('wah')
+ assert rc == 1 and msg
+
+ def test_ip_in_subnet(self):
+ # valid ip and only one valid subnet
+ rc = _cephadm.ip_in_subnets('192.168.100.1', '192.168.100.0/24')
+ assert rc is True
+
+ # valid ip and valid subnets list without spaces
+ rc = _cephadm.ip_in_subnets('192.168.100.1', '192.168.100.0/24,10.90.90.0/24')
+ assert rc is True
+
+ # valid ip and valid subnets list with spaces
+ rc = _cephadm.ip_in_subnets('10.90.90.2', '192.168.1.0/24, 192.168.100.0/24, 10.90.90.0/24')
+ assert rc is True
+
+ # valid ip that doesn't belong to any subnet
+ rc = _cephadm.ip_in_subnets('192.168.100.2', '192.168.50.0/24, 10.90.90.0/24')
+ assert rc is False
+
+ # valid ip that doesn't belong to the subnet (only 14 hosts)
+ rc = _cephadm.ip_in_subnets('192.168.100.20', '192.168.100.0/28')
+ assert rc is False
+
+        # valid ip and valid IPv6 network
+ rc = _cephadm.ip_in_subnets('fe80::5054:ff:fef4:873a', 'fe80::/64')
+ assert rc is True
+
+        # valid wrapped ip and valid IPv6 network
+ rc = _cephadm.ip_in_subnets('[fe80::5054:ff:fef4:873a]', 'fe80::/64')
+ assert rc is True
+
+        # valid ip that doesn't belong to the IPv6 network
+ rc = _cephadm.ip_in_subnets('fe80::5054:ff:fef4:873a', '2001:db8:85a3::/64')
+ assert rc is False
+
+ # invalid IPv4 and valid subnets list
+ with pytest.raises(Exception):
+            rc = _cephadm.ip_in_subnets('10.90.200.', '192.168.1.0/24, 192.168.100.0/24, 10.90.90.0/24')
+
+ # invalid IPv6 and valid subnets list
+ with pytest.raises(Exception):
+            rc = _cephadm.ip_in_subnets('fe80:2030:31:24', 'fe80::/64')
+
+ @pytest.mark.parametrize("conf", [
+ """[global]
+public_network='1.1.1.0/24,2.2.2.0/24'
+cluster_network="3.3.3.0/24, 4.4.4.0/24"
+""",
+ """[global]
+public_network=" 1.1.1.0/24,2.2.2.0/24 "
+cluster_network=3.3.3.0/24, 4.4.4.0/24
+""",
+ """[global]
+ public_network= 1.1.1.0/24, 2.2.2.0/24
+ cluster_network='3.3.3.0/24,4.4.4.0/24'
+"""])
+ @mock.patch('cephadm.list_networks')
+ @mock.patch('cephadm.logger')
+ def test_get_networks_from_conf(self, _logger, _list_networks, conf, cephadm_fs):
+ cephadm_fs.create_file('ceph.conf', contents=conf)
+ _list_networks.return_value = {'1.1.1.0/24': {'eth0': ['1.1.1.1']},
+ '2.2.2.0/24': {'eth1': ['2.2.2.2']},
+ '3.3.3.0/24': {'eth2': ['3.3.3.3']},
+ '4.4.4.0/24': {'eth3': ['4.4.4.4']}}
+ ctx = _cephadm.CephadmContext()
+ ctx.config = 'ceph.conf'
+ ctx.mon_ip = '1.1.1.1'
+ ctx.cluster_network = None
+ # what the cephadm module does with the public network string is
+ # [x.strip() for x in out.split(',')]
+ # so we must make sure our output, through that alteration,
+ # generates correctly formatted networks
+ def _str_to_networks(s):
+ return [x.strip() for x in s.split(',')]
+ public_network = _cephadm.get_public_net_from_cfg(ctx)
+ assert _str_to_networks(public_network) == ['1.1.1.0/24', '2.2.2.0/24']
+ cluster_network, ipv6 = _cephadm.prepare_cluster_network(ctx)
+ assert not ipv6
+ assert _str_to_networks(cluster_network) == ['3.3.3.0/24', '4.4.4.0/24']
+
+class TestSysctl:
+ @mock.patch('cephadm.sysctl_get')
+ def test_filter_sysctl_settings(self, _sysctl_get):
+ ctx = _cephadm.CephadmContext()
+ input = [
+ # comment-only lines should be ignored
+ "# just a comment",
+            # whitespace-only lines should also be ignored
+ " \t ",
+ " = \t ",
+ # inline comments are stripped when querying
+ "something = value # inline comment",
+ "fs.aio-max-nr = 1048576",
+ "kernel.pid_max = 4194304",
+ "vm.lowmem_reserve_ratio = 256\t256\t32\t0\t0",
+ " vm.max_map_count = 65530 ",
+ " vm.max_map_count = 65530 ",
+ ]
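+        # one mocked "current" value per queried setting below, in order; the
+        # expectation is that settings whose live value already matches are
+        # filtered out and the remaining lines are returned unchanged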
+ _sysctl_get.side_effect = [
+ "value",
+ "1",
+ "4194304",
+ "256\t256\t32\t0\t0",
+ "65530",
+ "something else",
+ ]
+ result = _cephadm.filter_sysctl_settings(ctx, input)
+ assert len(_sysctl_get.call_args_list) == 6
+ assert _sysctl_get.call_args_list[0].args[1] == "something"
+ assert _sysctl_get.call_args_list[1].args[1] == "fs.aio-max-nr"
+ assert _sysctl_get.call_args_list[2].args[1] == "kernel.pid_max"
+ assert _sysctl_get.call_args_list[3].args[1] == "vm.lowmem_reserve_ratio"
+ assert _sysctl_get.call_args_list[4].args[1] == "vm.max_map_count"
+ assert _sysctl_get.call_args_list[5].args[1] == "vm.max_map_count"
+ assert result == [
+ "fs.aio-max-nr = 1048576",
+ " vm.max_map_count = 65530 ",
+ ]
+
+class TestJaeger:
+ single_es_node_conf = {
+ 'elasticsearch_nodes': 'http://192.168.0.1:9200'}
+ multiple_es_nodes_conf = {
+ 'elasticsearch_nodes': 'http://192.168.0.1:9200,http://192.168.0.2:9300'}
+ agent_conf = {
+ 'collector_nodes': 'test:14250'}
+
+ def test_single_es(self, cephadm_fs):
+ fsid = 'ca734440-3dc6-11ec-9b98-5254002537a6'
+ with with_cephadm_ctx(['--image=quay.io/jaegertracing/jaeger-collector:1.29'], list_networks={}) as ctx:
+ import json
+ ctx.config_json = json.dumps(self.single_es_node_conf)
+ ctx.fsid = fsid
+ c = _cephadm.get_container(ctx, fsid, 'jaeger-collector', 'daemon_id')
+ _cephadm.create_daemon_dirs(ctx, fsid, 'jaeger-collector', 'daemon_id', 0, 0)
+ _cephadm.deploy_daemon_units(
+ ctx,
+ fsid,
+ 0, 0,
+ 'jaeger-collector',
+ 'daemon_id',
+ c,
+ True, True
+ )
+ with open(f'/var/lib/ceph/{fsid}/jaeger-collector.daemon_id/unit.run', 'r') as f:
+ run_cmd = f.readlines()[-1].rstrip()
+ assert run_cmd.endswith('SPAN_STORAGE_TYPE=elasticsearch -e ES_SERVER_URLS=http://192.168.0.1:9200 quay.io/jaegertracing/jaeger-collector:1.29')
+
+ def test_multiple_es(self, cephadm_fs):
+ fsid = 'ca734440-3dc6-11ec-9b98-5254002537a6'
+ with with_cephadm_ctx(['--image=quay.io/jaegertracing/jaeger-collector:1.29'], list_networks={}) as ctx:
+ import json
+ ctx.config_json = json.dumps(self.multiple_es_nodes_conf)
+ ctx.fsid = fsid
+ c = _cephadm.get_container(ctx, fsid, 'jaeger-collector', 'daemon_id')
+ _cephadm.create_daemon_dirs(ctx, fsid, 'jaeger-collector', 'daemon_id', 0, 0)
+ _cephadm.deploy_daemon_units(
+ ctx,
+ fsid,
+ 0, 0,
+ 'jaeger-collector',
+ 'daemon_id',
+ c,
+ True, True
+ )
+ with open(f'/var/lib/ceph/{fsid}/jaeger-collector.daemon_id/unit.run', 'r') as f:
+ run_cmd = f.readlines()[-1].rstrip()
+ assert run_cmd.endswith('SPAN_STORAGE_TYPE=elasticsearch -e ES_SERVER_URLS=http://192.168.0.1:9200,http://192.168.0.2:9300 quay.io/jaegertracing/jaeger-collector:1.29')
+
+ def test_jaeger_agent(self, cephadm_fs):
+ fsid = 'ca734440-3dc6-11ec-9b98-5254002537a6'
+ with with_cephadm_ctx(['--image=quay.io/jaegertracing/jaeger-agent:1.29'], list_networks={}) as ctx:
+ import json
+ ctx.config_json = json.dumps(self.agent_conf)
+ ctx.fsid = fsid
+ c = _cephadm.get_container(ctx, fsid, 'jaeger-agent', 'daemon_id')
+ _cephadm.create_daemon_dirs(ctx, fsid, 'jaeger-agent', 'daemon_id', 0, 0)
+ _cephadm.deploy_daemon_units(
+ ctx,
+ fsid,
+ 0, 0,
+ 'jaeger-agent',
+ 'daemon_id',
+ c,
+ True, True
+ )
+ with open(f'/var/lib/ceph/{fsid}/jaeger-agent.daemon_id/unit.run', 'r') as f:
+ run_cmd = f.readlines()[-1].rstrip()
+ assert run_cmd.endswith('quay.io/jaegertracing/jaeger-agent:1.29 --reporter.grpc.host-port=test:14250 --processor.jaeger-compact.server-host-port=6799')
+
+class TestRescan(fake_filesystem_unittest.TestCase):
+
+ def setUp(self):
+ self.setUpPyfakefs()
+ if not fake_filesystem.is_root():
+ fake_filesystem.set_uid(0)
+
+ self.fs.create_dir('/sys/class')
+ self.ctx = _cephadm.CephadmContext()
+ self.ctx.func = _cephadm.command_rescan_disks
+
+ @mock.patch('cephadm.logger')
+ def test_no_hbas(self, _logger):
+ out = _cephadm.command_rescan_disks(self.ctx)
+ assert out == 'Ok. No compatible HBAs found'
+
+ @mock.patch('cephadm.logger')
+ def test_success(self, _logger):
+ self.fs.create_file('/sys/class/scsi_host/host0/scan')
+ self.fs.create_file('/sys/class/scsi_host/host1/scan')
+ out = _cephadm.command_rescan_disks(self.ctx)
+ assert out.startswith('Ok. 2 adapters detected: 2 rescanned, 0 skipped, 0 failed')
+
+ @mock.patch('cephadm.logger')
+ def test_skip_usb_adapter(self, _logger):
+ self.fs.create_file('/sys/class/scsi_host/host0/scan')
+ self.fs.create_file('/sys/class/scsi_host/host1/scan')
+ self.fs.create_file('/sys/class/scsi_host/host1/proc_name', contents='usb-storage')
+ out = _cephadm.command_rescan_disks(self.ctx)
+ assert out.startswith('Ok. 2 adapters detected: 1 rescanned, 1 skipped, 0 failed')
+
+ @mock.patch('cephadm.logger')
+ def test_skip_unknown_adapter(self, _logger):
+ self.fs.create_file('/sys/class/scsi_host/host0/scan')
+ self.fs.create_file('/sys/class/scsi_host/host1/scan')
+ self.fs.create_file('/sys/class/scsi_host/host1/proc_name', contents='unknown')
+ out = _cephadm.command_rescan_disks(self.ctx)
+ assert out.startswith('Ok. 2 adapters detected: 1 rescanned, 1 skipped, 0 failed')
diff --git a/src/cephadm/tests/test_container_engine.py b/src/cephadm/tests/test_container_engine.py
new file mode 100644
index 000000000..433f01270
--- /dev/null
+++ b/src/cephadm/tests/test_container_engine.py
@@ -0,0 +1,54 @@
+from unittest import mock
+
+import pytest
+
+from tests.fixtures import with_cephadm_ctx, import_cephadm
+
+_cephadm = import_cephadm()
+
+
+def test_container_engine():
+ with pytest.raises(NotImplementedError):
+ _cephadm.ContainerEngine()
+
+ class PhonyContainerEngine(_cephadm.ContainerEngine):
+ EXE = "true"
+
+ with mock.patch("cephadm.find_program") as find_program:
+ find_program.return_value = "/usr/bin/true"
+ pce = PhonyContainerEngine()
+ assert str(pce) == "true (/usr/bin/true)"
+
+
+def test_podman():
+ with mock.patch("cephadm.find_program") as find_program:
+ find_program.return_value = "/usr/bin/podman"
+ pm = _cephadm.Podman()
+ find_program.assert_called()
+ with pytest.raises(RuntimeError):
+ pm.version
+ with mock.patch("cephadm.call_throws") as call_throws:
+ call_throws.return_value = ("4.9.9", None, None)
+ with with_cephadm_ctx([]) as ctx:
+ pm.get_version(ctx)
+ assert pm.version == (4, 9, 9)
+ assert str(pm) == "podman (/usr/bin/podman) version 4.9.9"
+
+
+def test_podman_badversion():
+ with mock.patch("cephadm.find_program") as find_program:
+ find_program.return_value = "/usr/bin/podman"
+ pm = _cephadm.Podman()
+ find_program.assert_called()
+ with mock.patch("cephadm.call_throws") as call_throws:
+ call_throws.return_value = ("4.10.beta2", None, None)
+ with with_cephadm_ctx([]) as ctx:
+ with pytest.raises(ValueError):
+ pm.get_version(ctx)
+
+
+def test_docker():
+ with mock.patch("cephadm.find_program") as find_program:
+ find_program.return_value = "/usr/bin/docker"
+ docker = _cephadm.Docker()
+ assert str(docker) == "docker (/usr/bin/docker)"
diff --git a/src/cephadm/tests/test_enclosure.py b/src/cephadm/tests/test_enclosure.py
new file mode 100644
index 000000000..1ea419fb3
--- /dev/null
+++ b/src/cephadm/tests/test_enclosure.py
@@ -0,0 +1,72 @@
+import pytest
+
+from unittest import mock
+from tests.fixtures import host_sysfs, import_cephadm
+
+_cephadm = import_cephadm()
+
+
+@pytest.fixture
+def enclosure(host_sysfs):
+ e = _cephadm.Enclosure(
+ enc_id='1',
+ enc_path='/sys/class/scsi_generic/sg2/device/enclosure/0:0:1:0',
+ dev_path='/sys/class/scsi_generic/sg2')
+ yield e
+
+
+class TestEnclosure:
+
+ def test_enc_metadata(self, enclosure):
+ """Check metadata for the enclosure e.g. vendor and model"""
+
+ assert enclosure.vendor == "EnclosuresInc"
+ assert enclosure.components == '12'
+ assert enclosure.model == "D12"
+ assert enclosure.enc_id == '1'
+
+ assert enclosure.ses_paths == ['sg2']
+ assert enclosure.path_count == 1
+
+ def test_enc_slots(self, enclosure):
+ """Check slot count"""
+
+ assert len(enclosure.slot_map) == 12
+
+ def test_enc_slot_format(self, enclosure):
+ """Check the attributes of a slot are as expected"""
+
+ assert all(k in ['fault', 'locate', 'serial', 'status']
+ for k, _v in enclosure.slot_map['0'].items())
+
+ def test_enc_slot_status(self, enclosure):
+ """Check the number of occupied slots is correct"""
+
+ occupied_slots = [slot_id for slot_id in enclosure.slot_map
+ if enclosure.slot_map[slot_id].get('status').upper() == 'OK']
+
+ assert len(occupied_slots) == 6
+
+ def test_enc_disk_count(self, enclosure):
+ """Check the disks found matches the slot info"""
+
+ assert len(enclosure.device_lookup) == 6
+ assert enclosure.device_count == 6
+
+ def test_enc_device_serial(self, enclosure):
+ """Check the device serial numbers are as expected"""
+
+ assert all(fake_serial in enclosure.device_lookup.keys()
+ for fake_serial in [
+ 'fake000',
+ 'fake001',
+ 'fake002',
+ 'fake003',
+ 'fake004',
+ 'fake005'])
+
+ def test_enc_slot_to_serial(self, enclosure):
+ """Check serial number to slot matches across slot_map and device_lookup"""
+
+ for serial, slot in enclosure.device_lookup.items():
+ assert enclosure.slot_map[slot].get('serial') == serial
diff --git a/src/cephadm/tests/test_ingress.py b/src/cephadm/tests/test_ingress.py
new file mode 100644
index 000000000..798c73708
--- /dev/null
+++ b/src/cephadm/tests/test_ingress.py
@@ -0,0 +1,350 @@
+from unittest import mock
+import json
+
+import pytest
+
+from tests.fixtures import with_cephadm_ctx, cephadm_fs, import_cephadm
+
+_cephadm = import_cephadm()
+
+SAMPLE_UUID = "2d018a3f-8a8f-4cb9-a7cf-48bebb2cbaae"
+SAMPLE_HAPROXY_IMAGE = "registry.example.net/haproxy/haproxy:latest"
+SAMPLE_KEEPALIVED_IMAGE = "registry.example.net/keepalive/keepalived:latest"
+
+
+def good_haproxy_json():
+ return haproxy_json(files=True)
+
+
+def haproxy_json(**kwargs):
+ if kwargs.get("files"):
+ return {
+ "files": {
+ "haproxy.cfg": "",
+ },
+ }
+ return {}
+
+
+def good_keepalived_json():
+ return keepalived_json(files=True)
+
+
+def keepalived_json(**kwargs):
+ if kwargs.get("files"):
+ return {
+ "files": {
+ "keepalived.conf": "",
+ },
+ }
+ return {}
+
+
+@pytest.mark.parametrize(
+ "args",
+ # args: <fsid>, <daemon_id>, <config_json>, <image>
+ [
+ # fail due to: invalid fsid
+ (["foobar", "wilma", good_haproxy_json(), SAMPLE_HAPROXY_IMAGE]),
+ # fail due to: invalid daemon_id
+ ([SAMPLE_UUID, "", good_haproxy_json(), SAMPLE_HAPROXY_IMAGE]),
+ # fail due to: invalid image
+ ([SAMPLE_UUID, "wilma", good_haproxy_json(), ""]),
+ # fail due to: no files in config_json
+ (
+ [
+ SAMPLE_UUID,
+ "wilma",
+ haproxy_json(files=False),
+ SAMPLE_HAPROXY_IMAGE,
+ ]
+ ),
+ ],
+)
+def test_haproxy_validation_errors(args):
+ with pytest.raises(_cephadm.Error):
+ with with_cephadm_ctx([]) as ctx:
+ _cephadm.HAproxy(ctx, *args)
+
+
+def test_haproxy_init():
+ with with_cephadm_ctx([]) as ctx:
+ ctx.config_json = json.dumps(good_haproxy_json())
+ ctx.image = SAMPLE_HAPROXY_IMAGE
+ hap = _cephadm.HAproxy.init(
+ ctx,
+ SAMPLE_UUID,
+ "wilma",
+ )
+ assert hap.fsid == SAMPLE_UUID
+ assert hap.daemon_id == "wilma"
+ assert hap.image == SAMPLE_HAPROXY_IMAGE
+
+
+def test_haproxy_container_mounts():
+ with with_cephadm_ctx([]) as ctx:
+ hap = _cephadm.HAproxy(
+ ctx,
+ SAMPLE_UUID,
+ "wilma",
+ good_haproxy_json(),
+ SAMPLE_HAPROXY_IMAGE,
+ )
+ cmounts = hap.get_container_mounts("/var/tmp")
+ assert len(cmounts) == 1
+ assert cmounts["/var/tmp/haproxy"] == "/var/lib/haproxy"
+
+
+def test_haproxy_get_daemon_name():
+ with with_cephadm_ctx([]) as ctx:
+ hap = _cephadm.HAproxy(
+ ctx,
+ SAMPLE_UUID,
+ "wilma",
+ good_haproxy_json(),
+ SAMPLE_HAPROXY_IMAGE,
+ )
+ assert hap.get_daemon_name() == "haproxy.wilma"
+
+
+def test_haproxy_get_container_name():
+ with with_cephadm_ctx([]) as ctx:
+ hap = _cephadm.HAproxy(
+ ctx,
+ SAMPLE_UUID,
+ "wilma",
+ good_haproxy_json(),
+ SAMPLE_HAPROXY_IMAGE,
+ )
+ name1 = hap.get_container_name()
+ assert (
+ name1 == "ceph-2d018a3f-8a8f-4cb9-a7cf-48bebb2cbaae-haproxy.wilma"
+ )
+ name2 = hap.get_container_name(desc="extra")
+ assert (
+ name2
+ == "ceph-2d018a3f-8a8f-4cb9-a7cf-48bebb2cbaae-haproxy.wilma-extra"
+ )
+
+
+def test_haproxy_get_daemon_args():
+ with with_cephadm_ctx([]) as ctx:
+ hap = _cephadm.HAproxy(
+ ctx,
+ SAMPLE_UUID,
+ "wilma",
+ good_haproxy_json(),
+ SAMPLE_HAPROXY_IMAGE,
+ )
+ args = hap.get_daemon_args()
+ assert args == ["haproxy", "-f", "/var/lib/haproxy/haproxy.cfg"]
+
+
+@mock.patch("cephadm.logger")
+def test_haproxy_create_daemon_dirs(_logger, cephadm_fs):
+ with with_cephadm_ctx([]) as ctx:
+ hap = _cephadm.HAproxy(
+ ctx,
+ SAMPLE_UUID,
+ "wilma",
+ good_haproxy_json(),
+ SAMPLE_HAPROXY_IMAGE,
+ )
+ with pytest.raises(OSError):
+ hap.create_daemon_dirs("/var/tmp", 45, 54)
+ cephadm_fs.create_dir("/var/tmp")
+ hap.create_daemon_dirs("/var/tmp", 45, 54)
+ # TODO: make assertions about the dirs created
+
+
+def test_haproxy_extract_uid_gid_haproxy():
+ with with_cephadm_ctx([]) as ctx:
+ hap = _cephadm.HAproxy(
+ ctx,
+ SAMPLE_UUID,
+ "wilma",
+ good_haproxy_json(),
+ SAMPLE_HAPROXY_IMAGE,
+ )
+ with mock.patch("cephadm.CephContainer") as cc:
+ cc.return_value.run.return_value = "500 500"
+ uid, gid = hap.extract_uid_gid_haproxy()
+ cc.return_value.run.assert_called()
+ assert uid == 500
+ assert gid == 500
+
+
+def test_haproxy_get_sysctl_settings():
+ with with_cephadm_ctx([]) as ctx:
+ hap = _cephadm.HAproxy(
+ ctx,
+ SAMPLE_UUID,
+ "wilma",
+ good_haproxy_json(),
+ SAMPLE_HAPROXY_IMAGE,
+ )
+ ss = hap.get_sysctl_settings()
+ assert len(ss) == 3
+
+
+@pytest.mark.parametrize(
+ "args",
+ # args: <fsid>, <daemon_id>, <config_json>, <image>
+ [
+ # fail due to: invalid fsid
+ (
+ [
+ "foobar",
+ "barney",
+ good_keepalived_json(),
+ SAMPLE_KEEPALIVED_IMAGE,
+ ]
+ ),
+ # fail due to: invalid daemon_id
+ ([SAMPLE_UUID, "", good_keepalived_json(), SAMPLE_KEEPALIVED_IMAGE]),
+ # fail due to: invalid image
+ ([SAMPLE_UUID, "barney", good_keepalived_json(), ""]),
+ # fail due to: no files in config_json
+ (
+ [
+ SAMPLE_UUID,
+ "barney",
+ keepalived_json(files=False),
+ SAMPLE_KEEPALIVED_IMAGE,
+ ]
+ ),
+ ],
+)
+def test_keepalived_validation_errors(args):
+ with pytest.raises(_cephadm.Error):
+ with with_cephadm_ctx([]) as ctx:
+ _cephadm.Keepalived(ctx, *args)
+
+
+def test_keepalived_init():
+ with with_cephadm_ctx([]) as ctx:
+ ctx.config_json = json.dumps(good_keepalived_json())
+ ctx.image = SAMPLE_KEEPALIVED_IMAGE
+ kad = _cephadm.Keepalived.init(
+ ctx,
+ SAMPLE_UUID,
+ "barney",
+ )
+ assert kad.fsid == SAMPLE_UUID
+ assert kad.daemon_id == "barney"
+ assert kad.image == SAMPLE_KEEPALIVED_IMAGE
+
+
+def test_keepalived_container_mounts():
+ with with_cephadm_ctx([]) as ctx:
+ kad = _cephadm.Keepalived(
+ ctx,
+ SAMPLE_UUID,
+ "barney",
+ good_keepalived_json(),
+ SAMPLE_KEEPALIVED_IMAGE,
+ )
+ cmounts = kad.get_container_mounts("/var/tmp")
+ assert len(cmounts) == 1
+ assert (
+ cmounts["/var/tmp/keepalived.conf"]
+ == "/etc/keepalived/keepalived.conf"
+ )
+
+
+def test_keepalived_get_daemon_name():
+ with with_cephadm_ctx([]) as ctx:
+ kad = _cephadm.Keepalived(
+ ctx,
+ SAMPLE_UUID,
+ "barney",
+ good_keepalived_json(),
+ SAMPLE_KEEPALIVED_IMAGE,
+ )
+ assert kad.get_daemon_name() == "keepalived.barney"
+
+
+def test_keepalived_get_container_name():
+ with with_cephadm_ctx([]) as ctx:
+ kad = _cephadm.Keepalived(
+ ctx,
+ SAMPLE_UUID,
+ "barney",
+ good_keepalived_json(),
+ SAMPLE_KEEPALIVED_IMAGE,
+ )
+ name1 = kad.get_container_name()
+ assert (
+ name1
+ == "ceph-2d018a3f-8a8f-4cb9-a7cf-48bebb2cbaae-keepalived.barney"
+ )
+ name2 = kad.get_container_name(desc="extra")
+ assert (
+ name2
+ == "ceph-2d018a3f-8a8f-4cb9-a7cf-48bebb2cbaae-keepalived.barney-extra"
+ )
+
+
+def test_keepalived_get_container_envs():
+ with with_cephadm_ctx([]) as ctx:
+ kad = _cephadm.Keepalived(
+ ctx,
+ SAMPLE_UUID,
+ "barney",
+ good_keepalived_json(),
+ SAMPLE_KEEPALIVED_IMAGE,
+ )
+ args = kad.get_container_envs()
+ assert args == [
+ "KEEPALIVED_AUTOCONF=false",
+ "KEEPALIVED_CONF=/etc/keepalived/keepalived.conf",
+ "KEEPALIVED_CMD=/usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf",
+ "KEEPALIVED_DEBUG=false",
+ ]
+
+
+@mock.patch("cephadm.logger")
+def test_keepalived_create_daemon_dirs(_logger, cephadm_fs):
+ with with_cephadm_ctx([]) as ctx:
+ kad = _cephadm.Keepalived(
+ ctx,
+ SAMPLE_UUID,
+ "barney",
+ good_keepalived_json(),
+ SAMPLE_KEEPALIVED_IMAGE,
+ )
+ with pytest.raises(OSError):
+ kad.create_daemon_dirs("/var/tmp", 45, 54)
+ cephadm_fs.create_dir("/var/tmp")
+ kad.create_daemon_dirs("/var/tmp", 45, 54)
+ # TODO: make assertions about the dirs created
+
+
+def test_keepalived_extract_uid_gid_keepalived():
+ with with_cephadm_ctx([]) as ctx:
+ kad = _cephadm.Keepalived(
+ ctx,
+ SAMPLE_UUID,
+ "barney",
+ good_keepalived_json(),
+ SAMPLE_KEEPALIVED_IMAGE,
+ )
+ with mock.patch("cephadm.CephContainer") as cc:
+ cc.return_value.run.return_value = "500 500"
+ uid, gid = kad.extract_uid_gid_keepalived()
+ cc.return_value.run.assert_called()
+ assert uid == 500
+ assert gid == 500
+
+
+def test_keepalived_get_sysctl_settings():
+ with with_cephadm_ctx([]) as ctx:
+ kad = _cephadm.Keepalived(
+ ctx,
+ SAMPLE_UUID,
+ "barney",
+ good_keepalived_json(),
+ SAMPLE_KEEPALIVED_IMAGE,
+ )
+ ss = kad.get_sysctl_settings()
+ assert len(ss) == 3
diff --git a/src/cephadm/tests/test_networks.py b/src/cephadm/tests/test_networks.py
new file mode 100644
index 000000000..7c0575046
--- /dev/null
+++ b/src/cephadm/tests/test_networks.py
@@ -0,0 +1,233 @@
+import json
+from textwrap import dedent
+from unittest import mock
+
+import pytest
+
+from tests.fixtures import with_cephadm_ctx, cephadm_fs, import_cephadm
+
+_cephadm = import_cephadm()
+
+
+class TestCommandListNetworks:
+ @pytest.mark.parametrize("test_input, expected", [
+ (
+ dedent("""
+ default via 192.168.178.1 dev enxd89ef3f34260 proto dhcp metric 100
+ 10.0.0.0/8 via 10.4.0.1 dev tun0 proto static metric 50
+ 10.3.0.0/21 via 10.4.0.1 dev tun0 proto static metric 50
+ 10.4.0.1 dev tun0 proto kernel scope link src 10.4.0.2 metric 50
+ 137.1.0.0/16 via 10.4.0.1 dev tun0 proto static metric 50
+ 138.1.0.0/16 via 10.4.0.1 dev tun0 proto static metric 50
+ 139.1.0.0/16 via 10.4.0.1 dev tun0 proto static metric 50
+ 140.1.0.0/17 via 10.4.0.1 dev tun0 proto static metric 50
+ 141.1.0.0/16 via 10.4.0.1 dev tun0 proto static metric 50
+ 172.16.100.34 via 172.16.100.34 dev eth1 proto kernel scope link src 172.16.100.34
+ 192.168.122.1 dev ens3 proto dhcp scope link src 192.168.122.236 metric 100
+ 169.254.0.0/16 dev docker0 scope link metric 1000
+ 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1
+ 192.168.39.0/24 dev virbr1 proto kernel scope link src 192.168.39.1 linkdown
+ 192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1 linkdown
+ 192.168.178.0/24 dev enxd89ef3f34260 proto kernel scope link src 192.168.178.28 metric 100
+ 192.168.178.1 dev enxd89ef3f34260 proto static scope link metric 100
+ 195.135.221.12 via 192.168.178.1 dev enxd89ef3f34260 proto static metric 100
+ """),
+ {
+ '172.16.100.34/32': {'eth1': {'172.16.100.34'}},
+ '192.168.122.1/32': {'ens3': {'192.168.122.236'}},
+ '10.4.0.1/32': {'tun0': {'10.4.0.2'}},
+ '172.17.0.0/16': {'docker0': {'172.17.0.1'}},
+ '192.168.39.0/24': {'virbr1': {'192.168.39.1'}},
+ '192.168.122.0/24': {'virbr0': {'192.168.122.1'}},
+ '192.168.178.0/24': {'enxd89ef3f34260': {'192.168.178.28'}}
+ }
+ ), (
+ dedent("""
+ default via 10.3.64.1 dev eno1 proto static metric 100
+ 10.3.64.0/24 dev eno1 proto kernel scope link src 10.3.64.23 metric 100
+ 10.3.64.0/24 dev eno1 proto kernel scope link src 10.3.64.27 metric 100
+ 10.88.0.0/16 dev cni-podman0 proto kernel scope link src 10.88.0.1 linkdown
+ 172.21.0.0/20 via 172.21.3.189 dev tun0
+ 172.21.1.0/20 via 172.21.3.189 dev tun0
+ 172.21.2.1 via 172.21.3.189 dev tun0
+ 172.21.3.1 dev tun0 proto kernel scope link src 172.21.3.2
+ 172.21.4.0/24 via 172.21.3.1 dev tun0
+ 172.21.5.0/24 via 172.21.3.1 dev tun0
+ 172.21.6.0/24 via 172.21.3.1 dev tun0
+ 172.21.7.0/24 via 172.21.3.1 dev tun0
+ 192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1 linkdown
+ 192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1 linkdown
+ 192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1 linkdown
+ 192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1 linkdown
+ """),
+ {
+ '10.3.64.0/24': {'eno1': {'10.3.64.23', '10.3.64.27'}},
+ '10.88.0.0/16': {'cni-podman0': {'10.88.0.1'}},
+ '172.21.3.1/32': {'tun0': {'172.21.3.2'}},
+ '192.168.122.0/24': {'virbr0': {'192.168.122.1'}}
+ }
+ ),
+ ])
+ def test_parse_ipv4_route(self, test_input, expected):
+ assert _cephadm._parse_ipv4_route(test_input) == expected
+
+ @pytest.mark.parametrize("test_routes, test_ips, expected", [
+ (
+ dedent("""
+ ::1 dev lo proto kernel metric 256 pref medium
+ fe80::/64 dev eno1 proto kernel metric 100 pref medium
+ fe80::/64 dev br-3d443496454c proto kernel metric 256 linkdown pref medium
+ fe80::/64 dev tun0 proto kernel metric 256 pref medium
+ fe80::/64 dev br-4355f5dbb528 proto kernel metric 256 pref medium
+ fe80::/64 dev docker0 proto kernel metric 256 linkdown pref medium
+ fe80::/64 dev cni-podman0 proto kernel metric 256 linkdown pref medium
+ fe80::/64 dev veth88ba1e8 proto kernel metric 256 pref medium
+ fe80::/64 dev vethb6e5fc7 proto kernel metric 256 pref medium
+ fe80::/64 dev vethaddb245 proto kernel metric 256 pref medium
+ fe80::/64 dev vethbd14d6b proto kernel metric 256 pref medium
+ fe80::/64 dev veth13e8fd2 proto kernel metric 256 pref medium
+ fe80::/64 dev veth1d3aa9e proto kernel metric 256 pref medium
+ fe80::/64 dev vethe485ca9 proto kernel metric 256 pref medium
+ """),
+ dedent("""
+ 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 state UNKNOWN qlen 1000
+ inet6 ::1/128 scope host
+ valid_lft forever preferred_lft forever
+ 2: eno1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP qlen 1000
+ inet6 fe80::225:90ff:fee5:26e8/64 scope link noprefixroute
+ valid_lft forever preferred_lft forever
+ 6: br-3d443496454c: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 state DOWN
+ inet6 fe80::42:23ff:fe9d:ee4/64 scope link
+ valid_lft forever preferred_lft forever
+ 7: br-4355f5dbb528: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP
+ inet6 fe80::42:6eff:fe35:41fe/64 scope link
+ valid_lft forever preferred_lft forever
+ 8: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 state DOWN
+ inet6 fe80::42:faff:fee6:40a0/64 scope link
+ valid_lft forever preferred_lft forever
+ 11: tun0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1500 state UNKNOWN qlen 100
+ inet6 fe80::98a6:733e:dafd:350/64 scope link stable-privacy
+ valid_lft forever preferred_lft forever
+ 28: cni-podman0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 state DOWN qlen 1000
+ inet6 fe80::3449:cbff:fe89:b87e/64 scope link
+ valid_lft forever preferred_lft forever
+ 31: vethaddb245@if30: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP
+ inet6 fe80::90f7:3eff:feed:a6bb/64 scope link
+ valid_lft forever preferred_lft forever
+ 33: veth88ba1e8@if32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP
+ inet6 fe80::d:f5ff:fe73:8c82/64 scope link
+ valid_lft forever preferred_lft forever
+ 35: vethbd14d6b@if34: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP
+ inet6 fe80::b44f:8ff:fe6f:813d/64 scope link
+ valid_lft forever preferred_lft forever
+ 37: vethb6e5fc7@if36: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP
+ inet6 fe80::4869:c6ff:feaa:8afe/64 scope link
+ valid_lft forever preferred_lft forever
+ 39: veth13e8fd2@if38: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP
+ inet6 fe80::78f4:71ff:fefe:eb40/64 scope link
+ valid_lft forever preferred_lft forever
+ 41: veth1d3aa9e@if40: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP
+ inet6 fe80::24bd:88ff:fe28:5b18/64 scope link
+ valid_lft forever preferred_lft forever
+ 43: vethe485ca9@if42: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP
+ inet6 fe80::6425:87ff:fe42:b9f0/64 scope link
+ valid_lft forever preferred_lft forever
+ """),
+ {
+ "fe80::/64": {
+ "eno1": {"fe80::225:90ff:fee5:26e8"},
+ "br-3d443496454c": {"fe80::42:23ff:fe9d:ee4"},
+ "tun0": {"fe80::98a6:733e:dafd:350"},
+ "br-4355f5dbb528": {"fe80::42:6eff:fe35:41fe"},
+ "docker0": {"fe80::42:faff:fee6:40a0"},
+ "cni-podman0": {"fe80::3449:cbff:fe89:b87e"},
+ "veth88ba1e8": {"fe80::d:f5ff:fe73:8c82"},
+ "vethb6e5fc7": {"fe80::4869:c6ff:feaa:8afe"},
+ "vethaddb245": {"fe80::90f7:3eff:feed:a6bb"},
+ "vethbd14d6b": {"fe80::b44f:8ff:fe6f:813d"},
+ "veth13e8fd2": {"fe80::78f4:71ff:fefe:eb40"},
+ "veth1d3aa9e": {"fe80::24bd:88ff:fe28:5b18"},
+ "vethe485ca9": {"fe80::6425:87ff:fe42:b9f0"},
+ }
+ }
+ ),
+ (
+ dedent("""
+ ::1 dev lo proto kernel metric 256 pref medium
+ 2001:1458:301:eb::100:1a dev ens20f0 proto kernel metric 100 pref medium
+ 2001:1458:301:eb::/64 dev ens20f0 proto ra metric 100 pref medium
+ fd01:1458:304:5e::/64 dev ens20f0 proto ra metric 100 pref medium
+ fe80::/64 dev ens20f0 proto kernel metric 100 pref medium
+ default proto ra metric 100
+ nexthop via fe80::46ec:ce00:b8a0:d3c8 dev ens20f0 weight 1
+ nexthop via fe80::46ec:ce00:b8a2:33c8 dev ens20f0 weight 1 pref medium
+ """),
+ dedent("""
+ 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 state UNKNOWN qlen 1000
+ inet6 ::1/128 scope host
+ valid_lft forever preferred_lft forever
+ 2: ens20f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP qlen 1000
+ inet6 2001:1458:301:eb::100:1a/128 scope global dynamic noprefixroute
+ valid_lft 590879sec preferred_lft 590879sec
+ inet6 fe80::2e60:cff:fef8:da41/64 scope link noprefixroute
+ valid_lft forever preferred_lft forever
+ inet6 fe80::2e60:cff:fef8:da41/64 scope link noprefixroute
+ valid_lft forever preferred_lft forever
+ inet6 fe80::2e60:cff:fef8:da41/64 scope link noprefixroute
+ valid_lft forever preferred_lft forever
+ """),
+ {
+ '2001:1458:301:eb::100:1a/128': {
+ 'ens20f0': {
+ '2001:1458:301:eb::100:1a'
+ },
+ },
+ '2001:1458:301:eb::/64': {
+ 'ens20f0': set(),
+ },
+ 'fe80::/64': {
+ 'ens20f0': {'fe80::2e60:cff:fef8:da41'},
+ },
+ 'fd01:1458:304:5e::/64': {
+ 'ens20f0': set()
+ },
+ }
+ ),
+ (
+ dedent("""
+ ::1 dev lo proto kernel metric 256 pref medium
+ fe80::/64 dev ceph-brx proto kernel metric 256 pref medium
+ fe80::/64 dev brx.0 proto kernel metric 256 pref medium
+ default via fe80::327c:5e00:6487:71e0 dev enp3s0f1 proto ra metric 1024 expires 1790sec hoplimit 64 pref medium """),
+ dedent("""
+ 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 state UNKNOWN qlen 1000
+ inet6 ::1/128 scope host
+ valid_lft forever preferred_lft forever
+ 5: enp3s0f1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP qlen 1000
+ inet6 fe80::ec4:7aff:fe8f:cb83/64 scope link noprefixroute
+ valid_lft forever preferred_lft forever
+ 6: ceph-brx: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP qlen 1000
+ inet6 fe80::d8a1:69ff:fede:8f58/64 scope link
+ valid_lft forever preferred_lft forever
+ 7: brx.0@eno1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP qlen 1000
+ inet6 fe80::a4cb:54ff:fecc:f2a2/64 scope link
+ valid_lft forever preferred_lft forever
+ """),
+ {
+ 'fe80::/64': {
+ 'brx.0': {'fe80::a4cb:54ff:fecc:f2a2'},
+ 'ceph-brx': {'fe80::d8a1:69ff:fede:8f58'}
+ }
+ }
+ ),
+ ])
+ def test_parse_ipv6_route(self, test_routes, test_ips, expected):
+ assert _cephadm._parse_ipv6_route(test_routes, test_ips) == expected
+
+ @mock.patch.object(_cephadm, 'call_throws', return_value=('10.4.0.1 dev tun0 proto kernel scope link src 10.4.0.2 metric 50\n', '', ''))
+ def test_command_list_networks(self, cephadm_fs, capsys):
+ with with_cephadm_ctx([]) as ctx:
+ _cephadm.command_list_networks(ctx)
+ assert json.loads(capsys.readouterr().out) == {
+ '10.4.0.1/32': {'tun0': ['10.4.0.2']}
+ }
diff --git a/src/cephadm/tests/test_nfs.py b/src/cephadm/tests/test_nfs.py
new file mode 100644
index 000000000..0649ef934
--- /dev/null
+++ b/src/cephadm/tests/test_nfs.py
@@ -0,0 +1,239 @@
+from unittest import mock
+import json
+
+import pytest
+
+from tests.fixtures import with_cephadm_ctx, cephadm_fs, import_cephadm
+
+_cephadm = import_cephadm()
+
+
+SAMPLE_UUID = "2d018a3f-8a8f-4cb9-a7cf-48bebb2cbaae"
+
+
+def good_nfs_json():
+ return nfs_json(
+ pool=True,
+ files=True,
+ )
+
+
+def nfs_json(**kwargs):
+ result = {}
+ if kwargs.get("pool"):
+ result["pool"] = "party"
+ if kwargs.get("files"):
+ result["files"] = {
+ "ganesha.conf": "",
+ }
+ if kwargs.get("rgw_content"):
+ result["rgw"] = dict(kwargs["rgw_content"])
+ elif kwargs.get("rgw"):
+ result["rgw"] = {
+ "keyring": "foobar",
+ "user": "jsmith",
+ }
+ return result
+
+
+@pytest.mark.parametrize(
+ "args,kwargs",
+ # args: <fsid>, <daemon_id>, <config_json>; kwargs: <image>
+ [
+ # fail due to: invalid fsid
+ (["foobar", "fred", good_nfs_json()], {}),
+ # fail due to: invalid daemon_id
+ ([SAMPLE_UUID, "", good_nfs_json()], {}),
+ # fail due to: invalid image
+ (
+ [SAMPLE_UUID, "fred", good_nfs_json()],
+ {"image": ""},
+ ),
+ # fail due to: no files in config_json
+ (
+ [
+ SAMPLE_UUID,
+ "fred",
+ nfs_json(pool=True),
+ ],
+ {},
+ ),
+ # fail due to: no pool in config_json
+ (
+ [
+ SAMPLE_UUID,
+ "fred",
+ nfs_json(files=True),
+ ],
+ {},
+ ),
+ # fail due to: bad rgw content
+ (
+ [
+ SAMPLE_UUID,
+ "fred",
+ nfs_json(pool=True, files=True, rgw_content={"foo": True}),
+ ],
+ {},
+ ),
+ # fail due to: rgw keyring given but no user
+ (
+ [
+ SAMPLE_UUID,
+ "fred",
+ nfs_json(
+ pool=True, files=True, rgw_content={"keyring": "foo"}
+ ),
+ ],
+ {},
+ ),
+ ],
+)
+def test_nfsganesha_validation_errors(args, kwargs):
+ with pytest.raises(_cephadm.Error):
+ with with_cephadm_ctx([]) as ctx:
+ _cephadm.NFSGanesha(ctx, *args, **kwargs)
+
+
+def test_nfsganesha_init():
+ with with_cephadm_ctx([]) as ctx:
+ ctx.config_json = json.dumps(good_nfs_json())
+ ctx.image = "test_image"
+ nfsg = _cephadm.NFSGanesha.init(
+ ctx,
+ SAMPLE_UUID,
+ "fred",
+ )
+ assert nfsg.fsid == SAMPLE_UUID
+ assert nfsg.daemon_id == "fred"
+ assert nfsg.pool == "party"
+
+
+def test_nfsganesha_container_mounts():
+ with with_cephadm_ctx([]) as ctx:
+ nfsg = _cephadm.NFSGanesha(
+ ctx,
+ SAMPLE_UUID,
+ "fred",
+ good_nfs_json(),
+ )
+ cmounts = nfsg.get_container_mounts("/var/tmp")
+ assert len(cmounts) == 3
+ assert cmounts["/var/tmp/config"] == "/etc/ceph/ceph.conf:z"
+ assert cmounts["/var/tmp/keyring"] == "/etc/ceph/keyring:z"
+ assert cmounts["/var/tmp/etc/ganesha"] == "/etc/ganesha:z"
+
+ with with_cephadm_ctx([]) as ctx:
+ nfsg = _cephadm.NFSGanesha(
+ ctx,
+ SAMPLE_UUID,
+ "fred",
+ nfs_json(pool=True, files=True, rgw=True),
+ )
+ cmounts = nfsg.get_container_mounts("/var/tmp")
+ assert len(cmounts) == 4
+ assert cmounts["/var/tmp/config"] == "/etc/ceph/ceph.conf:z"
+ assert cmounts["/var/tmp/keyring"] == "/etc/ceph/keyring:z"
+ assert cmounts["/var/tmp/etc/ganesha"] == "/etc/ganesha:z"
+ assert (
+ cmounts["/var/tmp/keyring.rgw"]
+ == "/var/lib/ceph/radosgw/ceph-jsmith/keyring:z"
+ )
+
+
+def test_nfsganesha_container_envs():
+ with with_cephadm_ctx([]) as ctx:
+ nfsg = _cephadm.NFSGanesha(
+ ctx,
+ SAMPLE_UUID,
+ "fred",
+ good_nfs_json(),
+ )
+ envs = nfsg.get_container_envs()
+ assert len(envs) == 1
+ assert envs[0] == "CEPH_CONF=/etc/ceph/ceph.conf"
+
+
+def test_nfsganesha_get_version():
+ with with_cephadm_ctx([]) as ctx:
+ nfsg = _cephadm.NFSGanesha(
+ ctx,
+ SAMPLE_UUID,
+ "fred",
+ good_nfs_json(),
+ )
+
+ with mock.patch("cephadm.call") as _call:
+ _call.return_value = ("NFS-Ganesha Release = V100", "", 0)
+ ver = nfsg.get_version(ctx, "fake_version")
+ _call.assert_called()
+ assert ver == "100"
+
+
+def test_nfsganesha_get_daemon_name():
+ with with_cephadm_ctx([]) as ctx:
+ nfsg = _cephadm.NFSGanesha(
+ ctx,
+ SAMPLE_UUID,
+ "fred",
+ good_nfs_json(),
+ )
+ assert nfsg.get_daemon_name() == "nfs.fred"
+
+
+def test_nfsganesha_get_container_name():
+ with with_cephadm_ctx([]) as ctx:
+ nfsg = _cephadm.NFSGanesha(
+ ctx,
+ SAMPLE_UUID,
+ "fred",
+ good_nfs_json(),
+ )
+ name1 = nfsg.get_container_name()
+ assert name1 == "ceph-2d018a3f-8a8f-4cb9-a7cf-48bebb2cbaae-nfs.fred"
+ name2 = nfsg.get_container_name(desc="extra")
+ assert (
+ name2 == "ceph-2d018a3f-8a8f-4cb9-a7cf-48bebb2cbaae-nfs.fred-extra"
+ )
+
+
+def test_nfsganesha_get_daemon_args():
+ with with_cephadm_ctx([]) as ctx:
+ nfsg = _cephadm.NFSGanesha(
+ ctx,
+ SAMPLE_UUID,
+ "fred",
+ good_nfs_json(),
+ )
+ args = nfsg.get_daemon_args()
+ assert args == ["-F", "-L", "STDERR"]
+
+
+@mock.patch("cephadm.logger")
+def test_nfsganesha_create_daemon_dirs(_logger, cephadm_fs):
+ with with_cephadm_ctx([]) as ctx:
+ nfsg = _cephadm.NFSGanesha(
+ ctx,
+ SAMPLE_UUID,
+ "fred",
+ good_nfs_json(),
+ )
+ with pytest.raises(OSError):
+ nfsg.create_daemon_dirs("/var/tmp", 45, 54)
+ cephadm_fs.create_dir("/var/tmp")
+ nfsg.create_daemon_dirs("/var/tmp", 45, 54)
+ # TODO: make assertions about the dirs created
+
+
+@mock.patch("cephadm.logger")
+def test_nfsganesha_create_daemon_dirs_rgw(_logger, cephadm_fs):
+ with with_cephadm_ctx([]) as ctx:
+ nfsg = _cephadm.NFSGanesha(
+ ctx,
+ SAMPLE_UUID,
+ "fred",
+ nfs_json(pool=True, files=True, rgw=True),
+ )
+ cephadm_fs.create_dir("/var/tmp")
+ nfsg.create_daemon_dirs("/var/tmp", 45, 54)
+ # TODO: make assertions about the dirs created
diff --git a/src/cephadm/tests/test_util_funcs.py b/src/cephadm/tests/test_util_funcs.py
new file mode 100644
index 000000000..270753a55
--- /dev/null
+++ b/src/cephadm/tests/test_util_funcs.py
@@ -0,0 +1,808 @@
+# Tests for assorted utility functions found within cephadm
+#
+from unittest import mock
+
+import functools
+import io
+import os
+import sys
+
+import pytest
+
+from tests.fixtures import with_cephadm_ctx, import_cephadm
+
+_cephadm = import_cephadm()
+
+
+class TestCopyTree:
+ def _copy_tree(self, *args, **kwargs):
+ with with_cephadm_ctx([]) as ctx:
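+            # extract_uid_gid normally probes a ceph container image for the
+            # ceph uid/gid; it is mocked here so the copy helpers simply use
+            # the current user's ids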
+ with mock.patch("cephadm.extract_uid_gid") as eug:
+ eug.return_value = (os.getuid(), os.getgid())
+ _cephadm.copy_tree(ctx, *args, **kwargs)
+
+ def test_one_dir(self, tmp_path):
+ """Copy one dir into a non-existing dest dir."""
+ src1 = tmp_path / "src1"
+ dst = tmp_path / "dst"
+ src1.mkdir(parents=True)
+
+ with (src1 / "foo.txt").open("w") as fh:
+ fh.write("hello\n")
+ fh.write("earth\n")
+
+ assert not (dst / "foo.txt").exists()
+
+ self._copy_tree([src1], dst)
+ assert (dst / "foo.txt").exists()
+
+ def test_one_existing_dir(self, tmp_path):
+ """Copy one dir into an existing dest dir."""
+ src1 = tmp_path / "src1"
+ dst = tmp_path / "dst"
+ src1.mkdir(parents=True)
+ dst.mkdir(parents=True)
+
+ with (src1 / "foo.txt").open("w") as fh:
+ fh.write("hello\n")
+ fh.write("earth\n")
+
+ assert not (dst / "src1").exists()
+
+ self._copy_tree([src1], dst)
+ assert (dst / "src1/foo.txt").exists()
+
+ def test_two_dirs(self, tmp_path):
+ """Copy two source directories into an existing dest dir."""
+ src1 = tmp_path / "src1"
+ src2 = tmp_path / "src2"
+ dst = tmp_path / "dst"
+ src1.mkdir(parents=True)
+ src2.mkdir(parents=True)
+ dst.mkdir(parents=True)
+
+ with (src1 / "foo.txt").open("w") as fh:
+ fh.write("hello\n")
+ fh.write("earth\n")
+ with (src2 / "bar.txt").open("w") as fh:
+ fh.write("goodbye\n")
+ fh.write("mars\n")
+
+ assert not (dst / "src1").exists()
+ assert not (dst / "src2").exists()
+
+ self._copy_tree([src1, src2], dst)
+ assert (dst / "src1/foo.txt").exists()
+ assert (dst / "src2/bar.txt").exists()
+
+ def test_one_dir_set_uid(self, tmp_path):
+        """Explicitly pass uid/gid values and assert these are passed to chown."""
+        # Because this test will often be run by non-root users, we mock os.chown
+        # to avoid running into permission issues.
+ src1 = tmp_path / "src1"
+ dst = tmp_path / "dst"
+ src1.mkdir(parents=True)
+
+ with (src1 / "foo.txt").open("w") as fh:
+ fh.write("hello\n")
+ fh.write("earth\n")
+
+ assert not (dst / "foo.txt").exists()
+
+ with mock.patch("os.chown") as _chown:
+ _chown.return_value = None
+ self._copy_tree([src1], dst, uid=0, gid=0)
+ assert len(_chown.mock_calls) >= 2
+ for c in _chown.mock_calls:
+ assert c == mock.call(mock.ANY, 0, 0)
+ assert (dst / "foo.txt").exists()
+
+
+class TestCopyFiles:
+ def _copy_files(self, *args, **kwargs):
+ with with_cephadm_ctx([]) as ctx:
+ with mock.patch("cephadm.extract_uid_gid") as eug:
+ eug.return_value = (os.getuid(), os.getgid())
+ _cephadm.copy_files(ctx, *args, **kwargs)
+
+ def test_one_file(self, tmp_path):
+ """Copy one file into the dest dir."""
+ file1 = tmp_path / "f1.txt"
+ dst = tmp_path / "dst"
+ dst.mkdir(parents=True)
+
+ with file1.open("w") as fh:
+ fh.write("its test time\n")
+
+ self._copy_files([file1], dst)
+ assert (dst / "f1.txt").exists()
+
+ def test_one_file_nodest(self, tmp_path):
+ """Copy one file to the given destination path."""
+ file1 = tmp_path / "f1.txt"
+ dst = tmp_path / "dst"
+
+ with file1.open("w") as fh:
+ fh.write("its test time\n")
+
+ self._copy_files([file1], dst)
+ assert not dst.is_dir()
+ assert dst.is_file()
+ assert dst.open("r").read() == "its test time\n"
+
+ def test_three_files(self, tmp_path):
+        """Copy three files into the dest dir."""
+ file1 = tmp_path / "f1.txt"
+ file2 = tmp_path / "f2.txt"
+ file3 = tmp_path / "f3.txt"
+ dst = tmp_path / "dst"
+ dst.mkdir(parents=True)
+
+ with file1.open("w") as fh:
+ fh.write("its test time\n")
+ with file2.open("w") as fh:
+ fh.write("f2\n")
+ with file3.open("w") as fh:
+ fh.write("f3\n")
+
+ self._copy_files([file1, file2, file3], dst)
+ assert (dst / "f1.txt").exists()
+ assert (dst / "f2.txt").exists()
+ assert (dst / "f3.txt").exists()
+
+ def test_three_files_nodest(self, tmp_path):
+ """Copy files to dest path (not a dir). This is not a useful operation."""
+ file1 = tmp_path / "f1.txt"
+ file2 = tmp_path / "f2.txt"
+ file3 = tmp_path / "f3.txt"
+ dst = tmp_path / "dst"
+
+ with file1.open("w") as fh:
+ fh.write("its test time\n")
+ with file2.open("w") as fh:
+ fh.write("f2\n")
+ with file3.open("w") as fh:
+ fh.write("f3\n")
+
+ self._copy_files([file1, file2, file3], dst)
+ assert not dst.is_dir()
+ assert dst.is_file()
+ assert dst.open("r").read() == "f3\n"
+
+ def test_one_file_set_uid(self, tmp_path):
+        """Explicitly pass uid/gid values and assert these are passed to chown."""
+        # Because this test will often be run by non-root users, we mock os.chown
+        # to avoid running into permission issues.
+ file1 = tmp_path / "f1.txt"
+ dst = tmp_path / "dst"
+ dst.mkdir(parents=True)
+
+ with file1.open("w") as fh:
+ fh.write("its test time\n")
+
+ assert not (dst / "f1.txt").exists()
+
+ with mock.patch("os.chown") as _chown:
+ _chown.return_value = None
+ self._copy_files([file1], dst, uid=0, gid=0)
+ assert len(_chown.mock_calls) >= 1
+ for c in _chown.mock_calls:
+ assert c == mock.call(mock.ANY, 0, 0)
+ assert (dst / "f1.txt").exists()
+
+
+class TestMoveFiles:
+ def _move_files(self, *args, **kwargs):
+ with with_cephadm_ctx([]) as ctx:
+ with mock.patch("cephadm.extract_uid_gid") as eug:
+ eug.return_value = (os.getuid(), os.getgid())
+ _cephadm.move_files(ctx, *args, **kwargs)
+
+ def test_one_file(self, tmp_path):
+ """Move a named file to test dest path."""
+ file1 = tmp_path / "f1.txt"
+ dst = tmp_path / "dst"
+
+ with file1.open("w") as fh:
+ fh.write("lets moove\n")
+
+ assert not dst.exists()
+ assert file1.is_file()
+
+ self._move_files([file1], dst)
+ assert dst.is_file()
+ assert not file1.exists()
+
+ def test_one_file_destdir(self, tmp_path):
+ """Move a file into an existing dest dir."""
+ file1 = tmp_path / "f1.txt"
+ dst = tmp_path / "dst"
+ dst.mkdir(parents=True)
+
+ with file1.open("w") as fh:
+ fh.write("lets moove\n")
+
+ assert not (dst / "f1.txt").exists()
+ assert file1.is_file()
+
+ self._move_files([file1], dst)
+ assert (dst / "f1.txt").is_file()
+ assert not file1.exists()
+
+ def test_one_file_one_link(self, tmp_path):
+ """Move a file and a symlink to that file to a dest dir."""
+ file1 = tmp_path / "f1.txt"
+ link1 = tmp_path / "lnk"
+ dst = tmp_path / "dst"
+ dst.mkdir(parents=True)
+
+ with file1.open("w") as fh:
+ fh.write("lets moove\n")
+ os.symlink("f1.txt", link1)
+
+ assert not (dst / "f1.txt").exists()
+ assert file1.is_file()
+ assert link1.exists()
+
+ self._move_files([file1, link1], dst)
+ assert (dst / "f1.txt").is_file()
+ assert (dst / "lnk").is_symlink()
+ assert not file1.exists()
+ assert not link1.exists()
+ assert (dst / "f1.txt").open("r").read() == "lets moove\n"
+ assert (dst / "lnk").open("r").read() == "lets moove\n"
+
+ def test_one_file_set_uid(self, tmp_path):
+        """Explicitly pass uid/gid values and assert these are passed to chown."""
+        # Because this test will often be run by non-root users, we mock os.chown
+        # to avoid running into permission issues.
+ file1 = tmp_path / "f1.txt"
+ dst = tmp_path / "dst"
+
+ with file1.open("w") as fh:
+ fh.write("lets moove\n")
+
+ assert not dst.exists()
+ assert file1.is_file()
+
+ with mock.patch("os.chown") as _chown:
+ _chown.return_value = None
+ self._move_files([file1], dst, uid=0, gid=0)
+ assert len(_chown.mock_calls) >= 1
+ for c in _chown.mock_calls:
+ assert c == mock.call(mock.ANY, 0, 0)
+ assert dst.is_file()
+ assert not file1.exists()
+
+
+def test_recursive_chown(tmp_path):
+ d1 = tmp_path / "dir1"
+ d2 = d1 / "dir2"
+ f1 = d2 / "file1.txt"
+ d2.mkdir(parents=True)
+
+ with f1.open("w") as fh:
+ fh.write("low down\n")
+
+ with mock.patch("os.chown") as _chown:
+ _chown.return_value = None
+ _cephadm.recursive_chown(str(d1), uid=500, gid=500)
+ assert len(_chown.mock_calls) == 3
+ assert _chown.mock_calls[0] == mock.call(str(d1), 500, 500)
+ assert _chown.mock_calls[1] == mock.call(str(d2), 500, 500)
+ assert _chown.mock_calls[2] == mock.call(str(f1), 500, 500)
+
+
+class TestFindExecutable:
+ def test_standard_exe(self):
+ # pretty much every system will have `true` on the path. It's a safe choice
+ # for the first assertion
+ exe = _cephadm.find_executable("true")
+ assert exe.endswith("true")
+
+ def test_custom_path(self, tmp_path):
+ foo_sh = tmp_path / "foo.sh"
+ with open(foo_sh, "w") as fh:
+ fh.write("#!/bin/sh\n")
+ fh.write("echo foo\n")
+ foo_sh.chmod(0o755)
+
+ exe = _cephadm.find_executable(foo_sh)
+ assert str(exe) == str(foo_sh)
+
+ def test_no_path(self, monkeypatch):
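+        # with PATH unset, find_executable is expected to fall back to a
+        # default search path (e.g. os.confstr('CS_PATH') / os.defpath); the
+        # next test covers the case where confstr itself fails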
+ monkeypatch.delenv("PATH")
+ exe = _cephadm.find_executable("true")
+ assert exe.endswith("true")
+
+ def test_no_path_no_confstr(self, monkeypatch):
+ def _fail(_):
+ raise ValueError("fail")
+
+ monkeypatch.delenv("PATH")
+ monkeypatch.setattr("os.confstr", _fail)
+ exe = _cephadm.find_executable("true")
+ assert exe.endswith("true")
+
+ def test_unset_path(self):
+ exe = _cephadm.find_executable("true", path="")
+ assert exe is None
+
+ def test_no_such_exe(self):
+ exe = _cephadm.find_executable("foo_bar-baz.noway")
+ assert exe is None
+
+
+def test_find_program():
+ exe = _cephadm.find_program("true")
+ assert exe.endswith("true")
+
+ with pytest.raises(ValueError):
+ _cephadm.find_program("foo_bar-baz.noway")
+
+
+def _mk_fake_call(enabled, active):
+ def _fake_call(ctx, cmd, **kwargs):
+ if "is-enabled" in cmd:
+ if isinstance(enabled, Exception):
+ raise enabled
+ return enabled
+ if "is-active" in cmd:
+ if isinstance(active, Exception):
+ raise active
+ return active
+ raise ValueError("should not get here")
+
+ return _fake_call
+
+
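+# Each *_out value below is the (stdout, stderr, returncode) triple the fake
+# call() returns for `is-enabled` / `is-active` (or an exception to simulate a
+# failed exec); expected is the (enabled, state, installed) result of check_unit.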
+@pytest.mark.parametrize(
+ "enabled_out, active_out, expected",
+ [
+ (
+ # ok, all is well
+ ("", "", 0),
+ ("active", "", 0),
+ (True, "running", True),
+ ),
+ (
+ # disabled, unknown if active
+ ("disabled", "", 1),
+ ("", "", 0),
+ (False, "unknown", True),
+ ),
+ (
+            # is-enabled error (not disabled), unknown if active
+ ("bleh", "", 1),
+ ("", "", 0),
+ (False, "unknown", False),
+ ),
+ (
+ # is-enabled ok, inactive is stopped
+ ("", "", 0),
+ ("inactive", "", 0),
+ (True, "stopped", True),
+ ),
+ (
+ # is-enabled ok, failed is error
+ ("", "", 0),
+ ("failed", "", 0),
+ (True, "error", True),
+ ),
+ (
+ # is-enabled ok, auto-restart is error
+ ("", "", 0),
+ ("auto-restart", "", 0),
+ (True, "error", True),
+ ),
+ (
+ # error exec'ing is-enabled cmd
+ ValueError("bonk"),
+ ("active", "", 0),
+ (False, "running", False),
+ ),
+ (
+            # error exec'ing is-active cmd
+ ("", "", 0),
+ ValueError("blat"),
+ (True, "unknown", True),
+ ),
+ ],
+)
+def test_check_unit(enabled_out, active_out, expected):
+ with with_cephadm_ctx([]) as ctx:
+ _cephadm.call.side_effect = _mk_fake_call(
+ enabled=enabled_out,
+ active=active_out,
+ )
+ enabled, state, installed = _cephadm.check_unit(ctx, "foobar")
+ assert (enabled, state, installed) == expected
+
+
+class FakeEnabler:
+ def __init__(self, should_be_called):
+ self._should_be_called = should_be_called
+ self._services = []
+
+ def enable_service(self, service):
+ self._services.append(service)
+
+ def check_expected(self):
+ if not self._should_be_called:
+ assert not self._services
+ return
+        # there are currently seven chrony/ntp-type services that
+        # cephadm looks for. Make sure it probed for each of them
+        # (or more, in case someone adds to the list).
+ assert len(self._services) >= 7
+ assert "chrony.service" in self._services
+ assert "ntp.service" in self._services
+
+
+@pytest.mark.parametrize(
+ "call_fn, enabler, expected",
+ [
+ # Test that time sync services are not enabled
+ (
+ _mk_fake_call(
+ enabled=("", "", 1),
+ active=("", "", 1),
+ ),
+ None,
+ False,
+ ),
+ # Test that time sync service is enabled
+ (
+ _mk_fake_call(
+ enabled=("", "", 0),
+ active=("active", "", 0),
+ ),
+ None,
+ True,
+ ),
+        # Test that time sync is not enabled, and try to enable the services.
+        # The service must be installed but not running for the enabler to be
+        # called; it should be called with every known service name.
+ (
+ _mk_fake_call(
+ enabled=("disabled", "", 1),
+ active=("", "", 1),
+ ),
+ FakeEnabler(True),
+ False,
+ ),
+ # Test that time sync is enabled, with an enabler passed which
+ # will check that the enabler was never called.
+ (
+ _mk_fake_call(
+ enabled=("", "", 0),
+ active=("active", "", 0),
+ ),
+ FakeEnabler(False),
+ True,
+ ),
+ ],
+)
+def test_check_time_sync(call_fn, enabler, expected):
+ """The check_time_sync call actually checks if a time synchronization service
+ is enabled. It is also the only consumer of check_units.
+ """
+ with with_cephadm_ctx([]) as ctx:
+ _cephadm.call.side_effect = call_fn
+ result = _cephadm.check_time_sync(ctx, enabler=enabler)
+ assert result == expected
+ if enabler is not None:
+ enabler.check_expected()
+
+
+@pytest.mark.parametrize(
+ "content, expected",
+ [
+ (
+ """#JUNK
+ FOO=1
+ """,
+ (None, None, None),
+ ),
+ (
+ """# A sample from a real centos system
+NAME="CentOS Stream"
+VERSION="8"
+ID="centos"
+ID_LIKE="rhel fedora"
+VERSION_ID="8"
+PLATFORM_ID="platform:el8"
+PRETTY_NAME="CentOS Stream 8"
+ANSI_COLOR="0;31"
+CPE_NAME="cpe:/o:centos:centos:8"
+HOME_URL="https://centos.org/"
+BUG_REPORT_URL="https://bugzilla.redhat.com/"
+REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 8"
+REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
+ """,
+ ("centos", "8", None),
+ ),
+ (
+ """# Minimal but complete, made up vals
+ID="hpec"
+VERSION_ID="33"
+VERSION_CODENAME="hpec nimda"
+ """,
+ ("hpec", "33", "hpec nimda"),
+ ),
+ (
+ """# Minimal but complete, no quotes
+ID=hpec
+VERSION_ID=33
+VERSION_CODENAME=hpec nimda
+ """,
+ ("hpec", "33", "hpec nimda"),
+ ),
+ ],
+)
+def test_get_distro(monkeypatch, content, expected):
+ def _fake_open(*args, **kwargs):
+ return io.StringIO(content)
+
+ monkeypatch.setattr("builtins.open", _fake_open)
+ assert _cephadm.get_distro() == expected
+
+
+class FakeContext:
+ """FakeContext is a minimal type for passing as a ctx, when
+ with_cephadm_ctx is not appropriate (it enables too many mocks, etc).
+ """
+
+ timeout = 30
+
+
+def _has_non_zero_exit(clog):
+ assert any("Non-zero exit" in ll for _, _, ll in clog.record_tuples)
+
+
+def _has_values_somewhere(clog, values, non_zero=True):
+ if non_zero:
+ _has_non_zero_exit(clog)
+ for value in values:
+ assert any(value in ll for _, _, ll in clog.record_tuples)
+
+
+@pytest.mark.parametrize(
+ "pyline, expected, call_kwargs, log_check",
+ [
+ pytest.param(
+ "import time; time.sleep(0.1)",
+ ("", "", 0),
+ {},
+ None,
+ id="brief-sleep",
+ ),
+ pytest.param(
+ "import sys; sys.exit(2)",
+ ("", "", 2),
+ {},
+ _has_non_zero_exit,
+ id="exit-non-zero",
+ ),
+ pytest.param(
+ "import sys; sys.exit(0)",
+ ("", "", 0),
+ {"desc": "success"},
+ None,
+ id="success-with-desc",
+ ),
+ pytest.param(
+ "print('foo'); print('bar')",
+ ("foo\nbar\n", "", 0),
+ {"desc": "stdout"},
+ None,
+ id="stdout-print",
+ ),
+ pytest.param(
+ "import sys; sys.stderr.write('la\\nla\\nla\\n')",
+ ("", "la\nla\nla\n", 0),
+ {"desc": "stderr"},
+ None,
+ id="stderr-print",
+ ),
+ pytest.param(
+ "for i in range(501): print(i, flush=True)",
+ lambda r: r[2] == 0 and r[1] == "" and "500" in r[0].splitlines(),
+ {},
+ None,
+ id="stdout-long",
+ ),
+ pytest.param(
+ "for i in range(1000000): print(i, flush=True)",
+ lambda r: r[2] == 0
+ and r[1] == ""
+ and len(r[0].splitlines()) == 1000000,
+ {},
+ None,
+ id="stdout-very-long",
+ ),
+ pytest.param(
+ "import sys; sys.stderr.write('pow\\noof\\nouch\\n'); sys.exit(1)",
+ ("", "pow\noof\nouch\n", 1),
+ {"desc": "stderr"},
+ functools.partial(
+ _has_values_somewhere,
+ values=["pow", "oof", "ouch"],
+ non_zero=True,
+ ),
+ id="stderr-logged-non-zero",
+ ),
+ pytest.param(
+ "import time; time.sleep(4)",
+ ("", "", 124),
+ {"timeout": 1},
+ None,
+ id="long-sleep",
+ ),
+ pytest.param(
+ "import time\nfor i in range(100):\n\tprint(i, flush=True); time.sleep(0.01)",
+ ("", "", 124),
+ {"timeout": 0.5},
+ None,
+ id="slow-print-timeout",
+ ),
+        # Commands that time out collect no logs and return empty std{out,err} strings
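+        # (they report return code 124, matching the coreutils `timeout` convention)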
+ ],
+)
+def test_call(caplog, monkeypatch, pyline, expected, call_kwargs, log_check):
+ import logging
+
+ caplog.set_level(logging.INFO)
+ monkeypatch.setattr("cephadm.logger", logging.getLogger())
+ ctx = FakeContext()
+ result = _cephadm.call(ctx, [sys.executable, "-c", pyline], **call_kwargs)
+ if callable(expected):
+ assert expected(result)
+ else:
+ assert result == expected
+ if callable(log_check):
+ log_check(caplog)
+
+
+class TestWriteNew:
+ def test_success(self, tmp_path):
+        "Test the basic feature of writing a file."
+ dest = tmp_path / "foo.txt"
+ with _cephadm.write_new(dest) as fh:
+ fh.write("something\n")
+ fh.write("something else\n")
+
+ with open(dest, "r") as fh:
+ assert fh.read() == "something\nsomething else\n"
+
+    def test_write_owner_mode(self, tmp_path):
+ "Test that the owner and perms options function."
+ dest = tmp_path / "foo.txt"
+
+        # if this test is run as non-root, we can't really change ownership
+ uid = os.getuid()
+ gid = os.getgid()
+
+ with _cephadm.write_new(dest, owner=(uid, gid), perms=0o600) as fh:
+ fh.write("xomething\n")
+ fh.write("xomething else\n")
+
+ with open(dest, "r") as fh:
+ assert fh.read() == "xomething\nxomething else\n"
+ sr = os.fstat(fh.fileno())
+ assert sr.st_uid == uid
+ assert sr.st_gid == gid
+ assert (sr.st_mode & 0o777) == 0o600
+
+ def test_encoding(self, tmp_path):
+ "Test that the encoding option functions."
+ dest = tmp_path / "foo.txt"
+ msg = "\u2603\u26C5\n"
+ with _cephadm.write_new(dest, encoding='utf-8') as fh:
+ fh.write(msg)
+ with open(dest, "rb") as fh:
+ b1 = fh.read()
+ assert b1.decode('utf-8') == msg
+
+ dest = tmp_path / "foo2.txt"
+ with _cephadm.write_new(dest, encoding='utf-16le') as fh:
+ fh.write(msg)
+ with open(dest, "rb") as fh:
+ b2 = fh.read()
+ assert b2.decode('utf-16le') == msg
+
+ # the binary data should differ due to the different encodings
+ assert b1 != b2
+
+ def test_cleanup(self, tmp_path):
+ "Test that an exception during write leaves no file behind."
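+        # neither the destination nor the intermediate '<name>.new' file, which
+        # write_new appears to stage output in, should survive the failure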
+ dest = tmp_path / "foo.txt"
+ with pytest.raises(ValueError):
+ with _cephadm.write_new(dest) as fh:
+ fh.write("hello\n")
+ raise ValueError("foo")
+ fh.write("world\n")
+ assert not dest.exists()
+ assert not dest.with_name(dest.name+".new").exists()
+ assert list(dest.parent.iterdir()) == []
+
+
+class CompareContext1:
+ cfg_data = {
+ "name": "mane",
+ "fsid": "foobar",
+ "image": "fake.io/noway/nohow:gndn",
+ "meta": {
+ "fruit": "banana",
+ "vegetable": "carrot",
+ },
+ "params": {
+ "osd_fsid": "robble",
+ "tcp_ports": [404, 9999],
+ },
+ "config_blobs": {
+ "alpha": {"sloop": "John B"},
+ "beta": {"forest": "birch"},
+ "gamma": {"forest": "pine"},
+ },
+ }
+
+ def check(self, ctx):
+ assert ctx.name == 'mane'
+ assert ctx.fsid == 'foobar'
+ assert ctx.image == 'fake.io/noway/nohow:gndn'
+ assert ctx.meta_properties == {"fruit": "banana", "vegetable": "carrot"}
+ assert ctx.config_blobs == {
+ "alpha": {"sloop": "John B"},
+ "beta": {"forest": "birch"},
+ "gamma": {"forest": "pine"},
+ }
+ assert ctx.osd_fsid == "robble"
+ assert ctx.tcp_ports == [404, 9999]
+
+
+class CompareContext2:
+ cfg_data = {
+ "name": "cc2",
+ "fsid": "foobar",
+ "meta": {
+ "fruit": "banana",
+ "vegetable": "carrot",
+ },
+ "params": {},
+ "config_blobs": {
+ "alpha": {"sloop": "John B"},
+ "beta": {"forest": "birch"},
+ "gamma": {"forest": "pine"},
+ },
+ }
+
+ def check(self, ctx):
+ assert ctx.name == 'cc2'
+ assert ctx.fsid == 'foobar'
+ assert ctx.image == 'quay.io/ceph/ceph:v18'
+ assert ctx.meta_properties == {"fruit": "banana", "vegetable": "carrot"}
+ assert ctx.config_blobs == {
+ "alpha": {"sloop": "John B"},
+ "beta": {"forest": "birch"},
+ "gamma": {"forest": "pine"},
+ }
+ assert ctx.osd_fsid is None
+ assert ctx.tcp_ports is None
+
+
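+# CompareContext2 omits the image and all params, so its check asserts that
+# apply_deploy_config_to_ctx falls back to a default image and leaves the
+# optional attributes (osd_fsid, tcp_ports) unset.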
+@pytest.mark.parametrize(
+ "cc",
+ [
+ CompareContext1(),
+ CompareContext2(),
+ ],
+)
+def test_apply_deploy_config_to_ctx(cc, monkeypatch):
+ import logging
+
+ monkeypatch.setattr("cephadm.logger", logging.getLogger())
+ ctx = FakeContext()
+ _cephadm.apply_deploy_config_to_ctx(cc.cfg_data, ctx)
+ cc.check(ctx)