author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-23 16:45:17 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-23 16:45:44 +0000
commit     17d6a993fc17d533460c5f40f3908c708e057c18 (patch)
tree       1a3bd93e0ecd74fa02f93a528fe2f87e5314c4b5 /src/pybind/mgr/cephadm/tests
parent     Releasing progress-linux version 18.2.2-0progress7.99u1. (diff)
Merging upstream version 18.2.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/pybind/mgr/cephadm/tests')
-rw-r--r--  src/pybind/mgr/cephadm/tests/node_proxy_data.py            3
-rw-r--r--  src/pybind/mgr/cephadm/tests/test_autotune.py             11
-rw-r--r--  src/pybind/mgr/cephadm/tests/test_cephadm.py              73
-rw-r--r--  src/pybind/mgr/cephadm/tests/test_configchecks.py          5
-rw-r--r--  src/pybind/mgr/cephadm/tests/test_node_proxy.py          312
-rw-r--r--  src/pybind/mgr/cephadm/tests/test_scheduling.py           58
-rw-r--r--  src/pybind/mgr/cephadm/tests/test_service_discovery.py    17
-rw-r--r--  src/pybind/mgr/cephadm/tests/test_services.py             42
8 files changed, 499 insertions(+), 22 deletions(-)
diff --git a/src/pybind/mgr/cephadm/tests/node_proxy_data.py b/src/pybind/mgr/cephadm/tests/node_proxy_data.py
new file mode 100644
index 000000000..37e6aaa46
--- /dev/null
+++ b/src/pybind/mgr/cephadm/tests/node_proxy_data.py
@@ -0,0 +1,3 @@
+full_set_with_critical = {'host': 'host01', 'sn': '12345', 'status': {'storage': {'disk.bay.0:enclosure.internal.0-1:raid.integrated.1-1': {'description': 'Solid State Disk 0:1:0', 'entity': 'RAID.Integrated.1-1', 'capacity_bytes': 959656755200, 'model': 'KPM5XVUG960G', 'protocol': 'SAS', 'serial_number': '8080A1CRTP5F', 'status': {'health': 'Critical', 'healthrollup': 'OK', 'state': 'Enabled'}, 'physical_location': {'partlocation': {'locationordinalvalue': 0, 'locationtype': 'Slot'}}}, 'disk.bay.9:enclosure.internal.0-1': {'description': 'PCIe SSD in Slot 9 in Bay 1', 'entity': 'CPU.1', 'capacity_bytes': 1600321314816, 'model': 'Dell Express Flash NVMe P4610 1.6TB SFF', 'protocol': 'PCIe', 'serial_number': 'PHLN035305MN1P6AGN', 'status': {'health': 'Critical', 'healthrollup': 'OK', 'state': 'Enabled'}, 'physical_location': {'partlocation': {'locationordinalvalue': 9, 'locationtype': 'Slot'}}}}, 'processors': {'cpu.socket.2': {'description': 'Represents the properties of a Processor attached to this System', 'total_cores': 20, 'total_threads': 40, 'processor_type': 'CPU', 'model': 'Intel(R) Xeon(R) Gold 6230 CPU @ 2.10GHz', 'status': {'health': 'OK', 'state': 'Enabled'}, 'manufacturer': 'Intel'}}, 'network': {'nic.slot.1-1-1': {'description': 'NIC in Slot 1 Port 1 Partition 1', 'name': 'System Ethernet Interface', 'speed_mbps': 0, 'status': {'health': 'OK', 'state': 'StandbyOffline'}}}, 'memory': {'dimm.socket.a1': {'description': 'DIMM A1', 'memory_device_type': 'DDR4', 'capacity_mi_b': 31237, 'status': {'health': 'Critical', 'state': 'Enabled'}}}}, 'firmwares': {}}
+mgr_inventory_cache = {'host01': {'hostname': 'host01', 'addr': '10.10.10.11', 'labels': ['_admin'], 'status': '', 'oob': {'hostname': '10.10.10.11', 'username': 'root', 'password': 'ceph123'}}, 'host02': {'hostname': 'host02', 'addr': '10.10.10.12', 'labels': [], 'status': '', 'oob': {'hostname': '10.10.10.12', 'username': 'root', 'password': 'ceph123'}}}
+full_set = {'host01': {'host': 'host01', 'sn': 'FR8Y5X3', 'status': {'storage': {'disk.bay.8:enclosure.internal.0-1:nonraid.slot.2-1': {'description': 'Disk 8 in Backplane 1 of Storage Controller in Slot 2', 'entity': 'NonRAID.Slot.2-1', 'capacity_bytes': 20000588955136, 'model': 'ST20000NM008D-3D', 'protocol': 'SATA', 'serial_number': 'ZVT99QLL', 'status': {'health': 'OK', 'healthrollup': 'OK', 'state': 'Enabled'}, 'physical_location': {'partlocation': {'locationordinalvalue': 8, 'locationtype': 'Slot'}}}}, 'processors': {'cpu.socket.2': {'description': 'Represents the properties of a Processor attached to this System', 'total_cores': 16, 'total_threads': 32, 'processor_type': 'CPU', 'model': 'Intel(R) Xeon(R) Silver 4314 CPU @ 2.40GHz', 'status': {'health': 'OK', 'state': 'Enabled'}, 'manufacturer': 'Intel'}, 'cpu.socket.1': {'description': 'Represents the properties of a Processor attached to this System', 'total_cores': 16, 'total_threads': 32, 'processor_type': 'CPU', 'model': 'Intel(R) Xeon(R) Silver 4314 CPU @ 2.40GHz', 'status': {'health': 'OK', 'state': 'Enabled'}, 'manufacturer': 'Intel'}}, 'network': {'oslogicalnetwork.2': {'description': 'eno8303', 'name': 'eno8303', 'speed_mbps': 0, 'status': {'health': 'OK', 'state': 'Enabled'}}}, 'memory': {'dimm.socket.a1': {'description': 'DIMM A1', 'memory_device_type': 'DDR4', 'capacity_mi_b': 16384, 'status': {'health': 'OK', 'state': 'Enabled'}}}, 'power': {'0': {'name': 'PS1 Status', 'model': 'PWR SPLY,800W,RDNT,LTON', 'manufacturer': 'DELL', 'status': {'health': 'OK', 'state': 'Enabled'}}, '1': {'name': 'PS2 Status', 'model': 'PWR SPLY,800W,RDNT,LTON', 'manufacturer': 'DELL', 'status': {'health': 'OK', 'state': 'Enabled'}}}, 'fans': {'0': {'name': 'System Board Fan1A', 'physical_context': 'SystemBoard', 'status': {'health': 'OK', 'state': 'Enabled'}}}}, 'firmwares': {'installed-28897-6.10.30.20__usc.embedded.1:lc.embedded.1': {'name': 'Lifecycle Controller', 'description': 'Represents Firmware Inventory', 'release_date': '00:00:00Z', 'version': '6.10.30.20', 'updateable': True, 'status': {'health': 'OK', 'state': 'Enabled'}}}}, 'host02': {'host': 'host02', 'sn': 'FR8Y5X4', 'status': {'storage': {'disk.bay.8:enclosure.internal.0-1:nonraid.slot.2-1': {'description': 'Disk 8 in Backplane 1 of Storage Controller in Slot 2', 'entity': 'NonRAID.Slot.2-1', 'capacity_bytes': 20000588955136, 'model': 'ST20000NM008D-3D', 'protocol': 'SATA', 'serial_number': 'ZVT99QLL', 'status': {'health': 'OK', 'healthrollup': 'OK', 'state': 'Enabled'}, 'physical_location': {'partlocation': {'locationordinalvalue': 8, 'locationtype': 'Slot'}}}}, 'processors': {'cpu.socket.2': {'description': 'Represents the properties of a Processor attached to this System', 'total_cores': 16, 'total_threads': 32, 'processor_type': 'CPU', 'model': 'Intel(R) Xeon(R) Silver 4314 CPU @ 2.40GHz', 'status': {'health': 'OK', 'state': 'Enabled'}, 'manufacturer': 'Intel'}, 'cpu.socket.1': {'description': 'Represents the properties of a Processor attached to this System', 'total_cores': 16, 'total_threads': 32, 'processor_type': 'CPU', 'model': 'Intel(R) Xeon(R) Silver 4314 CPU @ 2.40GHz', 'status': {'health': 'OK', 'state': 'Enabled'}, 'manufacturer': 'Intel'}}, 'network': {'oslogicalnetwork.2': {'description': 'eno8303', 'name': 'eno8303', 'speed_mbps': 0, 'status': {'health': 'OK', 'state': 'Enabled'}}}, 'memory': {'dimm.socket.a1': {'description': 'DIMM A1', 'memory_device_type': 'DDR4', 'capacity_mi_b': 16384, 'status': {'health': 'OK', 'state': 'Enabled'}}}, 'power': {'0': {'name': 'PS1 Status', 'model': 'PWR SPLY,800W,RDNT,LTON', 'manufacturer': 'DELL', 'status': {'health': 'OK', 'state': 'Enabled'}}, '1': {'name': 'PS2 Status', 'model': 'PWR SPLY,800W,RDNT,LTON', 'manufacturer': 'DELL', 'status': {'health': 'OK', 'state': 'Enabled'}}}, 'fans': {'0': {'name': 'System Board Fan1A', 'physical_context': 'SystemBoard', 'status': {'health': 'OK', 'state': 'Enabled'}}}}, 'firmwares': {'installed-28897-6.10.30.20__usc.embedded.1:lc.embedded.1': {'name': 'Lifecycle Controller', 'description': 'Represents Firmware Inventory', 'release_date': '00:00:00Z', 'version': '6.10.30.20', 'updateable': True, 'status': {'health': 'OK', 'state': 'Enabled'}}}}}
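These fixtures feed the node-proxy endpoint tests added below. As a minimal sketch (assuming only the dict layout above; the import path is inferred from this tree), a payload such as full_set_with_critical can be scanned for unhealthy members, which is the shape of data behind the HARDWARE_STORAGE and HARDWARE_MEMORY warnings asserted in test_node_proxy.py:

from cephadm.tests import node_proxy_data  # import path assumed from this tree

def critical_members(report):
    # Walk each hardware category ('storage', 'memory', ...) and collect
    # the members whose health field reports 'Critical'.
    found = {}
    for category, members in report['status'].items():
        bad = [name for name, member in members.items()
               if member.get('status', {}).get('health') == 'Critical']
        if bad:
            found[category] = bad
    return found

# full_set_with_critical flags two storage members and one DIMM, matching
# the count=2 / count=1 health warnings asserted further down.
print(critical_members(node_proxy_data.full_set_with_critical))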
diff --git a/src/pybind/mgr/cephadm/tests/test_autotune.py b/src/pybind/mgr/cephadm/tests/test_autotune.py
index 524da9c00..7994c390a 100644
--- a/src/pybind/mgr/cephadm/tests/test_autotune.py
+++ b/src/pybind/mgr/cephadm/tests/test_autotune.py
@@ -46,6 +46,17 @@ from orchestrator import DaemonDescription
],
{},
62 * 1024 * 1024 * 1024,
+ ),
+ (
+ 128 * 1024 * 1024 * 1024,
+ [
+ DaemonDescription('mgr', 'a', 'host1'),
+ DaemonDescription('osd', '1', 'host1'),
+ DaemonDescription('osd', '2', 'host1'),
+ DaemonDescription('nvmeof', 'a', 'host1'),
+ ],
+ {},
+ 60 * 1024 * 1024 * 1024,
)
])
def test_autotune(total, daemons, config, result):
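The new parameter set checks that nvmeof daemons are now counted against the OSD memory autotune budget. A back-of-envelope check of the expected value, assuming cephadm reserves roughly 4 GiB apiece for the mgr and nvmeof daemons (a figure this diff does not state):

GiB = 1024 ** 3
total = 128 * GiB
reserved = 2 * 4 * GiB       # mgr + nvmeof at ~4 GiB each (assumption)
num_osds = 2
assert (total - reserved) // num_osds == 60 * GiB  # matches the case above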
diff --git a/src/pybind/mgr/cephadm/tests/test_cephadm.py b/src/pybind/mgr/cephadm/tests/test_cephadm.py
index 24fcb0280..2477de13e 100644
--- a/src/pybind/mgr/cephadm/tests/test_cephadm.py
+++ b/src/pybind/mgr/cephadm/tests/test_cephadm.py
@@ -400,6 +400,42 @@ class TestCephadm(object):
assert 'myerror' in ''.join(evs)
+ @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
+ def test_daemon_action_event_timestamp_update(self, cephadm_module: CephadmOrchestrator):
+ # Test to make sure that if a new daemon event is created with the same
+ # subject and message, the timestamp of the event is updated to let
+ # users know when it most recently occurred.
+ cephadm_module.service_cache_timeout = 10
+ with with_host(cephadm_module, 'test'):
+ with with_service(cephadm_module, RGWSpec(service_id='myrgw.foobar', unmanaged=True)) as _, \
+ with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), 'test') as daemon_id:
+
+ d_name = 'rgw.' + daemon_id
+
+ now = str_to_datetime('2023-10-18T22:45:29.119250Z')
+ with mock.patch("cephadm.inventory.datetime_now", lambda: now):
+ c = cephadm_module.daemon_action('redeploy', d_name)
+ assert wait(cephadm_module,
+ c) == f"Scheduled to redeploy rgw.{daemon_id} on host 'test'"
+
+ CephadmServe(cephadm_module)._check_daemons()
+
+ d_events = cephadm_module.events.get_for_daemon(d_name)
+ assert len(d_events) == 1
+ assert d_events[0].created == now
+
+ later = str_to_datetime('2023-10-18T23:46:37.119250Z')
+ with mock.patch("cephadm.inventory.datetime_now", lambda: later):
+ c = cephadm_module.daemon_action('redeploy', d_name)
+ assert wait(cephadm_module,
+ c) == f"Scheduled to redeploy rgw.{daemon_id} on host 'test'"
+
+ CephadmServe(cephadm_module)._check_daemons()
+
+ d_events = cephadm_module.events.get_for_daemon(d_name)
+ assert len(d_events) == 1
+ assert d_events[0].created == later
+
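The repeated len(d_events) == 1 assertions capture the behaviour under test: a new event with the same subject and message is coalesced into the existing one, refreshing only its timestamp. A toy version of that rule (illustrative; the real logic lives in cephadm's event store):

class EventStoreSketch:
    def __init__(self):
        self.events = {}  # (subject, message) -> created timestamp

    def add(self, subject, message, now):
        # Same subject and message: overwrite the timestamp instead of
        # appending a second event, so the list length stays at one.
        self.events[(subject, message)] = now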
@pytest.mark.parametrize(
"action",
[
@@ -1157,7 +1193,8 @@ class TestCephadm(object):
@mock.patch('cephadm.services.osd.OSDService.driveselection_to_ceph_volume')
@mock.patch('cephadm.services.osd.OsdIdClaims.refresh', lambda _: None)
@mock.patch('cephadm.services.osd.OsdIdClaims.get', lambda _: {})
- def test_limit_not_reached(self, d_to_cv, _run_cv_cmd, cephadm_module):
+ @mock.patch('cephadm.inventory.HostCache.get_daemons_by_service')
+ def test_limit_not_reached(self, _get_daemons_by_service, d_to_cv, _run_cv_cmd, cephadm_module):
with with_host(cephadm_module, 'test'):
dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
data_devices=DeviceSelection(limit=5, rotational=1),
@@ -1167,12 +1204,14 @@ class TestCephadm(object):
'[{"data": "/dev/vdb", "data_size": "50.00 GB", "encryption": "None"}, {"data": "/dev/vdc", "data_size": "50.00 GB", "encryption": "None"}]']
d_to_cv.return_value = 'foo'
_run_cv_cmd.side_effect = async_side_effect((disks_found, '', 0))
+ _get_daemons_by_service.return_value = [DaemonDescription(daemon_type='osd', hostname='test', service_name='not_enough')]
preview = cephadm_module.osd_service.generate_previews([dg], 'test')
for osd in preview:
assert 'notes' in osd
assert osd['notes'] == [
- 'NOTE: Did not find enough disks matching filter on host test to reach data device limit (Found: 2 | Limit: 5)']
+ ('NOTE: Did not find enough disks matching filter on host test to reach '
+ 'data device limit\n(New Devices: 2 | Existing Matching Daemons: 1 | Limit: 5)')]
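The reworded note reflects that OSDs already deployed for the service now count toward the data device limit; with the mocked existing daemon the total still falls short, so a note is emitted rather than more OSDs deployed:

new_devices, existing_daemons, limit = 2, 1, 5  # values from the test above
assert new_devices + existing_daemons < limit   # limit not reached, hence the NOTE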
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
def test_prepare_drivegroup(self, cephadm_module):
@@ -1251,7 +1290,11 @@ class TestCephadm(object):
))
@mock.patch("cephadm.services.osd.OSD.exists", True)
@mock.patch("cephadm.services.osd.RemoveUtil.get_pg_count", lambda _, __: 0)
- def test_remove_osds(self, cephadm_module):
+ @mock.patch("cephadm.services.osd.RemoveUtil.get_weight")
+ @mock.patch("cephadm.services.osd.RemoveUtil.reweight_osd")
+ def test_remove_osds(self, _reweight_osd, _get_weight, cephadm_module):
+ osd_initial_weight = 2.1
+ _get_weight.return_value = osd_initial_weight
with with_host(cephadm_module, 'test'):
CephadmServe(cephadm_module)._refresh_host_daemons('test')
c = cephadm_module.list_daemons()
@@ -1261,13 +1304,23 @@ class TestCephadm(object):
out = wait(cephadm_module, c)
assert out == ["Removed osd.0 from host 'test'"]
- cephadm_module.to_remove_osds.enqueue(OSD(osd_id=0,
- replace=False,
- force=False,
- hostname='test',
- process_started_at=datetime_now(),
- remove_util=cephadm_module.to_remove_osds.rm_util
- ))
+ osd_0 = OSD(osd_id=0,
+ replace=False,
+ force=False,
+ hostname='test',
+ process_started_at=datetime_now(),
+ remove_util=cephadm_module.to_remove_osds.rm_util
+ )
+
+ cephadm_module.to_remove_osds.enqueue(osd_0)
+ _get_weight.assert_called()
+
+ # test that OSD is properly reweighted on removal
+ cephadm_module.stop_remove_osds([0])
+ _reweight_osd.assert_called_with(mock.ANY, osd_initial_weight)
+
+ # add OSD back to queue and test normal removal queue processing
+ cephadm_module.to_remove_osds.enqueue(osd_0)
cephadm_module.to_remove_osds.process_removal_queue()
assert cephadm_module.to_remove_osds == OSDRemovalQueue(cephadm_module)
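The new assertions pin down reweight-on-cancel: the OSD's CRUSH weight is sampled when it is queued for removal and restored if the removal is stopped. A rough sketch of that flow, using the names mocked above (the actual implementation is in cephadm.services.osd):

class RemovalQueueSketch:
    """Illustrative only -- not the real OSDRemovalQueue."""

    def __init__(self, rm_util):
        self.rm_util = rm_util
        self.original_weight = {}

    def enqueue(self, osd):
        # Sample the CRUSH weight before draining begins; this is why the
        # test asserts _get_weight.assert_called().
        self.original_weight[osd.osd_id] = self.rm_util.get_weight(osd)

    def stop(self, osd):
        # Cancelling restores the original weight, which is what
        # _reweight_osd.assert_called_with(mock.ANY, osd_initial_weight) checks.
        self.rm_util.reweight_osd(osd, self.original_weight[osd.osd_id])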
diff --git a/src/pybind/mgr/cephadm/tests/test_configchecks.py b/src/pybind/mgr/cephadm/tests/test_configchecks.py
index 3cae0a27d..ff1e21861 100644
--- a/src/pybind/mgr/cephadm/tests/test_configchecks.py
+++ b/src/pybind/mgr/cephadm/tests/test_configchecks.py
@@ -238,6 +238,7 @@ class FakeMgr:
self.default_version = 'quincy'
self.version_overrides = {}
self.daemon_to_host = {}
+ self.config_checks_enabled = True
self.cache = HostCache(self)
self.upgrade = CephadmUpgrade(self)
@@ -623,9 +624,7 @@ class TestConfigCheck:
assert 'ceph_release' in checker.skipped_checks
def test_skip_when_disabled(self, mgr):
- mgr.module_option.update({
- "config_checks_enabled": "false"
- })
+ mgr.config_checks_enabled = False
checker = CephadmConfigChecks(mgr)
checker.cluster_network_list = []
checker.public_network_list = ['10.9.64.0/24']
diff --git a/src/pybind/mgr/cephadm/tests/test_node_proxy.py b/src/pybind/mgr/cephadm/tests/test_node_proxy.py
new file mode 100644
index 000000000..b19bb5dbc
--- /dev/null
+++ b/src/pybind/mgr/cephadm/tests/test_node_proxy.py
@@ -0,0 +1,312 @@
+import cherrypy
+import json
+from _pytest.monkeypatch import MonkeyPatch
+from urllib.error import URLError
+from cherrypy.test import helper
+from cephadm.agent import NodeProxyEndpoint
+from unittest.mock import MagicMock, call, patch
+from cephadm.inventory import AgentCache, NodeProxyCache, Inventory
+from cephadm.ssl_cert_utils import SSLCerts
+from . import node_proxy_data
+
+PORT = 58585
+
+
+class FakeMgr:
+ def __init__(self) -> None:
+ self.log = MagicMock()
+ self.get_store = MagicMock(return_value=json.dumps(node_proxy_data.mgr_inventory_cache))
+ self.set_store = MagicMock()
+ self.set_health_warning = MagicMock()
+ self.remove_health_warning = MagicMock()
+ self.inventory = Inventory(self)
+ self.agent_cache = AgentCache(self)
+ self.agent_cache.agent_ports = {"host01": 1234}
+ self.node_proxy_cache = NodeProxyCache(self)
+ self.node_proxy_cache.save = MagicMock()
+ self.node_proxy = MagicMock()
+ self.http_server = MagicMock()
+ self.http_server.agent = MagicMock()
+ self.http_server.agent.ssl_certs = SSLCerts()
+ self.http_server.agent.ssl_certs.generate_root_cert(self.get_mgr_ip())
+
+ def get_mgr_ip(self) -> str:
+ return '0.0.0.0'
+
+
+class TestNodeProxyEndpoint(helper.CPWebCase):
+ mgr = FakeMgr()
+ app = NodeProxyEndpoint(mgr)
+ mgr.node_proxy_cache.keyrings = {"host01": "fake-secret01",
+ "host02": "fake-secret02"}
+ mgr.node_proxy_cache.oob = {"host01": {"username": "oob-user01",
+ "password": "oob-pass01"},
+ "host02": {"username": "oob-user02",
+ "password": "oob-pass02"}}
+ mgr.node_proxy_cache.data = node_proxy_data.full_set
+
+ @classmethod
+ def setup_server(cls):
+ # cherrypy.tree.mount(NodeProxyEndpoint(TestNodeProxyEndpoint.mgr))
+ cherrypy.tree.mount(TestNodeProxyEndpoint.app)
+ cherrypy.config.update({'global': {
+ 'server.socket_host': '127.0.0.1',
+ 'server.socket_port': PORT}})
+
+ def setUp(self):
+ self.PORT = PORT
+ self.monkeypatch = MonkeyPatch()
+
+ def test_oob_data_misses_cephx_field(self):
+ data = '{}'
+ self.getPage("/oob", method="POST", body=data, headers=[('Content-Type', 'application/json'),
+ ('Content-Length', str(len(data)))])
+ self.assertStatus('400 Bad Request')
+
+ def test_oob_data_misses_name_field(self):
+ data = '{"cephx": {"secret": "fake-secret"}}'
+ self.getPage("/oob", method="POST", body=data, headers=[('Content-Type', 'application/json'),
+ ('Content-Length', str(len(data)))])
+ self.assertStatus('400 Bad Request')
+
+ def test_oob_data_misses_secret_field(self):
+ data = '{"cephx": {"name": "node-proxy.host01"}}'
+ self.getPage("/oob", method="POST", body=data, headers=[('Content-Type', 'application/json'),
+ ('Content-Length', str(len(data)))])
+ self.assertStatus('400 Bad Request')
+
+ def test_oob_agent_not_running(self):
+ data = '{"cephx": {"name": "node-proxy.host03", "secret": "fake-secret03"}}'
+ self.getPage("/oob", method="POST", body=data, headers=[('Content-Type', 'application/json'),
+ ('Content-Length', str(len(data)))])
+ self.assertStatus('502 Bad Gateway')
+
+ def test_oob_wrong_keyring(self):
+ data = '{"cephx": {"name": "node-proxy.host01", "secret": "wrong-keyring"}}'
+ self.getPage("/oob", method="POST", body=data, headers=[('Content-Type', 'application/json'),
+ ('Content-Length', str(len(data)))])
+ self.assertStatus('403 Forbidden')
+
+ def test_oob_ok(self):
+ data = '{"cephx": {"name": "node-proxy.host01", "secret": "fake-secret01"}}'
+ self.getPage("/oob", method="POST", body=data, headers=[('Content-Type', 'application/json'),
+ ('Content-Length', str(len(data)))])
+ self.assertStatus('200 OK')
+
+ def test_data_missing_patch(self):
+ data = '{"cephx": {"name": "node-proxy.host01", "secret": "fake-secret01"}}'
+ self.getPage("/data", method="POST", body=data, headers=[('Content-Type', 'application/json'),
+ ('Content-Length', str(len(data)))])
+ self.assertStatus('400 Bad Request')
+
+ def test_data_raises_alert(self):
+ patch = node_proxy_data.full_set_with_critical
+ data = {"cephx": {"name": "node-proxy.host01", "secret": "fake-secret01"}, "patch": patch}
+ data_str = json.dumps(data)
+ self.getPage("/data", method="POST", body=data_str, headers=[('Content-Type', 'application/json'),
+ ('Content-Length', str(len(data_str)))])
+ self.assertStatus('200 OK')
+
+ calls = [call('HARDWARE_STORAGE',
+ count=2,
+ detail=['disk.bay.0:enclosure.internal.0-1:raid.integrated.1-1 is critical: Enabled',
+ 'disk.bay.9:enclosure.internal.0-1 is critical: Enabled'],
+ summary='2 storage members are not ok'),
+ call('HARDWARE_MEMORY',
+ count=1,
+ detail=['dimm.socket.a1 is critical: Enabled'],
+ summary='1 memory member is not ok')]
+
+ assert TestNodeProxyEndpoint.mgr.set_health_warning.mock_calls == calls
+
+ def test_led_GET_no_hostname(self):
+ self.getPage("/led", method="GET")
+ self.assertStatus('501 Not Implemented')
+
+ def test_led_PATCH_no_hostname(self):
+ data = "{}"
+ self.getPage("/led", method="PATCH", body=data, headers=[('Content-Type', 'application/json'),
+ ('Content-Length', str(len(data)))])
+ self.assertStatus('501 Not Implemented')
+
+ def test_set_led_no_type(self):
+ data = '{"state": "on", "keyring": "fake-secret01"}'
+ self.getPage("/host01/led", method="PATCH", body=data, headers=[('Content-Type', 'application/json'),
+ ('Content-Length', str(len(data)))])
+ self.assertStatus('400 Bad Request')
+
+ def test_set_chassis_led(self):
+ data = '{"state": "on", "keyring": "fake-secret01"}'
+ with patch('cephadm.agent.http_req') as p:
+ p.return_value = [], '{}', 200
+ self.getPage("/host01/led/chassis", method="PATCH", body=data, headers=[('Content-Type', 'application/json'),
+ ('Content-Length', str(len(data)))])
+ self.assertStatus('200 OK')
+
+ def test_get_led_missing_type(self):
+ self.getPage("/host01/led", method="GET")
+ self.assertStatus('400 Bad Request')
+
+ def test_get_led_no_hostname(self):
+ self.getPage("/led", method="GET")
+ self.assertStatus('501 Not Implemented')
+
+ def test_get_led_type_chassis_no_hostname(self):
+ self.getPage("/led/chassis", method="GET")
+ self.assertStatus('404 Not Found')
+
+ def test_get_led_type_drive_no_hostname(self):
+ self.getPage("/led/chassis", method="GET")
+ self.assertStatus('404 Not Found')
+
+ def test_get_led_type_drive_missing_id(self):
+ self.getPage("/host01/led/drive", method="GET")
+ self.assertStatus('400 Bad Request')
+
+ def test_get_led_url_error(self):
+ with patch('cephadm.agent.http_req') as p:
+ p.side_effect = URLError('fake error')
+ self.getPage("/host02/led/chassis", method="GET")
+ self.assertStatus('502 Bad Gateway')
+
+ def test_get_chassis_led_ok(self):
+ with patch('cephadm.agent.http_req', return_value=MagicMock()) as p:
+ p.return_value = [], '{}', 200
+ self.getPage("/host01/led/chassis", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_get_drive_led_without_id(self):
+ self.getPage("/host01/led/drive", method="GET")
+ self.assertStatus('400 Bad Request')
+
+ def test_get_drive_led_with_id(self):
+ with patch('cephadm.agent.http_req', return_value=MagicMock()) as p:
+ p.return_value = [], '{}', 200
+ self.getPage("/host01/led/drive/123", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_fullreport_with_valid_hostname(self):
+ # data = '{"cephx": {"name": "node-proxy.host01", "secret": "fake-secret01"}}'
+ # self.getPage("/host02/fullreport", method="POST", body=data, headers=[('Content-Type', 'application/json'), ('Content-Length', str(len(data)))])
+ self.getPage("/host02/fullreport", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_fullreport_no_hostname(self):
+ # data = '{"cephx": {"name": "node-proxy.host01", "secret": "fake-secret01"}}'
+ # self.getPage("/fullreport", method="POST", body=data, headers=[('Content-Type', 'application/json'), ('Content-Length', str(len(data)))])
+ self.getPage("/fullreport", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_fullreport_with_invalid_hostname(self):
+ # data = '{"cephx": {"name": "node-proxy.host03", "secret": "fake-secret03"}}'
+ # self.getPage("/host03/fullreport", method="POST", body=data, headers=[('Content-Type', 'application/json'), ('Content-Length', str(len(data)))])
+ self.getPage("/host03/fullreport", method="GET")
+ self.assertStatus('404 Not Found')
+
+ def test_summary_with_valid_hostname(self):
+ self.getPage("/host02/summary", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_summary_no_hostname(self):
+ self.getPage("/summary", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_summary_with_invalid_hostname(self):
+ self.getPage("/host03/summary", method="GET")
+ self.assertStatus('404 Not Found')
+
+ def test_criticals_with_valid_hostname(self):
+ self.getPage("/host02/criticals", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_criticals_no_hostname(self):
+ self.getPage("/criticals", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_criticals_with_invalid_hostname(self):
+ self.getPage("/host03/criticals", method="GET")
+ self.assertStatus('404 Not Found')
+
+ def test_memory_with_valid_hostname(self):
+ self.getPage("/host02/memory", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_memory_no_hostname(self):
+ self.getPage("/memory", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_memory_with_invalid_hostname(self):
+ self.getPage("/host03/memory", method="GET")
+ self.assertStatus('404 Not Found')
+
+ def test_network_with_valid_hostname(self):
+ self.getPage("/host02/network", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_network_no_hostname(self):
+ self.getPage("/network", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_network_with_invalid_hostname(self):
+ self.getPage("/host03/network", method="GET")
+ self.assertStatus('404 Not Found')
+
+ def test_processors_with_valid_hostname(self):
+ self.getPage("/host02/processors", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_processors_no_hostname(self):
+ self.getPage("/processors", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_processors_with_invalid_hostname(self):
+ self.getPage("/host03/processors", method="GET")
+ self.assertStatus('404 Not Found')
+
+ def test_storage_with_valid_hostname(self):
+ self.getPage("/host02/storage", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_storage_no_hostname(self):
+ self.getPage("/storage", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_storage_with_invalid_hostname(self):
+ self.getPage("/host03/storage", method="GET")
+ self.assertStatus('404 Not Found')
+
+ def test_power_with_valid_hostname(self):
+ self.getPage("/host02/power", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_power_no_hostname(self):
+ self.getPage("/power", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_power_with_invalid_hostname(self):
+ self.getPage("/host03/power", method="GET")
+ self.assertStatus('404 Not Found')
+
+ def test_fans_with_valid_hostname(self):
+ self.getPage("/host02/fans", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_fans_no_hostname(self):
+ self.getPage("/fans", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_fans_with_invalid_hostname(self):
+ self.getPage("/host03/fans", method="GET")
+ self.assertStatus('404 Not Found')
+
+ def test_firmwares_with_valid_hostname(self):
+ self.getPage("/host02/firmwares", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_firmwares_no_hostname(self):
+ self.getPage("/firmwares", method="GET")
+ self.assertStatus('200 OK')
+
+ def test_firmwares_with_invalid_hostname(self):
+ self.getPage("/host03/firmwares", method="GET")
+ self.assertStatus('404 Not Found')
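The per-category tests above all exercise the same routing convention: GET /<category> aggregates every host, GET /<hostname>/<category> filters to a known host, and an unknown hostname returns 404. Compressed into a table of expectations (a sketch of what the suite checks, not how it is written):

CATEGORIES = ['fullreport', 'summary', 'criticals', 'memory', 'network',
              'processors', 'storage', 'power', 'fans', 'firmwares']
EXPECTED = {f'/{c}': 200 for c in CATEGORIES}               # all hosts
EXPECTED.update({f'/host02/{c}': 200 for c in CATEGORIES})  # known host
EXPECTED.update({f'/host03/{c}': 404 for c in CATEGORIES})  # unknown host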
diff --git a/src/pybind/mgr/cephadm/tests/test_scheduling.py b/src/pybind/mgr/cephadm/tests/test_scheduling.py
index 067cd5028..f445ed6f0 100644
--- a/src/pybind/mgr/cephadm/tests/test_scheduling.py
+++ b/src/pybind/mgr/cephadm/tests/test_scheduling.py
@@ -6,7 +6,13 @@ from typing import NamedTuple, List, Dict, Optional
import pytest
from ceph.deployment.hostspec import HostSpec
-from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, IngressSpec
+from ceph.deployment.service_spec import (
+ ServiceSpec,
+ PlacementSpec,
+ IngressSpec,
+ PatternType,
+ HostPattern,
+)
from ceph.deployment.hostspec import SpecValidationError
from cephadm.module import HostAssignment
@@ -631,6 +637,17 @@ class NodeAssignmentTest(NamedTuple):
'rgw:host2(*:81)', 'rgw:host3(*:81)'],
['rgw.c']
),
+ # label + host pattern
+ # Note: all hosts get the "foo" label; we are checking that placement
+ # also filters on the host pattern when a label is provided
+ NodeAssignmentTest(
+ 'mgr',
+ PlacementSpec(label='foo', host_pattern='mgr*'),
+ 'mgr1 mgr2 osd1'.split(),
+ [],
+ None, None,
+ ['mgr:mgr1', 'mgr:mgr2'], ['mgr:mgr1', 'mgr:mgr2'], []
+ ),
# cephadm.py teuth case
NodeAssignmentTest(
'mgr',
@@ -1697,3 +1714,42 @@ def test_drain_from_explict_placement(service_type, placement, hosts, maintenanc
).place()
assert sorted([h.hostname for h in to_add]) in expected_add
assert sorted([h.name() for h in to_remove]) in expected_remove
+
+
+class RegexHostPatternTest(NamedTuple):
+ service_type: str
+ placement: PlacementSpec
+ hosts: List[str]
+ expected_add: List[List[str]]
+
+
+@pytest.mark.parametrize("service_type,placement,hosts,expected_add",
+ [
+ RegexHostPatternTest(
+ 'crash',
+ PlacementSpec(host_pattern=HostPattern(pattern='host1|host3', pattern_type=PatternType.regex)),
+ 'host1 host2 host3 host4'.split(),
+ ['host1', 'host3'],
+ ),
+ RegexHostPatternTest(
+ 'crash',
+ PlacementSpec(host_pattern=HostPattern(pattern='host[2-4]', pattern_type=PatternType.regex)),
+ 'host1 host2 host3 host4'.split(),
+ ['host2', 'host3', 'host4'],
+ ),
+ ])
+def test_placement_regex_host_pattern(service_type, placement, hosts, expected_add):
+ spec = ServiceSpec(service_type=service_type,
+ service_id='test',
+ placement=placement)
+
+ host_specs = [HostSpec(h) for h in hosts]
+
+ hosts, to_add, to_remove = HostAssignment(
+ spec=spec,
+ hosts=host_specs,
+ unreachable_hosts=[],
+ draining_hosts=[],
+ daemons=[],
+ ).place()
+ assert sorted([h.hostname for h in to_add]) == expected_add
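The expected placements follow directly from Python regex matching over the host list; a standalone check, assuming HostPattern's regex mode matches the whole hostname (as the expectations imply):

import re

hosts = 'host1 host2 host3 host4'.split()
assert [h for h in hosts if re.fullmatch('host1|host3', h)] == ['host1', 'host3']
assert [h for h in hosts if re.fullmatch('host[2-4]', h)] == ['host2', 'host3', 'host4']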
diff --git a/src/pybind/mgr/cephadm/tests/test_service_discovery.py b/src/pybind/mgr/cephadm/tests/test_service_discovery.py
index ff98a1388..687b64553 100644
--- a/src/pybind/mgr/cephadm/tests/test_service_discovery.py
+++ b/src/pybind/mgr/cephadm/tests/test_service_discovery.py
@@ -19,6 +19,9 @@ class FakeCache:
if service_type == 'ceph-exporter':
return [FakeDaemonDescription('1.2.3.4', [9926], 'node0'),
FakeDaemonDescription('1.2.3.5', [9926], 'node1')]
+ if service_type == 'nvmeof':
+ return [FakeDaemonDescription('1.2.3.4', [10008], 'node0'),
+ FakeDaemonDescription('1.2.3.5', [10008], 'node1')]
return [FakeDaemonDescription('1.2.3.4', [9100], 'node0'),
FakeDaemonDescription('1.2.3.5', [9200], 'node1')]
@@ -171,6 +174,20 @@ class TestServiceDiscovery:
# check content
assert cfg[0]['targets'] == ['1.2.3.4:9926']
+ def test_get_sd_config_nvmeof(self):
+ mgr = FakeMgr()
+ root = Root(mgr, 5000, '0.0.0.0')
+ cfg = root.get_sd_config('nvmeof')
+
+ # check response structure
+ assert cfg
+ for entry in cfg:
+ assert 'labels' in entry
+ assert 'targets' in entry
+
+ # check content
+ assert cfg[0]['targets'] == ['1.2.3.4:10008']
+
def test_get_sd_config_invalid_service(self):
mgr = FakeMgr()
root = Root(mgr, 5000, '0.0.0.0')
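The 1.2.3.4:10008 target matches the prometheus_port = 10008 setting added to the nvmeof daemon configuration in test_services.py below, so service discovery hands Prometheus the same port the exporter listens on.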
diff --git a/src/pybind/mgr/cephadm/tests/test_services.py b/src/pybind/mgr/cephadm/tests/test_services.py
index 2300b288d..1265a39f6 100644
--- a/src/pybind/mgr/cephadm/tests/test_services.py
+++ b/src/pybind/mgr/cephadm/tests/test_services.py
@@ -376,6 +376,9 @@ port = {default_port}
enable_auth = False
state_update_notify = True
state_update_interval_sec = 5
+enable_prometheus_exporter = True
+prometheus_exporter_ssl = False
+prometheus_port = 10008
[ceph]
pool = {pool}
@@ -665,7 +668,9 @@ class TestMonitoring:
keepalived_password='12345',
virtual_ip="1.2.3.4/32",
backend_service='rgw.foo')) as _, \
- with_service(cephadm_module, PrometheusSpec('prometheus')) as _:
+ with_service(cephadm_module, PrometheusSpec('prometheus',
+ networks=['1.2.3.0/24'],
+ only_bind_port_on_networks=True)) as _:
y = dedent("""
# This file is generated by cephadm.
@@ -699,6 +704,10 @@ class TestMonitoring:
honor_labels: true
http_sd_configs:
- url: http://[::1]:8765/sd/prometheus/sd-config?service=ceph-exporter
+
+ - job_name: 'nvmeof'
+ http_sd_configs:
+ - url: http://[::1]:8765/sd/prometheus/sd-config?service=nvmeof
""").lstrip()
_run_cephadm.assert_called_with(
@@ -713,11 +722,12 @@ class TestMonitoring:
"deploy_arguments": [],
"params": {
'tcp_ports': [9095],
+ 'port_ips': {'8765': '1.2.3.1'}
},
"meta": {
'service_name': 'prometheus',
'ports': [9095],
- 'ip': None,
+ 'ip': '1.2.3.1',
'deployed_by': [],
'rank': None,
'rank_generation': None,
@@ -731,6 +741,7 @@ class TestMonitoring:
},
'retention_time': '15d',
'retention_size': '0',
+ 'ip_to_bind_to': '1.2.3.1',
},
}),
)
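With networks=['1.2.3.0/24'] and only_bind_port_on_networks=True, cephadm resolves a host address inside that network (1.2.3.1 in this test) and threads it through as both the service-discovery port mapping and prometheus' ip_to_bind_to. The containment itself is a one-liner to sanity-check:

import ipaddress
assert ipaddress.ip_address('1.2.3.1') in ipaddress.ip_network('1.2.3.0/24')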
@@ -855,6 +866,19 @@ class TestMonitoring:
password: sd_password
tls_config:
ca_file: root_cert.pem
+
+ - job_name: 'nvmeof'
+ honor_labels: true
+ scheme: https
+ tls_config:
+ ca_file: root_cert.pem
+ http_sd_configs:
+ - url: https://[::1]:8765/sd/prometheus/sd-config?service=nvmeof
+ basic_auth:
+ username: sd_user
+ password: sd_password
+ tls_config:
+ ca_file: root_cert.pem
""").lstrip()
_run_cephadm.assert_called_with(
@@ -892,6 +916,7 @@ class TestMonitoring:
},
'retention_time': '15d',
'retention_size': '0',
+ 'ip_to_bind_to': '',
'web_config': '/etc/prometheus/web.yml',
},
}),
@@ -1633,7 +1658,7 @@ class TestIngressService:
)
if enable_haproxy_protocol:
haproxy_txt += ' default-server send-proxy-v2\n'
- haproxy_txt += ' server nfs.foo.0 192.168.122.111:12049\n'
+ haproxy_txt += ' server nfs.foo.0 192.168.122.111:12049 check\n'
haproxy_expected_conf = {
'files': {'haproxy.cfg': haproxy_txt}
}
@@ -1783,7 +1808,7 @@ class TestIngressService:
'balance static-rr\n '
'option httpchk HEAD / HTTP/1.0\n '
'server '
- + haproxy_generated_conf[1][0] + ' 1.2.3.7:80 check weight 100\n'
+ + haproxy_generated_conf[1][0] + ' 1.2.3.7:80 check weight 100 inter 2s\n'
}
}
@@ -1908,7 +1933,7 @@ class TestIngressService:
'balance static-rr\n '
'option httpchk HEAD / HTTP/1.0\n '
'server '
- + haproxy_generated_conf[1][0] + ' 1::4:443 check weight 100\n'
+ + haproxy_generated_conf[1][0] + ' 1::4:443 check weight 100 inter 2s\n'
}
}
@@ -2032,7 +2057,7 @@ class TestIngressService:
'balance static-rr\n '
'option httpchk HEAD / HTTP/1.0\n '
'server '
- + haproxy_generated_conf[1][0] + ' 1.2.3.7:80 check weight 100\n'
+ + haproxy_generated_conf[1][0] + ' 1.2.3.7:80 check weight 100 inter 2s\n'
}
}
@@ -2411,7 +2436,7 @@ class TestIngressService:
' balance source\n'
' hash-type consistent\n'
' default-server send-proxy-v2\n'
- ' server nfs.foo.0 192.168.122.111:12049\n'
+ ' server nfs.foo.0 192.168.122.111:12049 check\n'
)
haproxy_expected_conf = {
'files': {'haproxy.cfg': haproxy_txt}
@@ -2431,6 +2456,7 @@ class TestIngressService:
' Delegations = false;\n'
" RecoveryBackend = 'rados_cluster';\n"
' Minor_Versions = 1, 2;\n'
+ ' IdmapConf = "/etc/ganesha/idmap.conf";\n'
'}\n'
'\n'
'RADOS_KV {\n'
@@ -2454,7 +2480,7 @@ class TestIngressService:
"%url rados://.nfs/foo/conf-nfs.foo"
)
nfs_expected_conf = {
- 'files': {'ganesha.conf': nfs_ganesha_txt},
+ 'files': {'ganesha.conf': nfs_ganesha_txt, 'idmap.conf': ''},
'config': '',
'extra_args': ['-N', 'NIV_EVENT'],
'keyring': (