Diffstat (limited to 'src/pybind/mgr/cephadm/tests/test_cephadm.py')

 src/pybind/mgr/cephadm/tests/test_cephadm.py | 73 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 63 insertions(+), 10 deletions(-)
diff --git a/src/pybind/mgr/cephadm/tests/test_cephadm.py b/src/pybind/mgr/cephadm/tests/test_cephadm.py
index 24fcb0280..2477de13e 100644
--- a/src/pybind/mgr/cephadm/tests/test_cephadm.py
+++ b/src/pybind/mgr/cephadm/tests/test_cephadm.py
@@ -400,6 +400,42 @@ class TestCephadm(object):
         assert 'myerror' in ''.join(evs)
 
+    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
+    def test_daemon_action_event_timestamp_update(self, cephadm_module: CephadmOrchestrator):
+        # Test to make sure that if a new daemon event is created with the same
+        # subject and message, the timestamp of the event is updated to let users
+        # know when it most recently occurred.
+        cephadm_module.service_cache_timeout = 10
+        with with_host(cephadm_module, 'test'):
+            with with_service(cephadm_module, RGWSpec(service_id='myrgw.foobar', unmanaged=True)) as _, \
+                    with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), 'test') as daemon_id:
+
+                d_name = 'rgw.' + daemon_id
+
+                now = str_to_datetime('2023-10-18T22:45:29.119250Z')
+                with mock.patch("cephadm.inventory.datetime_now", lambda: now):
+                    c = cephadm_module.daemon_action('redeploy', d_name)
+                    assert wait(cephadm_module,
+                                c) == f"Scheduled to redeploy rgw.{daemon_id} on host 'test'"
+
+                    CephadmServe(cephadm_module)._check_daemons()
+
+                    d_events = cephadm_module.events.get_for_daemon(d_name)
+                    assert len(d_events) == 1
+                    assert d_events[0].created == now
+
+                later = str_to_datetime('2023-10-18T23:46:37.119250Z')
+                with mock.patch("cephadm.inventory.datetime_now", lambda: later):
+                    c = cephadm_module.daemon_action('redeploy', d_name)
+                    assert wait(cephadm_module,
+                                c) == f"Scheduled to redeploy rgw.{daemon_id} on host 'test'"
+
+                    CephadmServe(cephadm_module)._check_daemons()
+
+                    d_events = cephadm_module.events.get_for_daemon(d_name)
+                    assert len(d_events) == 1
+                    assert d_events[0].created == later
+
     @pytest.mark.parametrize(
         "action",
         [
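For context, the behaviour this hunk pins down lives in cephadm's event store: a repeated event for the same daemon with an identical subject and message should refresh the existing record's timestamp rather than append a duplicate. A minimal sketch of that idea, using hypothetical names (EventStore, OrchestratorEvent) rather than the actual cephadm.inventory API:

    from datetime import datetime, timezone
    from typing import Dict, List

    class OrchestratorEvent:
        # Hypothetical stand-in for cephadm's per-daemon event record.
        def __init__(self, subject: str, message: str, created: datetime) -> None:
            self.subject = subject
            self.message = message
            self.created = created

    class EventStore:
        # Sketch: a duplicate (subject, message) pair bumps the timestamp of
        # the stored event instead of growing the list, which is what the two
        # len(d_events) == 1 assertions above rely on.
        def __init__(self) -> None:
            self._events: Dict[str, List[OrchestratorEvent]] = {}

        def add(self, daemon: str, subject: str, message: str) -> None:
            now = datetime.now(timezone.utc)
            for ev in self._events.setdefault(daemon, []):
                if ev.subject == subject and ev.message == message:
                    ev.created = now  # duplicate event: refresh timestamp only
                    return
            self._events[daemon].append(OrchestratorEvent(subject, message, now))

        def get_for_daemon(self, daemon: str) -> List[OrchestratorEvent]:
            return self._events.get(daemon, [])

    store = EventStore()
    store.add('rgw.foo', 'daemon:rgw.foo', 'scheduled to redeploy')
    store.add('rgw.foo', 'daemon:rgw.foo', 'scheduled to redeploy')
    assert len(store.get_for_daemon('rgw.foo')) == 1  # one event, newer timestamp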
@@ -1157,7 +1193,8 @@ class TestCephadm(object):
     @mock.patch('cephadm.services.osd.OSDService.driveselection_to_ceph_volume')
     @mock.patch('cephadm.services.osd.OsdIdClaims.refresh', lambda _: None)
     @mock.patch('cephadm.services.osd.OsdIdClaims.get', lambda _: {})
-    def test_limit_not_reached(self, d_to_cv, _run_cv_cmd, cephadm_module):
+    @mock.patch('cephadm.inventory.HostCache.get_daemons_by_service')
+    def test_limit_not_reached(self, _get_daemons_by_service, d_to_cv, _run_cv_cmd, cephadm_module):
         with with_host(cephadm_module, 'test'):
             dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                 data_devices=DeviceSelection(limit=5, rotational=1),
@@ -1167,12 +1204,14 @@ class TestCephadm(object):
'[{"data": "/dev/vdb", "data_size": "50.00 GB", "encryption": "None"}, {"data": "/dev/vdc", "data_size": "50.00 GB", "encryption": "None"}]']
d_to_cv.return_value = 'foo'
_run_cv_cmd.side_effect = async_side_effect((disks_found, '', 0))
+ _get_daemons_by_service.return_value = [DaemonDescription(daemon_type='osd', hostname='test', service_name='not_enough')]
preview = cephadm_module.osd_service.generate_previews([dg], 'test')
for osd in preview:
assert 'notes' in osd
assert osd['notes'] == [
- 'NOTE: Did not find enough disks matching filter on host test to reach data device limit (Found: 2 | Limit: 5)']
+ ('NOTE: Did not find enough disks matching filter on host test to reach '
+ 'data device limit\n(New Devices: 2 | Existing Matching Daemons: 1 | Limit: 5)')]
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
def test_prepare_drivegroup(self, cephadm_module):
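The reworded note reflects a change in how the device limit is evaluated during OSD previews: OSD daemons already deployed for the service now count toward the limit alongside newly discovered devices, which is why the test mocks get_daemons_by_service to return one existing daemon. A rough sketch of that arithmetic, with a hypothetical helper name (limit_note), not the actual OSDService code:

    def limit_note(host: str, new_devices: int, existing_daemons: int, limit: int) -> str:
        # Sketch: the limit check now compares (new devices + existing matching
        # daemons) against the configured data device limit.
        if new_devices + existing_daemons < limit:
            return ('NOTE: Did not find enough disks matching filter on host '
                    f'{host} to reach data device limit\n'
                    f'(New Devices: {new_devices} | Existing Matching Daemons: '
                    f'{existing_daemons} | Limit: {limit})')
        return ''

    # Matches the expectation in the test: 2 new devices + 1 existing daemon < 5.
    print(limit_note('test', new_devices=2, existing_daemons=1, limit=5))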
@@ -1251,7 +1290,11 @@ class TestCephadm(object):
     ))
     @mock.patch("cephadm.services.osd.OSD.exists", True)
     @mock.patch("cephadm.services.osd.RemoveUtil.get_pg_count", lambda _, __: 0)
-    def test_remove_osds(self, cephadm_module):
+    @mock.patch("cephadm.services.osd.RemoveUtil.get_weight")
+    @mock.patch("cephadm.services.osd.RemoveUtil.reweight_osd")
+    def test_remove_osds(self, _reweight_osd, _get_weight, cephadm_module):
+        osd_initial_weight = 2.1
+        _get_weight.return_value = osd_initial_weight
         with with_host(cephadm_module, 'test'):
             CephadmServe(cephadm_module)._refresh_host_daemons('test')
             c = cephadm_module.list_daemons()
@@ -1261,13 +1304,23 @@ class TestCephadm(object):
             out = wait(cephadm_module, c)
             assert out == ["Removed osd.0 from host 'test'"]
 
-            cephadm_module.to_remove_osds.enqueue(OSD(osd_id=0,
-                                                      replace=False,
-                                                      force=False,
-                                                      hostname='test',
-                                                      process_started_at=datetime_now(),
-                                                      remove_util=cephadm_module.to_remove_osds.rm_util
-                                                      ))
+            osd_0 = OSD(osd_id=0,
+                        replace=False,
+                        force=False,
+                        hostname='test',
+                        process_started_at=datetime_now(),
+                        remove_util=cephadm_module.to_remove_osds.rm_util
+                        )
+
+            cephadm_module.to_remove_osds.enqueue(osd_0)
+            _get_weight.assert_called()
+
+            # test that the OSD's original weight is restored when its removal is stopped
+            cephadm_module.stop_remove_osds([0])
+            _reweight_osd.assert_called_with(mock.ANY, osd_initial_weight)
+
+            # add OSD back to queue and test normal removal queue processing
+            cephadm_module.to_remove_osds.enqueue(osd_0)
             cephadm_module.to_remove_osds.process_removal_queue()
             assert cephadm_module.to_remove_osds == OSDRemovalQueue(cephadm_module)
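The new assertions exercise reweight bookkeeping during removal: the OSD's CRUSH weight is read when it is queued (hence _get_weight.assert_called()), and stopping the removal restores that recorded weight (hence _reweight_osd.assert_called_with(mock.ANY, osd_initial_weight)). A self-contained sketch of that flow, using hypothetical class names rather than the real cephadm.services.osd types, and assuming (not shown in the diff) that draining reweights toward zero:

    class FakeRemoveUtil:
        # Hypothetical stand-in for cephadm's RemoveUtil, just for this sketch.
        def get_weight(self, osd) -> float:
            return 2.1

        def reweight_osd(self, osd, weight: float) -> None:
            print(f"reweight osd.{osd.osd_id} -> {weight}")

    class QueuedOSD:
        # Sketch of the bookkeeping the test checks: capture the CRUSH weight
        # at enqueue time, drain by reweighting to 0 (an assumption here), and
        # restore the captured weight if the removal is cancelled.
        def __init__(self, osd_id: int, rm_util: FakeRemoveUtil) -> None:
            self.osd_id = osd_id
            self.rm_util = rm_util
            self.original_weight = rm_util.get_weight(self)  # read at enqueue

        def start_draining(self) -> None:
            self.rm_util.reweight_osd(self, 0.0)  # push PGs off the OSD

        def stop(self) -> None:
            # removal cancelled: restore the weight recorded at enqueue time
            self.rm_util.reweight_osd(self, self.original_weight)

    osd = QueuedOSD(0, FakeRemoveUtil())
    osd.start_draining()  # reweight osd.0 -> 0.0
    osd.stop()            # reweight osd.0 -> 2.1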