Diffstat (limited to 'src/ceph-volume/ceph_volume/tests/devices')
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/__init__.py                 0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py             0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py      442
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py         306
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py          8
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py         52
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py     59
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py       352
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py      2299
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py       189
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py        45
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py           241
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/raw/__init__.py             0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py          238
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py        97
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py   200
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py        71
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/simple/test_trigger.py     45
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/test_zap.py                38
19 files changed, 4682 insertions, 0 deletions
diff --git a/src/ceph-volume/ceph_volume/tests/devices/__init__.py b/src/ceph-volume/ceph_volume/tests/devices/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/__init__.py
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py
new file mode 100644
index 000000000..2237f259e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py
@@ -0,0 +1,442 @@
+import pytest
+from copy import deepcopy
+from ceph_volume.devices.lvm import activate
+from ceph_volume.api import lvm as api
+from ceph_volume.tests.conftest import Capture
+
+
+class Args(object):
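+ """A minimal stand-in for the parsed argparse namespace handed to activate()."""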
+
+ def __init__(self, **kw):
+ # default flags
+ self.bluestore = False
+ self.filestore = False
+ self.no_systemd = False
+ self.auto_detect_objectstore = None
+ for k, v in kw.items():
+ setattr(self, k, v)
+
+
+class TestActivate(object):
+
+ # These tests are very functional, hence the heavy patching: it is hard to
+ # test the negative side effects with an actual functional run, so we must
+ # set up a perfect scenario here to check that activation really works
+ # with/without an osd_id.
+ def test_no_osd_id_matches_fsid(self, is_root, monkeypatch, capture):
+ FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(FooVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: volumes)
+ monkeypatch.setattr(activate, 'activate_filestore', capture)
+ args = Args(osd_id=None, osd_fsid='1234', filestore=True)
+ activate.Activate([]).activate(args)
+ assert capture.calls[0]['args'][0] == [FooVolume]
+
+ def test_no_osd_id_matches_fsid_bluestore(self, is_root, monkeypatch, capture):
+ FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(FooVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: volumes)
+ monkeypatch.setattr(activate, 'activate_bluestore', capture)
+ args = Args(osd_id=None, osd_fsid='1234', bluestore=True)
+ activate.Activate([]).activate(args)
+ assert capture.calls[0]['args'][0] == [FooVolume]
+
+ def test_no_osd_id_no_matching_fsid(self, is_root, monkeypatch, capture):
+ FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_fsid=1111")
+ volumes = []
+ volumes.append(FooVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: [])
+ monkeypatch.setattr(api, 'get_single_lv', lambda **kwargs: [])
+ monkeypatch.setattr(activate, 'activate_filestore', capture)
+
+ args = Args(osd_id=None, osd_fsid='2222')
+ with pytest.raises(RuntimeError):
+ activate.Activate([]).activate(args)
+
+ def test_osd_id_no_osd_fsid(self, is_root):
+ args = Args(osd_id=42, osd_fsid=None)
+ with pytest.raises(RuntimeError) as result:
+ activate.Activate([]).activate(args)
+ assert result.value.args[0] == 'could not activate osd.42, please provide the osd_fsid too'
+
+ def test_no_osd_id_no_osd_fsid(self, is_root):
+ args = Args(osd_id=None, osd_fsid=None)
+ with pytest.raises(RuntimeError) as result:
+ activate.Activate([]).activate(args)
+ assert result.value.args[0] == 'Please provide both osd_id and osd_fsid'
+
+ def test_filestore_no_systemd(self, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
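+ # capture systemctl interactions instead of running them, so the test can
+ # assert below that --no-systemd skips both enable and start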
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ JournalVolume = api.Volume(
+ lv_name='journal',
+ lv_path='/dev/vg/journal',
+ lv_uuid='000',
+ lv_tags=','.join([
+ "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
+ "ceph.journal_uuid=000", "ceph.type=journal",
+ "ceph.osd_id=0", "ceph.osd_fsid=1234"])
+ )
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_uuid='001',
+ lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
+ "journal,ceph.journal_uuid=000,ceph.type=data," + \
+ "ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ volumes.append(JournalVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=True, filestore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls == []
+ assert fake_start_osd.calls == []
+
+ def test_filestore_no_systemd_autodetect(self, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ JournalVolume = api.Volume(
+ lv_name='journal',
+ lv_path='/dev/vg/journal',
+ lv_uuid='000',
+ lv_tags=','.join([
+ "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
+ "ceph.journal_uuid=000", "ceph.type=journal",
+ "ceph.osd_id=0", "ceph.osd_fsid=1234"])
+ )
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_uuid='001',
+ lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
+ "journal,ceph.journal_uuid=000,ceph.type=data," + \
+ "ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ volumes.append(JournalVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=True,
+ filestore=True, auto_detect_objectstore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls == []
+ assert fake_start_osd.calls == []
+
+ def test_filestore_systemd_autodetect(self, is_root, monkeypatch, capture):
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ JournalVolume = api.Volume(
+ lv_name='journal',
+ lv_path='/dev/vg/journal',
+ lv_uuid='000',
+ lv_tags=','.join([
+ "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
+ "ceph.journal_uuid=000", "ceph.type=journal",
+ "ceph.osd_id=0","ceph.osd_fsid=1234"])
+ )
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_uuid='001',
+ lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
+ "journal,ceph.journal_uuid=000,ceph.type=data," + \
+ "ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ volumes.append(JournalVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
+ filestore=True, auto_detect_objectstore=False)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls != []
+ assert fake_start_osd.calls != []
+
+ def test_filestore_systemd(self, is_root, monkeypatch, capture):
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ JournalVolume = api.Volume(
+ lv_name='journal',
+ lv_path='/dev/vg/journal',
+ lv_uuid='000',
+ lv_tags=','.join([
+ "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
+ "ceph.journal_uuid=000", "ceph.type=journal",
+ "ceph.osd_id=0","ceph.osd_fsid=1234"])
+ )
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_uuid='001',
+ lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
+ "journal,ceph.journal_uuid=000,ceph.type=data," + \
+ "ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ volumes.append(JournalVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
+ filestore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls != []
+ assert fake_start_osd.calls != []
+
+ def test_bluestore_no_systemd(self, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000," + \
+ "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=True, bluestore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls == []
+ assert fake_start_osd.calls == []
+
+ def test_bluestore_systemd(self, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000," + \
+ "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
+ bluestore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls != []
+ assert fake_start_osd.calls != []
+
+ def test_bluestore_no_systemd_autodetect(self, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_tags="ceph.cluster_name=ceph,,ceph.block_uuid=000," + \
+ "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=True,
+ bluestore=True, auto_detect_objectstore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls == []
+ assert fake_start_osd.calls == []
+
+ def test_bluestore_systemd_autodetect(self, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.path_is_mounted',
+ lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw:
+ True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000," + \
+ "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
+ bluestore=True, auto_detect_objectstore=False)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls != []
+ assert fake_start_osd.calls != []
+
+
+class TestActivateFlags(object):
+
+ def test_default_objectstore(self, capture):
+ args = ['0', 'asdf-ljh-asdf']
+ activation = activate.Activate(args)
+ activation.activate = capture
+ activation.main()
+ parsed_args = capture.calls[0]['args'][0]
+ assert parsed_args.filestore is False
+ assert parsed_args.bluestore is False
+
+ def test_uses_filestore(self, capture):
+ args = ['--filestore', '0', 'asdf-ljh-asdf']
+ activation = activate.Activate(args)
+ activation.activate = capture
+ activation.main()
+ parsed_args = capture.calls[0]['args'][0]
+ assert parsed_args.filestore is True
+ assert parsed_args.bluestore is False
+
+ def test_uses_bluestore(self, capture):
+ args = ['--bluestore', '0', 'asdf-ljh-asdf']
+ activation = activate.Activate(args)
+ activation.activate = capture
+ activation.main()
+ parsed_args = capture.calls[0]['args'][0]
+ assert parsed_args.filestore is False
+ assert parsed_args.bluestore is True
+
+
+class TestActivateAll(object):
+
+ def test_does_not_detect_osds(self, capsys, is_root, capture, monkeypatch):
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: {})
+ args = ['--all']
+ activation = activate.Activate(args)
+ activation.main()
+ out, err = capsys.readouterr()
+ assert 'Was unable to find any OSDs to activate' in err
+ assert 'Verify OSDs are present with ' in err
+
+ def test_detects_running_osds(self, capsys, is_root, capture, monkeypatch):
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report)
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.systemctl.osd_is_active', lambda x: True)
+ args = ['--all']
+ activation = activate.Activate(args)
+ activation.main()
+ out, err = capsys.readouterr()
+ assert 'a8789a96ce8b process is active. Skipping activation' in err
+ assert 'b8218eaa1634 process is active. Skipping activation' in err
+
+ def test_detects_osds_to_activate_systemd(self, is_root, capture, monkeypatch):
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report)
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.systemctl.osd_is_active', lambda x: False)
+ args = ['--all']
+ activation = activate.Activate(args)
+ activation.activate = capture
+ activation.main()
+ calls = sorted(capture.calls, key=lambda x: x['kwargs']['osd_id'])
+ assert calls[0]['kwargs']['osd_id'] == '0'
+ assert calls[0]['kwargs']['osd_fsid'] == '957d22b7-24ce-466a-9883-b8218eaa1634'
+ assert calls[1]['kwargs']['osd_id'] == '1'
+ assert calls[1]['kwargs']['osd_fsid'] == 'd0f3e4ad-e52a-4520-afc0-a8789a96ce8b'
+
+ def test_detects_osds_to_activate_no_systemd(self, is_root, capture, monkeypatch):
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report)
+ args = ['--all', '--no-systemd']
+ activation = activate.Activate(args)
+ activation.activate = capture
+ activation.main()
+ calls = sorted(capture.calls, key=lambda x: x['kwargs']['osd_id'])
+ assert calls[0]['kwargs']['osd_id'] == '0'
+ assert calls[0]['kwargs']['osd_fsid'] == '957d22b7-24ce-466a-9883-b8218eaa1634'
+ assert calls[1]['kwargs']['osd_id'] == '1'
+ assert calls[1]['kwargs']['osd_fsid'] == 'd0f3e4ad-e52a-4520-afc0-a8789a96ce8b'
+
+#
+# Activate All fixture: a canned report (shaped like the JSON output of
+# `ceph-volume lvm list`) describing two bluestore OSDs, osd.0 and osd.1
+#
+
+direct_report = {
+ "0": [
+ {
+ "lv_name": "osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "lv_path": "/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "lv_tags": "ceph.block_device=/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634,ceph.block_uuid=6MixOd-2Q1I-f8K3-PPOq-UJGV-L3A0-0XwUm4,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d4962338-46ff-4cd5-8ea6-c033dbdc5b44,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=957d22b7-24ce-466a-9883-b8218eaa1634,ceph.osd_id=0,ceph.type=block",
+ "lv_uuid": "6MixOd-2Q1I-f8K3-PPOq-UJGV-L3A0-0XwUm4",
+ "name": "osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "path": "/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "tags": {
+ "ceph.block_device": "/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "ceph.block_uuid": "6MixOd-2Q1I-f8K3-PPOq-UJGV-L3A0-0XwUm4",
+ "ceph.cephx_lockbox_secret": "",
+ "ceph.cluster_fsid": "d4962338-46ff-4cd5-8ea6-c033dbdc5b44",
+ "ceph.cluster_name": "ceph",
+ "ceph.crush_device_class": "",
+ "ceph.encrypted": "0",
+ "ceph.osd_fsid": "957d22b7-24ce-466a-9883-b8218eaa1634",
+ "ceph.osd_id": "0",
+ "ceph.type": "block"
+ },
+ "type": "block",
+ "vg_name": "ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44"
+ }
+ ],
+ "1": [
+ {
+ "lv_name": "osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "lv_path": "/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "lv_tags": "ceph.block_device=/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b,ceph.block_uuid=1igwLb-ZlmV-eLgp-hapx-c1Hr-M5gz-sHjnyW,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d4962338-46ff-4cd5-8ea6-c033dbdc5b44,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=d0f3e4ad-e52a-4520-afc0-a8789a96ce8b,ceph.osd_id=1,ceph.type=block",
+ "lv_uuid": "1igwLb-ZlmV-eLgp-hapx-c1Hr-M5gz-sHjnyW",
+ "name": "osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "path": "/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "tags": {
+ "ceph.block_device": "/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "ceph.block_uuid": "1igwLb-ZlmV-eLgp-hapx-c1Hr-M5gz-sHjnyW",
+ "ceph.cephx_lockbox_secret": "",
+ "ceph.cluster_fsid": "d4962338-46ff-4cd5-8ea6-c033dbdc5b44",
+ "ceph.cluster_name": "ceph",
+ "ceph.crush_device_class": "",
+ "ceph.encrypted": "0",
+ "ceph.osd_fsid": "d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "ceph.osd_id": "1",
+ "ceph.type": "block"
+ },
+ "type": "block",
+ "vg_name": "ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532"
+ }
+ ]
+}
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py
new file mode 100644
index 000000000..25c8a990c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py
@@ -0,0 +1,306 @@
+import pytest
+import json
+import random
+
+from argparse import ArgumentError
+from mock import MagicMock, patch
+
+from ceph_volume.devices.lvm import batch
+from ceph_volume.util import arg_validators
+
+
+class TestBatch(object):
+
+ def test_batch_instance(self, is_root):
+ b = batch.Batch([])
+ b.main()
+
+ def test_invalid_osd_ids_passed(self):
+ with pytest.raises(SystemExit):
+ batch.Batch(argv=['--osd-ids', '1', 'foo']).main()
+
+ def test_disjoint_device_lists(self, factory):
+ device1 = factory(used_by_ceph=False, available=True, abspath="/dev/sda")
+ device2 = factory(used_by_ceph=False, available=True, abspath="/dev/sdb")
+ devices = [device1, device2]
+ db_devices = [device2]
+ with pytest.raises(Exception) as disjoint_ex:
+ batch.ensure_disjoint_device_lists(devices, db_devices)
+ assert 'Device lists are not disjoint' in str(disjoint_ex.value)
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ def test_reject_partition(self, mocked_device):
+ mocked_device.return_value = MagicMock(
+ is_partition=True,
+ has_fs=False,
+ is_lvm_member=False,
+ has_gpt_headers=False,
+ has_partitions=False,
+ )
+ with pytest.raises(ArgumentError):
+ arg_validators.ValidBatchDevice()('foo')
+
+ @pytest.mark.parametrize('format_', ['pretty', 'json', 'json-pretty'])
+ def test_report(self, format_, factory, conf_ceph_stub, mock_device_generator):
+ # just ensure reporting works
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ devs = [mock_device_generator() for _ in range(5)]
+ args = factory(data_slots=1,
+ osds_per_device=1,
+ osd_ids=[],
+ report=True,
+ format=format_,
+ devices=devs,
+ db_devices=[],
+ wal_devices=[],
+ bluestore=True,
+ block_db_size="1G",
+ dmcrypt=True,
+ )
+ b = batch.Batch([])
+ plan = b.get_plan(args)
+ b.args = args
+ b.report(plan)
+
+ @pytest.mark.parametrize('format_', ['json', 'json-pretty'])
+ def test_json_report_valid_empty(self, format_, factory, conf_ceph_stub, mock_device_generator):
+ # ensure json reports are valid when empty
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ devs = []
+ args = factory(data_slots=1,
+ osds_per_device=1,
+ osd_ids=[],
+ report=True,
+ format=format_,
+ devices=devs,
+ db_devices=[],
+ wal_devices=[],
+ bluestore=True,
+ block_db_size="1G",
+ dmcrypt=True,
+ )
+ b = batch.Batch([])
+ plan = b.get_plan(args)
+ b.args = args
+ report = b._create_report(plan)
+ json.loads(report)
+
+ @pytest.mark.parametrize('format_', ['json', 'json-pretty'])
+ def test_json_report_valid_empty_unavailable_fast(self, format_, factory, conf_ceph_stub, mock_device_generator):
+ # ensure json reports are valid when the db device is unavailable
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ devs = [mock_device_generator() for _ in range(5)]
+ fast_devs = [mock_device_generator()]
+ fast_devs[0].available_lvm = False
+ args = factory(data_slots=1,
+ osds_per_device=1,
+ osd_ids=[],
+ report=True,
+ format=format_,
+ devices=devs,
+ db_devices=fast_devs,
+ wal_devices=[],
+ bluestore=True,
+ block_db_size="1G",
+ dmcrypt=True,
+ )
+ b = batch.Batch([])
+ plan = b.get_plan(args)
+ b.args = args
+ report = b._create_report(plan)
+ json.loads(report)
+
+ @pytest.mark.parametrize('format_', ['json', 'json-pretty'])
+ def test_json_report_valid_empty_unavailable_very_fast(self, format_, factory, conf_ceph_stub, mock_device_generator):
+ # ensure json reports are valid when empty
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ devs = [mock_device_generator() for _ in range(5)]
+ fast_devs = [mock_device_generator()]
+ very_fast_devs = [mock_device_generator()]
+ very_fast_devs[0].available_lvm = False
+ args = factory(data_slots=1,
+ osds_per_device=1,
+ osd_ids=[],
+ report=True,
+ format=format_,
+ devices=devs,
+ db_devices=fast_devs,
+ wal_devices=very_fast_devs,
+ bluestore=True,
+ block_db_size="1G",
+ dmcrypt=True,
+ )
+ b = batch.Batch([])
+ plan = b.get_plan(args)
+ b.args = args
+ report = b._create_report(plan)
+ json.loads(report)
+
+ @pytest.mark.parametrize('rota', [0, 1])
+ def test_batch_sort_full(self, factory, rota):
+ device1 = factory(used_by_ceph=False, available=True, rotational=rota, abspath="/dev/sda")
+ device2 = factory(used_by_ceph=False, available=True, rotational=rota, abspath="/dev/sdb")
+ device3 = factory(used_by_ceph=False, available=True, rotational=rota, abspath="/dev/sdc")
+ devices = [device1, device2, device3]
+ args = factory(report=True,
+ devices=devices,
+ filestore=False,
+ )
+ b = batch.Batch([])
+ b.args = args
+ b._sort_rotational_disks()
+ assert len(b.args.devices) == 3
+
+ @pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
+ def test_batch_sort_mixed(self, factory, objectstore):
+ device1 = factory(used_by_ceph=False, available=True, rotational=1, abspath="/dev/sda")
+ device2 = factory(used_by_ceph=False, available=True, rotational=1, abspath="/dev/sdb")
+ device3 = factory(used_by_ceph=False, available=True, rotational=0, abspath="/dev/sdc")
+ devices = [device1, device2, device3]
+ args = factory(report=True,
+ devices=devices,
+ filestore=(objectstore == 'filestore'),
+ )
+ b = batch.Batch([])
+ b.args = args
+ b._sort_rotational_disks()
+ assert len(b.args.devices) == 2
+ if objectstore == 'bluestore':
+ assert len(b.args.db_devices) == 1
+ else:
+ assert len(b.args.journal_devices) == 1
+
+ def test_get_physical_osds_return_len(self, factory,
+ mock_devices_available,
+ conf_ceph_stub,
+ osds_per_device):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ args = factory(data_slots=1, osds_per_device=osds_per_device,
+ osd_ids=[], dmcrypt=False)
+ osds = batch.get_physical_osds(mock_devices_available, args)
+ assert len(osds) == len(mock_devices_available) * osds_per_device
+
+ def test_get_physical_osds_rel_size(self, factory,
+ mock_devices_available,
+ conf_ceph_stub,
+ osds_per_device):
+ args = factory(data_slots=1, osds_per_device=osds_per_device,
+ osd_ids=[], dmcrypt=False)
+ osds = batch.get_physical_osds(mock_devices_available, args)
+ for osd in osds:
+ assert osd.data[1] == 1.0 / osds_per_device
+
+ def test_get_physical_osds_abs_size(self, factory,
+ mock_devices_available,
+ conf_ceph_stub,
+ osds_per_device):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ args = factory(data_slots=1, osds_per_device=osds_per_device,
+ osd_ids=[], dmcrypt=False)
+ osds = batch.get_physical_osds(mock_devices_available, args)
+ for osd, dev in zip(osds, mock_devices_available):
+ assert osd.data[2] == int(dev.vg_size[0] / osds_per_device)
+
+ def test_get_physical_osds_osd_ids(self, factory,
+ mock_devices_available,
+ osds_per_device):
+ pass
+
+ def test_get_physical_fast_allocs_length(self, factory,
+ conf_ceph_stub,
+ mock_devices_available):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ args = factory(block_db_slots=None, get_block_db_size=None)
+ fast = batch.get_physical_fast_allocs(mock_devices_available,
+ 'block_db', 2, 2, args)
+ assert len(fast) == 2
+
+ def test_get_physical_fast_allocs_abs_size(self, factory,
+ conf_ceph_stub,
+ mock_devices_available):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ args = factory(block_db_slots=None, get_block_db_size=None)
+ fasts = batch.get_physical_fast_allocs(mock_devices_available,
+ 'block_db', 2, 2, args)
+ for fast, dev in zip(fasts, mock_devices_available):
+ assert fast[2] == int(dev.vg_size[0] / 2)
+
+ def test_batch_fast_allocations_one_block_db_length(self, factory, conf_ceph_stub,
+ mock_lv_device_generator):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+
+ b = batch.Batch([])
+ db_lv_devices = [mock_lv_device_generator()]
+ fast = b.fast_allocations(db_lv_devices, 1, 0, 'block_db')
+ assert len(fast) == 1
+
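+ # the slots/num_devs parametrization expands to every pair satisfying
+ # 1 <= num_devs <= slots <= 6, e.g. (1, 1), (2, 1), (2, 2), ...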
+ @pytest.mark.parametrize('occupied_prior', range(7))
+ @pytest.mark.parametrize('slots,num_devs',
+ [l for sub in [list(zip([x]*x, range(1, x + 1))) for x in range(1,7)] for l in sub])
+ def test_get_physical_fast_allocs_length_existing(self,
+ num_devs,
+ slots,
+ occupied_prior,
+ factory,
+ conf_ceph_stub,
+ mock_device_generator):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ occupied_prior = min(occupied_prior, slots)
+ devs = [mock_device_generator() for _ in range(num_devs)]
+ already_assigned = 0
+ while already_assigned < occupied_prior:
+ dev_i = random.randint(0, num_devs - 1)
+ dev = devs[dev_i]
+ if len(dev.lvs) < occupied_prior:
+ dev.lvs.append('foo')
+ dev.path = '/dev/bar'
+ already_assigned = sum([len(d.lvs) for d in devs])
+ args = factory(block_db_slots=None, get_block_db_size=None)
+ expected_num_osds = max(len(devs) * slots - occupied_prior, 0)
+ fast = batch.get_physical_fast_allocs(devs,
+ 'block_db', slots,
+ expected_num_osds, args)
+ assert len(fast) == expected_num_osds
+ expected_assignment_on_used_devices = sum([slots - len(d.lvs) for d in devs if len(d.lvs) > 0])
+ assert len([f for f in fast if f[0] == '/dev/bar']) == expected_assignment_on_used_devices
+ assert len([f for f in fast if f[0] != '/dev/bar']) == expected_num_osds - expected_assignment_on_used_devices
+
+ def test_get_lvm_osds_return_len(self, factory,
+ mock_lv_device_generator,
+ conf_ceph_stub,
+ osds_per_device):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ args = factory(data_slots=1, osds_per_device=osds_per_device,
+ osd_ids=[], dmcrypt=False)
+ mock_lvs = [mock_lv_device_generator()]
+ osds = batch.get_lvm_osds(mock_lvs, args)
+ assert len(osds) == 1
+
+
+class TestBatchOsd(object):
+
+ def test_osd_class_ctor(self):
+ osd = batch.Batch.OSD('/dev/data', 1, '5G', 1, 1, None)
+ assert osd.data == batch.Batch.OSD.VolSpec('/dev/data',
+ 1,
+ '5G',
+ 1,
+ 'data')
+
+ def test_add_fast(self):
+ osd = batch.Batch.OSD('/dev/data', 1, '5G', 1, 1, None)
+ osd.add_fast_device('/dev/db', 1, '5G', 1, 'block_db')
+ assert osd.fast == batch.Batch.OSD.VolSpec('/dev/db',
+ 1,
+ '5G',
+ 1,
+ 'block_db')
+
+ def test_add_very_fast(self):
+ osd = batch.Batch.OSD('/dev/data', 1, '5G', 1, 1, None)
+ osd.add_very_fast_device('/dev/wal', 1, '5G', 1)
+ assert osd.very_fast == batch.Batch.OSD.VolSpec('/dev/wal',
+ 1,
+ '5G',
+ 1,
+ 'block_wal')
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py
new file mode 100644
index 000000000..fe792d5ab
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py
@@ -0,0 +1,8 @@
+from ceph_volume.devices.lvm import common
+
+
+class TestCommon(object):
+
+ def test_get_default_args_smoke(self):
+ default_args = common.get_default_args()
+ assert default_args
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py
new file mode 100644
index 000000000..1665d76c3
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py
@@ -0,0 +1,52 @@
+import pytest
+from ceph_volume.devices import lvm
+from mock import patch
+
+
+class TestCreate(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.create.Create([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Create an OSD by assigning an ID and FSID' in stdout
+
+ def test_main_shows_full_help(self, capsys):
+ with pytest.raises(SystemExit):
+ lvm.create.Create(argv=['--help']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Use the filestore objectstore' in stdout
+ assert 'Use the bluestore objectstore' in stdout
+ assert 'A physical device or logical' in stdout
+
+ @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
+ def test_excludes_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.create.Create(argv=['--data', '/dev/sdfoo', '--filestore', '--bluestore']).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --filestore (filestore) with --bluestore (bluestore)'
+ assert expected in stderr
+
+ @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
+ def test_excludes_other_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.create.Create(argv=[
+ '--bluestore', '--data', '/dev/sdfoo',
+ '--journal', '/dev/sf14',
+ ]).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --bluestore (bluestore) with --journal (filestore)'
+ assert expected in stderr
+
+ @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
+ def test_excludes_block_and_journal_flags(self, m_has_bs_label, fake_call, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.create.Create(argv=[
+ '--bluestore', '--data', '/dev/sdfoo', '--block.db', 'vg/ceph1',
+ '--journal', '/dev/sf14',
+ ]).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --block.db (bluestore) with --journal (filestore)'
+ assert expected in stderr
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py
new file mode 100644
index 000000000..4b8304ce6
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py
@@ -0,0 +1,59 @@
+import pytest
+from mock.mock import patch
+from ceph_volume.api import lvm
+from ceph_volume.devices.lvm import deactivate
+
+class TestDeactivate(object):
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ def test_no_osd(self, p_get_lvs):
+ p_get_lvs.return_value = []
+ with pytest.raises(StopIteration):
+ deactivate.deactivate_osd(0)
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ @patch("ceph_volume.util.system.unmount_tmpfs")
+ def test_unmount_tmpfs_called_osd_id(self, p_u_tmpfs, p_get_lvs):
+ FooVolume = lvm.Volume(
+ lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_id=0,ceph.cluster_name=foo,ceph.type=data")
+ p_get_lvs.return_value = [FooVolume]
+
+ deactivate.deactivate_osd(0)
+ p_u_tmpfs.assert_called_with(
+ '/var/lib/ceph/osd/{}-{}'.format('foo', 0))
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ @patch("ceph_volume.util.system.unmount_tmpfs")
+ def test_unmount_tmpfs_called_osd_uuid(self, p_u_tmpfs, p_get_lvs):
+ FooVolume = lvm.Volume(
+ lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_fsid=0,ceph.osd_id=1,ceph.cluster_name=foo,ceph.type=data")
+ p_get_lvs.return_value = [FooVolume]
+
+ deactivate.deactivate_osd(None, 0)
+ p_u_tmpfs.assert_called_with(
+ '/var/lib/ceph/osd/{}-{}'.format('foo', 1))
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ @patch("ceph_volume.util.system.unmount_tmpfs")
+ @patch("ceph_volume.util.encryption.dmcrypt_close")
+ def test_no_crypt_no_dmclose(self, p_dm_close, p_u_tmpfs, p_get_lvs):
+ FooVolume = lvm.Volume(
+ lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_id=0,ceph.cluster_name=foo,ceph.type=data")
+ p_get_lvs.return_value = [FooVolume]
+
+ deactivate.deactivate_osd(0)
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ @patch("ceph_volume.util.system.unmount_tmpfs")
+ @patch("ceph_volume.util.encryption.dmcrypt_close")
+ def test_crypt_dmclose(self, p_dm_close, p_u_tmpfs, p_get_lvs):
+ FooVolume = lvm.Volume(
+ lv_name='foo', lv_path='/dev/vg/foo', lv_uuid='123',
+ lv_tags="ceph.osd_id=0,ceph.encrypted=1,ceph.cluster_name=foo,ceph.type=data")
+ p_get_lvs.return_value = [FooVolume]
+
+ deactivate.deactivate_osd(0)
+ p_dm_close.assert_called_with('123')
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py
new file mode 100644
index 000000000..7e4d963c8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py
@@ -0,0 +1,352 @@
+import pytest
+from ceph_volume.devices import lvm
+from ceph_volume.api import lvm as api
+
+# TODO: add tests for the following commands:
+# ceph-volume list
+# ceph-volume list <path-to-pv>
+# ceph-volume list <path-to-vg>
+# ceph-volume list <path-to-lv>
+
+class TestReadableTag(object):
+
+ def test_dots_get_replaced(self):
+ result = lvm.listing.readable_tag('ceph.foo')
+ assert result == 'foo'
+
+ def test_underscores_are_replaced_with_spaces(self):
+ result = lvm.listing.readable_tag('ceph.long_tag')
+ assert result == 'long tag'
+
+
+class TestPrettyReport(object):
+
+ def test_is_empty(self, capsys):
+ lvm.listing.pretty_report({})
+ stdout, stderr = capsys.readouterr()
+ assert stdout == '\n'
+
+ def test_type_and_path_are_reported(self, capsys):
+ lvm.listing.pretty_report({0: [
+ {'type': 'data', 'path': '/dev/sda1', 'devices': ['/dev/sda']}
+ ]})
+ stdout, stderr = capsys.readouterr()
+ assert '[data] /dev/sda1' in stdout
+
+ def test_osd_id_header_is_reported(self, capsys):
+ lvm.listing.pretty_report({0: [
+ {'type': 'data', 'path': '/dev/sda1', 'devices': ['/dev/sda']}
+ ]})
+ stdout, stderr = capsys.readouterr()
+ assert '====== osd.0 =======' in stdout
+
+ def test_tags_are_included(self, capsys):
+ lvm.listing.pretty_report(
+ {0: [{
+ 'type': 'data',
+ 'path': '/dev/sda1',
+ 'tags': {'ceph.osd_id': '0'},
+ 'devices': ['/dev/sda'],
+ }]}
+ )
+ stdout, stderr = capsys.readouterr()
+ assert 'osd id' in stdout
+
+ def test_devices_are_comma_separated(self, capsys):
+ lvm.listing.pretty_report({0: [
+ {'type': 'data', 'path': '/dev/sda1', 'devices': ['/dev/sda', '/dev/sdb1']}
+ ]})
+ stdout, stderr = capsys.readouterr()
+ assert '/dev/sda,/dev/sdb1' in stdout
+
+
+class TestList(object):
+
+ def test_empty_full_json_zero_exit_status(self, fake_call, is_root, factory, capsys):
+ args = factory(format='json', device=None)
+ lvm.listing.List([]).list(args)
+ stdout, stderr = capsys.readouterr()
+ assert stdout == '{}\n'
+
+ def test_empty_device_json_zero_exit_status(self, is_root, factory, capsys):
+ args = factory(format='json', device='/dev/sda1')
+ lvm.listing.List([]).list(args)
+ stdout, stderr = capsys.readouterr()
+ assert stdout == '{}\n'
+
+ def test_empty_full_zero_exit_status(self, fake_call, is_root, factory):
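+ # the pretty format exits via SystemExit when there is nothing to report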
+ args = factory(format='pretty', device=None)
+ with pytest.raises(SystemExit):
+ lvm.listing.List([]).list(args)
+
+ def test_empty_device_zero_exit_status(self, is_root, factory):
+ args = factory(format='pretty', device='/dev/sda1')
+ with pytest.raises(SystemExit):
+ lvm.listing.List([]).list(args)
+
+
+class TestFullReport(object):
+
+ def test_no_ceph_lvs(self, monkeypatch):
+ # ceph lvs are detected by looking into its tags
+ osd = api.Volume(lv_name='volume1', lv_path='/dev/VolGroup/lv',
+ lv_tags={})
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).full_report()
+ assert result == {}
+
+ def test_ceph_data_lv_reported(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ pv = api.PVolume(pv_name='/dev/sda1', pv_tags={}, pv_uuid="0000",
+ vg_name='VolGroup', lv_uuid="aaaa")
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(lvm.listing.api, 'get_single_pv', lambda **kwargs: pv)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).full_report()
+ assert result['0'][0]['name'] == 'volume1'
+
+ def test_ceph_journal_lv_reported(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ journal_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=journal'
+ pv = api.PVolume(pv_name='/dev/sda1', pv_tags={}, pv_uuid="0000",
+ vg_name="VolGroup", lv_uuid="aaaa")
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
+ journal = api.Volume(
+ lv_name='journal', lv_uuid='x', lv_tags=journal_tags,
+ lv_path='/dev/VolGroup/journal', vg_name='VolGroup')
+ volumes = []
+ volumes.append(osd)
+ volumes.append(journal)
+ monkeypatch.setattr(lvm.listing.api, 'get_single_pv', lambda **kwargs: pv)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).full_report()
+ assert result['0'][0]['name'] == 'volume1'
+ assert result['0'][1]['name'] == 'journal'
+
+ def test_ceph_wal_lv_reported(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.wal_uuid=x,ceph.type=data'
+ wal_tags = 'ceph.osd_id=0,ceph.wal_uuid=x,ceph.type=wal'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
+ wal = api.Volume(lv_name='wal', lv_uuid='x', lv_tags=wal_tags,
+ lv_path='/dev/VolGroup/wal', vg_name='VolGroup')
+ volumes = []
+ volumes.append(osd)
+ volumes.append(wal)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).full_report()
+ assert result['0'][0]['name'] == 'volume1'
+ assert result['0'][1]['name'] == 'wal'
+
+ @pytest.mark.parametrize('type_', ['journal', 'db', 'wal'])
+ def test_physical_2nd_device_gets_reported(self, type_, monkeypatch):
+ tags = ('ceph.osd_id=0,ceph.{t}_uuid=x,ceph.type=data,'
+ 'ceph.{t}_device=/dev/sda1').format(t=type_)
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ vg_name='VolGroup', lv_path='/dev/VolGroup/lv')
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ [osd])
+
+ result = lvm.listing.List([]).full_report()
+ assert result['0'][1]['path'] == '/dev/sda1'
+ assert result['0'][1]['tags'] == {'PARTUUID': 'x'}
+ assert result['0'][1]['type'] == type_
+
+
+class TestSingleReport(object):
+
+ def test_not_a_ceph_lv(self, monkeypatch):
+ # ceph lvs are detected by looking into its tags
+ lv = api.Volume(lv_name='lv', lv_tags={}, lv_path='/dev/VolGroup/lv',
+ vg_name='VolGroup')
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ [lv])
+
+ result = lvm.listing.List([]).single_report('VolGroup/lv')
+ assert result == {}
+
+ def test_report_a_ceph_lv(self, monkeypatch):
+ # ceph lvs are detected by looking into its tags
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ lv = api.Volume(lv_name='lv', vg_name='VolGroup', lv_uuid='aaaa',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(lv)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).single_report('VolGroup/lv')
+ assert result['0'][0]['name'] == 'lv'
+ assert result['0'][0]['lv_tags'] == tags
+ assert result['0'][0]['path'] == '/dev/VolGroup/lv'
+ assert result['0'][0]['devices'] == []
+
+ def test_report_a_ceph_journal_device(self, monkeypatch):
+ # ceph lvs are detected by looking into its tags
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,' + \
+ 'ceph.journal_device=/dev/sda1'
+ lv = api.Volume(lv_name='lv', lv_uuid='aaa', lv_tags=tags,
+ lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ [lv] if 'tags' in kwargs else [])
+
+ result = lvm.listing.List([]).single_report('/dev/sda1')
+ assert result['0'][0]['tags'] == {'PARTUUID': 'x'}
+ assert result['0'][0]['type'] == 'journal'
+ assert result['0'][0]['path'] == '/dev/sda1'
+
+ def test_report_a_ceph_lv_with_devices(self, monkeypatch):
+ pvolumes = []
+
+ tags = 'ceph.osd_id=0,ceph.type=data'
+ pv1 = api.PVolume(vg_name="VolGroup", pv_name='/dev/sda1',
+ pv_uuid='', pv_tags={}, lv_uuid="aaaa")
+ pv2 = api.PVolume(vg_name="VolGroup", pv_name='/dev/sdb1',
+ pv_uuid='', pv_tags={}, lv_uuid="aaaa")
+ pvolumes.append(pv1)
+ pvolumes.append(pv2)
+
+ volumes = []
+ lv = api.Volume(lv_name='lv', vg_name='VolGroup', lv_uuid='aaaa',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes.append(lv)
+
+ monkeypatch.setattr(lvm.listing.api, 'get_pvs', lambda **kwargs:
+ pvolumes)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ listing = lvm.listing.List([])
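+ # pre-seed the listing's PV cache so both PVs resolve to the LV uuid 'aaaa'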
+ listing._pvs = [
+ {'lv_uuid': 'aaaa', 'pv_name': '/dev/sda1', 'pv_tags': '', 'pv_uuid': ''},
+ {'lv_uuid': 'aaaa', 'pv_name': '/dev/sdb1', 'pv_tags': '', 'pv_uuid': ''},
+ ]
+
+ result = listing.single_report('VolGroup/lv')
+ assert result['0'][0]['name'] == 'lv'
+ assert result['0'][0]['lv_tags'] == tags
+ assert result['0'][0]['path'] == '/dev/VolGroup/lv'
+ assert result['0'][0]['devices'] == ['/dev/sda1', '/dev/sdb1']
+
+ def test_report_by_osd_id_for_just_block_dev(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=block'
+ lvs = [ api.Volume(lv_name='lv1', lv_tags=tags, lv_path='/dev/vg/lv1',
+ lv_uuid='aaaa', vg_name='vg')
+ ]
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: lvs)
+
+ listing = lvm.listing.List([])
+ result = listing.single_report(0)
+ assert result['0'][0]['name'] == 'lv1'
+ assert result['0'][0]['lv_tags'] == tags
+ assert result['0'][0]['lv_path'] == '/dev/vg/lv1'
+ assert result['0'][0]['vg_name'] == 'vg'
+
+ def test_report_by_osd_id_for_just_data_dev(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ lvs = [ api.Volume(lv_name='lv1', lv_tags=tags, lv_path='/dev/vg/lv1',
+ lv_uuid='bbbb', vg_name='vg'),
+ ]
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: lvs)
+
+ listing = lvm.listing.List([])
+ result = listing.single_report(0)
+ assert result['0'][0]['name'] == 'lv1'
+ assert result['0'][0]['lv_tags'] == tags
+ assert result['0'][0]['lv_path'] == '/dev/vg/lv1'
+ assert result['0'][0]['vg_name'] == 'vg'
+
+ def test_report_by_osd_id_for_just_block_wal_and_db_dev(self, monkeypatch):
+ tags1 = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=block'
+ tags2 = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=wal'
+ tags3 = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=db'
+ lvs = [ api.Volume(lv_name='lv1', lv_tags=tags1, lv_path='/dev/vg/lv1',
+ lv_uuid='aaaa', vg_name='vg'),
+ api.Volume(lv_name='lv2', lv_tags=tags2, lv_path='/dev/vg/lv2',
+ lv_uuid='bbbb', vg_name='vg'),
+ api.Volume(lv_name='lv3', lv_tags=tags3, lv_path='/dev/vg/lv3',
+ lv_uuid='cccc', vg_name='vg'),
+ ]
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: lvs)
+
+ listing = lvm.listing.List([])
+ result = listing.single_report(0)
+ assert result['0'][0]['name'] == 'lv1'
+ assert result['0'][0]['lv_tags'] == tags1
+ assert result['0'][0]['lv_path'] == '/dev/vg/lv1'
+ assert result['0'][0]['vg_name'] == 'vg'
+ assert result['0'][1]['name'] == 'lv2'
+ assert result['0'][1]['lv_tags'] == tags2
+ assert result['0'][1]['lv_path'] == '/dev/vg/lv2'
+ assert result['0'][1]['vg_name'] == 'vg'
+ assert result['0'][2]['name'] == 'lv3'
+ assert result['0'][2]['lv_tags'] == tags3
+ assert result['0'][2]['lv_path'] == '/dev/vg/lv3'
+ assert result['0'][2]['vg_name'] == 'vg'
+
+ def test_report_by_osd_id_for_data_and_journal_dev(self, monkeypatch):
+ tags1 = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ tags2 = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=journal'
+ lvs = [ api.Volume(lv_name='lv1', lv_tags=tags1, lv_path='/dev/vg/lv1',
+ lv_uuid='aaaa', vg_name='vg'),
+ api.Volume(lv_name='lv2', lv_tags=tags2, lv_path='/dev/vg/lv2',
+ lv_uuid='bbbb', vg_name='vg'),
+ ]
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: lvs)
+
+ listing = lvm.listing.List([])
+ result = listing.single_report(0)
+ assert result['0'][0]['name'] == 'lv1'
+ assert result['0'][0]['lv_tags'] == tags1
+ assert result['0'][0]['lv_path'] == '/dev/vg/lv1'
+ assert result['0'][0]['vg_name'] == 'vg'
+ assert result['0'][1]['name'] == 'lv2'
+ assert result['0'][1]['lv_tags'] == tags2
+ assert result['0'][1]['lv_path'] == '/dev/vg/lv2'
+ assert result['0'][1]['vg_name'] == 'vg'
+
+ def test_report_by_nonexistent_osd_id(self, monkeypatch):
+ lv = api.Volume(lv_name='lv', lv_tags={}, lv_path='/dev/VolGroup/lv',
+ vg_name='VolGroup')
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ [lv])
+
+ result = lvm.listing.List([]).single_report('1')
+ assert result == {}
+
+ def test_report_a_ceph_lv_with_no_matching_devices(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.type=data'
+ lv = api.Volume(lv_name='lv', vg_name='VolGroup', lv_uuid='aaaa',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(lv)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ listing = lvm.listing.List([])
+ listing._pvs = [
+ {'lv_uuid': 'ffff', 'pv_name': '/dev/sda1', 'pv_tags': '',
+ 'pv_uuid': ''},
+ {'lv_uuid': 'ffff', 'pv_name': '/dev/sdb1', 'pv_tags': '',
+ 'pv_uuid': ''}]
+
+ result = listing.single_report('VolGroup/lv')
+ assert result['0'][0]['name'] == 'lv'
+ assert result['0'][0]['lv_tags'] == tags
+ assert result['0'][0]['path'] == '/dev/VolGroup/lv'
+ assert result['0'][0]['devices'] == []
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py
new file mode 100644
index 000000000..4c86d0ca1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py
@@ -0,0 +1,2299 @@
+import pytest
+from mock.mock import patch
+from ceph_volume import process
+from ceph_volume.api import lvm as api
+from ceph_volume.devices.lvm import migrate
+from ceph_volume.util.device import Device
+from ceph_volume.util import system
+
+class TestGetClusterName(object):
+
+ mock_volumes = []
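+ # each call to the patched get_lvs consumes the next queued reply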
+ def mock_get_lvs(self, *args, **kwargs):
+ return self.mock_volumes.pop(0)
+
+ def test_cluster_found(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234,ceph.cluster_name=name_of_the_cluster'
+ vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
+ lv_path='/dev/VolGroup/lv1', lv_tags=tags)
+ self.mock_volumes = []
+ self.mock_volumes.append([vol])
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = migrate.get_cluster_name(osd_id='0', osd_fsid='1234')
+ assert "name_of_the_cluster" == result
+
+ def test_cluster_not_found(self, monkeypatch, capsys):
+ self.mock_volumes = []
+ self.mock_volumes.append([])
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ with pytest.raises(SystemExit) as error:
+ migrate.get_cluster_name(osd_id='0', osd_fsid='1234')
+ stdout, stderr = capsys.readouterr()
+ expected = 'Unexpected error, terminating'
+ assert expected in str(error.value)
+ expected = 'Unable to find any LV for source OSD: id:0 fsid:1234'
+ assert expected in stderr
+
+class TestFindAssociatedDevices(object):
+
+ mock_volumes = []
+ def mock_get_lvs(self, *args, **kwargs):
+ return self.mock_volumes.pop(0)
+
+ mock_single_volumes = {}
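+ # the patched get_single_lv resolves a volume by the lv_path it was filtered on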
+ def mock_get_single_lv(self, *args, **kwargs):
+ p = kwargs['filters']['lv_path']
+ return self.mock_single_volumes[p]
+
+ def test_lv_is_matched_id(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234'
+ vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
+ lv_path='/dev/VolGroup/lv1', lv_tags=tags)
+ self.mock_volumes = []
+ self.mock_volumes.append([vol])
+ self.mock_volumes.append([vol])
+ self.mock_volumes.append([])
+ self.mock_volumes.append([])
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': vol}
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = migrate.find_associated_devices(osd_id='0', osd_fsid='1234')
+ assert len(result) == 1
+ assert result[0][0].path == '/dev/VolGroup/lv1'
+ assert result[0][0].lvs == [vol]
+ assert result[0][1] == 'block'
+
+ def test_lv_is_matched_id2(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234'
+ vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=tags)
+ tags2 = 'ceph.osd_id=0,ceph.journal_uuid=xx,ceph.type=wal,ceph.osd_fsid=1234'
+ vol2 = api.Volume(lv_name='volume2', lv_uuid='z', vg_name='vg',
+ lv_path='/dev/VolGroup/lv2', lv_tags=tags2)
+ self.mock_volumes = []
+ self.mock_volumes.append([vol])
+ self.mock_volumes.append([vol])
+ self.mock_volumes.append([])
+ self.mock_volumes.append([vol2])
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': vol, '/dev/VolGroup/lv2': vol2}
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = migrate.find_associated_devices(osd_id='0', osd_fsid='1234')
+ assert len(result) == 2
+ for d in result:
+ if d[1] == 'block':
+ assert d[0].path == '/dev/VolGroup/lv1'
+ assert d[0].lvs == [vol]
+ elif d[1] == 'wal':
+ assert d[0].path == '/dev/VolGroup/lv2'
+ assert d[0].lvs == [vol2]
+ else:
+ assert False
+
+ def test_lv_is_matched_id3(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234'
+ vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=tags)
+ tags2 = 'ceph.osd_id=0,ceph.journal_uuid=xx,ceph.type=wal,ceph.osd_fsid=1234'
+ vol2 = api.Volume(lv_name='volume2', lv_uuid='z', vg_name='vg',
+ lv_path='/dev/VolGroup/lv2', lv_tags=tags2)
+ tags3 = 'ceph.osd_id=0,ceph.journal_uuid=xx,ceph.type=db,ceph.osd_fsid=1234'
+ vol3 = api.Volume(lv_name='volume3', lv_uuid='z', vg_name='vg',
+ lv_path='/dev/VolGroup/lv3', lv_tags=tags3)
+
+ self.mock_volumes = []
+ self.mock_volumes.append([vol])
+ self.mock_volumes.append([vol])
+ self.mock_volumes.append([vol3])
+ self.mock_volumes.append([vol2])
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': vol,
+ '/dev/VolGroup/lv2': vol2,
+ '/dev/VolGroup/lv3': vol3}
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = migrate.find_associated_devices(osd_id='0', osd_fsid='1234')
+ assert len(result) == 3
+ for d in result:
+ if d[1] == 'block':
+ assert d[0].path == '/dev/VolGroup/lv1'
+ assert d[0].lvs == [vol]
+ elif d[1] == 'wal':
+ assert d[0].path == '/dev/VolGroup/lv2'
+ assert d[0].lvs == [vol2]
+ elif d[1] == 'db':
+ assert d[0].path == '/dev/VolGroup/lv3'
+ assert d[0].lvs == [vol3]
+ else:
+ assert False
+
+ def test_lv_is_not_matched(self, monkeypatch, capsys):
+ self.mock_volumes = [None]
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ with pytest.raises(SystemExit) as error:
+ migrate.find_associated_devices(osd_id='1', osd_fsid='1234')
+ stdout, stderr = capsys.readouterr()
+ expected = 'Unexpected error, terminating'
+ assert expected in str(error.value)
+ expected = 'Unable to find any LV for source OSD: id:1 fsid:1234'
+ assert expected in stderr
+
+class TestVolumeTagTracker(object):
+ mock_single_volumes = {}
+ def mock_get_single_lv(self, *args, **kwargs):
+ p = kwargs['filters']['lv_path']
+ return self.mock_single_volumes[p]
+
+ mock_process_input = []
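+ # record each command handed to process.call and fake a successful run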
+ def mock_process(self, *args, **kwargs):
+ self.mock_process_input.append(args[0])
+ return ('', '', 0)
+
+ def test_init(self, monkeypatch):
+ source_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234'
+ source_db_tags = 'ceph.osd_id=0,journal_uuid=x,ceph.type=db, osd_fsid=1234'
+ source_wal_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=wal'
+ target_tags="ceph.a=1,ceph.b=2,c=3,ceph.d=4" # 'c' to be bypassed
+ devices=[]
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags)
+ wal_vol = api.Volume(lv_name='volume3', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol}
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ data_device = Device(path='/dev/VolGroup/lv1')
+ db_device = Device(path='/dev/VolGroup/lv2')
+ wal_device = Device(path='/dev/VolGroup/lv3')
+ devices.append([data_device, 'block'])
+ devices.append([db_device, 'db'])
+ devices.append([wal_device, 'wal'])
+
+ target = api.Volume(lv_name='target_name', lv_tags=target_tags,
+ lv_path='/dev/VolGroup/lv_target')
+ t = migrate.VolumeTagTracker(devices, target)
+
+ assert 3 == len(t.old_target_tags)
+
+ assert data_device == t.data_device
+ assert 4 == len(t.old_data_tags)
+ assert 'data' == t.old_data_tags['ceph.type']
+
+ assert db_device == t.db_device
+ assert 2 == len(t.old_db_tags)
+ assert 'db' == t.old_db_tags['ceph.type']
+
+ assert wal_device == t.wal_device
+ assert 3 == len(t.old_wal_tags)
+ assert 'wal' == t.old_wal_tags['ceph.type']
+
+ def test_update_tags_when_lv_create(self, monkeypatch):
+ source_tags = \
+ 'ceph.osd_id=0,ceph.journal_uuid=x,' \
+ 'ceph.type=data,ceph.osd_fsid=1234'
+ source_db_tags = \
+ 'ceph.osd_id=0,journal_uuid=x,ceph.type=db,' \
+ 'osd_fsid=1234'
+
+ devices = []
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ data_device = Device(path='/dev/VolGroup/lv1')
+ db_device = Device(path='/dev/VolGroup/lv2')
+ devices.append([data_device, 'block'])
+ devices.append([db_device, 'db'])
+
+ target = api.Volume(lv_name='target_name', lv_tags='',
+ lv_uuid='wal_uuid',
+ lv_path='/dev/VolGroup/lv_target')
+ t = migrate.VolumeTagTracker(devices, target)
+
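+ # attaching a new 'wal' LV: the tracker should tag every member LV with
+ # the new ceph.wal_uuid/ceph.wal_device and give the target the full set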
+ self.mock_process_input = []
+ t.update_tags_when_lv_create('wal')
+
+ assert 3 == len(self.mock_process_input)
+
+ assert ['lvchange',
+ '--addtag', 'ceph.wal_uuid=wal_uuid',
+ '--addtag', 'ceph.wal_device=/dev/VolGroup/lv_target',
+ '/dev/VolGroup/lv1'] == self.mock_process_input[0]
+
+ # note: list.sort() returns None, so comparing two .sort() results is
+ # always true; use sorted() so the assertion actually checks contents
+ assert sorted(self.mock_process_input[1]) == sorted([
+ 'lvchange',
+ '--addtag', 'ceph.osd_id=0',
+ '--addtag', 'ceph.journal_uuid=x',
+ '--addtag', 'ceph.type=wal',
+ '--addtag', 'ceph.osd_fsid=1234',
+ '--addtag', 'ceph.wal_uuid=wal_uuid',
+ '--addtag', 'ceph.wal_device=/dev/VolGroup/lv_target',
+ '/dev/VolGroup/lv_target'])
+
+ assert ['lvchange',
+ '--addtag', 'ceph.wal_uuid=wal_uuid',
+ '--addtag', 'ceph.wal_device=/dev/VolGroup/lv_target',
+ '/dev/VolGroup/lv2'] == self.mock_process_input[2]
+
+ def test_remove_lvs(self, monkeypatch):
+ source_tags = \
+ 'ceph.osd_id=0,ceph.journal_uuid=x,' \
+ 'ceph.type=data,ceph.osd_fsid=1234,ceph.wal_uuid=aaaaa'
+ source_db_tags = \
+ 'ceph.osd_id=0,journal_uuid=x,ceph.type=db,' \
+ 'osd_fsid=1234,ceph.wal_device=aaaaa'
+ source_wal_tags = \
+ 'ceph.wal_uuid=uuid,ceph.wal_device=device,' \
+ 'ceph.osd_id=0,ceph.type=wal'
+
+ devices = []
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags)
+ wal_vol = api.Volume(lv_name='volume3', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ data_device = Device(path='/dev/VolGroup/lv1')
+ db_device = Device(path='/dev/VolGroup/lv2')
+ wal_device = Device(path='/dev/VolGroup/lv3')
+ devices.append([data_device, 'block'])
+ devices.append([db_device, 'db'])
+ devices.append([wal_device, 'wal'])
+
+ target = api.Volume(lv_name='target_name', lv_tags='',
+ lv_path='/dev/VolGroup/lv_target')
+ t = migrate.VolumeTagTracker(devices, target)
+
+ device_to_remove = devices.copy()
+
+ self.mock_process_input = []
+ t.remove_lvs(device_to_remove, 'db')
+
+ assert 3 == len(self.mock_process_input)
+ assert ['lvchange',
+ '--deltag', 'ceph.wal_uuid=uuid',
+ '--deltag', 'ceph.wal_device=device',
+ '--deltag', 'ceph.osd_id=0',
+ '--deltag', 'ceph.type=wal',
+ '/dev/VolGroup/lv3'] == self.mock_process_input[0]
+ assert ['lvchange',
+ '--deltag', 'ceph.wal_uuid=aaaaa',
+ '/dev/VolGroup/lv1'] == self.mock_process_input[1]
+ assert ['lvchange',
+ '--deltag', 'ceph.wal_device=aaaaa',
+ '/dev/VolGroup/lv2'] == self.mock_process_input[2]
+
+ def test_replace_lvs(self, monkeypatch):
+ source_tags = \
+ 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234,'\
+ 'ceph.wal_uuid=wal_uuid,ceph.db_device=/dbdevice'
+ source_db_tags = \
+ 'ceph.osd_id=0,ceph.type=db,ceph.osd_fsid=1234'
+ source_wal_tags = \
+ 'ceph.wal_uuid=uuid,ceph.wal_device=device,' \
+ 'ceph.osd_id=0,ceph.type=wal'
+
+ devices = []
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2', lv_uuid='dbuuid', vg_name='vg',
+ lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags)
+ wal_vol = api.Volume(lv_name='volume3', lv_uuid='waluuid', vg_name='vg',
+ lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ data_device = Device(path='/dev/VolGroup/lv1')
+ db_device = Device(path='/dev/VolGroup/lv2')
+ wal_device = Device(path='/dev/VolGroup/lv3')
+ devices.append([data_device, 'block'])
+ devices.append([db_device, 'db'])
+ devices.append([wal_device, 'wal'])
+
+ target = api.Volume(lv_name='target_name',
+ lv_uuid='ttt',
+ lv_tags='ceph.tag_to_remove=aaa',
+ lv_path='/dev/VolGroup/lv_target')
+ t = migrate.VolumeTagTracker(devices, target)
+
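+ # replacing the DB volume: the old db and wal LVs are untagged, stale
+ # references are dropped from the data LV, and the target receives the
+ # full db tag set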
+ self.mock_process_input = []
+ t.replace_lvs(devices, 'db')
+
+ assert 5 == len(self.mock_process_input)
+
+ assert ['lvchange',
+ '--deltag', 'ceph.osd_id=0',
+ '--deltag', 'ceph.type=db',
+ '--deltag', 'ceph.osd_fsid=1234',
+ '/dev/VolGroup/lv2'] == self.mock_process_input[0]
+ assert ['lvchange',
+ '--deltag', 'ceph.wal_uuid=uuid',
+ '--deltag', 'ceph.wal_device=device',
+ '--deltag', 'ceph.osd_id=0',
+ '--deltag', 'ceph.type=wal',
+ '/dev/VolGroup/lv3'] == self.mock_process_input[1]
+ assert ['lvchange',
+ '--deltag', 'ceph.db_device=/dbdevice',
+ '--deltag', 'ceph.wal_uuid=wal_uuid',
+ '/dev/VolGroup/lv1'] == self.mock_process_input[2]
+
+ assert ['lvchange',
+ '--addtag', 'ceph.db_uuid=ttt',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv_target',
+ '/dev/VolGroup/lv1'] == self.mock_process_input[3]
+
+ assert sorted(self.mock_process_input[4]) == sorted([
+ 'lvchange',
+ '--addtag', 'ceph.osd_id=0',
+ '--addtag', 'ceph.osd_fsid=1234',
+ '--addtag', 'ceph.type=db',
+ '--addtag', 'ceph.db_uuid=ttt',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv_target',
+ '/dev/VolGroup/lv_target'])
+
+ def test_undo(self, monkeypatch):
+ source_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234'
+ source_db_tags = 'ceph.osd_id=0,journal_uuid=x,ceph.type=db, osd_fsid=1234'
+ source_wal_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=wal'
+ target_tags=""
+ devices=[]
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags)
+ wal_vol = api.Volume(lv_name='volume3', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ data_device = Device(path='/dev/VolGroup/lv1')
+ db_device = Device(path='/dev/VolGroup/lv2')
+ wal_device = Device(path='/dev/VolGroup/lv3')
+ devices.append([data_device, 'block'])
+ devices.append([db_device, 'db'])
+ devices.append([wal_device, 'wal'])
+
+ target = api.Volume(lv_name='target_name', lv_tags=target_tags,
+ lv_path='/dev/VolGroup/lv_target')
+ t = migrate.VolumeTagTracker(devices, target)
+
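+ # simulate partially applied tag changes that undo() must roll back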
+ target.tags['ceph.a'] = 'aa'
+ target.tags['ceph.b'] = 'bb'
+
+ data_vol.tags['ceph.journal_uuid'] = 'z'
+
+ db_vol.tags.pop('ceph.type')
+
+ wal_vol.tags.clear()
+
+ assert 2 == len(target.tags)
+ assert 4 == len(data_vol.tags)
+ assert 1 == len(db_vol.tags)
+
+ self.mock_process_input = []
+ t.undo()
+
+ assert 0 == len(target.tags)
+ assert 4 == len(data_vol.tags)
+ assert 'x' == data_vol.tags['ceph.journal_uuid']
+
+ assert 2 == len(db_vol.tags)
+ assert 'db' == db_vol.tags['ceph.type']
+
+ assert 3 == len(wal_vol.tags)
+ assert 'wal' == wal_vol.tags['ceph.type']
+
+ assert 6 == len(self.mock_process_input)
+ assert 'lvchange' in self.mock_process_input[0]
+ assert '--deltag' in self.mock_process_input[0]
+ assert 'ceph.journal_uuid=z' in self.mock_process_input[0]
+ assert '/dev/VolGroup/lv1' in self.mock_process_input[0]
+
+ assert 'lvchange' in self.mock_process_input[1]
+ assert '--addtag' in self.mock_process_input[1]
+ assert 'ceph.journal_uuid=x' in self.mock_process_input[1]
+ assert '/dev/VolGroup/lv1' in self.mock_process_input[1]
+
+ assert 'lvchange' in self.mock_process_input[2]
+ assert '--deltag' in self.mock_process_input[2]
+ assert 'ceph.osd_id=0' in self.mock_process_input[2]
+ assert '/dev/VolGroup/lv2' in self.mock_process_input[2]
+
+ assert 'lvchange' in self.mock_process_input[3]
+ assert '--addtag' in self.mock_process_input[3]
+ assert 'ceph.type=db' in self.mock_process_input[3]
+ assert '/dev/VolGroup/lv2' in self.mock_process_input[3]
+
+ assert 'lvchange' in self.mock_process_input[4]
+ assert '--addtag' in self.mock_process_input[4]
+ assert 'ceph.type=wal' in self.mock_process_input[4]
+ assert '/dev/VolGroup/lv3' in self.mock_process_input[4]
+
+ assert 'lvchange' in self.mock_process_input[5]
+ assert '--deltag' in self.mock_process_input[5]
+ assert 'ceph.a=aa' in self.mock_process_input[5]
+ assert 'ceph.b=bb' in self.mock_process_input[5]
+ assert '/dev/VolGroup/lv_target' in self.mock_process_input[5]
+
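+# NewDB and NewWAL attach a fresh LV to an existing OSD: member LVs get the
+# new db/wal tags and ceph-bluestore-tool is invoked with
+# bluefs-bdev-new-db / bluefs-bdev-new-wal.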
+class TestNew(object):
+
+ mock_volume = None
+ def mock_get_lv_by_fullname(self, *args, **kwargs):
+ return self.mock_volume
+
+ mock_process_input = []
+ def mock_process(self, *args, **kwargs):
+ self.mock_process_input.append(args[0])
+ return ('', '', 0)
+
+ mock_single_volumes = {}
+ def mock_get_single_lv(self, *args, **kwargs):
+ p = kwargs['filters']['lv_path']
+ return self.mock_single_volumes[p]
+
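+ # mock_volumes is consumed as a FIFO queue: each get_lvs() call pops the
+ # next pre-arranged result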
+ mock_volumes = []
+ def mock_get_lvs(self, *args, **kwargs):
+ return self.mock_volumes.pop(0)
+
+ def test_newdb_non_root(self):
+ with pytest.raises(Exception) as error:
+ migrate.NewDB(argv=[
+ '--osd-id', '1',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--target', 'vgname/new_db']).main()
+ expected = 'This command needs to be executed with sudo or as root'
+ assert expected in str(error.value)
+
+ @patch('os.getuid')
+ def test_newdb_not_target_lvm(self, m_getuid, capsys):
+ m_getuid.return_value = 0
+ with pytest.raises(SystemExit) as error:
+ migrate.NewDB(argv=[
+ '--osd-id', '1',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--target', 'vgname/new_db']).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Unable to attach new volume : vgname/new_db'
+ assert expected in str(error.value)
+ expected = 'Target path vgname/new_db is not a Logical Volume'
+ assert expected in stderr
+
+
+ @patch('os.getuid')
+ def test_newdb_already_in_use(self, m_getuid, monkeypatch, capsys):
+ m_getuid.return_value = 0
+
+ self.mock_volume = api.Volume(lv_name='volume1',
+ lv_uuid='y',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags='ceph.osd_id=5') # the osd_id tag marks the LV as used by ceph
+ monkeypatch.setattr(api, 'get_lv_by_fullname', self.mock_get_lv_by_fullname)
+
+ with pytest.raises(SystemExit) as error:
+ migrate.NewDB(argv=[
+ '--osd-id', '1',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--target', 'vgname/new_db']).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Unable to attach new volume : vgname/new_db'
+ assert expected in str(error.value)
+ expected = 'Target Logical Volume is already used by ceph: vgname/new_db'
+ assert expected in stderr
+
+ @patch('os.getuid')
+ def test_newdb(self, m_getuid, monkeypatch, capsys):
+ m_getuid.return_value = 0
+
+ source_tags = \
+ 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234,'\
+ 'ceph.wal_uuid=wal_uuid,ceph.db_device=/dbdevice'
+ source_wal_tags = \
+ 'ceph.wal_uuid=uuid,ceph.wal_device=device,' \
+ 'ceph.osd_id=0,ceph.type=wal'
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv3': wal_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/target_volume',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: False)
+
+ # find_associated_devices will call get_lvs() 4 times
+ # and needs the results queued in that order
+ self.mock_volumes = []
+ self.mock_volumes.append([data_vol, wal_vol])
+ self.mock_volumes.append([data_vol])
+ self.mock_volumes.append([])
+ self.mock_volumes.append([wal_vol])
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph_cluster')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+
+ migrate.NewDB(argv=[
+ '--osd-id', '1',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--target', 'vgname/new_db']).main()
+
+ n = len(self.mock_process_input)
+ assert n >= 5
+
+ assert self.mock_process_input[n - 5] == [
+ 'lvchange',
+ '--deltag', 'ceph.db_device=/dbdevice',
+ '/dev/VolGroup/lv1']
+ assert self.mock_process_input[n - 4] == [
+ 'lvchange',
+ '--addtag', 'ceph.db_uuid=y',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
+ '/dev/VolGroup/lv1']
+
+ assert sorted(self.mock_process_input[n - 3]) == sorted([
+ 'lvchange',
+ '--addtag', 'ceph.wal_uuid=uuid',
+ '--addtag', 'ceph.osd_id=0',
+ '--addtag', 'ceph.type=db',
+ '--addtag', 'ceph.osd_fsid=1234',
+ '--addtag', 'ceph.db_uuid=y',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
+ '/dev/VolGroup/target_volume'])
+
+ assert self.mock_process_input[n - 2] == [
+ 'lvchange',
+ '--addtag', 'ceph.db_uuid=y',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
+ '/dev/VolGroup/lv3']
+
+ assert self.mock_process_input[n - 1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph_cluster-1',
+ '--dev-target', '/dev/VolGroup/target_volume',
+ '--command', 'bluefs-bdev-new-db']
+
+ def test_newdb_active_systemd(self, is_root, monkeypatch, capsys):
+ source_tags = \
+ 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234,'\
+ 'ceph.wal_uuid=wal_uuid,ceph.db_device=/dbdevice'
+ source_wal_tags = \
+ 'ceph.wal_uuid=uuid,ceph.wal_device=device,' \
+ 'ceph.osd_id=0,ceph.type=wal'
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv3': wal_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/target_volume',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: True)
+
+ # find_associated_devices will call get_lvs() 4 times
+ # and needs the results queued in that order
+ self.mock_volumes = []
+ self.mock_volumes.append([data_vol, wal_vol])
+ self.mock_volumes.append([data_vol])
+ self.mock_volumes.append([])
+ self.mock_volumes.append([wal_vol])
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph_cluster')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+
+ m = migrate.NewDB(argv=[
+ '--osd-id', '1',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--target', 'vgname/new_db'])
+
+ with pytest.raises(SystemExit) as error:
+ m.main()
+
+ stdout, stderr = capsys.readouterr()
+
+ assert 'Unable to attach new volume for OSD: 1' == str(error.value)
+ assert '--> OSD ID is running, stop it with: systemctl stop ceph-osd@1' == stderr.rstrip()
+ assert not stdout
+
+ def test_newdb_no_systemd(self, is_root, monkeypatch):
+ source_tags = \
+ 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234,'\
+ 'ceph.wal_uuid=wal_uuid,ceph.db_device=/dbdevice'
+ source_wal_tags = \
+ 'ceph.wal_uuid=uuid,ceph.wal_device=device,' \
+ 'ceph.osd_id=0,ceph.type=wal'
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv3': wal_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/target_volume',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ # find_associated_devices will call get_lvs() 4 times
+ # and needs the results queued in that order
+ self.mock_volumes = []
+ self.mock_volumes.append([data_vol, wal_vol])
+ self.mock_volumes.append([data_vol])
+ self.mock_volumes.append([])
+ self.mock_volumes.append([wal_vol])
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph_cluster')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+
+ migrate.NewDB(argv=[
+ '--osd-id', '1',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--target', 'vgname/new_db',
+ '--no-systemd']).main()
+
+ n = len(self.mock_process_input)
+ assert n >= 5
+
+ assert self.mock_process_input[n - 5] == [
+ 'lvchange',
+ '--deltag', 'ceph.db_device=/dbdevice',
+ '/dev/VolGroup/lv1']
+ assert self.mock_process_input[n - 4] == [
+ 'lvchange',
+ '--addtag', 'ceph.db_uuid=y',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
+ '/dev/VolGroup/lv1']
+
+ assert sorted(self.mock_process_input[n - 3]) == sorted([
+ 'lvchange',
+ '--addtag', 'ceph.wal_uuid=uuid',
+ '--addtag', 'ceph.osd_id=0',
+ '--addtag', 'ceph.type=db',
+ '--addtag', 'ceph.osd_fsid=1234',
+ '--addtag', 'ceph.db_uuid=y',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
+ '/dev/VolGroup/target_volume'])
+
+ assert self.mock_process_input[n - 2] == [
+ 'lvchange',
+ '--addtag', 'ceph.db_uuid=y',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
+ '/dev/VolGroup/lv3']
+
+ assert self.mock_process_input[n - 1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph_cluster-1',
+ '--dev-target', '/dev/VolGroup/target_volume',
+ '--command', 'bluefs-bdev-new-db']
+
+ @patch('os.getuid')
+ def test_newwal(self, m_getuid, monkeypatch, capsys):
+ m_getuid.return_value = 0
+
+ source_tags = \
+ 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234'
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/target_volume',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname', self.mock_get_lv_by_fullname)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", lambda id: False)
+
+ # find_associated_devices will call get_lvs() 4 times
+ # and needs the results queued in that order
+ self.mock_volumes = []
+ self.mock_volumes.append([data_vol])
+ self.mock_volumes.append([data_vol])
+ self.mock_volumes.append([])
+ self.mock_volumes.append([])
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name', lambda osd_id, osd_fsid: 'cluster')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+
+ migrate.NewWAL(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--target', 'vgname/new_wal']).main()
+
+ n = len(self.mock_process_input)
+ assert n >= 3
+
+ assert self.mock_process_input[n - 3] == [
+ 'lvchange',
+ '--addtag', 'ceph.wal_uuid=y',
+ '--addtag', 'ceph.wal_device=/dev/VolGroup/target_volume',
+ '/dev/VolGroup/lv1']
+
+ assert sorted(self.mock_process_input[n - 2]) == sorted([
+ 'lvchange',
+ '--addtag', 'ceph.osd_id=0',
+ '--addtag', 'ceph.type=wal',
+ '--addtag', 'ceph.osd_fsid=1234',
+ '--addtag', 'ceph.wal_uuid=y',
+ '--addtag', 'ceph.wal_device=/dev/VolGroup/target_volume',
+ '/dev/VolGroup/target_volume'])
+
+ assert self.mock_process_input[n - 1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/cluster-2',
+ '--dev-target', '/dev/VolGroup/target_volume',
+ '--command', 'bluefs-bdev-new-wal']
+
+ def test_newwal_active_systemd(self, is_root, monkeypatch, capsys):
+ source_tags = \
+ 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234'
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/target_volume',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname', self.mock_get_lv_by_fullname)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", lambda id: True)
+
+ # find_associated_devices will call get_lvs() 4 times
+ # and needs the results queued in that order
+ self.mock_volumes = []
+ self.mock_volumes.append([data_vol])
+ self.mock_volumes.append([data_vol])
+ self.mock_volumes.append([])
+ self.mock_volumes.append([])
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name', lambda osd_id, osd_fsid: 'cluster')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+
+ m = migrate.NewWAL(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--target', 'vgname/new_wal'])
+
+ with pytest.raises(SystemExit) as error:
+ m.main()
+
+ stdout, stderr = capsys.readouterr()
+
+ assert 'Unable to attach new volume for OSD: 2' == str(error.value)
+ assert '--> OSD ID is running, stop it with: systemctl stop ceph-osd@2' == stderr.rstrip()
+ assert not stdout
+
+ def test_newwal_no_systemd(self, is_root, monkeypatch):
+ source_tags = \
+ 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234'
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/target_volume',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname', self.mock_get_lv_by_fullname)
+
+ # find_associated_devices will call get_lvs() 4 times
+ # and needs the results queued in that order
+ self.mock_volumes = []
+ self.mock_volumes.append([data_vol])
+ self.mock_volumes.append([data_vol])
+ self.mock_volumes.append([])
+ self.mock_volumes.append([])
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name', lambda osd_id, osd_fsid: 'cluster')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+
+ migrate.NewWAL(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--target', 'vgname/new_wal',
+ '--no-systemd']).main()
+
+ n = len(self.mock_process_input)
+ assert n >= 3
+
+ assert self.mock_process_input[n - 3] == [
+ 'lvchange',
+ '--addtag', 'ceph.wal_uuid=y',
+ '--addtag', 'ceph.wal_device=/dev/VolGroup/target_volume',
+ '/dev/VolGroup/lv1']
+
+ assert sorted(self.mock_process_input[n - 2]) == sorted([
+ 'lvchange',
+ '--addtag', 'ceph.osd_id=0',
+ '--addtag', 'ceph.type=wal',
+ '--addtag', 'ceph.osd_fsid=1234',
+ '--addtag', 'ceph.wal_uuid=y',
+ '--addtag', 'ceph.wal_device=/dev/VolGroup/target_volume',
+ '/dev/VolGroup/target_volume'])
+
+ assert self.mock_process_input[n - 1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/cluster-2',
+ '--dev-target', '/dev/VolGroup/target_volume',
+ '--command', 'bluefs-bdev-new-wal']
+
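+# Migrate moves BlueFS data between volumes; the supported source/target
+# combinations are spelled out in the help text asserted below.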
+class TestMigrate(object):
+
+ def test_invalid_osd_id_passed(self, is_root):
+ with pytest.raises(SystemExit):
+ migrate.Migrate(argv=['--osd-fsid', '123', '--from', 'data', '--target', 'foo', '--osd-id', 'foo']).main()
+
+ mock_volume = None
+ def mock_get_lv_by_fullname(self, *args, **kwargs):
+ return self.mock_volume
+
+ mock_process_input = []
+ def mock_process(self, *args, **kwargs):
+ self.mock_process_input.append(args[0])
+ return ('', '', 0)
+
+ mock_single_volumes = {}
+ def mock_get_single_lv(self, *args, **kwargs):
+ p = kwargs['filters']['lv_path']
+ return self.mock_single_volumes[p]
+
+ mock_volumes = []
+ def mock_get_lvs(self, *args, **kwargs):
+ return self.mock_volumes.pop(0)
+
+ def test_get_source_devices(self, monkeypatch):
+
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234'
+ source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = api.Volume(lv_name='volume2', lv_uuid='y',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags='ceph.osd_id=5,ceph.osd_type=db')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+
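+ # with '--from data wal' only the block and wal entries are selected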
+ argv = [
+ '--osd-id', '2',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--from', 'data', 'wal',
+ '--target', 'vgname/new_wal'
+ ]
+ m = migrate.Migrate(argv=argv)
+ m.args = m.make_parser('ceph-volume lvm migration', 'help').parse_args(argv)
+ res_devices = m.get_source_devices(devices)
+
+ assert 2 == len(res_devices)
+ assert devices[0] == res_devices[0]
+ assert devices[2] == res_devices[1]
+
+ argv = [
+ '--osd-id', '2',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--from', 'db', 'wal', 'data',
+ '--target', 'vgname/new_wal'
+ ]
+ m = migrate.Migrate(argv=argv)
+ m.args = m.make_parser('ceph-volume lvm migration', 'help').parse_args(argv)
+ res_devices = m.get_source_devices(devices)
+
+ assert 3 == len(res_devices)
+ assert devices[0] == res_devices[0]
+ assert devices[1] == res_devices[1]
+ assert devices[2] == res_devices[2]
+
+
+ def test_migrate_without_args(self, capsys):
+ help_msg = """
+Moves BlueFS data from source volume(s) to the target one, source
+volumes (except the main (i.e. data or block) one) are removed on
+success. LVM volumes are permitted for Target only, both already
+attached or new logical one. In the latter case it is attached to OSD
+replacing one of the source devices. Following replacement rules apply
+(in the order of precedence, stop on the first match):
+* if source list has DB volume - target device replaces it.
+* if source list has WAL volume - target device replace it.
+* if source list has slow volume only - operation is not permitted,
+ requires explicit allocation via new-db/new-wal command.
+
+Example calls for supported scenarios:
+
+ Moves BlueFS data from main device to LV already attached as DB:
+
+ ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data --target vgname/db
+
+ Moves BlueFS data from shared main device to LV which will be attached
+ as a new DB:
+
+ ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data --target vgname/new_db
+
+ Moves BlueFS data from DB device to new LV, DB is replaced:
+
+ ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from db --target vgname/new_db
+
+ Moves BlueFS data from main and DB devices to new LV, DB is replaced:
+
+ ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data db --target vgname/new_db
+
+ Moves BlueFS data from main, DB and WAL devices to new LV, WAL is
+ removed and DB is replaced:
+
+ ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data db wal --target vgname/new_db
+
+ Moves BlueFS data from main, DB and WAL devices to main device, WAL
+ and DB are removed:
+
+ ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from db wal --target vgname/data
+
+"""
+ m = migrate.Migrate(argv=[])
+ m.main()
+ stdout, stderr = capsys.readouterr()
+ assert help_msg in stdout
+ assert not stderr
+
+
+ @patch('os.getuid')
+ def test_migrate_data_db_to_new_db(self, m_getuid, monkeypatch):
+ m_getuid.return_value = 0
+
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2_new',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: False)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'data', 'db', 'wal',
+ '--target', 'vgname/new_wal'])
+ m.main()
+
+ n = len(self.mock_process_input)
+ assert n >= 5
+
+ assert self.mock_process_input[n-5] == [
+ 'lvchange',
+ '--deltag', 'ceph.osd_id=2',
+ '--deltag', 'ceph.type=db',
+ '--deltag', 'ceph.osd_fsid=1234',
+ '--deltag', 'ceph.cluster_name=ceph',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '/dev/VolGroup/lv2']
+
+ assert self.mock_process_input[n-4] == [
+ 'lvchange',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '/dev/VolGroup/lv1']
+
+ assert self.mock_process_input[n-3] == [
+ 'lvchange',
+ '--addtag', 'ceph.db_uuid=new-db-uuid',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+ '/dev/VolGroup/lv1']
+
+ assert self.mock_process_input[n-2] == [
+ 'lvchange',
+ '--addtag', 'ceph.osd_id=2',
+ '--addtag', 'ceph.type=db',
+ '--addtag', 'ceph.osd_fsid=1234',
+ '--addtag', 'ceph.cluster_name=ceph',
+ '--addtag', 'ceph.db_uuid=new-db-uuid',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+ '/dev/VolGroup/lv2_new']
+
+ assert self.mock_process_input[n-1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph-2',
+ '--dev-target', '/dev/VolGroup/lv2_new',
+ '--command', 'bluefs-bdev-migrate',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block.db']
+
+ def test_migrate_data_db_to_new_db_active_systemd(self, is_root, monkeypatch, capsys):
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2_new',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: True)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'data', 'db', 'wal',
+ '--target', 'vgname/new_wal'])
+
+ with pytest.raises(SystemExit) as error:
+ m.main()
+
+ stdout, stderr = capsys.readouterr()
+
+ assert 'Unable to migrate devices associated with OSD ID: 2' == str(error.value)
+ assert '--> OSD is running, stop it with: systemctl stop ceph-osd@2' == stderr.rstrip()
+ assert not stdout
+
+ def test_migrate_data_db_to_new_db_no_systemd(self, is_root, monkeypatch):
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2_new',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'data', 'db', 'wal',
+ '--target', 'vgname/new_wal',
+ '--no-systemd'])
+ m.main()
+
+ n = len(self.mock_process_input)
+ assert n >= 5
+
+ assert self.mock_process_input[n-5] == [
+ 'lvchange',
+ '--deltag', 'ceph.osd_id=2',
+ '--deltag', 'ceph.type=db',
+ '--deltag', 'ceph.osd_fsid=1234',
+ '--deltag', 'ceph.cluster_name=ceph',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '/dev/VolGroup/lv2']
+
+ assert self.mock_process_input[n-4] == [
+ 'lvchange',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '/dev/VolGroup/lv1']
+
+ assert self.mock_process_input[n-3] == [
+ 'lvchange',
+ '--addtag', 'ceph.db_uuid=new-db-uuid',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+ '/dev/VolGroup/lv1']
+
+ assert self.mock_process_input[n-2] == [
+ 'lvchange',
+ '--addtag', 'ceph.osd_id=2',
+ '--addtag', 'ceph.type=db',
+ '--addtag', 'ceph.osd_fsid=1234',
+ '--addtag', 'ceph.cluster_name=ceph',
+ '--addtag', 'ceph.db_uuid=new-db-uuid',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+ '/dev/VolGroup/lv2_new']
+
+ assert self.mock_process_input[n-1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph-2',
+ '--dev-target', '/dev/VolGroup/lv2_new',
+ '--command', 'bluefs-bdev-migrate',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block.db']
+
+ @patch('os.getuid')
+ def test_migrate_data_db_to_new_db_skip_wal(self, m_getuid, monkeypatch):
+ m_getuid.return_value = 0
+
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2_new',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: False)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'data', 'db',
+ '--target', 'vgname/new_wal'])
+ m.main()
+
+ n = len(self.mock_process_input)
+ assert n >= 7
+
+ assert self.mock_process_input[n-7] == [
+ 'lvchange',
+ '--deltag', 'ceph.osd_id=2',
+ '--deltag', 'ceph.type=db',
+ '--deltag', 'ceph.osd_fsid=1234',
+ '--deltag', 'ceph.cluster_name=ceph',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '/dev/VolGroup/lv2']
+
+ assert self.mock_process_input[n-6] == [
+ 'lvchange',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '/dev/VolGroup/lv1']
+
+ assert self.mock_process_input[n-5] == [
+ 'lvchange',
+ '--addtag', 'ceph.db_uuid=new-db-uuid',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+ '/dev/VolGroup/lv1']
+
+ assert self.mock_process_input[n-4] == [
+ 'lvchange',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '/dev/VolGroup/lv3']
+
+ assert self.mock_process_input[n-3] == [
+ 'lvchange',
+ '--addtag', 'ceph.db_uuid=new-db-uuid',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+ '/dev/VolGroup/lv3']
+
+ assert self.mock_process_input[n-2] == [
+ 'lvchange',
+ '--addtag', 'ceph.osd_id=2',
+ '--addtag', 'ceph.type=db',
+ '--addtag', 'ceph.osd_fsid=1234',
+ '--addtag', 'ceph.cluster_name=ceph',
+ '--addtag', 'ceph.db_uuid=new-db-uuid',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+ '/dev/VolGroup/lv2_new']
+
+ assert self.mock_process_input[n-1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph-2',
+ '--dev-target', '/dev/VolGroup/lv2_new',
+ '--command', 'bluefs-bdev-migrate',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block.db']
+
+ @patch('os.getuid')
+ def test_migrate_data_db_wal_to_new_db(self, m_getuid, monkeypatch):
+ m_getuid.return_value = 0
+
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_wal_tags = 'ceph.osd_id=0,ceph.type=wal,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2_new',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: False)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'data', 'db', 'wal',
+ '--target', 'vgname/new_wal'])
+ m.main()
+
+ n = len(self.mock_process_input)
+ assert n >= 6
+
+ assert self.mock_process_input[n-6] == [
+ 'lvchange',
+ '--deltag', 'ceph.osd_id=2',
+ '--deltag', 'ceph.type=db',
+ '--deltag', 'ceph.osd_fsid=1234',
+ '--deltag', 'ceph.cluster_name=ceph',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '/dev/VolGroup/lv2']
+
+ assert self.mock_process_input[n-5] == [
+ 'lvchange',
+ '--deltag', 'ceph.osd_id=0',
+ '--deltag', 'ceph.type=wal',
+ '--deltag', 'ceph.osd_fsid=1234',
+ '--deltag', 'ceph.cluster_name=ceph',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '--deltag', 'ceph.wal_uuid=waluuid',
+ '--deltag', 'ceph.wal_device=wal_dev',
+ '/dev/VolGroup/lv3']
+
+ assert self.mock_process_input[n-4] == [
+ 'lvchange',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '--deltag', 'ceph.wal_uuid=waluuid',
+ '--deltag', 'ceph.wal_device=wal_dev',
+ '/dev/VolGroup/lv1']
+
+ assert self.mock_process_input[n-3] == [
+ 'lvchange',
+ '--addtag', 'ceph.db_uuid=new-db-uuid',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+ '/dev/VolGroup/lv1']
+
+ assert self.mock_process_input[n-2] == [
+ 'lvchange',
+ '--addtag', 'ceph.osd_id=2',
+ '--addtag', 'ceph.type=db',
+ '--addtag', 'ceph.osd_fsid=1234',
+ '--addtag', 'ceph.cluster_name=ceph',
+ '--addtag', 'ceph.db_uuid=new-db-uuid',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+ '/dev/VolGroup/lv2_new']
+
+ assert self.mock_process_input[n-1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph-2',
+ '--dev-target', '/dev/VolGroup/lv2_new',
+ '--command', 'bluefs-bdev-migrate',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block.db',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block.wal']
+
+ @patch('os.getuid')
+ def test_dont_migrate_data_db_wal_to_new_data(self,
+ m_getuid,
+ monkeypatch,
+ capsys):
+ m_getuid.return_value = 0
+
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2_new',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: False)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'data',
+ '--target', 'vgname/new_data'])
+
+ with pytest.raises(SystemExit) as error:
+ m.main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Unable to migrate to : vgname/new_data'
+ assert expected in str(error.value)
+ expected = 'Unable to determine new volume type,' \
+ ' please use new-db or new-wal command before.'
+ assert expected in stderr
+
+ @patch('os.getuid')
+ def test_dont_migrate_db_to_wal(self,
+ m_getuid,
+ monkeypatch,
+ capsys):
+ m_getuid.return_value = 0
+
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = wal_vol
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: False)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'db',
+ '--target', 'vgname/wal'])
+
+ with pytest.raises(SystemExit) as error:
+ m.main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Unable to migrate to : vgname/wal'
+ assert expected in str(error.value)
+ expected = 'Migrate to WAL is not supported'
+ assert expected in stderr
+
+ @patch('os.getuid')
+ def test_migrate_data_db_to_db(self,
+ m_getuid,
+ monkeypatch,
+ capsys):
+ m_getuid.return_value = 0
+
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = db_vol
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: False)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'db', 'data',
+ '--target', 'vgname/db'])
+
+ m.main()
+
+ n = len(self.mock_process_input)
+ assert n >= 1
+ for s in self.mock_process_input:
+ print(s)
+
+ assert self.mock_process_input[n-1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph-2',
+ '--dev-target', '/var/lib/ceph/osd/ceph-2/block.db',
+ '--command', 'bluefs-bdev-migrate',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block']
+
+ def test_migrate_data_db_to_db_active_systemd(self, is_root, monkeypatch, capsys):
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = db_vol
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: True)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'db', 'data',
+ '--target', 'vgname/db'])
+
+ with pytest.raises(SystemExit) as error:
+ m.main()
+
+ stdout, stderr = capsys.readouterr()
+
+ assert 'Unable to migrate devices associated with OSD ID: 2' == str(error.value)
+ assert '--> OSD is running, stop it with: systemctl stop ceph-osd@2' == stderr.rstrip()
+ assert not stdout
+
+ def test_migrate_data_db_to_db_no_systemd(self, is_root, monkeypatch):
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = db_vol
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'db', 'data',
+ '--target', 'vgname/db',
+ '--no-systemd'])
+
+ m.main()
+
+ n = len(self.mock_process_input)
+ assert n >= 1
+ for s in self.mock_process_input:
+ print(s)
+
+ assert self.mock_process_input[n-1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph-2',
+ '--dev-target', '/var/lib/ceph/osd/ceph-2/block.db',
+ '--command', 'bluefs-bdev-migrate',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block']
+
+ @patch('os.getuid')
+ def test_migrate_data_wal_to_db(self,
+ m_getuid,
+ monkeypatch,
+ capsys):
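+ # patching getuid() to return 0 makes the command believe it runs as
+ # root, mirroring the is_root fixture used by the neighboring tests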
+ m_getuid.return_value = 0
+
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = db_vol
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: False)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'db', 'data', 'wal',
+ '--target', 'vgname/db'])
+
+ m.main()
+
+ n = len(self.mock_process_input)
+ assert n >= 1
+ for s in self.mock_process_input:
+ print(s)
+
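+ # expected tail of the call sequence: the wal LV is stripped of all its
+ # ceph tags (its contents are migrated away), the data and db LVs lose
+ # only their wal_* tags, and the final call performs the migration itself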
+ assert self.mock_process_input[n-4] == [
+ 'lvchange',
+ '--deltag', 'ceph.osd_id=2',
+ '--deltag', 'ceph.type=wal',
+ '--deltag', 'ceph.osd_fsid=1234',
+ '--deltag', 'ceph.cluster_name=ceph',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '--deltag', 'ceph.wal_uuid=waluuid',
+ '--deltag', 'ceph.wal_device=wal_dev',
+ '/dev/VolGroup/lv3']
+ assert self.mock_process_input[n-3] == [
+ 'lvchange',
+ '--deltag', 'ceph.wal_uuid=waluuid',
+ '--deltag', 'ceph.wal_device=wal_dev',
+ '/dev/VolGroup/lv1']
+ assert self.mock_process_input[n-2] == [
+ 'lvchange',
+ '--deltag', 'ceph.wal_uuid=waluuid',
+ '--deltag', 'ceph.wal_device=wal_dev',
+ '/dev/VolGroup/lv2']
+ assert self.mock_process_input[n-1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph-2',
+ '--dev-target', '/var/lib/ceph/osd/ceph-2/block.db',
+ '--command', 'bluefs-bdev-migrate',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block.wal']
+
+ def test_migrate_data_wal_to_db_active_systemd(self, is_root, monkeypatch, capsys):
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = db_vol
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: True)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'db', 'data', 'wal',
+ '--target', 'vgname/db'])
+
+ with pytest.raises(SystemExit) as error:
+ m.main()
+
+ stdout, stderr = capsys.readouterr()
+
+ assert 'Unable to migrate devices associated with OSD ID: 2' == str(error.value)
+ assert '--> OSD is running, stop it with: systemctl stop ceph-osd@2' == stderr.rstrip()
+ assert not stdout
+
+ def test_migrate_data_wal_to_db_no_systemd(self, is_root, monkeypatch):
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = db_vol
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'db', 'data', 'wal',
+ '--target', 'vgname/db',
+ '--no-systemd'])
+
+ m.main()
+
+ n = len(self.mock_process_input)
+ assert n >= 1
+ for s in self.mock_process_input:
+ print(s)
+
+ assert self.mock_process_input[n-4] == [
+ 'lvchange',
+ '--deltag', 'ceph.osd_id=2',
+ '--deltag', 'ceph.type=wal',
+ '--deltag', 'ceph.osd_fsid=1234',
+ '--deltag', 'ceph.cluster_name=ceph',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '--deltag', 'ceph.wal_uuid=waluuid',
+ '--deltag', 'ceph.wal_device=wal_dev',
+ '/dev/VolGroup/lv3']
+ assert self.mock_process_input[n-3] == [
+ 'lvchange',
+ '--deltag', 'ceph.wal_uuid=waluuid',
+ '--deltag', 'ceph.wal_device=wal_dev',
+ '/dev/VolGroup/lv1']
+ assert self.mock_process_input[n-2] == [
+ 'lvchange',
+ '--deltag', 'ceph.wal_uuid=waluuid',
+ '--deltag', 'ceph.wal_device=wal_dev',
+ '/dev/VolGroup/lv2']
+ assert self.mock_process_input[n-1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph-2',
+ '--dev-target', '/var/lib/ceph/osd/ceph-2/block.db',
+ '--command', 'bluefs-bdev-migrate',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block.wal']
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py
new file mode 100644
index 000000000..9f0a5e0bb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py
@@ -0,0 +1,189 @@
+import pytest
+from ceph_volume.devices import lvm
+from ceph_volume.api import lvm as api
+from mock.mock import patch, Mock, MagicMock
+
+
+class TestLVM(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.main.LVM([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Use LVM and LVM-based technologies to deploy' in stdout
+
+ def test_main_shows_activate_subcommands(self, capsys):
+ lvm.main.LVM([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'activate ' in stdout
+ assert 'Discover and mount' in stdout
+
+ def test_main_shows_prepare_subcommands(self, capsys):
+ lvm.main.LVM([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'prepare ' in stdout
+ assert 'Format an LVM device' in stdout
+
+
+class TestPrepareDevice(object):
+
+ def test_cannot_use_device(self, factory):
+ args = factory(data='/dev/var/foo')
+ with pytest.raises(RuntimeError) as error:
+ p = lvm.prepare.Prepare([])
+ p.args = args
+ p.prepare_data_device('data', '0')
+ assert 'Cannot use device (/dev/var/foo)' in str(error.value)
+ assert 'A vg/lv path or an existing device is needed' in str(error.value)
+
+
+class TestGetClusterFsid(object):
+
+ def test_fsid_is_passed_in(self, factory):
+ args = factory(cluster_fsid='aaaa-1111')
+ prepare_obj = lvm.prepare.Prepare([])
+ prepare_obj.args = args
+ assert prepare_obj.get_cluster_fsid() == 'aaaa-1111'
+
+ def test_fsid_is_read_from_ceph_conf(self, factory, conf_ceph_stub):
+ conf_ceph_stub('[global]\nfsid = bbbb-2222')
+ prepare_obj = lvm.prepare.Prepare([])
+ prepare_obj.args = factory(cluster_fsid=None)
+ assert prepare_obj.get_cluster_fsid() == 'bbbb-2222'
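+
+ # together these pin the precedence: an explicitly passed cluster fsid
+ # wins, and the fsid read from ceph.conf is only a fallback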
+
+
+class TestPrepare(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.prepare.Prepare([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Prepare an OSD by assigning an ID and FSID' in stdout
+
+ def test_main_shows_full_help(self, capsys):
+ with pytest.raises(SystemExit):
+ lvm.prepare.Prepare(argv=['--help']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Use the filestore objectstore' in stdout
+ assert 'Use the bluestore objectstore' in stdout
+ assert 'A physical device or logical' in stdout
+
+ @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
+ def test_excludes_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.prepare.Prepare(argv=['--data', '/dev/sdfoo', '--filestore', '--bluestore']).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --filestore (filestore) with --bluestore (bluestore)'
+ assert expected in stderr
+
+ @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
+ def test_excludes_other_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.prepare.Prepare(argv=[
+ '--bluestore', '--data', '/dev/sdfoo',
+ '--journal', '/dev/sf14',
+ ]).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --bluestore (bluestore) with --journal (filestore)'
+ assert expected in stderr
+
+ @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
+ def test_excludes_block_and_journal_flags(self, m_has_bs_label, fake_call, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.prepare.Prepare(argv=[
+ '--bluestore', '--data', '/dev/sdfoo', '--block.db', 'vg/ceph1',
+ '--journal', '/dev/sf14',
+ ]).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --block.db (bluestore) with --journal (filestore)'
+ assert expected in stderr
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
+ def test_journal_is_required_with_filestore(self, m_has_bs_label, m_device, is_root, monkeypatch, device_info):
+ m_device.return_value = MagicMock(exists=True,
+ has_fs=False,
+ used_by_ceph=False,
+ has_partitions=False,
+ has_gpt_headers=False)
+ monkeypatch.setattr("os.path.exists", lambda path: True)
+ with pytest.raises(SystemExit) as error:
+ lvm.prepare.Prepare(argv=['--filestore', '--data', '/dev/sdfoo']).main()
+ expected = '--journal is required when using --filestore'
+ assert expected in str(error.value)
+
+ @patch('ceph_volume.devices.lvm.prepare.api.is_ceph_device')
+ def test_safe_prepare_osd_already_created(self, m_is_ceph_device):
+ m_is_ceph_device.return_value = True
+ with pytest.raises(RuntimeError) as error:
+ prepare = lvm.prepare.Prepare(argv=[])
+ prepare.args = Mock()
+ prepare.args.data = '/dev/sdfoo'
+ prepare.get_lv = Mock()
+ prepare.safe_prepare()
+ expected = 'skipping {}, it is already prepared'.format('/dev/sdfoo')
+ assert expected in str(error.value)
+
+ def test_setup_device_device_name_is_none(self):
+ result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name=None, tags={'ceph.type': 'data'}, size=0, slots=None)
+ assert result == ('', '', {'ceph.type': 'data'})
+
+ @patch('ceph_volume.api.lvm.Volume.set_tags')
+ @patch('ceph_volume.devices.lvm.prepare.api.get_single_lv')
+ def test_setup_device_lv_passed(self, m_get_single_lv, m_set_tags):
+ fake_volume = api.Volume(lv_name='lv_foo', lv_path='/fake-path', vg_name='vg_foo', lv_tags='', lv_uuid='fake-uuid')
+ m_get_single_lv.return_value = fake_volume
+ result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='vg_foo/lv_foo', tags={'ceph.type': 'data'}, size=0, slots=None)
+
+ assert result == ('/fake-path', 'fake-uuid', {'ceph.type': 'data',
+ 'ceph.vdo': '0',
+ 'ceph.data_uuid': 'fake-uuid',
+ 'ceph.data_device': '/fake-path'})
+
+ @patch('ceph_volume.devices.lvm.prepare.api.create_lv')
+ @patch('ceph_volume.api.lvm.Volume.set_tags')
+ @patch('ceph_volume.util.disk.is_device')
+ def test_setup_device_device_passed(self, m_is_device, m_set_tags, m_create_lv):
+ fake_volume = api.Volume(lv_name='lv_foo', lv_path='/fake-path', vg_name='vg_foo', lv_tags='', lv_uuid='fake-uuid')
+ m_is_device.return_value = True
+ m_create_lv.return_value = fake_volume
+ result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='/dev/sdx', tags={'ceph.type': 'data'}, size=0, slots=None)
+
+ assert result == ('/fake-path', 'fake-uuid', {'ceph.type': 'data',
+ 'ceph.vdo': '0',
+ 'ceph.data_uuid': 'fake-uuid',
+ 'ceph.data_device': '/fake-path'})
+
+ @patch('ceph_volume.devices.lvm.prepare.Prepare.get_ptuuid')
+ @patch('ceph_volume.devices.lvm.prepare.api.get_single_lv')
+ def test_setup_device_partition_passed(self, m_get_single_lv, m_get_ptuuid):
+ m_get_single_lv.side_effect = ValueError()
+ m_get_ptuuid.return_value = 'fake-uuid'
+ result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='/dev/sdx', tags={'ceph.type': 'data'}, size=0, slots=None)
+
+ assert result == ('/dev/sdx', 'fake-uuid', {'ceph.type': 'data',
+ 'ceph.vdo': '0',
+ 'ceph.data_uuid': 'fake-uuid',
+ 'ceph.data_device': '/dev/sdx'})
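+
+ # taken together, the setup_device tests above cover its dispatch order:
+ # a None device name is a no-op, a vg/lv name reuses the existing LV, a
+ # whole device gets a fresh LV created on it, and anything else is
+ # treated as a partition keyed by its partition table UUID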
+
+ def test_invalid_osd_id_passed(self):
+ with pytest.raises(SystemExit):
+ lvm.prepare.Prepare(argv=['--osd-id', 'foo']).main()
+
+
+class TestActivate(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.activate.Activate([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Activate OSDs by discovering them with' in stdout
+
+ def test_main_shows_full_help(self, capsys):
+ with pytest.raises(SystemExit):
+ lvm.activate.Activate(argv=['--help']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'optional arguments' in stdout
+ assert 'positional arguments' in stdout
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py
new file mode 100644
index 000000000..b5280f931
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py
@@ -0,0 +1,45 @@
+import pytest
+from ceph_volume import exceptions
+from ceph_volume.devices.lvm import trigger
+
+
+class TestParseOSDid(object):
+
+ def test_no_id_found_if_no_digit(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_id('asdlj-ljahsdfaslkjhdfa')
+
+ def test_no_id_found(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_id('ljahsdfaslkjhdfa')
+
+ def test_id_found(self):
+ result = trigger.parse_osd_id('1-ljahsdfaslkjhdfa')
+ assert result == '1'
+
+
+class TestParseOSDUUID(object):
+
+ def test_uuid_is_parsed(self):
+ result = trigger.parse_osd_uuid('1-asdf-ljkh-asdf-ljkh-asdf')
+ assert result == 'asdf-ljkh-asdf-ljkh-asdf'
+
+ def test_uuid_is_parsed_longer_sha1(self):
+ result = trigger.parse_osd_uuid('1-foo-bar-asdf-ljkh-asdf-ljkh-asdf')
+ assert result == 'foo-bar-asdf-ljkh-asdf-ljkh-asdf'
+
+ def test_uuid_is_not_found(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_uuid('ljahsdfaslkjhdfa')
+
+ def test_uuid_is_not_found_missing_id(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_uuid('ljahs-dfa-slkjhdfa-foo')
+
+ def test_robust_double_id_in_uuid(self):
+ # the osd id can legitimately appear inside the uuid as well; parsing
+ # should still strip only the leading id prefix
+ result = trigger.parse_osd_uuid("1-abc959fd-1ec9-4864-b141-3154f9b9f8ed")
+ assert result == 'abc959fd-1ec9-4864-b141-3154f9b9f8ed'
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py
new file mode 100644
index 000000000..64016111c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py
@@ -0,0 +1,241 @@
+import os
+import pytest
+from copy import deepcopy
+from mock.mock import patch, call
+from ceph_volume import process
+from ceph_volume.api import lvm as api
+from ceph_volume.devices.lvm import zap
+
+
+class TestZap(object):
+ def test_invalid_osd_id_passed(self):
+ with pytest.raises(SystemExit):
+ zap.Zap(argv=['--osd-id', 'foo']).main()
+
+
+class TestFindAssociatedDevices(object):
+
+ def test_no_lvs_found_that_match_id(self, monkeypatch, device_info):
+ tags = 'ceph.osd_id=9,ceph.journal_uuid=x,ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_tags=tags, lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
+
+ with pytest.raises(RuntimeError):
+ zap.find_associated_devices(osd_id=10)
+
+ def test_no_lvs_found_that_match_fsid(self, monkeypatch, device_info):
+ tags = 'ceph.osd_id=9,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,'+\
+ 'ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ vg_name='vg', lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
+
+ with pytest.raises(RuntimeError):
+ zap.find_associated_devices(osd_fsid='aaaa-lkjh')
+
+ def test_no_lvs_found_that_match_id_fsid(self, monkeypatch, device_info):
+ tags = 'ceph.osd_id=9,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,'+\
+ 'ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_tags=tags, lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
+
+ with pytest.raises(RuntimeError):
+ zap.find_associated_devices(osd_id='9', osd_fsid='aaaa-lkjh')
+
+ def test_no_ceph_lvs_found(self, monkeypatch):
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags='',
+ lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
+
+ with pytest.raises(RuntimeError):
+ zap.find_associated_devices(osd_id=100)
+
+ def test_lv_is_matched_id(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = zap.find_associated_devices(osd_id='0')
+ assert result[0].path == '/dev/VolGroup/lv'
+
+ def test_lv_is_matched_fsid(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,' +\
+ 'ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: deepcopy(volumes))
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = zap.find_associated_devices(osd_fsid='asdf-lkjh')
+ assert result[0].path == '/dev/VolGroup/lv'
+
+ def test_lv_is_matched_id_fsid(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,' +\
+ 'ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = zap.find_associated_devices(osd_id='0', osd_fsid='asdf-lkjh')
+ assert result[0].path == '/dev/VolGroup/lv'
+
+
+class TestEnsureAssociatedLVs(object):
+
+ def test_nothing_is_found(self):
+ volumes = []
+ result = zap.ensure_associated_lvs(volumes)
+ assert result == []
+
+ def test_data_is_found(self, fake_call):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=data'
+ osd = api.Volume(
+ lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/data', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert result == ['/dev/VolGroup/data']
+
+ def test_block_is_found(self, fake_call):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=block'
+ osd = api.Volume(
+ lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/block', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert result == ['/dev/VolGroup/block']
+
+ def test_success_message_for_fsid(self, factory, is_root, capsys):
+ cli_zap = zap.Zap([])
+ args = factory(devices=[], osd_id=None, osd_fsid='asdf-lkjh')
+ cli_zap.args = args
+ cli_zap.zap()
+ out, err = capsys.readouterr()
+ assert "Zapping successful for OSD: asdf-lkjh" in err
+
+ def test_success_message_for_id(self, factory, is_root, capsys):
+ cli_zap = zap.Zap([])
+ args = factory(devices=[], osd_id='1', osd_fsid=None)
+ cli_zap.args = args
+ cli_zap.zap()
+ out, err = capsys.readouterr()
+ assert "Zapping successful for OSD: 1" in err
+
+ def test_block_and_partition_are_found(self, monkeypatch):
+ monkeypatch.setattr(zap.disk, 'get_device_from_partuuid', lambda x: '/dev/sdb1')
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=block'
+ osd = api.Volume(
+ lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/block', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/sdb1' in result
+ assert '/dev/VolGroup/block' in result
+
+ def test_journal_is_found(self, fake_call):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=journal'
+ osd = api.Volume(
+ lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert result == ['/dev/VolGroup/lv']
+
+ def test_multiple_journals_are_found(self):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=journal'
+ volumes = []
+ for i in range(3):
+ osd = api.Volume(
+ lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/VolGroup/lv0' in result
+ assert '/dev/VolGroup/lv1' in result
+ assert '/dev/VolGroup/lv2' in result
+
+ def test_multiple_dbs_are_found(self):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=db'
+ volumes = []
+ for i in range(3):
+ osd = api.Volume(
+ lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/VolGroup/lv0' in result
+ assert '/dev/VolGroup/lv1' in result
+ assert '/dev/VolGroup/lv2' in result
+
+ def test_multiple_wals_are_found(self):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.wal_uuid=x,ceph.type=wal'
+ volumes = []
+ for i in range(3):
+ osd = api.Volume(
+ lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/VolGroup/lv0' in result
+ assert '/dev/VolGroup/lv1' in result
+ assert '/dev/VolGroup/lv2' in result
+
+ def test_multiple_backing_devs_are_found(self):
+ volumes = []
+ for _type in ['journal', 'db', 'wal']:
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.wal_uuid=x,ceph.type=%s' % _type
+ osd = api.Volume(
+ lv_name='volume%s' % _type, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % _type, lv_tags=tags)
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/VolGroup/lvjournal' in result
+ assert '/dev/VolGroup/lvwal' in result
+ assert '/dev/VolGroup/lvdb' in result
+
+ @patch('ceph_volume.devices.lvm.zap.api.get_lvs')
+ def test_ensure_associated_lvs(self, m_get_lvs):
+ zap.ensure_associated_lvs([], lv_tags={'ceph.osd_id': '1'})
+ calls = [
+ call(tags={'ceph.type': 'journal', 'ceph.osd_id': '1'}),
+ call(tags={'ceph.type': 'db', 'ceph.osd_id': '1'}),
+ call(tags={'ceph.type': 'wal', 'ceph.osd_id': '1'})
+ ]
+ m_get_lvs.assert_has_calls(calls, any_order=True)
+
+
+class TestWipeFs(object):
+
+ def setup(self):
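+ # CEPH_VOLUME_WIPEFS_INTERVAL is presumably the sleep between wipefs
+ # retries; zeroing it keeps the retry tests below fast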
+ os.environ['CEPH_VOLUME_WIPEFS_INTERVAL'] = '0'
+
+ def test_works_on_second_try(self, stub_call):
+ os.environ['CEPH_VOLUME_WIPEFS_TRIES'] = '2'
+ stub_call([('wiping /dev/sda', '', 1), ('', '', 0)])
+ result = zap.wipefs('/dev/sda')
+ assert result is None
+
+ def test_does_not_work_after_several_tries(self, stub_call):
+ os.environ['CEPH_VOLUME_WIPEFS_TRIES'] = '2'
+ stub_call([('wiping /dev/sda', '', 1), ('', '', 1)])
+ with pytest.raises(RuntimeError):
+ zap.wipefs('/dev/sda')
+
+ def test_does_not_work_default_tries(self, stub_call):
+ stub_call([('wiping /dev/sda', '', 1)]*8)
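+ # eight identical failures are enough to exhaust the default retry budget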
+ with pytest.raises(RuntimeError):
+ zap.wipefs('/dev/sda')
diff --git a/src/ceph-volume/ceph_volume/tests/devices/raw/__init__.py b/src/ceph-volume/ceph_volume/tests/devices/raw/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/raw/__init__.py
diff --git a/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py b/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py
new file mode 100644
index 000000000..5ad501bab
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py
@@ -0,0 +1,238 @@
+import pytest
+from mock.mock import patch
+from ceph_volume.devices import raw
+
+# Below is sample lsblk output that gives an overview of the test scenario (--json output for reader clarity):
+# - sda and all its children are used for the OS
+# - sdb is a bluestore OSD with phantom Atari partitions
+# - sdc is an empty disk
+# - sdd has 2 LVM device children
+# > lsblk --paths --json
+# {
+# "blockdevices": [
+# {"name": "/dev/sda", "maj:min": "8:0", "rm": "0", "size": "128G", "ro": "0", "type": "disk", "mountpoint": null,
+# "children": [
+# {"name": "/dev/sda1", "maj:min": "8:1", "rm": "0", "size": "487M", "ro": "0", "type": "part", "mountpoint": null},
+# {"name": "/dev/sda2", "maj:min": "8:2", "rm": "0", "size": "1.9G", "ro": "0", "type": "part", "mountpoint": null},
+# {"name": "/dev/sda3", "maj:min": "8:3", "rm": "0", "size": "125.6G", "ro": "0", "type": "part", "mountpoint": "/etc/hosts"}
+# ]
+# },
+# {"name": "/dev/sdb", "maj:min": "8:16", "rm": "0", "size": "1T", "ro": "0", "type": "disk", "mountpoint": null,
+# "children": [
+# {"name": "/dev/sdb2", "maj:min": "8:18", "rm": "0", "size": "48G", "ro": "0", "type": "part", "mountpoint": null},
+# {"name": "/dev/sdb3", "maj:min": "8:19", "rm": "0", "size": "6M", "ro": "0", "type": "part", "mountpoint": null}
+# ]
+# },
+# {"name": "/dev/sdc", "maj:min": "8:32", "rm": "0", "size": "1T", "ro": "0", "type": "disk", "mountpoint": null},
+# {"name": "/dev/sdd", "maj:min": "8:48", "rm": "0", "size": "1T", "ro": "0", "type": "disk", "mountpoint": null,
+# "children": [
+# {"name": "/dev/mapper/ceph--osd--block--1", "maj:min": "253:0", "rm": "0", "size": "512G", "ro": "0", "type": "lvm", "mountpoint": null},
+# {"name": "/dev/mapper/ceph--osd--block--2", "maj:min": "253:1", "rm": "0", "size": "512G", "ro": "0", "type": "lvm", "mountpoint": null}
+# ]
+# }
+# ]
+# }
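+#
+# The helpers below encode that scenario: get_devices(), lsblk, the bluestore
+# label check and ceph-bluestore-tool are each stubbed to answer consistently
+# for every device in the tree above.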
+
+def _devices_side_effect():
+ return {
+ "/dev/sda": {},
+ "/dev/sda1": {},
+ "/dev/sda2": {},
+ "/dev/sda3": {},
+ "/dev/sdb": {},
+ "/dev/sdb2": {},
+ "/dev/sdb3": {},
+ "/dev/sdc": {},
+ "/dev/sdd": {},
+ "/dev/mapper/ceph--osd--block--1": {},
+ "/dev/mapper/ceph--osd--block--2": {},
+ }
+
+def _lsblk_all_devices(abspath=True):
+ return [
+ {"NAME": "/dev/sda", "KNAME": "/dev/sda", "PKNAME": ""},
+ {"NAME": "/dev/sda1", "KNAME": "/dev/sda1", "PKNAME": "/dev/sda"},
+ {"NAME": "/dev/sda2", "KNAME": "/dev/sda2", "PKNAME": "/dev/sda"},
+ {"NAME": "/dev/sda3", "KNAME": "/dev/sda3", "PKNAME": "/dev/sda"},
+ {"NAME": "/dev/sdb", "KNAME": "/dev/sdb", "PKNAME": ""},
+ {"NAME": "/dev/sdb2", "KNAME": "/dev/sdb2", "PKNAME": "/dev/sdb"},
+ {"NAME": "/dev/sdb3", "KNAME": "/dev/sdb3", "PKNAME": "/dev/sdb"},
+ {"NAME": "/dev/sdc", "KNAME": "/dev/sdc", "PKNAME": ""},
+ {"NAME": "/dev/sdd", "KNAME": "/dev/sdd", "PKNAME": ""},
+ {"NAME": "/dev/mapper/ceph--osd--block--1", "KNAME": "/dev/mapper/ceph--osd--block--1", "PKNAME": "/dev/sdd"},
+ {"NAME": "/dev/mapper/ceph--osd--block--2", "KNAME": "/dev/mapper/ceph--osd--block--2", "PKNAME": "/dev/sdd"},
+ ]
+
+# dummy lsblk output for a single device, with an optional parent
+def _lsblk_output(dev, parent=None):
+ if parent is None:
+ parent = ""
+ ret = 'NAME="{}" KNAME="{}" PKNAME="{}"'.format(dev, dev, parent)
+ return [ret] # callers expect the output as a list of lines
+
+def _bluestore_tool_label_output_sdb():
+ return '''{
+ "/dev/sdb": {
+ "osd_uuid": "sdb-uuid",
+ "size": 1099511627776,
+ "btime": "2021-07-23T16:02:22.809186+0000",
+ "description": "main",
+ "bfm_blocks": "268435456",
+ "bfm_blocks_per_key": "128",
+ "bfm_bytes_per_block": "4096",
+ "bfm_size": "1099511627776",
+ "bluefs": "1",
+ "ceph_fsid": "sdb-fsid",
+ "kv_backend": "rocksdb",
+ "magic": "ceph osd volume v026",
+ "mkfs_done": "yes",
+ "osd_key": "AQAO6PpgK+y4CBAAixq/X7OVimbaezvwD/cDmg==",
+ "ready": "ready",
+ "require_osd_release": "16",
+ "whoami": "0"
+ }
+}'''
+
+def _bluestore_tool_label_output_sdb2():
+ return '''{
+ "/dev/sdb2": {
+ "osd_uuid": "sdb2-uuid",
+ "size": 1099511627776,
+ "btime": "2021-07-23T16:02:22.809186+0000",
+ "description": "main",
+ "bfm_blocks": "268435456",
+ "bfm_blocks_per_key": "128",
+ "bfm_bytes_per_block": "4096",
+ "bfm_size": "1099511627776",
+ "bluefs": "1",
+ "ceph_fsid": "sdb2-fsid",
+ "kv_backend": "rocksdb",
+ "magic": "ceph osd volume v026",
+ "mkfs_done": "yes",
+ "osd_key": "AQAO6PpgK+y4CBAAixq/X7OVimbaezvwD/cDmg==",
+ "ready": "ready",
+ "require_osd_release": "16",
+ "whoami": "2"
+ }
+}'''
+
+def _bluestore_tool_label_output_dm_okay():
+ return '''{
+ "/dev/mapper/ceph--osd--block--1": {
+ "osd_uuid": "lvm-1-uuid",
+ "size": 549751619584,
+ "btime": "2021-07-23T16:04:37.881060+0000",
+ "description": "main",
+ "bfm_blocks": "134216704",
+ "bfm_blocks_per_key": "128",
+ "bfm_bytes_per_block": "4096",
+ "bfm_size": "549751619584",
+ "bluefs": "1",
+ "ceph_fsid": "lvm-1-fsid",
+ "kv_backend": "rocksdb",
+ "magic": "ceph osd volume v026",
+ "mkfs_done": "yes",
+ "osd_key": "AQCU6Ppgz+UcIRAAh6IUjtPjiXBlEXfwO8ixzw==",
+ "ready": "ready",
+ "require_osd_release": "16",
+ "whoami": "2"
+ }
+}'''
+
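+# the side effect below mimics ceph_volume.process.call, which returns a
+# (stdout_lines, stderr, returncode) triple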
+def _process_call_side_effect(command, **kw):
+ if "lsblk" in command:
+ if "/dev/" in command[-1]:
+ dev = command[-1]
+ if dev == "/dev/sda1" or dev == "/dev/sda2" or dev == "/dev/sda3":
+ return _lsblk_output(dev, parent="/dev/sda"), '', 0
+ if dev == "/dev/sdb2" or dev == "/dev/sdb3":
+ return _lsblk_output(dev, parent="/dev/sdb"), '', 0
+ if dev == "/dev/sda" or dev == "/dev/sdb" or dev == "/dev/sdc" or dev == "/dev/sdd":
+ return _lsblk_output(dev), '', 0
+ if "mapper" in dev:
+ return _lsblk_output(dev, parent="/dev/sdd"), '', 0
+ pytest.fail('dev {} needs behavior specified for it'.format(dev))
+ if "/dev/" not in command:
+ return _lsblk_all_devices(), '', 0
+ pytest.fail('command {} needs behavior specified for it'.format(command))
+
+ if "ceph-bluestore-tool" in command:
+ if "/dev/sdb" in command:
+ # sdb is a bluestore OSD
+ return _bluestore_tool_label_output_sdb(), '', 0
+ if "/dev/sdb2" in command:
+ # sdb2 is a phantom atari partition that appears to have some valid bluestore info
+ return _bluestore_tool_label_output_sdb2(), '', 0
+ if "/dev/mapper/ceph--osd--block--1" in command:
+ # dm device 1 is a valid bluestore OSD (the other is corrupted/invalid)
+ return _bluestore_tool_label_output_dm_okay(), '', 0
+ # sda and children, sdb's children, sdc, sdd, dm device 2 all do NOT have bluestore OSD data
+ return [], 'fake No such file or directory error', 1
+ pytest.fail('command {} needs behavior specified for it'.format(command))
+
+def _has_bluestore_label_side_effect(disk_path):
+ if "/dev/sda" in disk_path:
+ return False # disk and all children are for the OS
+ if disk_path == "/dev/sdb":
+ return True # sdb is a valid bluestore OSD
+ if disk_path == "/dev/sdb2":
+ return True # sdb2 appears to be a valid bluestore OSD even though it should not be
+ if disk_path == "/dev/sdc":
+ return False # empty disk
+ if disk_path == "/dev/sdd":
+ return False # has LVM subdevices
+ if disk_path == "/dev/mapper/ceph--osd--block--1":
+ return True # good OSD
+ if disk_path == "/dev/mapper/ceph--osd--block--2":
+ return False # corrupted
+ pytest.fail('device {} needs behavior specified for it'.format(disk_path))
+
+class TestList(object):
+
+ @patch('ceph_volume.util.device.disk.get_devices')
+ @patch('ceph_volume.util.disk.has_bluestore_label')
+ @patch('ceph_volume.process.call')
+ @patch('ceph_volume.util.disk.lsblk_all')
+ def test_raw_list(self, patched_disk_lsblk, patched_call, patched_bluestore_label, patched_get_devices):
+ raw.list.logger.setLevel("DEBUG")
+ patched_call.side_effect = _process_call_side_effect
+ patched_disk_lsblk.side_effect = _lsblk_all_devices
+ patched_bluestore_label.side_effect = _has_bluestore_label_side_effect
+ patched_get_devices.side_effect = _devices_side_effect
+
+ result = raw.list.List([]).generate()
+ assert len(result) == 3
+
+ sdb = result['sdb-uuid']
+ assert sdb['osd_uuid'] == 'sdb-uuid'
+ assert sdb['osd_id'] == 0
+ assert sdb['device'] == '/dev/sdb'
+ assert sdb['ceph_fsid'] == 'sdb-fsid'
+ assert sdb['type'] == 'bluestore'
+
+ lvm1 = result['lvm-1-uuid']
+ assert lvm1['osd_uuid'] == 'lvm-1-uuid'
+ assert lvm1['osd_id'] == 2
+ assert lvm1['device'] == '/dev/mapper/ceph--osd--block--1'
+ assert lvm1['ceph_fsid'] == 'lvm-1-fsid'
+ assert lvm1['type'] == 'bluestore'
+
+ @patch('ceph_volume.util.device.disk.get_devices')
+ @patch('ceph_volume.util.disk.has_bluestore_label')
+ @patch('ceph_volume.process.call')
+ @patch('ceph_volume.util.disk.lsblk_all')
+ def test_raw_list_with_OSError(self, patched_disk_lsblk, patched_call, patched_bluestore_label, patched_get_devices):
+ def _has_bluestore_label_side_effect_with_OSError(device_path):
+ if device_path == "/dev/sdd":
+ raise OSError('fake OSError')
+ return _has_bluestore_label_side_effect(device_path)
+
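+ # a probing failure on one device must not abort the listing; the
+ # remaining OSDs should still be reported
+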
+ raw.list.logger.setLevel("DEBUG")
+ patched_disk_lsblk.side_effect = _lsblk_all_devices
+ patched_call.side_effect = _process_call_side_effect
+ patched_bluestore_label.side_effect = _has_bluestore_label_side_effect_with_OSError
+ patched_get_devices.side_effect = _devices_side_effect
+
+ result = raw.list.List([]).generate()
+ assert len(result) == 3
+ assert 'sdb-uuid' in result
diff --git a/src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py b/src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py
new file mode 100644
index 000000000..f814bbf13
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py
@@ -0,0 +1,97 @@
+import pytest
+from ceph_volume.devices import raw
+from mock.mock import patch
+
+
+class TestRaw(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ raw.main.Raw([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Manage a single-device OSD on a raw block device.' in stdout
+
+ def test_main_shows_activate_subcommands(self, capsys):
+ raw.main.Raw([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'activate ' in stdout
+ assert 'Discover and prepare' in stdout
+
+ def test_main_shows_prepare_subcommands(self, capsys):
+ raw.main.Raw([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'prepare ' in stdout
+ assert 'Format a raw device' in stdout
+
+
+class TestPrepare(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ raw.prepare.Prepare([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Prepare an OSD by assigning an ID and FSID' in stdout
+
+ def test_main_shows_full_help(self, capsys):
+ with pytest.raises(SystemExit):
+ raw.prepare.Prepare(argv=['--help']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'a raw device to use for the OSD' in stdout
+ assert 'Crush device class to assign this OSD to' in stdout
+ assert 'Use BlueStore backend' in stdout
+ assert 'Path to bluestore block.db block device' in stdout
+ assert 'Path to bluestore block.wal block device' in stdout
+ assert 'Enable device encryption via dm-crypt' in stdout
+
+ @patch('ceph_volume.util.arg_validators.ValidRawDevice.__call__')
+ def test_prepare_dmcrypt_no_secret_passed(self, m_valid_device, capsys):
+ m_valid_device.return_value = '/dev/foo'
+ with pytest.raises(SystemExit):
+ raw.prepare.Prepare(argv=['--bluestore', '--data', '/dev/foo', '--dmcrypt']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'CEPH_VOLUME_DMCRYPT_SECRET is not set, you must set' in stderr
+
+ @patch('ceph_volume.util.encryption.luks_open')
+ @patch('ceph_volume.util.encryption.luks_format')
+ @patch('ceph_volume.util.disk.lsblk')
+ def test_prepare_dmcrypt_block(self, m_lsblk, m_luks_format, m_luks_open):
+ m_lsblk.return_value = {'KNAME': 'foo'}
+ m_luks_format.return_value = True
+ m_luks_open.return_value = True
+ result = raw.prepare.prepare_dmcrypt('foo', '/dev/foo', 'block', '123')
+ m_luks_open.assert_called_with('foo', '/dev/foo', 'ceph-123-foo-block-dmcrypt')
+ m_luks_format.assert_called_with('foo', '/dev/foo')
+ assert result == '/dev/mapper/ceph-123-foo-block-dmcrypt'
+
+ @patch('ceph_volume.util.encryption.luks_open')
+ @patch('ceph_volume.util.encryption.luks_format')
+ @patch('ceph_volume.util.disk.lsblk')
+ def test_prepare_dmcrypt_db(self, m_lsblk, m_luks_format, m_luks_open):
+ m_lsblk.return_value = {'KNAME': 'foo'}
+ m_luks_format.return_value = True
+ m_luks_open.return_value = True
+ result = raw.prepare.prepare_dmcrypt('foo', '/dev/foo', 'db', '123')
+ m_luks_open.assert_called_with('foo', '/dev/foo', 'ceph-123-foo-db-dmcrypt')
+ m_luks_format.assert_called_with('foo', '/dev/foo')
+ assert result == '/dev/mapper/ceph-123-foo-db-dmcrypt'
+
+ @patch('ceph_volume.util.encryption.luks_open')
+ @patch('ceph_volume.util.encryption.luks_format')
+ @patch('ceph_volume.util.disk.lsblk')
+ def test_prepare_dmcrypt_wal(self, m_lsblk, m_luks_format, m_luks_open):
+ m_lsblk.return_value = {'KNAME': 'foo'}
+ m_luks_format.return_value = True
+ m_luks_open.return_value = True
+ result = raw.prepare.prepare_dmcrypt('foo', '/dev/foo', 'wal', '123')
+ m_luks_open.assert_called_with('foo', '/dev/foo', 'ceph-123-foo-wal-dmcrypt')
+ m_luks_format.assert_called_with('foo', '/dev/foo')
+ assert result == '/dev/mapper/ceph-123-foo-wal-dmcrypt'
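+
+ # all three cases assert the same mapper naming scheme,
+ # ceph-<osd_fsid>-<kname>-<block|db|wal>-dmcrypt, where '123' presumably
+ # stands in for the OSD fsid and 'foo' is the mocked kernel device name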
+
+ @patch('ceph_volume.devices.raw.prepare.rollback_osd')
+ @patch('ceph_volume.devices.raw.prepare.Prepare.prepare')
+ @patch('ceph_volume.util.arg_validators.ValidRawDevice.__call__')
+ def test_safe_prepare_exception_raised(self, m_valid_device, m_prepare, m_rollback_osd):
+ m_valid_device.return_value = '/dev/foo'
+ m_prepare.side_effect = Exception('foo')
+ m_rollback_osd.return_value = 'foobar'
+ with pytest.raises(Exception):
+ raw.prepare.Prepare(argv=['--bluestore', '--data', '/dev/foo']).main()
+ m_rollback_osd.assert_called()
diff --git a/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py b/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py
new file mode 100644
index 000000000..5c7bd3117
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py
@@ -0,0 +1,200 @@
+import os
+import pytest
+from ceph_volume.devices.simple import activate
+
+
+class TestActivate(object):
+
+ def test_no_data_uuid(self, factory, is_root, monkeypatch, capture, fake_filesystem):
+ fake_filesystem.create_file('/tmp/json-config', contents='{}')
+ args = factory(osd_id='0', osd_fsid='1234', json_config='/tmp/json-config')
+ with pytest.raises(RuntimeError):
+ activate.Activate([]).activate(args)
+
+ def test_invalid_json_path(self):
+ os.environ['CEPH_VOLUME_SIMPLE_JSON_DIR'] = '/non/existing/path'
+ with pytest.raises(RuntimeError) as error:
+ activate.Activate(['1', 'asdf']).main()
+ assert 'Expected JSON config path not found' in str(error.value)
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ activate.Activate([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Activate OSDs by mounting devices previously configured' in stdout
+
+ def test_activate_all(self, is_root, monkeypatch):
+ '''
+ make sure Activate calls activate for each file returned by glob
+ '''
+ mocked_glob = []
+ def mock_glob(glob):
+ path = os.path.dirname(glob)
+ mocked_glob.extend(['{}/{}.json'.format(path, file_) for file_ in
+ ['1', '2', '3']])
+ return mocked_glob
+ activate_files = []
+ def mock_activate(self, args):
+ activate_files.append(args.json_config)
+ monkeypatch.setattr('glob.glob', mock_glob)
+ monkeypatch.setattr(activate.Activate, 'activate', mock_activate)
+ activate.Activate(['--all']).main()
+ assert activate_files == mocked_glob
+
+
+class TestEnableSystemdUnits(object):
+
+ def test_nothing_is_activated(self, is_root, capsys, fake_filesystem):
+ fake_filesystem.create_file('/tmp/json-config', contents='{}')
+ activation = activate.Activate(['--no-systemd', '--file', '/tmp/json-config', '0', '1234'], from_trigger=True)
+ activation.activate = lambda x: True
+ activation.main()
+ activation.enable_systemd_units('0', '1234')
+ stdout, stderr = capsys.readouterr()
+ assert 'Skipping enabling of `simple`' in stderr
+ assert 'Skipping masking of ceph-disk' in stderr
+ assert 'Skipping enabling and starting OSD simple' in stderr
+
+ def test_no_systemd_flag_is_true(self, is_root, fake_filesystem):
+ fake_filesystem.create_file('/tmp/json-config', contents='{}')
+ activation = activate.Activate(['--no-systemd', '--file', '/tmp/json-config', '0', '1234'], from_trigger=True)
+ activation.activate = lambda x: True
+ activation.main()
+ assert activation.skip_systemd is True
+
+ def test_no_systemd_flag_is_false(self, is_root, fake_filesystem):
+ fake_filesystem.create_file('/tmp/json-config', contents='{}')
+ activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=True)
+ activation.activate = lambda x: True
+ activation.main()
+ assert activation.skip_systemd is False
+
+ def test_masks_ceph_disk(self, is_root, monkeypatch, capture, fake_filesystem):
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', capture)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True)
+
+ fake_filesystem.create_file('/tmp/json-config', contents='{}')
+ activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=False)
+ activation.activate = lambda x: True
+ activation.main()
+ activation.enable_systemd_units('0', '1234')
+ assert len(capture.calls) == 1
+
+ def test_enables_simple_unit(self, is_root, monkeypatch, capture, fake_filesystem):
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', capture)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True)
+
+ fake_filesystem.create_file('/tmp/json-config', contents='{}')
+ activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=False)
+ activation.activate = lambda x: True
+ activation.main()
+ activation.enable_systemd_units('0', '1234')
+ assert len(capture.calls) == 1
+ assert capture.calls[0]['args'] == ('0', '1234', 'simple')
+
+ def test_enables_osd_unit(self, is_root, monkeypatch, capture, fake_filesystem):
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', capture)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True)
+
+ fake_filesystem.create_file('/tmp/json-config', contents='{}')
+ activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=False)
+ activation.activate = lambda x: True
+ activation.main()
+ activation.enable_systemd_units('0', '1234')
+ assert len(capture.calls) == 1
+ assert capture.calls[0]['args'] == ('0',)
+
+ def test_starts_osd_unit(self, is_root, monkeypatch, capture, fake_filesystem):
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', capture)
+
+ fake_filesystem.create_file('/tmp/json-config', contents='{}')
+ activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=False)
+ activation.activate = lambda x: True
+ activation.main()
+ activation.enable_systemd_units('0', '1234')
+ assert len(capture.calls) == 1
+ assert capture.calls[0]['args'] == ('0',)
+
+
+class TestValidateDevices(object):
+
+ def test_filestore_missing_journal(self):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError) as error:
+ activation.validate_devices({'type': 'filestore', 'data': {}})
+ assert 'Unable to activate filestore OSD due to missing devices' in str(error.value)
+
+ def test_filestore_missing_data(self):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError) as error:
+ activation.validate_devices({'type': 'filestore', 'journal': {}})
+ assert 'Unable to activate filestore OSD due to missing devices' in str(error.value)
+
+ def test_filestore_journal_device_found(self, capsys):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError):
+ activation.validate_devices({'type': 'filestore', 'journal': {}})
+ stdout, stderr = capsys.readouterr()
+ assert "devices found: ['journal']" in stderr
+
+ def test_filestore_data_device_found(self, capsys):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError):
+ activation.validate_devices({'type': 'filestore', 'data': {}})
+ stdout, stderr = capsys.readouterr()
+ assert "devices found: ['data']" in stderr
+
+ def test_filestore_with_all_devices(self):
+ activation = activate.Activate([])
+ result = activation.validate_devices({'type': 'filestore', 'journal': {}, 'data': {}})
+ assert result is True
+
+ def test_filestore_without_type(self):
+ activation = activate.Activate([])
+ result = activation.validate_devices({'journal': {}, 'data': {}})
+ assert result is True
+
+ def test_bluestore_with_all_devices(self):
+ activation = activate.Activate([])
+ result = activation.validate_devices({'type': 'bluestore', 'data': {}, 'block': {}})
+ assert result is True
+
+ def test_bluestore_without_type(self):
+ activation = activate.Activate([])
+ result = activation.validate_devices({'data': {}, 'block': {}})
+ assert result is True
+
+ def test_bluestore_is_default(self):
+ activation = activate.Activate([])
+ result = activation.validate_devices({'data': {}, 'block': {}})
+ assert result is True
+
+ def test_bluestore_data_device_found(self, capsys):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError):
+ activation.validate_devices({'data': {}})
+ stdout, stderr = capsys.readouterr()
+ assert "devices found: ['data']" in stderr
+
+ def test_bluestore_missing_data(self):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError) as error:
+ activation.validate_devices({'type': 'bluestore', 'block': {}})
+ assert 'Unable to activate bluestore OSD due to missing devices' in str(error.value)
+
+ def test_bluestore_block_device_found(self, capsys):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError):
+ activation.validate_devices({'block': {}})
+ stdout, stderr = capsys.readouterr()
+ assert "devices found: ['block']" in stderr
diff --git a/src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py b/src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py
new file mode 100644
index 000000000..b5d120655
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py
@@ -0,0 +1,71 @@
+import os
+import pytest
+from ceph_volume.devices.simple import scan
+
+
+class TestGetContents(object):
+
+ def setup(self):
+ self.magic_file_name = '/tmp/magic-file'
+
+ def test_multiple_lines_are_left_as_is(self, fake_filesystem):
+ magic_file = fake_filesystem.create_file(self.magic_file_name, contents='first\nsecond\n')
+ scanner = scan.Scan([])
+ assert scanner.get_contents(magic_file.path) == 'first\nsecond\n'
+
+ def test_extra_whitespace_gets_removed(self, fake_filesystem):
+ magic_file = fake_filesystem.create_file(self.magic_file_name, contents='first ')
+ scanner = scan.Scan([])
+ assert scanner.get_contents(magic_file.path) == 'first'
+
+ def test_single_newline_values_are_trimmed(self, fake_filesystem):
+ magic_file = fake_filesystem.create_file(self.magic_file_name, contents='first\n')
+ scanner = scan.Scan([])
+ assert scanner.get_contents(magic_file.path) == 'first'
+
+
+class TestEtcPath(object):
+
+ def test_directory_is_valid(self, tmpdir):
+ path = str(tmpdir)
+ scanner = scan.Scan([])
+ scanner._etc_path = path
+ assert scanner.etc_path == path
+
+ def test_directory_does_not_exist_gets_created(self, tmpdir):
+ path = os.path.join(str(tmpdir), 'subdir')
+ scanner = scan.Scan([])
+ scanner._etc_path = path
+ assert scanner.etc_path == path
+ assert os.path.isdir(path)
+
+ def test_complains_when_file(self, fake_filesystem):
+ etc_dir = fake_filesystem.create_file('/etc/ceph/osd')
+ scanner = scan.Scan([])
+ scanner._etc_path = etc_dir.path
+ with pytest.raises(RuntimeError):
+ scanner.etc_path
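+
+ # etc_path lazily creates the directory when it is missing and refuses
+ # to proceed when the path exists but is a regular file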
+
+
+class TestParseKeyring(object):
+
+ def test_newlines_are_removed(self):
+ contents = [
+ '[client.osd-lockbox.8d7a8ab2-5db0-4f83-a785-2809aba403d5]',
+ '\tkey = AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==', '']
+ assert '\n' not in scan.parse_keyring('\n'.join(contents))
+
+ def test_key_has_spaces_removed(self):
+ contents = [
+ '[client.osd-lockbox.8d7a8ab2-5db0-4f83-a785-2809aba403d5]',
+ '\tkey = AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==', '']
+ result = scan.parse_keyring('\n'.join(contents))
+ assert result.startswith(' ') is False
+ assert result.endswith(' ') is False
+
+ def test_actual_key_is_extracted(self):
+ contents = [
+ '[client.osd-lockbox.8d7a8ab2-5db0-4f83-a785-2809aba403d5]',
+ '\tkey = AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==', '']
+ result = scan.parse_keyring('\n'.join(contents))
+ assert result == 'AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA=='
diff --git a/src/ceph-volume/ceph_volume/tests/devices/simple/test_trigger.py b/src/ceph-volume/ceph_volume/tests/devices/simple/test_trigger.py
new file mode 100644
index 000000000..d3220f2b0
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/simple/test_trigger.py
@@ -0,0 +1,45 @@
+import pytest
+from ceph_volume import exceptions
+from ceph_volume.devices.simple import trigger
+
+
+class TestParseOSDid(object):
+
+ def test_no_id_found_if_no_digit(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_id('asdlj-ljahsdfaslkjhdfa')
+
+ def test_no_id_found(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_id('ljahsdfaslkjhdfa')
+
+ def test_id_found(self):
+ result = trigger.parse_osd_id('1-ljahsdfaslkjhdfa')
+ assert result == '1'
+
+
+class TestParseOSDUUID(object):
+
+ def test_uuid_is_parsed(self):
+ result = trigger.parse_osd_uuid('1-asdf-ljkh-asdf-ljkh-asdf')
+ assert result == 'asdf-ljkh-asdf-ljkh-asdf'
+
+ def test_uuid_is_parsed_longer_sha1(self):
+ result = trigger.parse_osd_uuid('1-foo-bar-asdf-ljkh-asdf-ljkh-asdf')
+ assert result == 'foo-bar-asdf-ljkh-asdf-ljkh-asdf'
+
+ def test_uuid_is_not_found(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_uuid('ljahsdfaslkjhdfa')
+
+ def test_uuid_is_not_found_missing_id(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_uuid('ljahs-dfa-slkjhdfa-foo')
+
+ def test_robust_double_id_in_uuid(self):
+ # the osd id can legitimately appear inside the uuid as well; parsing
+ # should still strip only the leading id prefix
+ result = trigger.parse_osd_uuid("1-abc959fd-1ec9-4864-b141-3154f9b9f8ed")
+ assert result == 'abc959fd-1ec9-4864-b141-3154f9b9f8ed'
diff --git a/src/ceph-volume/ceph_volume/tests/devices/test_zap.py b/src/ceph-volume/ceph_volume/tests/devices/test_zap.py
new file mode 100644
index 000000000..745b58ae5
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/test_zap.py
@@ -0,0 +1,38 @@
+import pytest
+from ceph_volume.devices import lvm
+from mock.mock import patch, MagicMock
+
+
+class TestZap(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.zap.Zap([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Zaps the given logical volume(s), raw device(s) or partition(s)' in stdout
+
+ def test_main_shows_full_help(self, capsys):
+ with pytest.raises(SystemExit):
+ lvm.zap.Zap(argv=['--help']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'optional arguments' in stdout
+
+ @pytest.mark.parametrize('device_name', [
+ '/dev/mapper/foo',
+ '/dev/dm-0',
+ ])
+ @patch('ceph_volume.util.arg_validators.Device')
+ def test_can_not_zap_mapper_device(self, mocked_device, monkeypatch, device_info, capsys, is_root, device_name):
+ monkeypatch.setattr('os.path.exists', lambda x: True)
+ mocked_device.return_value = MagicMock(
+ is_mapper=True,
+ is_mpath=False,
+ used_by_ceph=True,
+ exists=True,
+ has_partitions=False,
+ has_gpt_headers=False,
+ has_fs=False
+ )
+ with pytest.raises(SystemExit):
+ lvm.zap.Zap(argv=[device_name]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Refusing to zap' in stderr