Diffstat (limited to 'src/ceph-volume/ceph_volume/tests/devices/lvm')
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py           0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py    414
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py       280
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py        8
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py       48
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py   59
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py     265
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py     174
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py      45
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py         236
10 files changed, 1529 insertions, 0 deletions
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py
new file mode 100644
index 00000000..33e0ed32
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py
@@ -0,0 +1,414 @@
+import pytest
+from copy import deepcopy
+from ceph_volume.devices.lvm import activate
+from ceph_volume.api import lvm as api
+from ceph_volume.tests.conftest import Capture
+
+
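+# Args stands in for the argparse namespace that the activate command would
+# normally build: keyword arguments become attributes on top of the default
+# flags below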
+class Args(object):
+
+ def __init__(self, **kw):
+ # default flags
+ self.bluestore = False
+ self.filestore = False
+ self.no_systemd = False
+ self.auto_detect_objectstore = None
+ for k, v in kw.items():
+ setattr(self, k, v)
+
+
+class TestActivate(object):
+
+    # these tests are very functional, hence the heavy patching; it is hard
+    # to test the negative side effects with an actual functional run, so we
+    # must set up a perfect scenario to check that activation really works
+    # with/without an osd_id
+ def test_no_osd_id_matches_fsid(self, is_root, monkeypatch, capture):
+ FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(FooVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: volumes)
+ monkeypatch.setattr(activate, 'activate_filestore', capture)
+ args = Args(osd_id=None, osd_fsid='1234', filestore=True)
+ activate.Activate([]).activate(args)
+ assert capture.calls[0]['args'][0] == [FooVolume]
+
+ def test_no_osd_id_matches_fsid_bluestore(self, is_root, monkeypatch, capture):
+ FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(FooVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: volumes)
+ monkeypatch.setattr(activate, 'activate_bluestore', capture)
+ args = Args(osd_id=None, osd_fsid='1234', bluestore=True)
+ activate.Activate([]).activate(args)
+ assert capture.calls[0]['args'][0] == [FooVolume]
+
+ def test_no_osd_id_no_matching_fsid(self, is_root, monkeypatch, capture):
+ FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_fsid=1111")
+ volumes = []
+ volumes.append(FooVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: [])
+ monkeypatch.setattr(api, 'get_first_lv', lambda **kwargs: [])
+ monkeypatch.setattr(activate, 'activate_filestore', capture)
+
+ args = Args(osd_id=None, osd_fsid='2222')
+ with pytest.raises(RuntimeError):
+ activate.Activate([]).activate(args)
+
+ def test_filestore_no_systemd(self, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ JournalVolume = api.Volume(
+ lv_name='journal',
+ lv_path='/dev/vg/journal',
+ lv_uuid='000',
+ lv_tags=','.join([
+ "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
+ "ceph.journal_uuid=000", "ceph.type=journal",
+ "ceph.osd_id=0", "ceph.osd_fsid=1234"])
+ )
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_uuid='001',
+ lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
+ "journal,ceph.journal_uuid=000,ceph.type=data," + \
+ "ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ volumes.append(JournalVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=True, filestore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls == []
+ assert fake_start_osd.calls == []
+
+ def test_filestore_no_systemd_autodetect(self, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ JournalVolume = api.Volume(
+ lv_name='journal',
+ lv_path='/dev/vg/journal',
+ lv_uuid='000',
+ lv_tags=','.join([
+ "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
+ "ceph.journal_uuid=000", "ceph.type=journal",
+ "ceph.osd_id=0", "ceph.osd_fsid=1234"])
+ )
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_uuid='001',
+ lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
+ "journal,ceph.journal_uuid=000,ceph.type=data," + \
+ "ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ volumes.append(JournalVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=True,
+ filestore=True, auto_detect_objectstore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls == []
+ assert fake_start_osd.calls == []
+
+ def test_filestore_systemd_autodetect(self, is_root, monkeypatch, capture):
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ JournalVolume = api.Volume(
+ lv_name='journal',
+ lv_path='/dev/vg/journal',
+ lv_uuid='000',
+ lv_tags=','.join([
+ "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
+ "ceph.journal_uuid=000", "ceph.type=journal",
+ "ceph.osd_id=0","ceph.osd_fsid=1234"])
+ )
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_uuid='001',
+ lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
+ "journal,ceph.journal_uuid=000,ceph.type=data," + \
+ "ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ volumes.append(JournalVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
+ filestore=True, auto_detect_objectstore=False)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls != []
+ assert fake_start_osd.calls != []
+
+ def test_filestore_systemd(self, is_root, monkeypatch, capture):
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ JournalVolume = api.Volume(
+ lv_name='journal',
+ lv_path='/dev/vg/journal',
+ lv_uuid='000',
+ lv_tags=','.join([
+ "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
+ "ceph.journal_uuid=000", "ceph.type=journal",
+ "ceph.osd_id=0","ceph.osd_fsid=1234"])
+ )
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_uuid='001',
+ lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
+ "journal,ceph.journal_uuid=000,ceph.type=data," + \
+ "ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ volumes.append(JournalVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
+ filestore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls != []
+ assert fake_start_osd.calls != []
+
+ def test_bluestore_no_systemd(self, is_root, monkeypatch, capture):
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000," + \
+ "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=True, bluestore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls == []
+ assert fake_start_osd.calls == []
+
+ def test_bluestore_systemd(self, is_root, monkeypatch, capture):
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000," + \
+ "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
+ bluestore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls != []
+ assert fake_start_osd.calls != []
+
+ def test_bluestore_no_systemd_autodetect(self, is_root, monkeypatch, capture):
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_tags="ceph.cluster_name=ceph,,ceph.block_uuid=000," + \
+ "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=True,
+ bluestore=True, auto_detect_objectstore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls == []
+ assert fake_start_osd.calls == []
+
+ def test_bluestore_systemd_autodetect(self, is_root, monkeypatch, capture):
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.path_is_mounted',
+ lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw:
+ True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000," + \
+ "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
+ bluestore=True, auto_detect_objectstore=False)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls != []
+ assert fake_start_osd.calls != []
+
+
+class TestActivateFlags(object):
+
+ def test_default_objectstore(self, capture):
+ args = ['0', 'asdf-ljh-asdf']
+ activation = activate.Activate(args)
+ activation.activate = capture
+ activation.main()
+ parsed_args = capture.calls[0]['args'][0]
+ assert parsed_args.filestore is False
+ assert parsed_args.bluestore is True
+
+ def test_uses_filestore(self, capture):
+ args = ['--filestore', '0', 'asdf-ljh-asdf']
+ activation = activate.Activate(args)
+ activation.activate = capture
+ activation.main()
+ parsed_args = capture.calls[0]['args'][0]
+ assert parsed_args.filestore is True
+ assert parsed_args.bluestore is False
+
+ def test_uses_bluestore(self, capture):
+ args = ['--bluestore', '0', 'asdf-ljh-asdf']
+ activation = activate.Activate(args)
+ activation.activate = capture
+ activation.main()
+ parsed_args = capture.calls[0]['args'][0]
+ assert parsed_args.filestore is False
+ assert parsed_args.bluestore is True
+
+
+class TestActivateAll(object):
+
+ def test_does_not_detect_osds(self, capsys, is_root, capture, monkeypatch):
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: {})
+ args = ['--all']
+ activation = activate.Activate(args)
+ activation.main()
+ out, err = capsys.readouterr()
+ assert 'Was unable to find any OSDs to activate' in err
+ assert 'Verify OSDs are present with ' in err
+
+ def test_detects_running_osds(self, capsys, is_root, capture, monkeypatch):
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report)
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.systemctl.osd_is_active', lambda x: True)
+ args = ['--all']
+ activation = activate.Activate(args)
+ activation.main()
+ out, err = capsys.readouterr()
+ assert 'a8789a96ce8b process is active. Skipping activation' in err
+ assert 'b8218eaa1634 process is active. Skipping activation' in err
+
+ def test_detects_osds_to_activate(self, is_root, capture, monkeypatch):
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report)
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.systemctl.osd_is_active', lambda x: False)
+ args = ['--all']
+ activation = activate.Activate(args)
+ activation.activate = capture
+ activation.main()
+ calls = sorted(capture.calls, key=lambda x: x['kwargs']['osd_id'])
+ assert calls[0]['kwargs']['osd_id'] == '0'
+ assert calls[0]['kwargs']['osd_fsid'] == '957d22b7-24ce-466a-9883-b8218eaa1634'
+ assert calls[1]['kwargs']['osd_id'] == '1'
+ assert calls[1]['kwargs']['osd_fsid'] == 'd0f3e4ad-e52a-4520-afc0-a8789a96ce8b'
+
+#
+# Activate All fixture
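+#
+# a canned sample of what activate.direct_report() returns: a dict keyed by
+# osd id, each value a list of LV records carrying both the raw lv_tags
+# string and the parsed "tags" dict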
+#
+
+direct_report = {
+ "0": [
+ {
+ "lv_name": "osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "lv_path": "/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "lv_tags": "ceph.block_device=/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634,ceph.block_uuid=6MixOd-2Q1I-f8K3-PPOq-UJGV-L3A0-0XwUm4,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d4962338-46ff-4cd5-8ea6-c033dbdc5b44,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=957d22b7-24ce-466a-9883-b8218eaa1634,ceph.osd_id=0,ceph.type=block",
+ "lv_uuid": "6MixOd-2Q1I-f8K3-PPOq-UJGV-L3A0-0XwUm4",
+ "name": "osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "path": "/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "tags": {
+ "ceph.block_device": "/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "ceph.block_uuid": "6MixOd-2Q1I-f8K3-PPOq-UJGV-L3A0-0XwUm4",
+ "ceph.cephx_lockbox_secret": "",
+ "ceph.cluster_fsid": "d4962338-46ff-4cd5-8ea6-c033dbdc5b44",
+ "ceph.cluster_name": "ceph",
+ "ceph.crush_device_class": "None",
+ "ceph.encrypted": "0",
+ "ceph.osd_fsid": "957d22b7-24ce-466a-9883-b8218eaa1634",
+ "ceph.osd_id": "0",
+ "ceph.type": "block"
+ },
+ "type": "block",
+ "vg_name": "ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44"
+ }
+ ],
+ "1": [
+ {
+ "lv_name": "osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "lv_path": "/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "lv_tags": "ceph.block_device=/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b,ceph.block_uuid=1igwLb-ZlmV-eLgp-hapx-c1Hr-M5gz-sHjnyW,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d4962338-46ff-4cd5-8ea6-c033dbdc5b44,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=d0f3e4ad-e52a-4520-afc0-a8789a96ce8b,ceph.osd_id=1,ceph.type=block",
+ "lv_uuid": "1igwLb-ZlmV-eLgp-hapx-c1Hr-M5gz-sHjnyW",
+ "name": "osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "path": "/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "tags": {
+ "ceph.block_device": "/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "ceph.block_uuid": "1igwLb-ZlmV-eLgp-hapx-c1Hr-M5gz-sHjnyW",
+ "ceph.cephx_lockbox_secret": "",
+ "ceph.cluster_fsid": "d4962338-46ff-4cd5-8ea6-c033dbdc5b44",
+ "ceph.cluster_name": "ceph",
+ "ceph.crush_device_class": "None",
+ "ceph.encrypted": "0",
+ "ceph.osd_fsid": "d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "ceph.osd_id": "1",
+ "ceph.type": "block"
+ },
+ "type": "block",
+ "vg_name": "ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532"
+ }
+ ]
+}
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py
new file mode 100644
index 00000000..7c968ae8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py
@@ -0,0 +1,280 @@
+import pytest
+import json
+import random
+
+from argparse import ArgumentError
+from mock import MagicMock, patch
+
+from ceph_volume.devices.lvm import batch
+from ceph_volume.util import arg_validators
+
+
+class TestBatch(object):
+
+ def test_batch_instance(self, is_root):
+ b = batch.Batch([])
+ b.main()
+
+ def test_disjoint_device_lists(self, factory):
+ device1 = factory(used_by_ceph=False, available=True, abspath="/dev/sda")
+ device2 = factory(used_by_ceph=False, available=True, abspath="/dev/sdb")
+ devices = [device1, device2]
+ db_devices = [device2]
+ with pytest.raises(Exception) as disjoint_ex:
+ batch.ensure_disjoint_device_lists(devices, db_devices)
+ assert 'Device lists are not disjoint' in str(disjoint_ex.value)
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ def test_reject_partition(self, mocked_device):
+ mocked_device.return_value = MagicMock(
+ is_partition=True,
+ has_gpt_headers=False,
+ )
+ with pytest.raises(ArgumentError):
+ arg_validators.ValidBatchDevice()('foo')
+
+ @pytest.mark.parametrize('format_', ['pretty', 'json', 'json-pretty'])
+ def test_report(self, format_, factory, conf_ceph_stub, mock_device_generator):
+ # just ensure reporting works
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ devs = [mock_device_generator() for _ in range(5)]
+ args = factory(data_slots=1,
+ osds_per_device=1,
+ osd_ids=[],
+ report=True,
+ format=format_,
+ devices=devs,
+ db_devices=[],
+ wal_devices=[],
+ bluestore=True,
+ block_db_size="1G",
+ dmcrypt=True,
+ )
+ b = batch.Batch([])
+ plan = b.get_plan(args)
+ b.args = args
+ b.report(plan)
+
+ @pytest.mark.parametrize('format_', ['json', 'json-pretty'])
+ def test_json_report_valid_empty(self, format_, factory, conf_ceph_stub, mock_device_generator):
+ # ensure json reports are valid when empty
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ devs = []
+ args = factory(data_slots=1,
+ osds_per_device=1,
+ osd_ids=[],
+ report=True,
+ format=format_,
+ devices=devs,
+ db_devices=[],
+ wal_devices=[],
+ bluestore=True,
+ block_db_size="1G",
+ dmcrypt=True,
+ )
+ b = batch.Batch([])
+ plan = b.get_plan(args)
+ b.args = args
+ report = b._create_report(plan)
+ json.loads(report)
+
+ @pytest.mark.parametrize('format_', ['json', 'json-pretty'])
+ def test_json_report_valid_empty_unavailable_fast(self, format_, factory, conf_ceph_stub, mock_device_generator):
+ # ensure json reports are valid when empty
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ devs = [mock_device_generator() for _ in range(5)]
+ fast_devs = [mock_device_generator()]
+ fast_devs[0].available_lvm = False
+ args = factory(data_slots=1,
+ osds_per_device=1,
+ osd_ids=[],
+ report=True,
+ format=format_,
+ devices=devs,
+ db_devices=fast_devs,
+ wal_devices=[],
+ bluestore=True,
+ block_db_size="1G",
+ dmcrypt=True,
+ )
+ b = batch.Batch([])
+ plan = b.get_plan(args)
+ b.args = args
+ report = b._create_report(plan)
+ json.loads(report)
+
+ @pytest.mark.parametrize('format_', ['json', 'json-pretty'])
+ def test_json_report_valid_empty_unavailable_very_fast(self, format_, factory, conf_ceph_stub, mock_device_generator):
+ # ensure json reports are valid when empty
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ devs = [mock_device_generator() for _ in range(5)]
+ fast_devs = [mock_device_generator()]
+ very_fast_devs = [mock_device_generator()]
+ very_fast_devs[0].available_lvm = False
+ args = factory(data_slots=1,
+ osds_per_device=1,
+ osd_ids=[],
+ report=True,
+ format=format_,
+ devices=devs,
+ db_devices=fast_devs,
+ wal_devices=very_fast_devs,
+ bluestore=True,
+ block_db_size="1G",
+ dmcrypt=True,
+ )
+ b = batch.Batch([])
+ plan = b.get_plan(args)
+ b.args = args
+ report = b._create_report(plan)
+ json.loads(report)
+
+ @pytest.mark.parametrize('rota', [0, 1])
+ def test_batch_sort_full(self, factory, rota):
+ device1 = factory(used_by_ceph=False, available=True, rotational=rota, abspath="/dev/sda")
+ device2 = factory(used_by_ceph=False, available=True, rotational=rota, abspath="/dev/sdb")
+ device3 = factory(used_by_ceph=False, available=True, rotational=rota, abspath="/dev/sdc")
+ devices = [device1, device2, device3]
+ args = factory(report=True,
+ devices=devices,
+ filestore=False,
+ )
+ b = batch.Batch([])
+ b.args = args
+ b._sort_rotational_disks()
+ assert len(b.args.devices) == 3
+
+ @pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
+ def test_batch_sort_mixed(self, factory, objectstore):
+ device1 = factory(used_by_ceph=False, available=True, rotational=1, abspath="/dev/sda")
+ device2 = factory(used_by_ceph=False, available=True, rotational=1, abspath="/dev/sdb")
+ device3 = factory(used_by_ceph=False, available=True, rotational=0, abspath="/dev/sdc")
+ devices = [device1, device2, device3]
+ args = factory(report=True,
+ devices=devices,
+                       filestore=(objectstore == 'filestore'),
+ )
+ b = batch.Batch([])
+ b.args = args
+ b._sort_rotational_disks()
+ assert len(b.args.devices) == 2
+ if objectstore == 'bluestore':
+ assert len(b.args.db_devices) == 1
+ else:
+ assert len(b.args.journal_devices) == 1
+
+ def test_get_physical_osds_return_len(self, factory,
+ mock_devices_available,
+ conf_ceph_stub,
+ osds_per_device):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ args = factory(data_slots=1, osds_per_device=osds_per_device,
+ osd_ids=[], dmcrypt=False)
+ osds = batch.get_physical_osds(mock_devices_available, args)
+ assert len(osds) == len(mock_devices_available) * osds_per_device
+
+ def test_get_physical_osds_rel_size(self, factory,
+ mock_devices_available,
+ conf_ceph_stub,
+ osds_per_device):
+ args = factory(data_slots=1, osds_per_device=osds_per_device,
+ osd_ids=[], dmcrypt=False)
+ osds = batch.get_physical_osds(mock_devices_available, args)
+ for osd in osds:
+ assert osd.data[1] == 1.0 / osds_per_device
+
+ def test_get_physical_osds_abs_size(self, factory,
+ mock_devices_available,
+ conf_ceph_stub,
+ osds_per_device):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ args = factory(data_slots=1, osds_per_device=osds_per_device,
+ osd_ids=[], dmcrypt=False)
+ osds = batch.get_physical_osds(mock_devices_available, args)
+ for osd, dev in zip(osds, mock_devices_available):
+ assert osd.data[2] == int(dev.vg_size[0] / osds_per_device)
+
+ def test_get_physical_osds_osd_ids(self, factory,
+ mock_devices_available,
+ osds_per_device):
+ pass
+
+ def test_get_physical_fast_allocs_length(self, factory,
+ conf_ceph_stub,
+ mock_devices_available):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ args = factory(block_db_slots=None, get_block_db_size=None)
+ fast = batch.get_physical_fast_allocs(mock_devices_available,
+ 'block_db', 2, 2, args)
+ assert len(fast) == 2
+
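+    # the parametrize below generates every (slots, num_devs) pair with
+    # 1 <= num_devs <= slots <= 6, e.g. (3, 1), (3, 2), (3, 3)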
+ @pytest.mark.parametrize('occupied_prior', range(7))
+ @pytest.mark.parametrize('slots,num_devs',
+ [l for sub in [list(zip([x]*x, range(1, x + 1))) for x in range(1,7)] for l in sub])
+ def test_get_physical_fast_allocs_length_existing(self,
+ num_devs,
+ slots,
+ occupied_prior,
+ factory,
+ conf_ceph_stub,
+ mock_device_generator):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ occupied_prior = min(occupied_prior, slots)
+ devs = [mock_device_generator() for _ in range(num_devs)]
+ already_assigned = 0
+ while already_assigned < occupied_prior:
+ dev_i = random.randint(0, num_devs - 1)
+ dev = devs[dev_i]
+ if len(dev.lvs) < occupied_prior:
+ dev.lvs.append('foo')
+ dev.path = '/dev/bar'
+ already_assigned = sum([len(d.lvs) for d in devs])
+ args = factory(block_db_slots=None, get_block_db_size=None)
+ expected_num_osds = max(len(devs) * slots - occupied_prior, 0)
+ fast = batch.get_physical_fast_allocs(devs,
+ 'block_db', slots,
+ expected_num_osds, args)
+ assert len(fast) == expected_num_osds
+ expected_assignment_on_used_devices = sum([slots - len(d.lvs) for d in devs if len(d.lvs) > 0])
+ assert len([f for f in fast if f[0] == '/dev/bar']) == expected_assignment_on_used_devices
+ assert len([f for f in fast if f[0] != '/dev/bar']) == expected_num_osds - expected_assignment_on_used_devices
+
+ def test_get_lvm_osds_return_len(self, factory,
+ mock_lv_device_generator,
+ conf_ceph_stub,
+ osds_per_device):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ args = factory(data_slots=1, osds_per_device=osds_per_device,
+ osd_ids=[], dmcrypt=False)
+ mock_lvs = [mock_lv_device_generator()]
+ osds = batch.get_lvm_osds(mock_lvs, args)
+ assert len(osds) == 1
+
+
+class TestBatchOsd(object):
+
+ def test_osd_class_ctor(self):
+ osd = batch.Batch.OSD('/dev/data', 1, '5G', 1, 1, None)
+ assert osd.data == batch.Batch.OSD.VolSpec('/dev/data',
+ 1,
+ '5G',
+ 1,
+ 'data')
+
+    def test_add_fast(self):
+ osd = batch.Batch.OSD('/dev/data', 1, '5G', 1, 1, None)
+ osd.add_fast_device('/dev/db', 1, '5G', 1, 'block_db')
+ assert osd.fast == batch.Batch.OSD.VolSpec('/dev/db',
+ 1,
+ '5G',
+ 1,
+ 'block_db')
+
+ def test_add_very_fast(self):
+ osd = batch.Batch.OSD('/dev/data', 1, '5G', 1, 1, None)
+ osd.add_very_fast_device('/dev/wal', 1, '5G', 1)
+ assert osd.very_fast == batch.Batch.OSD.VolSpec('/dev/wal',
+ 1,
+ '5G',
+ 1,
+ 'block_wal')
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py
new file mode 100644
index 00000000..fe792d5a
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py
@@ -0,0 +1,8 @@
+from ceph_volume.devices.lvm import common
+
+
+class TestCommon(object):
+
+ def test_get_default_args_smoke(self):
+ default_args = common.get_default_args()
+ assert default_args
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py
new file mode 100644
index 00000000..994038f3
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py
@@ -0,0 +1,48 @@
+import pytest
+from ceph_volume.devices import lvm
+
+
+class TestCreate(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.create.Create([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Create an OSD by assigning an ID and FSID' in stdout
+
+ def test_main_shows_full_help(self, capsys):
+ with pytest.raises(SystemExit):
+ lvm.create.Create(argv=['--help']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Use the filestore objectstore' in stdout
+ assert 'Use the bluestore objectstore' in stdout
+ assert 'A physical device or logical' in stdout
+
+ def test_excludes_filestore_bluestore_flags(self, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.create.Create(argv=['--data', '/dev/sdfoo', '--filestore', '--bluestore']).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --filestore (filestore) with --bluestore (bluestore)'
+ assert expected in stderr
+
+ def test_excludes_other_filestore_bluestore_flags(self, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.create.Create(argv=[
+ '--bluestore', '--data', '/dev/sdfoo',
+ '--journal', '/dev/sf14',
+ ]).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --bluestore (bluestore) with --journal (filestore)'
+ assert expected in stderr
+
+ def test_excludes_block_and_journal_flags(self, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.create.Create(argv=[
+ '--bluestore', '--data', '/dev/sdfoo', '--block.db', 'vg/ceph1',
+ '--journal', '/dev/sf14',
+ ]).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --block.db (bluestore) with --journal (filestore)'
+ assert expected in stderr
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py
new file mode 100644
index 00000000..4b8304ce
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py
@@ -0,0 +1,59 @@
+import pytest
+from mock.mock import patch
+from ceph_volume.api import lvm
+from ceph_volume.devices.lvm import deactivate
+
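+
+# deactivate_osd looks an OSD up by id and/or fsid via its LV tags, unmounts
+# the tmpfs mount, and closes the dmcrypt mapping when the OSD is encrypted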
+class TestDeactivate(object):
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ def test_no_osd(self, p_get_lvs):
+ p_get_lvs.return_value = []
+ with pytest.raises(StopIteration):
+ deactivate.deactivate_osd(0)
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ @patch("ceph_volume.util.system.unmount_tmpfs")
+ def test_unmount_tmpfs_called_osd_id(self, p_u_tmpfs, p_get_lvs):
+ FooVolume = lvm.Volume(
+ lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_id=0,ceph.cluster_name=foo,ceph.type=data")
+ p_get_lvs.return_value = [FooVolume]
+
+ deactivate.deactivate_osd(0)
+ p_u_tmpfs.assert_called_with(
+ '/var/lib/ceph/osd/{}-{}'.format('foo', 0))
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ @patch("ceph_volume.util.system.unmount_tmpfs")
+ def test_unmount_tmpfs_called_osd_uuid(self, p_u_tmpfs, p_get_lvs):
+ FooVolume = lvm.Volume(
+ lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_fsid=0,ceph.osd_id=1,ceph.cluster_name=foo,ceph.type=data")
+ p_get_lvs.return_value = [FooVolume]
+
+ deactivate.deactivate_osd(None, 0)
+ p_u_tmpfs.assert_called_with(
+ '/var/lib/ceph/osd/{}-{}'.format('foo', 1))
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ @patch("ceph_volume.util.system.unmount_tmpfs")
+ @patch("ceph_volume.util.encryption.dmcrypt_close")
+ def test_no_crypt_no_dmclose(self, p_dm_close, p_u_tmpfs, p_get_lvs):
+ FooVolume = lvm.Volume(
+ lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_id=0,ceph.cluster_name=foo,ceph.type=data")
+ p_get_lvs.return_value = [FooVolume]
+
+ deactivate.deactivate_osd(0)
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ @patch("ceph_volume.util.system.unmount_tmpfs")
+ @patch("ceph_volume.util.encryption.dmcrypt_close")
+ def test_crypt_dmclose(self, p_dm_close, p_u_tmpfs, p_get_lvs):
+ FooVolume = lvm.Volume(
+ lv_name='foo', lv_path='/dev/vg/foo', lv_uuid='123',
+ lv_tags="ceph.osd_id=0,ceph.encrypted=1,ceph.cluster_name=foo,ceph.type=data")
+ p_get_lvs.return_value = [FooVolume]
+
+ deactivate.deactivate_osd(0)
+ p_dm_close.assert_called_with('123')
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py
new file mode 100644
index 00000000..cf4b68c7
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py
@@ -0,0 +1,265 @@
+import pytest
+from ceph_volume.devices import lvm
+from ceph_volume.api import lvm as api
+
+# TODO: add tests for the following commands -
+# ceph-volume list
+# ceph-volume list <path-to-pv>
+# ceph-volume list <path-to-vg>
+# ceph-volume list <path-to-lv>
+
+class TestReadableTag(object):
+
+ def test_dots_get_replaced(self):
+ result = lvm.listing.readable_tag('ceph.foo')
+ assert result == 'foo'
+
+ def test_underscores_are_replaced_with_spaces(self):
+ result = lvm.listing.readable_tag('ceph.long_tag')
+ assert result == 'long tag'
+
+
+class TestPrettyReport(object):
+
+ def test_is_empty(self, capsys):
+ lvm.listing.pretty_report({})
+ stdout, stderr = capsys.readouterr()
+ assert stdout == '\n'
+
+ def test_type_and_path_are_reported(self, capsys):
+ lvm.listing.pretty_report({0: [
+ {'type': 'data', 'path': '/dev/sda1', 'devices': ['/dev/sda']}
+ ]})
+ stdout, stderr = capsys.readouterr()
+ assert '[data] /dev/sda1' in stdout
+
+ def test_osd_id_header_is_reported(self, capsys):
+ lvm.listing.pretty_report({0: [
+ {'type': 'data', 'path': '/dev/sda1', 'devices': ['/dev/sda']}
+ ]})
+ stdout, stderr = capsys.readouterr()
+ assert '====== osd.0 =======' in stdout
+
+ def test_tags_are_included(self, capsys):
+ lvm.listing.pretty_report(
+ {0: [{
+ 'type': 'data',
+ 'path': '/dev/sda1',
+ 'tags': {'ceph.osd_id': '0'},
+ 'devices': ['/dev/sda'],
+ }]}
+ )
+ stdout, stderr = capsys.readouterr()
+ assert 'osd id' in stdout
+
+ def test_devices_are_comma_separated(self, capsys):
+ lvm.listing.pretty_report({0: [
+ {'type': 'data', 'path': '/dev/sda1', 'devices': ['/dev/sda', '/dev/sdb1']}
+ ]})
+ stdout, stderr = capsys.readouterr()
+ assert '/dev/sda,/dev/sdb1' in stdout
+
+
+class TestList(object):
+
+    def test_empty_full_json_zero_exit_status(self, is_root, factory, capsys):
+ args = factory(format='json', device=None)
+ lvm.listing.List([]).list(args)
+ stdout, stderr = capsys.readouterr()
+ assert stdout == '{}\n'
+
+    def test_empty_device_json_zero_exit_status(self, is_root, factory, capsys):
+ args = factory(format='json', device='/dev/sda1')
+ lvm.listing.List([]).list(args)
+ stdout, stderr = capsys.readouterr()
+ assert stdout == '{}\n'
+
+ def test_empty_full_zero_exit_status(self, is_root, factory):
+ args = factory(format='pretty', device=None)
+ with pytest.raises(SystemExit):
+ lvm.listing.List([]).list(args)
+
+ def test_empty_device_zero_exit_status(self, is_root, factory):
+ args = factory(format='pretty', device='/dev/sda1')
+ with pytest.raises(SystemExit):
+ lvm.listing.List([]).list(args)
+
+class TestFullReport(object):
+
+ def test_no_ceph_lvs(self, monkeypatch):
+        # ceph lvs are detected by looking into their tags
+ osd = api.Volume(lv_name='volume1', lv_path='/dev/VolGroup/lv',
+ lv_tags={})
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).full_report()
+ assert result == {}
+
+ def test_ceph_data_lv_reported(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ pv = api.PVolume(pv_name='/dev/sda1', pv_tags={}, pv_uuid="0000",
+ vg_name='VolGroup', lv_uuid="aaaa")
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(lvm.listing.api, 'get_first_pv', lambda **kwargs: pv)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).full_report()
+ assert result['0'][0]['name'] == 'volume1'
+
+ def test_ceph_journal_lv_reported(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ journal_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=journal'
+ pv = api.PVolume(pv_name='/dev/sda1', pv_tags={}, pv_uuid="0000",
+ vg_name="VolGroup", lv_uuid="aaaa")
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
+ journal = api.Volume(
+ lv_name='journal', lv_uuid='x', lv_tags=journal_tags,
+ lv_path='/dev/VolGroup/journal', vg_name='VolGroup')
+ volumes = []
+ volumes.append(osd)
+ volumes.append(journal)
+        monkeypatch.setattr(lvm.listing.api, 'get_first_pv', lambda **kwargs: pv)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).full_report()
+ assert result['0'][0]['name'] == 'volume1'
+ assert result['0'][1]['name'] == 'journal'
+
+ def test_ceph_wal_lv_reported(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.wal_uuid=x,ceph.type=data'
+ wal_tags = 'ceph.osd_id=0,ceph.wal_uuid=x,ceph.type=wal'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
+ wal = api.Volume(lv_name='wal', lv_uuid='x', lv_tags=wal_tags,
+ lv_path='/dev/VolGroup/wal', vg_name='VolGroup')
+ volumes = []
+ volumes.append(osd)
+ volumes.append(wal)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).full_report()
+ assert result['0'][0]['name'] == 'volume1'
+ assert result['0'][1]['name'] == 'wal'
+
+ @pytest.mark.parametrize('type_', ['journal', 'db', 'wal'])
+ def test_physical_2nd_device_gets_reported(self, type_, monkeypatch):
+ tags = ('ceph.osd_id=0,ceph.{t}_uuid=x,ceph.type=data,'
+ 'ceph.{t}_device=/dev/sda1').format(t=type_)
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ vg_name='VolGroup', lv_path='/dev/VolGroup/lv')
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ [osd])
+
+ result = lvm.listing.List([]).full_report()
+ assert result['0'][1]['path'] == '/dev/sda1'
+ assert result['0'][1]['tags'] == {'PARTUUID': 'x'}
+ assert result['0'][1]['type'] == type_
+
+
+class TestSingleReport(object):
+
+ def test_not_a_ceph_lv(self, monkeypatch):
+        # ceph lvs are detected by looking into their tags
+ lv = api.Volume(lv_name='lv', lv_tags={}, lv_path='/dev/VolGroup/lv',
+ vg_name='VolGroup')
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ [lv])
+
+ result = lvm.listing.List([]).single_report('VolGroup/lv')
+ assert result == {}
+
+ def test_report_a_ceph_lv(self, monkeypatch):
+        # ceph lvs are detected by looking into their tags
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ lv = api.Volume(lv_name='lv', vg_name='VolGroup', lv_uuid='aaaa',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(lv)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).single_report('VolGroup/lv')
+ assert result['0'][0]['name'] == 'lv'
+ assert result['0'][0]['lv_tags'] == tags
+ assert result['0'][0]['path'] == '/dev/VolGroup/lv'
+ assert result['0'][0]['devices'] == []
+
+ def test_report_a_ceph_journal_device(self, monkeypatch):
+        # ceph lvs are detected by looking into their tags
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,' + \
+ 'ceph.journal_device=/dev/sda1'
+ lv = api.Volume(lv_name='lv', lv_uuid='aaa', lv_tags=tags,
+ lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ [lv] if 'tags' in kwargs else [])
+
+ result = lvm.listing.List([]).single_report('/dev/sda1')
+ assert result['0'][0]['tags'] == {'PARTUUID': 'x'}
+ assert result['0'][0]['type'] == 'journal'
+ assert result['0'][0]['path'] == '/dev/sda1'
+
+ def test_report_a_ceph_lv_with_devices(self, monkeypatch):
+ pvolumes = []
+
+ tags = 'ceph.osd_id=0,ceph.type=data'
+ pv1 = api.PVolume(vg_name="VolGroup", pv_name='/dev/sda1',
+ pv_uuid='', pv_tags={}, lv_uuid="aaaa")
+ pv2 = api.PVolume(vg_name="VolGroup", pv_name='/dev/sdb1',
+ pv_uuid='', pv_tags={}, lv_uuid="aaaa")
+ pvolumes.append(pv1)
+ pvolumes.append(pv2)
+
+ volumes = []
+        lv = api.Volume(lv_name='lv', vg_name='VolGroup', lv_uuid='aaaa',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes.append(lv)
+
+ monkeypatch.setattr(lvm.listing.api, 'get_pvs', lambda **kwargs:
+ pvolumes)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ listing = lvm.listing.List([])
+ listing._pvs = [
+ {'lv_uuid': 'aaaa', 'pv_name': '/dev/sda1', 'pv_tags': '', 'pv_uuid': ''},
+ {'lv_uuid': 'aaaa', 'pv_name': '/dev/sdb1', 'pv_tags': '', 'pv_uuid': ''},
+ ]
+
+ result = listing.single_report('VolGroup/lv')
+ assert result['0'][0]['name'] == 'lv'
+ assert result['0'][0]['lv_tags'] == tags
+ assert result['0'][0]['path'] == '/dev/VolGroup/lv'
+ assert result['0'][0]['devices'] == ['/dev/sda1', '/dev/sdb1']
+
+ def test_report_a_ceph_lv_with_no_matching_devices(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.type=data'
+ lv = api.Volume(lv_name='lv', vg_name='VolGroup', lv_uuid='aaaa',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(lv)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ listing = lvm.listing.List([])
+ listing._pvs = [
+ {'lv_uuid': 'ffff', 'pv_name': '/dev/sda1', 'pv_tags': '',
+ 'pv_uuid': ''},
+ {'lv_uuid': 'ffff', 'pv_name': '/dev/sdb1', 'pv_tags': '',
+ 'pv_uuid': ''}]
+
+ result = listing.single_report('VolGroup/lv')
+ assert result['0'][0]['name'] == 'lv'
+ assert result['0'][0]['lv_tags'] == tags
+ assert result['0'][0]['path'] == '/dev/VolGroup/lv'
+ assert result['0'][0]['devices'] == []
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py
new file mode 100644
index 00000000..70915a0f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py
@@ -0,0 +1,174 @@
+import pytest
+from ceph_volume.devices import lvm
+from ceph_volume.api import lvm as api
+from mock.mock import patch, Mock
+
+
+class TestLVM(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.main.LVM([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Use LVM and LVM-based technologies to deploy' in stdout
+
+ def test_main_shows_activate_subcommands(self, capsys):
+ lvm.main.LVM([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'activate ' in stdout
+ assert 'Discover and mount' in stdout
+
+ def test_main_shows_prepare_subcommands(self, capsys):
+ lvm.main.LVM([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'prepare ' in stdout
+ assert 'Format an LVM device' in stdout
+
+
+class TestPrepareDevice(object):
+
+ def test_cannot_use_device(self, factory):
+ args = factory(data='/dev/var/foo')
+ with pytest.raises(RuntimeError) as error:
+ p = lvm.prepare.Prepare([])
+ p.args = args
+            p.prepare_data_device('data', '0')
+ assert 'Cannot use device (/dev/var/foo)' in str(error.value)
+ assert 'A vg/lv path or an existing device is needed' in str(error.value)
+
+
+class TestGetClusterFsid(object):
+
+ def test_fsid_is_passed_in(self, factory):
+ args = factory(cluster_fsid='aaaa-1111')
+ prepare_obj = lvm.prepare.Prepare([])
+ prepare_obj.args = args
+ assert prepare_obj.get_cluster_fsid() == 'aaaa-1111'
+
+ def test_fsid_is_read_from_ceph_conf(self, factory, conf_ceph_stub):
+ conf_ceph_stub('[global]\nfsid = bbbb-2222')
+ prepare_obj = lvm.prepare.Prepare([])
+ prepare_obj.args = factory(cluster_fsid=None)
+ assert prepare_obj.get_cluster_fsid() == 'bbbb-2222'
+
+
+class TestPrepare(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.prepare.Prepare([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Prepare an OSD by assigning an ID and FSID' in stdout
+
+ def test_main_shows_full_help(self, capsys):
+ with pytest.raises(SystemExit):
+ lvm.prepare.Prepare(argv=['--help']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Use the filestore objectstore' in stdout
+ assert 'Use the bluestore objectstore' in stdout
+ assert 'A physical device or logical' in stdout
+
+ def test_excludes_filestore_bluestore_flags(self, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.prepare.Prepare(argv=['--data', '/dev/sdfoo', '--filestore', '--bluestore']).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --filestore (filestore) with --bluestore (bluestore)'
+ assert expected in stderr
+
+ def test_excludes_other_filestore_bluestore_flags(self, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.prepare.Prepare(argv=[
+ '--bluestore', '--data', '/dev/sdfoo',
+ '--journal', '/dev/sf14',
+ ]).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --bluestore (bluestore) with --journal (filestore)'
+ assert expected in stderr
+
+ def test_excludes_block_and_journal_flags(self, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.prepare.Prepare(argv=[
+ '--bluestore', '--data', '/dev/sdfoo', '--block.db', 'vg/ceph1',
+ '--journal', '/dev/sf14',
+ ]).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --block.db (bluestore) with --journal (filestore)'
+ assert expected in stderr
+
+ def test_journal_is_required_with_filestore(self, is_root, monkeypatch, device_info):
+ monkeypatch.setattr("os.path.exists", lambda path: True)
+ device_info()
+ with pytest.raises(SystemExit) as error:
+ lvm.prepare.Prepare(argv=['--filestore', '--data', '/dev/sdfoo']).main()
+ expected = '--journal is required when using --filestore'
+ assert expected in str(error.value)
+
+ @patch('ceph_volume.devices.lvm.prepare.api.is_ceph_device')
+ def test_safe_prepare_osd_already_created(self, m_is_ceph_device):
+ m_is_ceph_device.return_value = True
+ with pytest.raises(RuntimeError) as error:
+ prepare = lvm.prepare.Prepare(argv=[])
+ prepare.args = Mock()
+ prepare.args.data = '/dev/sdfoo'
+ prepare.get_lv = Mock()
+ prepare.safe_prepare()
+ expected = 'skipping {}, it is already prepared'.format('/dev/sdfoo')
+ assert expected in str(error.value)
+
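+    # setup_device is exercised below along each of its paths: no device
+    # name (a no-op), an existing vg/lv (reused as-is), a raw device (an LV
+    # is created on it), and a partition (only its PARTUUID is looked up)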
+ def test_setup_device_device_name_is_none(self):
+ result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name=None, tags={'ceph.type': 'data'}, size=0, slots=None)
+ assert result == ('', '', {'ceph.type': 'data'})
+
+ @patch('ceph_volume.api.lvm.Volume.set_tags')
+ @patch('ceph_volume.devices.lvm.prepare.api.get_first_lv')
+ def test_setup_device_lv_passed(self, m_get_first_lv, m_set_tags):
+ fake_volume = api.Volume(lv_name='lv_foo', lv_path='/fake-path', vg_name='vg_foo', lv_tags='', lv_uuid='fake-uuid')
+ m_get_first_lv.return_value = fake_volume
+ result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='vg_foo/lv_foo', tags={'ceph.type': 'data'}, size=0, slots=None)
+
+ assert result == ('/fake-path', 'fake-uuid', {'ceph.type': 'data',
+ 'ceph.vdo': '0',
+ 'ceph.data_uuid': 'fake-uuid',
+ 'ceph.data_device': '/fake-path'})
+
+ @patch('ceph_volume.devices.lvm.prepare.api.create_lv')
+ @patch('ceph_volume.api.lvm.Volume.set_tags')
+ @patch('ceph_volume.util.disk.is_device')
+ def test_setup_device_device_passed(self, m_is_device, m_set_tags, m_create_lv):
+ fake_volume = api.Volume(lv_name='lv_foo', lv_path='/fake-path', vg_name='vg_foo', lv_tags='', lv_uuid='fake-uuid')
+ m_is_device.return_value = True
+ m_create_lv.return_value = fake_volume
+ result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='/dev/sdx', tags={'ceph.type': 'data'}, size=0, slots=None)
+
+ assert result == ('/fake-path', 'fake-uuid', {'ceph.type': 'data',
+ 'ceph.vdo': '0',
+ 'ceph.data_uuid': 'fake-uuid',
+ 'ceph.data_device': '/fake-path'})
+
+ @patch('ceph_volume.devices.lvm.prepare.Prepare.get_ptuuid')
+ @patch('ceph_volume.devices.lvm.prepare.api.get_first_lv')
+ def test_setup_device_partition_passed(self, m_get_first_lv, m_get_ptuuid):
+ m_get_first_lv.side_effect = ValueError()
+ m_get_ptuuid.return_value = 'fake-uuid'
+ result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='/dev/sdx', tags={'ceph.type': 'data'}, size=0, slots=None)
+
+ assert result == ('/dev/sdx', 'fake-uuid', {'ceph.type': 'data',
+ 'ceph.vdo': '0',
+ 'ceph.data_uuid': 'fake-uuid',
+ 'ceph.data_device': '/dev/sdx'})
+
+
+class TestActivate(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.activate.Activate([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Activate OSDs by discovering them with' in stdout
+
+ def test_main_shows_full_help(self, capsys):
+ with pytest.raises(SystemExit):
+ lvm.activate.Activate(argv=['--help']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'optional arguments' in stdout
+ assert 'positional arguments' in stdout
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py
new file mode 100644
index 00000000..b5280f93
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py
@@ -0,0 +1,45 @@
+import pytest
+from ceph_volume import exceptions
+from ceph_volume.devices.lvm import trigger
+
+
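+# systemd hands trigger a suffix of the form "<osd id>-<osd uuid>"; these
+# parsers split it apart and raise SuffixParsingError when a part is missing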
+class TestParseOSDid(object):
+
+ def test_no_id_found_if_no_digit(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_id('asdlj-ljahsdfaslkjhdfa')
+
+ def test_no_id_found(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_id('ljahsdfaslkjhdfa')
+
+ def test_id_found(self):
+ result = trigger.parse_osd_id('1-ljahsdfaslkjhdfa')
+ assert result == '1'
+
+
+class TestParseOSDUUID(object):
+
+ def test_uuid_is_parsed(self):
+ result = trigger.parse_osd_uuid('1-asdf-ljkh-asdf-ljkh-asdf')
+ assert result == 'asdf-ljkh-asdf-ljkh-asdf'
+
+ def test_uuid_is_parsed_longer_sha1(self):
+ result = trigger.parse_osd_uuid('1-foo-bar-asdf-ljkh-asdf-ljkh-asdf')
+ assert result == 'foo-bar-asdf-ljkh-asdf-ljkh-asdf'
+
+ def test_uuid_is_not_found(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_uuid('ljahsdfaslkjhdfa')
+
+ def test_uuid_is_not_found_missing_id(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_uuid('ljahs-dfa-slkjhdfa-foo')
+
+ def test_robust_double_id_in_uuid(self):
+        # the osd id can also appear inside the uuid portion; parsing
+        # should still handle that correctly
+ result = trigger.parse_osd_uuid("1-abc959fd-1ec9-4864-b141-3154f9b9f8ed")
+ assert result == 'abc959fd-1ec9-4864-b141-3154f9b9f8ed'
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py
new file mode 100644
index 00000000..1fa22e5b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py
@@ -0,0 +1,236 @@
+import os
+import pytest
+from copy import deepcopy
+from mock.mock import patch, call
+from ceph_volume import process
+from ceph_volume.api import lvm as api
+from ceph_volume.devices.lvm import zap
+
+
+class TestFindAssociatedDevices(object):
+
+ def test_no_lvs_found_that_match_id(self, monkeypatch, device_info):
+ tags = 'ceph.osd_id=9,ceph.journal_uuid=x,ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_tags=tags, lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
+
+ with pytest.raises(RuntimeError):
+ zap.find_associated_devices(osd_id=10)
+
+ def test_no_lvs_found_that_match_fsid(self, monkeypatch, device_info):
+ tags = 'ceph.osd_id=9,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,'+\
+ 'ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ vg_name='vg', lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
+
+ with pytest.raises(RuntimeError):
+ zap.find_associated_devices(osd_fsid='aaaa-lkjh')
+
+ def test_no_lvs_found_that_match_id_fsid(self, monkeypatch, device_info):
+ tags = 'ceph.osd_id=9,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,'+\
+ 'ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_tags=tags, lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
+
+ with pytest.raises(RuntimeError):
+ zap.find_associated_devices(osd_id='9', osd_fsid='aaaa-lkjh')
+
+ def test_no_ceph_lvs_found(self, monkeypatch):
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags='',
+ lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
+
+ with pytest.raises(RuntimeError):
+ zap.find_associated_devices(osd_id=100)
+
+ def test_lv_is_matched_id(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = zap.find_associated_devices(osd_id='0')
+ assert result[0].abspath == '/dev/VolGroup/lv'
+
+ def test_lv_is_matched_fsid(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,' +\
+ 'ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: deepcopy(volumes))
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = zap.find_associated_devices(osd_fsid='asdf-lkjh')
+ assert result[0].abspath == '/dev/VolGroup/lv'
+
+ def test_lv_is_matched_id_fsid(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,' +\
+ 'ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = zap.find_associated_devices(osd_id='0', osd_fsid='asdf-lkjh')
+ assert result[0].abspath == '/dev/VolGroup/lv'
+
+
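+# ensure_associated_lvs expands a list of OSD LVs into everything that should
+# be zapped: the LV paths themselves plus any physical partitions referenced
+# through the journal/db/wal uuid tags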
+class TestEnsureAssociatedLVs(object):
+
+ def test_nothing_is_found(self):
+ volumes = []
+ result = zap.ensure_associated_lvs(volumes)
+ assert result == []
+
+ def test_data_is_found(self):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=data'
+ osd = api.Volume(
+ lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/data', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert result == ['/dev/VolGroup/data']
+
+ def test_block_is_found(self):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=block'
+ osd = api.Volume(
+ lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/block', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert result == ['/dev/VolGroup/block']
+
+ def test_success_message_for_fsid(self, factory, is_root, capsys):
+ cli_zap = zap.Zap([])
+ args = factory(devices=[], osd_id=None, osd_fsid='asdf-lkjh')
+ cli_zap.args = args
+ cli_zap.zap()
+ out, err = capsys.readouterr()
+ assert "Zapping successful for OSD: asdf-lkjh" in err
+
+ def test_success_message_for_id(self, factory, is_root, capsys):
+ cli_zap = zap.Zap([])
+ args = factory(devices=[], osd_id='1', osd_fsid=None)
+ cli_zap.args = args
+ cli_zap.zap()
+ out, err = capsys.readouterr()
+ assert "Zapping successful for OSD: 1" in err
+
+ def test_block_and_partition_are_found(self, monkeypatch):
+ monkeypatch.setattr(zap.disk, 'get_device_from_partuuid', lambda x: '/dev/sdb1')
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=block'
+ osd = api.Volume(
+ lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/block', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/sdb1' in result
+ assert '/dev/VolGroup/block' in result
+
+ def test_journal_is_found(self):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=journal'
+ osd = api.Volume(
+ lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert result == ['/dev/VolGroup/lv']
+
+ def test_multiple_journals_are_found(self):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=journal'
+ volumes = []
+ for i in range(3):
+ osd = api.Volume(
+ lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/VolGroup/lv0' in result
+ assert '/dev/VolGroup/lv1' in result
+ assert '/dev/VolGroup/lv2' in result
+
+ def test_multiple_dbs_are_found(self):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=db'
+ volumes = []
+ for i in range(3):
+ osd = api.Volume(
+ lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/VolGroup/lv0' in result
+ assert '/dev/VolGroup/lv1' in result
+ assert '/dev/VolGroup/lv2' in result
+
+ def test_multiple_wals_are_found(self):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.wal_uuid=x,ceph.type=wal'
+ volumes = []
+ for i in range(3):
+ osd = api.Volume(
+ lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/VolGroup/lv0' in result
+ assert '/dev/VolGroup/lv1' in result
+ assert '/dev/VolGroup/lv2' in result
+
+ def test_multiple_backing_devs_are_found(self):
+ volumes = []
+ for _type in ['journal', 'db', 'wal']:
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.wal_uuid=x,ceph.type=%s' % _type
+ osd = api.Volume(
+ lv_name='volume%s' % _type, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % _type, lv_tags=tags)
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/VolGroup/lvjournal' in result
+ assert '/dev/VolGroup/lvwal' in result
+ assert '/dev/VolGroup/lvdb' in result
+
+ @patch('ceph_volume.devices.lvm.zap.api.get_lvs')
+ def test_ensure_associated_lvs(self, m_get_lvs):
+ zap.ensure_associated_lvs([], lv_tags={'ceph.osd_id': '1'})
+ calls = [
+ call(tags={'ceph.type': 'journal', 'ceph.osd_id': '1'}),
+ call(tags={'ceph.type': 'db', 'ceph.osd_id': '1'}),
+ call(tags={'ceph.type': 'wal', 'ceph.osd_id': '1'})
+ ]
+ m_get_lvs.assert_has_calls(calls, any_order=True)
+
+
+class TestWipeFs(object):
+
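+    # zap.wipefs retries failed wipefs calls; the number of tries and the
+    # sleep between attempts are read from these environment variables, which
+    # the tests shrink to keep runs fast (the default looks to be 8 tries)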
+ def setup(self):
+ os.environ['CEPH_VOLUME_WIPEFS_INTERVAL'] = '0'
+
+ def test_works_on_second_try(self, stub_call):
+ os.environ['CEPH_VOLUME_WIPEFS_TRIES'] = '2'
+ stub_call([('wiping /dev/sda', '', 1), ('', '', 0)])
+ result = zap.wipefs('/dev/sda')
+ assert result is None
+
+ def test_does_not_work_after_several_tries(self, stub_call):
+ os.environ['CEPH_VOLUME_WIPEFS_TRIES'] = '2'
+ stub_call([('wiping /dev/sda', '', 1), ('', '', 1)])
+ with pytest.raises(RuntimeError):
+ zap.wipefs('/dev/sda')
+
+ def test_does_not_work_default_tries(self, stub_call):
+ stub_call([('wiping /dev/sda', '', 1)]*8)
+ with pytest.raises(RuntimeError):
+ zap.wipefs('/dev/sda')