Diffstat (limited to 'src/ceph-volume')
-rw-r--r-- src/ceph-volume/.gitignore | 13
-rw-r--r-- src/ceph-volume/CMakeLists.txt | 29
-rw-r--r-- src/ceph-volume/MANIFEST.in | 2
-rw-r--r-- src/ceph-volume/ceph_volume/__init__.py | 22
-rw-r--r-- src/ceph-volume/ceph_volume/activate/__init__.py | 1
-rw-r--r-- src/ceph-volume/ceph_volume/activate/main.py | 85
-rw-r--r-- src/ceph-volume/ceph_volume/api/__init__.py | 3
-rw-r--r-- src/ceph-volume/ceph_volume/api/lvm.py | 1212
-rw-r--r-- src/ceph-volume/ceph_volume/configuration.py | 232
-rw-r--r-- src/ceph-volume/ceph_volume/decorators.py | 90
-rw-r--r-- src/ceph-volume/ceph_volume/devices/__init__.py | 1
-rw-r--r-- src/ceph-volume/ceph_volume/devices/lvm/__init__.py | 1
-rw-r--r-- src/ceph-volume/ceph_volume/devices/lvm/activate.py | 384
-rw-r--r-- src/ceph-volume/ceph_volume/devices/lvm/batch.py | 654
-rw-r--r-- src/ceph-volume/ceph_volume/devices/lvm/common.py | 190
-rw-r--r-- src/ceph-volume/ceph_volume/devices/lvm/create.py | 77
-rw-r--r-- src/ceph-volume/ceph_volume/devices/lvm/deactivate.py | 88
-rw-r--r-- src/ceph-volume/ceph_volume/devices/lvm/listing.py | 223
-rw-r--r-- src/ceph-volume/ceph_volume/devices/lvm/main.py | 54
-rw-r--r-- src/ceph-volume/ceph_volume/devices/lvm/migrate.py | 693
-rw-r--r-- src/ceph-volume/ceph_volume/devices/lvm/prepare.py | 441
-rw-r--r-- src/ceph-volume/ceph_volume/devices/lvm/trigger.py | 70
-rw-r--r-- src/ceph-volume/ceph_volume/devices/lvm/zap.py | 406
-rw-r--r-- src/ceph-volume/ceph_volume/devices/raw/__init__.py | 1
-rw-r--r-- src/ceph-volume/ceph_volume/devices/raw/activate.py | 166
-rw-r--r-- src/ceph-volume/ceph_volume/devices/raw/common.py | 52
-rw-r--r-- src/ceph-volume/ceph_volume/devices/raw/list.py | 163
-rw-r--r-- src/ceph-volume/ceph_volume/devices/raw/main.py | 40
-rw-r--r-- src/ceph-volume/ceph_volume/devices/raw/prepare.py | 169
-rw-r--r-- src/ceph-volume/ceph_volume/devices/simple/__init__.py | 1
-rw-r--r-- src/ceph-volume/ceph_volume/devices/simple/activate.py | 302
-rw-r--r-- src/ceph-volume/ceph_volume/devices/simple/main.py | 41
-rw-r--r-- src/ceph-volume/ceph_volume/devices/simple/scan.py | 385
-rw-r--r-- src/ceph-volume/ceph_volume/devices/simple/trigger.py | 70
-rw-r--r-- src/ceph-volume/ceph_volume/drive_group/__init__.py | 1
-rw-r--r-- src/ceph-volume/ceph_volume/drive_group/main.py | 98
-rw-r--r-- src/ceph-volume/ceph_volume/exceptions.py | 63
-rw-r--r-- src/ceph-volume/ceph_volume/inventory/__init__.py | 1
-rw-r--r-- src/ceph-volume/ceph_volume/inventory/main.py | 67
-rw-r--r-- src/ceph-volume/ceph_volume/log.py | 49
-rw-r--r-- src/ceph-volume/ceph_volume/main.py | 183
-rw-r--r-- src/ceph-volume/ceph_volume/process.py | 229
-rw-r--r-- src/ceph-volume/ceph_volume/systemd/__init__.py | 1
-rw-r--r-- src/ceph-volume/ceph_volume/systemd/main.py | 108
-rw-r--r-- src/ceph-volume/ceph_volume/systemd/systemctl.py | 101
-rw-r--r-- src/ceph-volume/ceph_volume/terminal.py | 214
-rw-r--r-- src/ceph-volume/ceph_volume/tests/__init__.py | 0
-rw-r--r-- src/ceph-volume/ceph_volume/tests/api/test_lvm.py | 885
-rw-r--r-- src/ceph-volume/ceph_volume/tests/conftest.py | 317
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/__init__.py | 0
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py | 0
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py | 442
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py | 306
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py | 8
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py | 52
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py | 59
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py | 352
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py | 2299
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py | 189
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py | 45
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py | 241
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/raw/__init__.py | 0
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py | 238
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py | 97
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py | 200
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py | 71
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/simple/test_trigger.py | 45
-rw-r--r-- src/ceph-volume/ceph_volume/tests/devices/test_zap.py | 38
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/.gitignore | 5
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/README.md | 24
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/Vagrantfile | 423
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/Vagrantfile | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/group_vars/all | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/hosts | 8
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/setup.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/test.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/test_zap.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/Vagrantfile | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/group_vars/all | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/hosts | 8
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/setup.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/test.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/test_zap.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/vagrant_variables.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/Vagrantfile | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/group_vars/all | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/hosts | 8
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/setup.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/test.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/test_zap.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/vagrant_variables.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/Vagrantfile | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/group_vars/all | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/hosts | 8
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/setup.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/test.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/test_zap.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/vagrant_variables.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/Vagrantfile | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/group_vars/all | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/hosts | 8
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/setup.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/test.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/test_zap.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/vagrant_variables.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/Vagrantfile | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/group_vars/all | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/hosts | 8
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/setup.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/test.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/test_zap.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/vagrant_variables.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/Vagrantfile | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/group_vars/all | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/hosts | 8
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/setup.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/test.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/test_zap.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/Vagrantfile | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/group_vars/all | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/hosts | 8
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/setup.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/test.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/test_zap.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/vagrant_variables.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/Vagrantfile | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/group_vars/all | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/hosts | 8
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/setup.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/test.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/test_zap.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/vagrant_variables.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/Vagrantfile | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/group_vars/all | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/hosts | 8
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/setup.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/test.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/test_zap.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/vagrant_variables.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/Vagrantfile | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/group_vars/all | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/hosts | 8
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/setup.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/test.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/test_zap.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/vagrant_variables.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/Vagrantfile | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/group_vars/all | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/hosts | 8
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/setup.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/test.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/test_zap.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/vagrant_variables.yml | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/noop.yml | 12
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml | 215
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml | 64
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_explicit.yml | 64
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_zap.yml | 34
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini | 74
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore | 34
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_lvm | 32
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_lvm_dmcrypt | 33
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_single | 29
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/group_vars/filestore | 32
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/group_vars/filestore_lvm | 35
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/group_vars/filestore_lvm_dmcrypt | 36
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/group_vars/filestore_single | 29
l--------- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/Vagrantfile | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/group_vars/all | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/hosts | 8
l--------- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/setup.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/test.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/vagrant_variables.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/Vagrantfile | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/group_vars/all | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/hosts | 8
l--------- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/setup.yml | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/test.yml | 123
l--------- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/vagrant_variables.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/Vagrantfile | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/group_vars/all | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/hosts | 8
l--------- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/setup.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/test.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/vagrant_variables.yml | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/Vagrantfile | 1
l--------- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/group_vars/all | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/hosts | 8
l--------- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/setup.yml | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/test.yml | 120
l--------- src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/vagrant_variables.yml | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml | 27
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml | 161
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml | 191
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini | 71
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml | 155
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/scripts/generate_ssh_config.sh | 14
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/scripts/output.py | 5
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/scripts/test_unicode.sh | 35
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_reload.sh | 21
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_up.sh | 14
l--------- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/Vagrantfile | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/group_vars/all | 19
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd0.yml | 7
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd1.yml | 6
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/hosts | 9
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/test.yml | 31
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/vagrant_variables.yml | 73
l--------- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/Vagrantfile | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/group_vars/all | 22
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd0.yml | 7
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd1.yml | 6
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/hosts | 9
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/test.yml | 15
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/vagrant_variables.yml | 73
l--------- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/Vagrantfile | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/group_vars/all | 22
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd0.yml | 7
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd1.yml | 6
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/hosts | 9
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/test.yml | 31
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/vagrant_variables.yml | 73
l--------- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/Vagrantfile | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/group_vars/all | 19
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd0.yml | 7
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd1.yml | 6
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/hosts | 9
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/test.yml | 29
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/vagrant_variables.yml | 73
l--------- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/Vagrantfile | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/group_vars/all | 22
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd0.yml | 7
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd1.yml | 6
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/hosts | 9
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/test.yml | 31
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/vagrant_variables.yml | 73
l--------- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/Vagrantfile | 1
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/group_vars/all | 22
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd0.yml | 7
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd1.yml | 6
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/hosts | 9
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/test.yml | 31
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/vagrant_variables.yml | 73
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini | 58
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/tests/__init__.py | 0
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/tests/conftest.py | 103
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/tests/osd/__init__.py | 0
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/tests/osd/test_osds.py | 60
-rw-r--r-- src/ceph-volume/ceph_volume/tests/functional/vagrant_variables.yml | 57
-rw-r--r-- src/ceph-volume/ceph_volume/tests/systemd/test_main.py | 51
-rw-r--r-- src/ceph-volume/ceph_volume/tests/systemd/test_systemctl.py | 21
-rw-r--r-- src/ceph-volume/ceph_volume/tests/test_configuration.py | 117
-rw-r--r-- src/ceph-volume/ceph_volume/tests/test_decorators.py | 78
-rw-r--r-- src/ceph-volume/ceph_volume/tests/test_inventory.py | 263
-rw-r--r-- src/ceph-volume/ceph_volume/tests/test_main.py | 69
-rw-r--r-- src/ceph-volume/ceph_volume/tests/test_process.py | 92
-rw-r--r-- src/ceph-volume/ceph_volume/tests/test_terminal.py | 143
-rw-r--r-- src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py | 338
-rw-r--r-- src/ceph-volume/ceph_volume/tests/util/test_device.py | 704
-rw-r--r-- src/ceph-volume/ceph_volume/tests/util/test_disk.py | 524
-rw-r--r-- src/ceph-volume/ceph_volume/tests/util/test_encryption.py | 138
-rw-r--r-- src/ceph-volume/ceph_volume/tests/util/test_prepare.py | 413
-rw-r--r-- src/ceph-volume/ceph_volume/tests/util/test_system.py | 309
-rw-r--r-- src/ceph-volume/ceph_volume/tests/util/test_util.py | 116
-rw-r--r-- src/ceph-volume/ceph_volume/util/__init__.py | 108
-rw-r--r-- src/ceph-volume/ceph_volume/util/arg_validators.py | 222
-rw-r--r-- src/ceph-volume/ceph_volume/util/constants.py | 46
-rw-r--r-- src/ceph-volume/ceph_volume/util/device.py | 699
-rw-r--r-- src/ceph-volume/ceph_volume/util/disk.py | 927
-rw-r--r-- src/ceph-volume/ceph_volume/util/encryption.py | 278
-rw-r--r-- src/ceph-volume/ceph_volume/util/lsmdisk.py | 196
-rw-r--r-- src/ceph-volume/ceph_volume/util/prepare.py | 535
-rw-r--r-- src/ceph-volume/ceph_volume/util/system.py | 419
-rw-r--r-- src/ceph-volume/ceph_volume/util/templates.py | 49
-rw-r--r-- src/ceph-volume/plugin/zfs/CMakeLists.txt | 3
-rw-r--r-- src/ceph-volume/plugin/zfs/LICENSE | 32
-rw-r--r-- src/ceph-volume/plugin/zfs/MANIFEST.in | 7
-rwxr-xr-x src/ceph-volume/plugin/zfs/ceph_volume_zfs/__init__.py | 13
-rw-r--r-- src/ceph-volume/plugin/zfs/ceph_volume_zfs/api/__init__.py | 3
-rwxr-xr-x src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/__init__.py | 2
-rwxr-xr-x src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/__init__.py | 4
-rw-r--r-- src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/inventory.py | 50
-rw-r--r-- src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/main.py | 36
-rw-r--r-- src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/prepare.py | 25
-rw-r--r-- src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/zap.py | 34
-rw-r--r-- src/ceph-volume/plugin/zfs/ceph_volume_zfs/util/__init__.py | 1
-rw-r--r-- src/ceph-volume/plugin/zfs/ceph_volume_zfs/util/disk.py | 148
-rwxr-xr-x src/ceph-volume/plugin/zfs/ceph_volume_zfs/zfs.py | 152
-rw-r--r-- src/ceph-volume/plugin/zfs/requirements_dev.txt | 5
-rw-r--r-- src/ceph-volume/plugin/zfs/setup.py | 44
-rw-r--r-- src/ceph-volume/plugin/zfs/tox.ini | 21
-rw-r--r-- src/ceph-volume/setup.py | 42
-rw-r--r-- src/ceph-volume/shell_tox.ini | 11
-rw-r--r-- src/ceph-volume/tox.ini | 22
-rwxr-xr-x src/ceph-volume/tox_install_command.sh | 3
297 files changed, 24351 insertions, 0 deletions
diff --git a/src/ceph-volume/.gitignore b/src/ceph-volume/.gitignore
new file mode 100644
index 000000000..426d75d3f
--- /dev/null
+++ b/src/ceph-volume/.gitignore
@@ -0,0 +1,13 @@
+*~
+*.pyc
+*.pyo
+.coverage
+.tox
+*.egg-info
+*.egg
+dist
+virtualenv
+build
+*.log
+*.trs
+.cache
diff --git a/src/ceph-volume/CMakeLists.txt b/src/ceph-volume/CMakeLists.txt
new file mode 100644
index 000000000..9166553dc
--- /dev/null
+++ b/src/ceph-volume/CMakeLists.txt
@@ -0,0 +1,29 @@
+
+include(Distutils)
+
+distutils_install_module(ceph_volume
+ INSTALL_SCRIPT ${CMAKE_INSTALL_FULL_SBINDIR})
+
+if(FREEBSD)
+ add_subdirectory(plugin/zfs)
+endif()
+
+# Required for running ceph-volume inventory in a vstart environment
+set(CEPH_VOLUME_VIRTUALENV ${CEPH_BUILD_VIRTUALENV}/ceph-volume-virtualenv)
+
+add_custom_command(
+ OUTPUT ${CEPH_VOLUME_VIRTUALENV}/bin/python
+ COMMAND ${CMAKE_SOURCE_DIR}/src/tools/setup-virtualenv.sh --python=${Python3_EXECUTABLE} ${CEPH_VOLUME_VIRTUALENV}
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src/ceph-volume
+ COMMENT "ceph-volume venv is being created")
+
+add_custom_command(
+ OUTPUT ${CEPH_VOLUME_VIRTUALENV}/bin/ceph-volume
+ DEPENDS ${CEPH_VOLUME_VIRTUALENV}/bin/python
+ COMMAND . ${CEPH_VOLUME_VIRTUALENV}/bin/activate && ${CEPH_VOLUME_VIRTUALENV}/bin/python setup.py develop && deactivate
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src/ceph-volume
+ COMMENT "${CMAKE_SOURCE_DIR}/src/ceph-volume")
+
+add_custom_target(ceph-volume-venv-setup
+ DEPENDS ${CEPH_VOLUME_VIRTUALENV}/bin/ceph-volume)
+
diff --git a/src/ceph-volume/MANIFEST.in b/src/ceph-volume/MANIFEST.in
new file mode 100644
index 000000000..5b4a149c7
--- /dev/null
+++ b/src/ceph-volume/MANIFEST.in
@@ -0,0 +1,2 @@
+include bin/ceph-volume
+include tox.ini
diff --git a/src/ceph-volume/ceph_volume/__init__.py b/src/ceph-volume/ceph_volume/__init__.py
new file mode 100644
index 000000000..4d6ff1a2a
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/__init__.py
@@ -0,0 +1,22 @@
+from collections import namedtuple
+
+
+sys_info = namedtuple('sys_info', ['devices'])
+sys_info.devices = dict()
+
+
+class UnloadedConfig(object):
+ """
+ This class is used as the default value for conf.ceph so that if
+ a configuration file is not successfully loaded then it will give
+ a nice error message when values from the config are used.
+ """
+ def __getattr__(self, *a):
+ raise RuntimeError("No valid ceph configuration file was loaded.")
+
+conf = namedtuple('config', ['ceph', 'cluster', 'verbosity', 'path', 'log_path'])
+conf.ceph = UnloadedConfig()
+
+__version__ = "1.0.0"
+
+__release__ = "pacific"
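The ``UnloadedConfig`` default above is a fail-fast guard: any attribute access on ``conf.ceph`` before a configuration file has been loaded goes through ``__getattr__`` and raises a clear ``RuntimeError`` instead of a confusing ``AttributeError`` further down the stack. An illustrative session (the ``get_safe`` name is just an example; any attribute access triggers the same error):

    >>> from ceph_volume import conf
    >>> conf.ceph.get_safe('global', 'fsid')
    Traceback (most recent call last):
        ...
    RuntimeError: No valid ceph configuration file was loaded.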
diff --git a/src/ceph-volume/ceph_volume/activate/__init__.py b/src/ceph-volume/ceph_volume/activate/__init__.py
new file mode 100644
index 000000000..542bf32bd
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/activate/__init__.py
@@ -0,0 +1 @@
+from .main import Activate # noqa
diff --git a/src/ceph-volume/ceph_volume/activate/main.py b/src/ceph-volume/ceph_volume/activate/main.py
new file mode 100644
index 000000000..bcd468515
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/activate/main.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+
+import argparse
+
+from ceph_volume import terminal
+from ceph_volume.devices.lvm.activate import Activate as LVMActivate
+from ceph_volume.devices.raw.activate import Activate as RAWActivate
+from ceph_volume.devices.simple.activate import Activate as SimpleActivate
+
+
+class Activate(object):
+
+ help = "Activate an OSD"
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ def main(self):
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume activate',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=self.help,
+ )
+ parser.add_argument(
+ '--osd-id',
+ help='OSD ID to activate'
+ )
+ parser.add_argument(
+ '--osd-uuid',
+ help='OSD UUID to activate'
+ )
+ parser.add_argument(
+ '--no-systemd',
+ dest='no_systemd',
+ action='store_true',
+ help='Skip creating and enabling systemd units and starting OSD services'
+ )
+ parser.add_argument(
+ '--no-tmpfs',
+ action='store_true',
+ help='Do not use a tmpfs mount for OSD data dir'
+ )
+ self.args = parser.parse_args(self.argv)
+
+ # first try raw
+ try:
+ RAWActivate([]).activate(
+ devs=None,
+ start_osd_id=self.args.osd_id,
+ start_osd_uuid=self.args.osd_uuid,
+ tmpfs=not self.args.no_tmpfs,
+ systemd=not self.args.no_systemd,
+ )
+ return
+ except Exception as e:
+ terminal.info(f'Failed to activate via raw: {e}')
+
+ # then try lvm
+ try:
+ LVMActivate([]).activate(
+ argparse.Namespace(
+ osd_id=self.args.osd_id,
+ osd_fsid=self.args.osd_uuid,
+ no_tmpfs=self.args.no_tmpfs,
+ no_systemd=self.args.no_systemd,
+ )
+ )
+ return
+ except Exception as e:
+ terminal.info(f'Failed to activate via lvm: {e}')
+
+ # then try simple
+ try:
+ SimpleActivate([]).activate(
+ argparse.Namespace(
+ osd_id=self.args.osd_id,
+ osd_fsid=self.args.osd_uuid,
+ no_systemd=self.args.no_systemd,
+ )
+ )
+ return
+ except Exception as e:
+ terminal.info(f'Failed to activate via simple: {e}')
+
+ terminal.error('Failed to activate any OSD(s)')
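The ``main()`` above is a simple fallback chain: each backend's activation is attempted in order (raw, then lvm, then simple), the first success returns, and only if all three raise is an error reported. A condensed sketch of the pattern, using hypothetical names rather than the actual ceph-volume API:

    def activate_with_fallback(args, backends):
        # backends is an ordered list of (name, callable) pairs;
        # the first activator that does not raise wins.
        for name, activate_fn in backends:
            try:
                activate_fn(args)
                return name
            except Exception as exc:
                print('Failed to activate via {}: {}'.format(name, exc))
        raise RuntimeError('Failed to activate any OSD(s)')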
diff --git a/src/ceph-volume/ceph_volume/api/__init__.py b/src/ceph-volume/ceph_volume/api/__init__.py
new file mode 100644
index 000000000..ecc971299
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/api/__init__.py
@@ -0,0 +1,3 @@
+"""
+Device API that can be shared among other implementations.
+"""
diff --git a/src/ceph-volume/ceph_volume/api/lvm.py b/src/ceph-volume/ceph_volume/api/lvm.py
new file mode 100644
index 000000000..3b554c7a4
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/api/lvm.py
@@ -0,0 +1,1212 @@
+"""
+API for CRUD lvm tag operations. Follows the Ceph LVM tag naming convention
+that prefixes tags with ``ceph.`` and uses ``=`` for assignment, and provides
+a set of utilities for interacting with LVM.
+"""
+import logging
+import os
+import uuid
+from itertools import repeat
+from math import floor
+from ceph_volume import process, util, conf
+from ceph_volume.exceptions import SizeAllocationError
+
+logger = logging.getLogger(__name__)
+
+
+def convert_filters_to_str(filters):
+ """
+ Convert filter args from a dictionary to the following format -
+ filter_name=filter_val,...
+ """
+ if not filters:
+ return filters
+
+ filter_arg = ''
+ for k, v in filters.items():
+ filter_arg += k + '=' + v + ','
+ # get rid of extra comma at the end
+ filter_arg = filter_arg[:len(filter_arg) - 1]
+
+ return filter_arg
+
+
+def convert_tags_to_str(tags):
+ """
+ Convert tags from a dictionary to the following format -
+ tags={tag_name=tag_val,...}
+ """
+ if not tags:
+ return tags
+
+ tag_arg = 'tags={'
+ for k, v in tags.items():
+ tag_arg += k + '=' + v + ','
+ # get rid of extra comma at the end
+ tag_arg = tag_arg[:len(tag_arg) - 1] + '}'
+
+ return tag_arg
+
+
+def make_filters_lvmcmd_ready(filters, tags):
+ """
+ Convert filters (including tags) from a dictionary to the following format -
+ filter_name=filter_val...,tags={tag_name=tag_val,...}
+
+ The command will look as follows::
+ lvs -S filter_name=filter_val...,tags={tag_name=tag_val,...}
+ """
+ filters = convert_filters_to_str(filters)
+ tags = convert_tags_to_str(tags)
+
+ if filters and tags:
+ return filters + ',' + tags
+ if filters and not tags:
+ return filters
+ if not filters and tags:
+ return tags
+ else:
+ return ''
+
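Taken together, the three helpers above reduce plain dictionaries to the single selection string that is handed to ``-S`` on ``pvs``/``vgs``/``lvs``. A worked example with illustrative values:

    >>> make_filters_lvmcmd_ready({'lv_name': 'osd-block'}, {'ceph.osd_id': '0'})
    'lv_name=osd-block,tags={ceph.osd_id=0}'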
+
+def _output_parser(output, fields):
+ """
+ Newer versions of LVM allow ``--reportformat=json``, but older versions,
+ like the one included in Xenial do not. LVM has the ability to filter and
+ format its output so we assume the output will be in a format this parser
+ can handle (using ';' as a delimiter)
+
+ :param fields: A string, possibly using ',' to group many items, as it
+ would be used on the CLI
+ :param output: The CLI output from the LVM call
+ """
+ field_items = fields.split(',')
+ report = []
+ for line in output:
+ # clear the leading/trailing whitespace
+ line = line.strip()
+
+ # remove the extra '"' in each field
+ line = line.replace('"', '')
+
+ # prevent moving forward with empty contents
+ if not line:
+ continue
+
+ # splitting on ';' because that is what the lvm call uses as
+ # '--separator'
+ output_items = [i.strip() for i in line.split(';')]
+ # map the output to the fields
+ report.append(
+ dict(zip(field_items, output_items))
+ )
+
+ return report
+
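For example, a single report line using the ``;`` separator parses into one dictionary per line, keyed by the requested fields (values here are illustrative):

    >>> _output_parser(['  "osd-block";"ceph-vg"  '], 'lv_name,vg_name')
    [{'lv_name': 'osd-block', 'vg_name': 'ceph-vg'}]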
+
+def _splitname_parser(line):
+ """
+ Parses the output from ``dmsetup splitname``, which should contain prefixes
+ (from ``--nameprefixes``) and use ";" as the separator
+
+ Output for /dev/mapper/vg-lv will usually look like::
+
+ DM_VG_NAME='/dev/mapper/vg';DM_LV_NAME='lv';DM_LV_LAYER=''
+
+
+ The ``VG_NAME`` will usually not be what other callers need (e.g. just 'vg'
+ in the example), so this utility will split ``/dev/mapper/`` out, so that
+ the actual volume group name is kept
+
+ :returns: dictionary with stripped prefixes
+ """
+ parsed = {}
+ try:
+ parts = line[0].split(';')
+ except IndexError:
+ logger.exception('Unable to parse mapper device: %s', line)
+ return parsed
+
+ for part in parts:
+ part = part.replace("'", '')
+ key, value = part.split('=')
+ if 'DM_VG_NAME' in key:
+ value = value.split('/dev/mapper/')[-1]
+ key = key.split('DM_')[-1]
+ parsed[key] = value
+
+ return parsed
+
+
+def sizing(device_size, parts=None, size=None):
+ """
+ Calculate proper sizing to fully utilize the volume group in the most
+ efficient way possible. To prevent situations where LVM might accept
+ a percentage that is beyond the vg's capabilities, it will refuse with
+ an error when requesting a larger-than-possible parameter, in addition
+ to rounding down calculations.
+
+ A dictionary with different sizing parameters is returned, to make it
+ easier for others to choose what they need in order to create logical
+ volumes::
+
+ >>> sizing(100 * 1024**3, parts=2)
+ {'parts': 2, 'percentages': 50, 'sizes': 50}
+
+ """
+ if parts is not None and size is not None:
+ raise ValueError(
+ "Cannot process sizing with both parts (%s) and size (%s)" % (parts, size)
+ )
+
+ if size and size > device_size:
+ raise SizeAllocationError(size, device_size)
+
+ def get_percentage(parts):
+ return int(floor(100 / float(parts)))
+
+ if parts is not None:
+ # Prevent parts being 0, falling back to 1 (100% usage)
+ parts = parts or 1
+ percentages = get_percentage(parts)
+
+ if size:
+ parts = int(device_size / size) or 1
+ percentages = get_percentage(parts)
+
+ sizes = device_size / parts if parts else int(floor(device_size))
+
+ return {
+ 'parts': parts,
+ 'percentages': percentages,
+ 'sizes': int(sizes/1024/1024/1024),
+ }
+
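Note the units: ``device_size`` and ``size`` are byte counts, while the returned ``sizes`` value is reduced to whole gigabytes. When ``size`` is passed, the part count is derived from it, as in this worked example (values chosen for illustration):

    >>> sizing(100 * 1024**3, size=30 * 1024**3)
    {'parts': 3, 'percentages': 33, 'sizes': 33}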
+
+def parse_tags(lv_tags):
+ """
+ Return a dictionary mapping of all the tags associated with
+ a Volume from the comma-separated tags coming from the LVM API
+
+ Input looks like::
+
+ "ceph.osd_fsid=aaa-fff-bbbb,ceph.osd_id=0"
+
+ For the above example, the expected return value would be::
+
+ {
+ "ceph.osd_fsid": "aaa-fff-bbbb",
+ "ceph.osd_id": "0"
+ }
+ """
+ if not lv_tags:
+ return {}
+ tag_mapping = {}
+ tags = lv_tags.split(',')
+ for tag_assignment in tags:
+ if not tag_assignment.startswith('ceph.'):
+ continue
+ key, value = tag_assignment.split('=', 1)
+ tag_mapping[key] = value
+
+ return tag_mapping
+
+
+def _vdo_parents(devices):
+ """
+ It is possible we didn't get a logical volume or a mapper path, but
+ a device like /dev/sda2. To resolve this, we must look at all the slaves of
+ every single device in /sys/block; if any of those devices is related to
+ VDO devices, then we can add the parent
+ """
+ parent_devices = []
+ for parent in os.listdir('/sys/block'):
+ for slave in os.listdir('/sys/block/%s/slaves' % parent):
+ if slave in devices:
+ parent_devices.append('/dev/%s' % parent)
+ parent_devices.append(parent)
+ return parent_devices
+
+
+def _vdo_slaves(vdo_names):
+ """
+ find all the slaves associated with each vdo name (from realpath) by going
+ into /sys/block/<realpath>/slaves
+ """
+ devices = []
+ for vdo_name in vdo_names:
+ mapper_path = '/dev/mapper/%s' % vdo_name
+ if not os.path.exists(mapper_path):
+ continue
+ # resolve the realpath and realname of the vdo mapper
+ vdo_realpath = os.path.realpath(mapper_path)
+ vdo_realname = vdo_realpath.split('/')[-1]
+ slaves_path = '/sys/block/%s/slaves' % vdo_realname
+ if not os.path.exists(slaves_path):
+ continue
+ devices.append(vdo_realpath)
+ devices.append(mapper_path)
+ devices.append(vdo_realname)
+ for slave in os.listdir(slaves_path):
+ devices.append('/dev/%s' % slave)
+ devices.append(slave)
+ return devices
+
+
+def _is_vdo(path):
+ """
+ A VDO device can be composed from many different devices, go through each
+ one of those devices and its slaves (if any) and correlate them back to
+ /dev/mapper and their realpaths, and then check if they appear as part of
+ /sys/kvdo/<name>/statistics
+
+ From the realpath of a logical volume, determine if it is a VDO device or
+ not, by correlating it to the presence of the name in
+ /sys/kvdo/<name>/statistics and all the previously captured devices
+ """
+ if not os.path.isdir('/sys/kvdo'):
+ return False
+ realpath = os.path.realpath(path)
+ realpath_name = realpath.split('/')[-1]
+ devices = []
+ vdo_names = set()
+ # get all the vdo names
+ for dirname in os.listdir('/sys/kvdo/'):
+ if os.path.isdir('/sys/kvdo/%s/statistics' % dirname):
+ vdo_names.add(dirname)
+
+ # find all the slaves associated with each vdo name (from realpath) by
+ # going into /sys/block/<realpath>/slaves
+ devices.extend(_vdo_slaves(vdo_names))
+
+ # Find all possible parents, looking into slaves that are related to VDO
+ devices.extend(_vdo_parents(devices))
+
+ return any([
+ path in devices,
+ realpath in devices,
+ realpath_name in devices])
+
+
+def is_vdo(path):
+ """
+ Detect if a path is backed by VDO, proxying the actual call to _is_vdo so
+ that we can prevent an exception breaking OSD creation. If an exception is
+ raised, it will get captured and logged to file, while returning
+ ``'0'`` (the return values are strings, suitable for use as LVM tag values).
+ """
+ try:
+ if _is_vdo(path):
+ return '1'
+ return '0'
+ except Exception:
+ logger.exception('Unable to properly detect device as VDO: %s', path)
+ return '0'
+
+
+def dmsetup_splitname(dev):
+ """
+ Run ``dmsetup splitname`` and parse the results.
+
+ .. warning:: This call does not ensure that the device is correct or that
+ it exists. ``dmsetup`` will happily take a non existing path and still
+ return a 0 exit status.
+ """
+ command = [
+ 'dmsetup', 'splitname', '--noheadings',
+ "--separator=';'", '--nameprefixes', dev
+ ]
+ out, err, rc = process.call(command)
+ return _splitname_parser(out)
+
+
+def is_ceph_device(lv):
+ try:
+ lv.tags['ceph.osd_id']
+ except (KeyError, AttributeError):
+ logger.warning('device is not part of ceph: %s', lv)
+ return False
+
+ if lv.tags['ceph.osd_id'] == 'null':
+ return False
+ else:
+ return True
+
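A quick illustration with a minimal stand-in object (only the ``tags`` attribute matters here):

    >>> class FakeLV(object):
    ...     tags = {'ceph.osd_id': '0'}
    >>> is_ceph_device(FakeLV())
    True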
+
+####################################
+#
+# Code for LVM Physical Volumes
+#
+################################
+
+PV_FIELDS = 'pv_name,pv_tags,pv_uuid,vg_name,lv_uuid'
+
+class PVolume(object):
+ """
+ Represents a Physical Volume from LVM, with some top-level attributes like
+ ``pv_name`` and parsed tags as a dictionary of key/value pairs.
+ """
+
+ def __init__(self, **kw):
+ for k, v in kw.items():
+ setattr(self, k, v)
+ self.pv_api = kw
+ self.name = kw['pv_name']
+ self.tags = parse_tags(kw['pv_tags'])
+
+ def __str__(self):
+ return '<%s>' % self.pv_api['pv_name']
+
+ def __repr__(self):
+ return self.__str__()
+
+ def set_tags(self, tags):
+ """
+ :param tags: A dictionary of tag names and values, like::
+
+ {
+ "ceph.osd_fsid": "aaa-fff-bbbb",
+ "ceph.osd_id": "0"
+ }
+
+ At the end of all modifications, the tags are refreshed to reflect
+ LVM's most current view.
+ """
+ for k, v in tags.items():
+ self.set_tag(k, v)
+ # after setting all the tags, refresh them for the current object, use the
+ # pv_* identifiers to filter because those shouldn't change
+ pv_object = get_single_pv(filters={'pv_name': self.pv_name,
+ 'pv_uuid': self.pv_uuid})
+
+ if not pv_object:
+ raise RuntimeError('No PV was found.')
+
+ self.tags = pv_object.tags
+
+ def set_tag(self, key, value):
+ """
+ Set the key/value pair as an LVM tag. Does not "refresh" the values of
+ the current object for its tags. Meant to be a "fire and forget" type
+ of modification.
+
+ **warning**: Altering tags on a PV has to be done ensuring that the
+ device is actually the one intended. ``pv_name`` is *not* a persistent
+ value, only ``pv_uuid`` is. Using ``pv_uuid`` is the best way to make
+ sure the device getting changed is the one needed.
+ """
+ # remove it first if it exists
+ if self.tags.get(key):
+ current_value = self.tags[key]
+ tag = "%s=%s" % (key, current_value)
+ process.call(['pvchange', '--deltag', tag, self.pv_name], run_on_host=True)
+
+ process.call(
+ [
+ 'pvchange',
+ '--addtag', '%s=%s' % (key, value), self.pv_name
+ ],
+ run_on_host=True
+ )
+
+
+def create_pv(device):
+ """
+ Create a physical volume from a device, useful when devices need to be later mapped
+ to journals.
+ """
+ process.run([
+ 'pvcreate',
+ '-v', # verbose
+ '-f', # force it
+ '--yes', # answer yes to any prompts
+ device
+ ], run_on_host=True)
+
+
+def remove_pv(pv_name):
+ """
+ Removes a physical volume using a double `-f` to prevent prompts and fully
+ remove anything related to LVM. This is tremendously destructive, but so are all other actions
+ when zapping a device.
+
+ In the case where multiple PVs are found, it will ignore that fact and
+ continue with the removal, specifically in the case of messages like::
+
+ WARNING: PV $UUID /dev/DEV-1 was already found on /dev/DEV-2
+
+ These situations can be avoided with custom filtering rules, which this API
+ cannot handle while accommodating custom user filters.
+ """
+ fail_msg = "Unable to remove vg %s" % pv_name
+ process.run(
+ [
+ 'pvremove',
+ '-v', # verbose
+ '-f', # force it
+ '-f', # force it
+ pv_name
+ ],
+ run_on_host=True,
+ fail_msg=fail_msg,
+ )
+
+
+def get_pvs(fields=PV_FIELDS, filters='', tags=None):
+ """
+ Return a list of PVs that are available on the system and match the
+ filters and tags passed. The filters argument takes a dictionary containing
+ arguments required by the -S option of LVM. LVM tags are tricky to pass as
+ a dictionary within a dictionary, so pass them through the tags argument
+ instead and the helper methods will take care of the rest.
+
+ :param fields: string containing list of fields to be displayed by the
+ pvs command
+ :param filters: dictionary containing LVM filters
+ :param tags: dictionary containing LVM tags
+ :returns: list of class PVolume object representing pvs on the system
+ """
+ filters = make_filters_lvmcmd_ready(filters, tags)
+ args = ['pvs', '--noheadings', '--readonly', '--separator=";"', '-S',
+ filters, '-o', fields]
+
+ stdout, stderr, returncode = process.call(args, run_on_host=True, verbose_on_failure=False)
+ pvs_report = _output_parser(stdout, fields)
+ return [PVolume(**pv_report) for pv_report in pvs_report]
+
+
+def get_single_pv(fields=PV_FIELDS, filters=None, tags=None):
+ """
+ Wrapper of get_pvs() meant to be a convenience method to avoid this pattern::
+ pvs = get_pvs()
+ if len(pvs) >= 1:
+ pv = pvs[0]
+ """
+ pvs = get_pvs(fields=fields, filters=filters, tags=tags)
+
+ if len(pvs) == 0:
+ return None
+ if len(pvs) > 1:
+ raise RuntimeError('Filters {} matched more than 1 PV present on this host.'.format(str(filters)))
+
+ return pvs[0]
+
+
+################################
+#
+# Code for LVM Volume Groups
+#
+#############################
+
+VG_FIELDS = 'vg_name,pv_count,lv_count,vg_attr,vg_extent_count,vg_free_count,vg_extent_size'
+VG_CMD_OPTIONS = ['--noheadings', '--readonly', '--units=b', '--nosuffix', '--separator=";"']
+
+
+class VolumeGroup(object):
+ """
+ Represents an LVM group, with some top-level attributes like ``vg_name``
+ """
+
+ def __init__(self, **kw):
+ for k, v in kw.items():
+ setattr(self, k, v)
+ self.name = kw['vg_name']
+ if not self.name:
+ raise ValueError('VolumeGroup must have a non-empty name')
+ self.tags = parse_tags(kw.get('vg_tags', ''))
+
+ def __str__(self):
+ return '<%s>' % self.name
+
+ def __repr__(self):
+ return self.__str__()
+
+ @property
+ def free(self):
+ """
+ Return free space in VG in bytes
+ """
+ return int(self.vg_extent_size) * int(self.vg_free_count)
+
+ @property
+ def free_percent(self):
+ """
+ Return free space in the VG as a fraction between 0 and 1
+ """
+ return int(self.vg_free_count) / int(self.vg_extent_count)
+
+ @property
+ def size(self):
+ """
+ Returns VG size in bytes
+ """
+ return int(self.vg_extent_size) * int(self.vg_extent_count)
+
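The three properties above are plain extent arithmetic: sizes come from multiplying the extent size by an extent count, and the free fraction from dividing the counts. With illustrative values (4 MiB extents, half of the VG free):

    >>> vg = VolumeGroup(vg_name='ceph-vg', vg_extent_size='4194304',
    ...                  vg_extent_count='25600', vg_free_count='12800')
    >>> (vg.size, vg.free, vg.free_percent)
    (107374182400, 53687091200, 0.5)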
+ def sizing(self, parts=None, size=None):
+ """
+ Calculate proper sizing to fully utilize the volume group in the most
+ efficient way possible. To prevent situations where LVM might accept
+ a percentage that is beyond the vg's capabilities, it will refuse with
+ an error when requesting a larger-than-possible parameter, in addition
+ to rounding down calculations.
+
+ A dictionary with different sizing parameters is returned, to make it
+ easier for others to choose what they need in order to create logical
+ volumes::
+
+ >>> data_vg.free
+ 1024
+ >>> data_vg.sizing(parts=4)
+ {'parts': 4, 'sizes': 256, 'percentages': 25}
+ >>> data_vg.sizing(size=512)
+ {'parts': 2, 'sizes': 512, 'percentages': 50}
+
+
+ :param parts: Number of parts to create LVs from
+ :param size: Size in gigabytes to divide the VG into
+
+ :raises SizeAllocationError: When the requested size cannot be allocated
+ :raises ValueError: If both ``parts`` and ``size`` are given
+ """
+ if parts is not None and size is not None:
+ raise ValueError(
+ "Cannot process sizing with both parts (%s) and size (%s)" % (parts, size)
+ )
+
+ # If size is given we need to map it to extents so that we avoid issues
+ # when trying to get this right with a size in gigabytes. Find the
+ # percentage first (cheating, because these values are thrown out)
+ vg_free_count = util.str_to_int(self.vg_free_count)
+
+ if size:
+ size = size * 1024 * 1024 * 1024
+ extents = int(size / int(self.vg_extent_size))
+ disk_sizing = sizing(self.free, size=size, parts=parts)
+ else:
+ if parts is not None:
+ # Prevent parts being 0, falling back to 1 (100% usage)
+ parts = parts or 1
+ size = int(self.free / parts)
+ extents = size * vg_free_count / self.free
+ disk_sizing = sizing(self.free, parts=parts)
+
+ extent_sizing = sizing(vg_free_count, size=extents)
+
+ disk_sizing['extents'] = int(extents)
+ disk_sizing['percentages'] = extent_sizing['percentages']
+ return disk_sizing
+
+ def bytes_to_extents(self, size):
+ '''
+ Return how many free extents fit into a size in bytes. This has
+ some uncertainty involved: if size/extent_size is within 1% of the
+ actual free extents we will return the free extent count, otherwise
+ we'll throw an error.
+ This accommodates the size calculation in batch. We need to report
+ the OSD layout but have not yet created any LVM structures. We use the
+ disk size in batch if no VG is present and that will overshoot the
+ actual free extent count due to LVM overhead.
+
+ '''
+ b_to_ext = int(size / int(self.vg_extent_size))
+ if b_to_ext < int(self.vg_free_count):
+ # return bytes in extents if there is more space
+ return b_to_ext
+ elif b_to_ext / int(self.vg_free_count) - 1 < 0.01:
+ # return vg_free_count if it's less than 1% off
+ logger.info(
+ 'bytes_to_extents results in {} but only {} '
+ 'are available, adjusting the latter'.format(b_to_ext,
+ self.vg_free_count))
+ return int(self.vg_free_count)
+ # else raise an exception
+ raise RuntimeError('Can\'t convert {} to free extents, only {} ({} '
+ 'bytes) are free'.format(size, self.vg_free_count,
+ self.free))
+
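A worked example of the 1% tolerance, with illustrative values (4 MiB extents, 25600 free extents, i.e. 100 GiB free): asking for slightly more than the free space is clamped rather than rejected.

    >>> vg = VolumeGroup(vg_name='ceph-vg', vg_extent_size='4194304',
    ...                  vg_extent_count='25600', vg_free_count='25600')
    >>> vg.bytes_to_extents(100 * 1024**3)          # exactly the free space
    25600
    >>> vg.bytes_to_extents(int(100.5 * 1024**3))   # 0.5% over: clamped
    25600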
+ def slots_to_extents(self, slots):
+ '''
+ Return how many extents fit into each slot when the VG is divided into the given number of slots
+ '''
+ return int(int(self.vg_extent_count) / slots)
+
+
+def create_vg(devices, name=None, name_prefix=None):
+ """
+ Create a Volume Group. Command looks like::
+
+ vgcreate --force --yes group_name device
+
+ Once created the volume group is returned as a ``VolumeGroup`` object
+
+ :param devices: A list of devices to create a VG. Optionally, a single
+ device (as a string) can be used.
+ :param name: Optionally set the name of the VG, defaults to 'ceph-{uuid}'
+ :param name_prefix: Optionally prefix the name of the VG, which will get combined
+ with a UUID string
+ """
+ if isinstance(devices, set):
+ devices = list(devices)
+ if not isinstance(devices, list):
+ devices = [devices]
+ if name_prefix:
+ name = "%s-%s" % (name_prefix, str(uuid.uuid4()))
+ elif name is None:
+ name = "ceph-%s" % str(uuid.uuid4())
+ process.run([
+ 'vgcreate',
+ '--force',
+ '--yes',
+ name] + devices,
+ run_on_host=True
+ )
+
+ return get_single_vg(filters={'vg_name': name})
+
+
+def extend_vg(vg, devices):
+ """
+ Extend a Volume Group. Command looks like::
+
+ vgextend --force --yes group_name [device, ...]
+
+ Once created the volume group is extended and returned as a ``VolumeGroup`` object
+
+ :param vg: A VolumeGroup object
+ :param devices: A list of devices to extend the VG. Optionally, a single
+ device (as a string) can be used.
+ """
+ if not isinstance(devices, list):
+ devices = [devices]
+ process.run([
+ 'vgextend',
+ '--force',
+ '--yes',
+ vg.name] + devices,
+ run_on_host=True
+ )
+
+ return get_single_vg(filters={'vg_name': vg.name})
+
+
+def reduce_vg(vg, devices):
+ """
+ Reduce a Volume Group. Command looks like::
+
+ vgreduce --force --yes group_name [device, ...]
+
+ :param vg: A VolumeGroup object
+ :param devices: A list of devices to remove from the VG. Optionally, a
+ single device (as a string) can be used.
+ """
+ if not isinstance(devices, list):
+ devices = [devices]
+ process.run([
+ 'vgreduce',
+ '--force',
+ '--yes',
+ vg.name] + devices,
+ run_on_host=True
+ )
+
+ return get_single_vg(filters={'vg_name': vg.name})
+
+
+def remove_vg(vg_name):
+ """
+ Removes a volume group.
+ """
+ if not vg_name:
+ logger.warning('Skipping removal of invalid VG name: "%s"', vg_name)
+ return
+ fail_msg = "Unable to remove vg %s" % vg_name
+ process.run(
+ [
+ 'vgremove',
+ '-v', # verbose
+ '-f', # force it
+ vg_name
+ ],
+ run_on_host=True,
+ fail_msg=fail_msg,
+ )
+
+
+def get_vgs(fields=VG_FIELDS, filters='', tags=None):
+ """
+ Return a list of VGs that are available on the system and match the
+ filters and tags passed. The filters argument takes a dictionary containing
+ arguments required by the -S option of LVM. LVM tags are tricky to pass as
+ a dictionary within a dictionary, so pass them through the tags argument
+ instead and the helper methods will take care of the rest.
+
+ :param fields: string containing list of fields to be displayed by the
+ vgs command
+ :param filters: dictionary containing LVM filters
+ :param tags: dictionary containing LVM tags
+ :returns: list of class VolumeGroup object representing vgs on the system
+ """
+ filters = make_filters_lvmcmd_ready(filters, tags)
+ args = ['vgs'] + VG_CMD_OPTIONS + ['-S', filters, '-o', fields]
+
+ stdout, stderr, returncode = process.call(args, run_on_host=True, verbose_on_failure=False)
+ vgs_report = _output_parser(stdout, fields)
+ return [VolumeGroup(**vg_report) for vg_report in vgs_report]
+
+
+def get_single_vg(fields=VG_FIELDS, filters=None, tags=None):
+ """
+ Wrapper of get_vgs() meant to be a convenience method to avoid this pattern::
+ vgs = get_vgs()
+ if len(vgs) >= 1:
+ vg = vgs[0]
+ """
+ vgs = get_vgs(fields=fields, filters=filters, tags=tags)
+
+ if len(vgs) == 0:
+ return None
+ if len(vgs) > 1:
+ raise RuntimeError('Filters {} matched more than 1 VG present on this host.'.format(str(filters)))
+
+ return vgs[0]
+
+
+def get_device_vgs(device, name_prefix=''):
+ stdout, stderr, returncode = process.call(
+ ['pvs'] + VG_CMD_OPTIONS + ['-o', VG_FIELDS, device],
+ run_on_host=True,
+ verbose_on_failure=False
+ )
+ vgs = _output_parser(stdout, VG_FIELDS)
+ return [VolumeGroup(**vg) for vg in vgs if vg['vg_name'] and vg['vg_name'].startswith(name_prefix)]
+
+
+def get_all_devices_vgs(name_prefix=''):
+ vg_fields = f'pv_name,{VG_FIELDS}'
+ cmd = ['pvs'] + VG_CMD_OPTIONS + ['-o', vg_fields]
+ stdout, stderr, returncode = process.call(
+ cmd,
+ run_on_host=True,
+ verbose_on_failure=False
+ )
+ vgs = _output_parser(stdout, vg_fields)
+ return [VolumeGroup(**vg) for vg in vgs]
+
+#################################
+#
+# Code for LVM Logical Volumes
+#
+###############################
+
+LV_FIELDS = 'lv_tags,lv_path,lv_name,vg_name,lv_uuid,lv_size'
+LV_CMD_OPTIONS = ['--noheadings', '--readonly', '--separator=";"', '-a',
+ '--units=b', '--nosuffix']
+
+
+class Volume(object):
+ """
+ Represents a Logical Volume from LVM, with some top-level attributes like
+ ``lv_name`` and parsed tags as a dictionary of key/value pairs.
+ """
+
+ def __init__(self, **kw):
+ for k, v in kw.items():
+ setattr(self, k, v)
+ self.lv_api = kw
+ self.name = kw['lv_name']
+ if not self.name:
+ raise ValueError('Volume must have a non-empty name')
+ self.tags = parse_tags(kw['lv_tags'])
+ self.encrypted = self.tags.get('ceph.encrypted', '0') == '1'
+ self.used_by_ceph = 'ceph.osd_id' in self.tags
+
+ def __str__(self):
+ return '<%s>' % self.lv_api['lv_path']
+
+ def __repr__(self):
+ return self.__str__()
+
+ def as_dict(self):
+ obj = {}
+ obj.update(self.lv_api)
+ obj['tags'] = self.tags
+ obj['name'] = self.name
+ obj['type'] = self.tags['ceph.type']
+ obj['path'] = self.lv_path
+ return obj
+
+ def report(self):
+ if not self.used_by_ceph:
+ return {
+ 'name': self.lv_name,
+ 'comment': 'not used by ceph'
+ }
+ else:
+ type_ = self.tags['ceph.type']
+ report = {
+ 'name': self.lv_name,
+ 'osd_id': self.tags['ceph.osd_id'],
+ 'cluster_name': self.tags.get('ceph.cluster_name', conf.cluster),
+ 'type': type_,
+ 'osd_fsid': self.tags['ceph.osd_fsid'],
+ 'cluster_fsid': self.tags['ceph.cluster_fsid'],
+ 'osdspec_affinity': self.tags.get('ceph.osdspec_affinity', ''),
+ }
+ type_uuid = '{}_uuid'.format(type_)
+ report[type_uuid] = self.tags['ceph.{}'.format(type_uuid)]
+ return report
+
+ def _format_tag_args(self, op, tags):
+ tag_args = ['{}={}'.format(k, v) for k, v in tags.items()]
+ # weird but efficient way of zipping two lists and getting a flat list
+ return list(sum(zip(repeat(op), tag_args), ()))
+
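The ``zip``/``repeat`` trick above interleaves the flag with each ``key=value`` pair and flattens the result into the argument list ``lvchange`` expects. The same expression in isolation:

    >>> from itertools import repeat
    >>> tag_args = ['ceph.osd_id=0', 'ceph.type=block']
    >>> list(sum(zip(repeat('--addtag'), tag_args), ()))
    ['--addtag', 'ceph.osd_id=0', '--addtag', 'ceph.type=block']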
+ def clear_tags(self, keys=None):
+ """
+ Removes all or passed tags from the Logical Volume.
+ """
+ if not keys:
+ keys = self.tags.keys()
+
+ del_tags = {k: self.tags[k] for k in keys if k in self.tags}
+ if not del_tags:
+ # nothing to clear
+ return
+ del_tag_args = self._format_tag_args('--deltag', del_tags)
+ # --deltag returns success even if the tag to be deleted is not set
+ process.call(['lvchange'] + del_tag_args + [self.lv_path], run_on_host=True)
+ for k in del_tags.keys():
+ del self.tags[k]
+
+
+ def set_tags(self, tags):
+ """
+ :param tags: A dictionary of tag names and values, like::
+
+ {
+ "ceph.osd_fsid": "aaa-fff-bbbb",
+ "ceph.osd_id": "0"
+ }
+
+ At the end of all modifications, the tags are refreshed to reflect
+ LVM's most current view.
+ """
+ self.clear_tags(tags.keys())
+ add_tag_args = self._format_tag_args('--addtag', tags)
+ process.call(['lvchange'] + add_tag_args + [self.lv_path], run_on_host=True)
+ for k, v in tags.items():
+ self.tags[k] = v
+
+
+ def clear_tag(self, key):
+ if self.tags.get(key):
+ current_value = self.tags[key]
+ tag = "%s=%s" % (key, current_value)
+ process.call(['lvchange', '--deltag', tag, self.lv_path], run_on_host=True)
+ del self.tags[key]
+
+
+ def set_tag(self, key, value):
+ """
+ Set the key/value pair as an LVM tag.
+ """
+ # remove it first if it exists
+ self.clear_tag(key)
+
+ process.call(
+ [
+ 'lvchange',
+ '--addtag', '%s=%s' % (key, value), self.lv_path
+ ],
+ run_on_host=True
+ )
+ self.tags[key] = value
+
+ def deactivate(self):
+ """
+ Deactivate the LV by calling lvchange -an
+ """
+ process.call(['lvchange', '-an', self.lv_path], run_on_host=True)
+
+
+def create_lv(name_prefix,
+ uuid,
+ vg=None,
+ device=None,
+ slots=None,
+ extents=None,
+ size=None,
+ tags=None):
+ """
+ Create a Logical Volume in a Volume Group. Command looks like::
+
+ lvcreate -L 50G -n gfslv vg0
+
+ ``name_prefix`` is required. If ``size`` is provided it is expected to be a
+ byte count. Tags are an optional dictionary and are expected to
+ conform to the convention of prefixing them with "ceph." like::
+
+ {"ceph.block_device": "/dev/ceph/osd-1"}
+
+ :param name_prefix: name prefix for the LV, typically something like ceph-osd-block
+ :param uuid: UUID to ensure uniqueness; is combined with name_prefix to
+ form the LV name
+ :param vg: optional, pass an existing VG to create LV
+ :param device: optional, device to use. Either device or vg must be passed
+ :param slots: optional, number of slots to divide the vg into; the LV will
+ occupy one slot if enough space is available
+ :param extents: optional, how many lvm extents to use, supersedes slots
+ :param size: optional, target LV size in bytes, supersedes extents,
+ resulting LV might be smaller depending on extent
+ size of the underlying VG
+ :param tags: optional, a dict of lvm tags to set on the LV
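+
+ A hypothetical call, with device name, uuid and tags that are purely
+ illustrative::
+
+ lv = create_lv('osd-block', 'a0b1c2d3', device='/dev/sdb',
+ tags={'ceph.type': 'block'})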
+ """
+ name = '{}-{}'.format(name_prefix, uuid)
+ if not vg:
+ if not device:
+ raise RuntimeError("Must either specify vg or device, none given")
+ # check if a vgs starting with ceph already exists
+ vgs = get_device_vgs(device, 'ceph')
+ if vgs:
+ vg = vgs[0]
+ else:
+ # create one if not
+ vg = create_vg(device, name_prefix='ceph')
+ assert(vg)
+
+ if size:
+ extents = vg.bytes_to_extents(size)
+ logger.debug('size was passed: {} -> {}'.format(size, extents))
+ elif slots and not extents:
+ extents = vg.slots_to_extents(slots)
+ logger.debug('slots was passed: {} -> {}'.format(slots, extents))
+
+ if extents:
+ command = [
+ 'lvcreate',
+ '--yes',
+ '-l',
+ '{}'.format(extents),
+ '-n', name, vg.vg_name
+ ]
+ # create the lv with all the space available, this is needed because the
+ # lvcreate invocation differs when no explicit extent count is given
+ else:
+ command = [
+ 'lvcreate',
+ '--yes',
+ '-l',
+ '100%FREE',
+ '-n', name, vg.vg_name
+ ]
+ process.run(command, run_on_host=True)
+
+ lv = get_single_lv(filters={'lv_name': name, 'vg_name': vg.vg_name})
+
+ if tags is None:
+ tags = {
+ "ceph.osd_id": "null",
+ "ceph.type": "null",
+ "ceph.cluster_fsid": "null",
+ "ceph.osd_fsid": "null",
+ }
+ # when creating a distinct type, the caller doesn't know what the path will
+ # be so this function will set it after creation using the mapping
+ # XXX add CEPH_VOLUME_LVM_DEBUG to enable -vvvv on lv operations
+ type_path_tag = {
+ 'journal': 'ceph.journal_device',
+ 'data': 'ceph.data_device',
+ 'block': 'ceph.block_device',
+ 'wal': 'ceph.wal_device',
+ 'db': 'ceph.db_device',
+ 'lockbox': 'ceph.lockbox_device', # XXX might not ever need this lockbox sorcery
+ }
+ path_tag = type_path_tag.get(tags.get('ceph.type'))
+ if path_tag:
+ tags.update({path_tag: lv.lv_path})
+
+ lv.set_tags(tags)
+
+ return lv
+
+
+def create_lvs(volume_group, parts=None, size=None, name_prefix='ceph-lv'):
+ """
+ Create multiple Logical Volumes from a Volume Group by calculating the
+ proper extents from ``parts`` or ``size``. A custom prefix can be used
+ (defaults to ``ceph-lv``); these names are always suffixed with a uuid.
+
+ LV creation in ceph-volume requires tags, which are expected to be
+ pre-computed by callers that know Ceph metadata like OSD IDs and FSIDs. That
+ is usually not the case when mass-creating LVs, so common/default
+ tags are set to ``"null"``.
+
+ .. note:: LVs that are not in use can be detected by querying LVM for tags that are
+ set to ``"null"``.
+
+ :param volume_group: The volume group (vg) to use for LV creation
+ :type volume_group: ``VolumeGroup()`` object
+ :param parts: Number of LVs to create *instead of* ``size``.
+ :type parts: int
+ :param size: Size (in gigabytes) of LVs to create, e.g. "as many 10gb LVs as possible"
+ :type size: int
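+
+ A hypothetical call, creating four equally-sized LVs on an existing
+ ``VolumeGroup`` object (illustrative)::
+
+ lvs = create_lvs(vg, parts=4, name_prefix='ceph-lv')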
+ """
+ if parts is None and size is None:
+ # fallback to just one part (using 100% of the vg)
+ parts = 1
+ lvs = []
+ tags = {
+ "ceph.osd_id": "null",
+ "ceph.type": "null",
+ "ceph.cluster_fsid": "null",
+ "ceph.osd_fsid": "null",
+ }
+ sizing = volume_group.sizing(parts=parts, size=size)
+ for _ in range(0, sizing['parts']):
+ extents = sizing['extents']
+ lvs.append(
+ create_lv(name_prefix, uuid.uuid4(), vg=volume_group, extents=extents, tags=tags)
+ )
+ return lvs
+
+
+def remove_lv(lv):
+ """
+ Removes a logical volume given its absolute path.
+
+ Will return True if the lv is successfully removed or
+ raises a RuntimeError if the removal fails.
+
+ :param lv: A ``Volume`` object or the path for an LV
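+
+ For example (the path is illustrative)::
+
+ remove_lv('/dev/ceph-vg/osd-block-lv')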
+ """
+ if isinstance(lv, Volume):
+ path = lv.lv_path
+ else:
+ path = lv
+
+ stdout, stderr, returncode = process.call(
+ [
+ 'lvremove',
+ '-v', # verbose
+ '-f', # force it
+ path
+ ],
+ run_on_host=True,
+ show_command=True,
+ terminal_verbose=True,
+ )
+ if returncode != 0:
+ raise RuntimeError("Unable to remove %s" % path)
+ return True
+
+
+def get_lvs(fields=LV_FIELDS, filters='', tags=None):
+ """
+ Return a list of LVs that are available on the system and match the
+ filters and tags passed. Argument filters takes a dictionary containing
+ arguments required by the -S option of LVM. LVM tags are quite tricky
+ to pass as a dictionary within that dictionary, so pass a dictionary of
+ tags via the tags argument and the tricky part will be taken care of by
+ the helper methods.
+
+ :param fields: string containing list of fields to be displayed by the
+ lvs command
+ :param filters: dictionary containing LVM filters
+ :param tags: dictionary containing LVM tags
+ :returns: list of ``Volume`` objects representing LVs on the system
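+
+ A hypothetical call mixing filters and tags (values are illustrative)::
+
+ lvs = get_lvs(filters={'vg_name': 'ceph-vg'},
+ tags={'ceph.osd_id': '0'})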
+ """
+ filters = make_filters_lvmcmd_ready(filters, tags)
+ args = ['lvs'] + LV_CMD_OPTIONS + ['-S', filters, '-o', fields]
+
+ stdout, stderr, returncode = process.call(args, run_on_host=True, verbose_on_failure=False)
+ lvs_report = _output_parser(stdout, fields)
+ return [Volume(**lv_report) for lv_report in lvs_report]
+
+
+def get_single_lv(fields=LV_FIELDS, filters=None, tags=None):
+ """
+ Wrapper of get_lvs() meant to be a convenience method to avoid the phrase::
+
+ lvs = get_lvs()
+ if len(lvs) >= 1:
+ lv = lvs[0]
+ """
+ lvs = get_lvs(fields=fields, filters=filters, tags=tags)
+
+ if len(lvs) == 0:
+ return None
+ if len(lvs) > 1:
+ raise RuntimeError('Filters {} matched more than 1 LV present on this host.'.format(str(filters)))
+
+ return lvs[0]
+
+
+def get_lvs_from_osd_id(osd_id):
+ return get_lvs(tags={'ceph.osd_id': osd_id})
+
+
+def get_single_lv_from_osd_id(osd_id):
+ return get_single_lv(tags={'ceph.osd_id': osd_id})
+
+
+def get_lv_by_name(name):
+ stdout, stderr, returncode = process.call(
+ ['lvs', '--noheadings', '-o', LV_FIELDS, '-S',
+ 'lv_name={}'.format(name)],
+ run_on_host=True,
+ verbose_on_failure=False
+ )
+ lvs = _output_parser(stdout, LV_FIELDS)
+ return [Volume(**lv) for lv in lvs]
+
+
+def get_lvs_by_tag(lv_tag):
+ stdout, stderr, returncode = process.call(
+ ['lvs', '--noheadings', '--separator=";"', '-a', '-o', LV_FIELDS, '-S',
+ 'lv_tags={{{}}}'.format(lv_tag)],
+ run_on_host=True,
+ verbose_on_failure=False
+ )
+ lvs = _output_parser(stdout, LV_FIELDS)
+ return [Volume(**lv) for lv in lvs]
+
+
+def get_device_lvs(device, name_prefix=''):
+ stdout, stderr, returncode = process.call(
+ ['pvs'] + LV_CMD_OPTIONS + ['-o', LV_FIELDS, device],
+ run_on_host=True,
+ verbose_on_failure=False
+ )
+ lvs = _output_parser(stdout, LV_FIELDS)
+ return [Volume(**lv) for lv in lvs if lv['lv_name'] and
+ lv['lv_name'].startswith(name_prefix)]
+
+
+def get_lv_by_fullname(full_name):
+ """
+ Returns the LV matching the specified full name (formatted as vg_name/lv_name)
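+
+ For example (the name is illustrative)::
+
+ lv = get_lv_by_fullname('ceph-vg/osd-block-lv')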
+ """
+ try:
+ vg_name, lv_name = full_name.split('/')
+ res_lv = get_single_lv(filters={'lv_name': lv_name,
+ 'vg_name': vg_name})
+ except ValueError:
+ res_lv = None
+ return res_lv
+
+
+def get_lvs_from_path(devpath):
+ lvs = []
+ if os.path.isabs(devpath):
+ # we have a block device
+ lvs = get_device_lvs(devpath)
+ if not lvs:
+ # maybe this was a LV path /dev/vg_name/lv_name or /dev/mapper/
+ lvs = get_lvs(filters={'path': devpath})
+
+ return lvs
diff --git a/src/ceph-volume/ceph_volume/configuration.py b/src/ceph-volume/ceph_volume/configuration.py
new file mode 100644
index 000000000..e0f7ef1f0
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/configuration.py
@@ -0,0 +1,232 @@
+import contextlib
+import logging
+import os
+import re
+from ceph_volume import terminal, conf
+from ceph_volume import exceptions
+from sys import version_info as sys_version_info
+
+if sys_version_info.major >= 3:
+ import configparser
+ conf_parentclass = configparser.ConfigParser
+else:
+ import ConfigParser as configparser
+ conf_parentclass = configparser.SafeConfigParser
+
+
+logger = logging.getLogger(__name__)
+
+
+class _TrimIndentFile(object):
+ """
+ This is used to take a file-like object and remove any
+ leading whitespace from each line when it's read. This is important
+ because some ceph configuration files include tabs which break
+ ConfigParser.
+ """
+ def __init__(self, fp):
+ self.fp = fp
+
+ def readline(self):
+ line = self.fp.readline()
+ return line.lstrip(' \t')
+
+ def __iter__(self):
+ return iter(self.readline, '')
+
+
+def load_ceph_conf_path(cluster_name='ceph'):
+ abspath = '/etc/ceph/%s.conf' % cluster_name
+ conf.path = os.getenv('CEPH_CONF', abspath)
+ conf.cluster = cluster_name
+
+
+def load(abspath=None):
+ if abspath is None:
+ abspath = conf.path
+
+ if not os.path.exists(abspath):
+ raise exceptions.ConfigurationError(abspath=abspath)
+
+ parser = Conf()
+
+ try:
+ ceph_file = open(abspath)
+ trimmed_conf = _TrimIndentFile(ceph_file)
+ with contextlib.closing(ceph_file):
+ parser.read_conf(trimmed_conf)
+ conf.ceph = parser
+ return parser
+ except configparser.ParsingError as error:
+ logger.exception('Unable to parse INI-style file: %s' % abspath)
+ terminal.error(str(error))
+ raise RuntimeError('Unable to read configuration file: %s' % abspath)
+
+
+class Conf(conf_parentclass):
+ """
+ Subclasses from ConfigParser to give a few helpers for Ceph
+ configuration.
+ """
+
+ def read_path(self, path):
+ self.path = path
+ return self.read(path)
+
+ def is_valid(self):
+ try:
+ self.get('global', 'fsid')
+ except (configparser.NoSectionError, configparser.NoOptionError):
+ raise exceptions.ConfigurationKeyError('global', 'fsid')
+
+ def optionxform(self, s):
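+ # normalize option names so that, e.g., 'mon initial members' and
+ # 'mon_initial_members' both map to 'mon_initial_members'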
+ s = s.replace('_', ' ')
+ s = '_'.join(s.split())
+ return s
+
+ def get_safe(self, section, key, default=None, check_valid=True):
+ """
+ Attempt to get a configuration value from a certain section
+ in a ``cfg`` object, returning ``default`` if it is not found. Avoids
+ the need for a try/except on ConfigParser exceptions every time.
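+
+ For example (section and key are illustrative)::
+
+ fsid = cfg.get_safe('global', 'fsid', default=None)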
+ """
+ if check_valid:
+ self.is_valid()
+ try:
+ return self.get(section, key)
+ except (configparser.NoSectionError, configparser.NoOptionError):
+ return default
+
+ def get_list(self, section, key, default=None, split=','):
+ """
+ Assumes that the value for a given key is going to be a list separated
+ by commas. It gets rid of trailing comments. If just one item is
+ present it returns a list with a single item; if no key is found, an
+ empty list is returned.
+
+ Optionally split on other characters besides ',' and return a fallback
+ value if no items are found.
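+
+ For example, with ``devs = /dev/sda, /dev/sdb`` in an ``[osd]`` section
+ (names are illustrative)::
+
+ cfg.get_list('osd', 'devs')  # ['/dev/sda', '/dev/sdb']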
+ """
+ self.is_valid()
+ value = self.get_safe(section, key, [])
+ if value == []:
+ if default is not None:
+ return default
+ return value
+
+ # strip comments
+ value = re.split(r'\s+#', value)[0]
+
+ # split on commas
+ value = value.split(split)
+
+ # strip spaces
+ return [x.strip() for x in value]
+
+ # XXX Almost all of it lifted from the original ConfigParser._read method,
+ # except for the parsing of '#' in lines. This is only a problem in Python 2.7, and can be removed
+ # once tooling is Python3 only with `Conf(inline_comment_prefixes=('#',';'))`
+ def _read(self, fp, fpname):
+ """Parse a sectioned setup file.
+
+ The sections in a setup file contain a title line at the top,
+ indicated by a name in square brackets (`[]'), plus key/value
+ options lines, indicated by `name: value' format lines.
+ Continuations are represented by an embedded newline then
+ leading whitespace. Blank lines, lines beginning with a '#',
+ and just about everything else are ignored.
+ """
+ cursect = None # None, or a dictionary
+ optname = None
+ lineno = 0
+ e = None # None, or an exception
+ while True:
+ line = fp.readline()
+ if not line:
+ break
+ lineno = lineno + 1
+ # comment or blank line?
+ if line.strip() == '' or line[0] in '#;':
+ continue
+ if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
+ # no leading whitespace
+ continue
+ # continuation line?
+ if line[0].isspace() and cursect is not None and optname:
+ value = line.strip()
+ if value:
+ cursect[optname].append(value)
+ # a section header or option header?
+ else:
+ # is it a section header?
+ mo = self.SECTCRE.match(line)
+ if mo:
+ sectname = mo.group('header')
+ if sectname in self._sections:
+ cursect = self._sections[sectname]
+ elif sectname == 'DEFAULT':
+ cursect = self._defaults
+ else:
+ cursect = self._dict()
+ cursect['__name__'] = sectname
+ self._sections[sectname] = cursect
+ # So sections can't start with a continuation line
+ optname = None
+ # no section header in the file?
+ elif cursect is None:
+ raise configparser.MissingSectionHeaderError(fpname, lineno, line)
+ # an option line?
+ else:
+ mo = self._optcre.match(line)
+ if mo:
+ optname, vi, optval = mo.group('option', 'vi', 'value')
+ optname = self.optionxform(optname.rstrip())
+ # This check is fine because the OPTCRE cannot
+ # match if it would set optval to None
+ if optval is not None:
+ # XXX Added support for '#' inline comments
+ if vi in ('=', ':') and (';' in optval or '#' in optval):
+ # strip comments
+ optval = re.split(r'\s+(;|#)', optval)[0]
+ # if what is left is a comment as a value, fall back to an empty string
+ # that is: `foo = ;` would mean `foo` is '', which brings parity with
+ # what ceph-conf tool does
+ if optval in [';','#']:
+ optval = ''
+ optval = optval.strip()
+ # allow empty values
+ if optval == '""':
+ optval = ''
+ cursect[optname] = [optval]
+ else:
+ # valueless option handling
+ cursect[optname] = optval
+ else:
+ # a non-fatal parsing error occurred. set up the
+ # exception but keep going. the exception will be
+ # raised at the end of the file and will contain a
+ # list of all bogus lines
+ if not e:
+ e = configparser.ParsingError(fpname)
+ e.append(lineno, repr(line))
+ # if any parsing errors occurred, raise an exception
+ if e:
+ raise e
+
+ # join the multi-line values collected while reading
+ all_sections = [self._defaults]
+ all_sections.extend(self._sections.values())
+ for options in all_sections:
+ for name, val in options.items():
+ if isinstance(val, list):
+ options[name] = '\n'.join(val)
+
+ def read_conf(self, conffile):
+ if sys_version_info.major >= 3:
+ self.read_file(conffile)
+ else:
+ self.readfp(conffile)
diff --git a/src/ceph-volume/ceph_volume/decorators.py b/src/ceph-volume/ceph_volume/decorators.py
new file mode 100644
index 000000000..3c003ad77
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/decorators.py
@@ -0,0 +1,90 @@
+import os
+import sys
+from ceph_volume import terminal, exceptions
+from functools import wraps
+
+
+def needs_root(func):
+ """
+ Check for super user privileges on functions/methods. Raise
+ ``SuperUserError`` with a nice message.
+ """
+ @wraps(func)
+ def is_root(*a, **kw):
+ if os.getuid() != 0 and not os.environ.get('CEPH_VOLUME_SKIP_NEEDS_ROOT', False):
+ raise exceptions.SuperUserError()
+ return func(*a, **kw)
+ return is_root
+
+
+def catches(catch=None, handler=None, exit=True):
+ """
+ Very simple decorator that catches any of the exception(s) passed in as
+ a single exception class or a tuple (containing multiple ones), printing
+ the exception message and optionally handling the problem with the
+ handler if one is provided.
+
+ So instead of doing something like this::
+
+ def bar():
+ try:
+ some_call()
+ print("Success!")
+ except TypeError as exc:
+ print("Error while handling some call: %s" % exc)
+ sys.exit(1)
+
+ You would need to decorate it like this to have the same effect::
+
+ @catches(TypeError)
+ def bar():
+ some_call()
+ print("Success!")
+
+ If multiple exceptions need to be caught they need to be provided as a
+ tuple::
+
+ @catches((TypeError, AttributeError))
+ def bar():
+ some_call()
+ print("Success!")
+ """
+ catch = catch or Exception
+
+ def decorate(f):
+
+ @wraps(f)
+ def newfunc(*a, **kw):
+ try:
+ return f(*a, **kw)
+ except catch as e:
+ import logging
+ logger = logging.getLogger('ceph_volume')
+ logger.exception('exception caught by decorator')
+ if os.environ.get('CEPH_VOLUME_DEBUG'):
+ raise
+ if handler:
+ return handler(e)
+ else:
+ sys.stderr.write(make_exception_message(e))
+ if exit:
+ sys.exit(1)
+ return newfunc
+
+ return decorate
+
+#
+# Decorator helpers
+#
+
+
+def make_exception_message(exc):
+ """
+ An exception is passed in and this function
+ returns a properly formatted, readable message
+ string for it.
+ """
+ if str(exc):
+ return '%s %s: %s\n' % (terminal.red_arrow, exc.__class__.__name__, exc)
+ else:
+ return '%s %s\n' % (terminal.red_arrow, exc.__class__.__name__)
diff --git a/src/ceph-volume/ceph_volume/devices/__init__.py b/src/ceph-volume/ceph_volume/devices/__init__.py
new file mode 100644
index 000000000..2b017d671
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/__init__.py
@@ -0,0 +1 @@
+from . import lvm, simple, raw # noqa
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/__init__.py b/src/ceph-volume/ceph_volume/devices/lvm/__init__.py
new file mode 100644
index 000000000..3c147123e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/lvm/__init__.py
@@ -0,0 +1 @@
+from .main import LVM # noqa
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/activate.py b/src/ceph-volume/ceph_volume/devices/lvm/activate.py
new file mode 100644
index 000000000..4ad117ec0
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/lvm/activate.py
@@ -0,0 +1,384 @@
+from __future__ import print_function
+import argparse
+import logging
+import os
+from textwrap import dedent
+from ceph_volume import process, conf, decorators, terminal, __release__, configuration
+from ceph_volume.util import system, disk
+from ceph_volume.util import prepare as prepare_utils
+from ceph_volume.util import encryption as encryption_utils
+from ceph_volume.systemd import systemctl
+from ceph_volume.api import lvm as api
+from .listing import direct_report
+
+
+logger = logging.getLogger(__name__)
+
+
+def activate_filestore(osd_lvs, no_systemd=False):
+ # find the osd
+ for osd_lv in osd_lvs:
+ if osd_lv.tags.get('ceph.type') == 'data':
+ data_lv = osd_lv
+ break
+ else:
+ raise RuntimeError('Unable to find a data LV for filestore activation')
+
+ is_encrypted = data_lv.tags.get('ceph.encrypted', '0') == '1'
+ is_vdo = data_lv.tags.get('ceph.vdo', '0')
+
+ osd_id = data_lv.tags['ceph.osd_id']
+ configuration.load_ceph_conf_path(data_lv.tags['ceph.cluster_name'])
+ configuration.load()
+ # it may have a volume with a journal
+ for osd_lv in osd_lvs:
+ if osd_lv.tags.get('ceph.type') == 'journal':
+ osd_journal_lv = osd_lv
+ break
+ else:
+ osd_journal_lv = None
+
+ # TODO: add sensible error reporting if this is ever the case
+ # blow up with a KeyError if this doesn't exist
+ osd_fsid = data_lv.tags['ceph.osd_fsid']
+ if not osd_journal_lv:
+ # must be a disk partition, by querying blkid by the uuid we are ensuring that the
+ # device path is always correct
+ journal_uuid = data_lv.tags['ceph.journal_uuid']
+ osd_journal = disk.get_device_from_partuuid(journal_uuid)
+ else:
+ journal_uuid = osd_journal_lv.lv_uuid
+ osd_journal = data_lv.tags['ceph.journal_device']
+
+ if not osd_journal:
+ raise RuntimeError('unable to detect an lv or device journal for OSD %s' % osd_id)
+
+ # this is done here, so that previous checks that ensure path availability
+ # and correctness can still be enforced, and report if any issues are found
+ if is_encrypted:
+ lockbox_secret = data_lv.tags['ceph.cephx_lockbox_secret']
+ # this keyring writing is idempotent
+ encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
+ dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
+ encryption_utils.luks_open(dmcrypt_secret, data_lv.lv_path, data_lv.lv_uuid)
+ encryption_utils.luks_open(dmcrypt_secret, osd_journal, journal_uuid)
+
+ osd_journal = '/dev/mapper/%s' % journal_uuid
+ source = '/dev/mapper/%s' % data_lv.lv_uuid
+ else:
+ source = data_lv.lv_path
+
+ # mount the osd
+ destination = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
+ if not system.device_is_mounted(source, destination=destination):
+ prepare_utils.mount_osd(source, osd_id, is_vdo=is_vdo)
+
+ # ensure that the OSD destination is always chowned properly
+ system.chown(destination)
+
+ # always re-do the symlink regardless if it exists, so that the journal
+ # device path that may have changed can be mapped correctly every time
+ destination = '/var/lib/ceph/osd/%s-%s/journal' % (conf.cluster, osd_id)
+ process.run(['ln', '-snf', osd_journal, destination])
+
+ # make sure that the journal has proper permissions
+ system.chown(osd_journal)
+
+ if no_systemd is False:
+ # enable the ceph-volume unit for this OSD
+ systemctl.enable_volume(osd_id, osd_fsid, 'lvm')
+
+ # enable the OSD
+ systemctl.enable_osd(osd_id)
+
+ # start the OSD
+ systemctl.start_osd(osd_id)
+ terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id)
+
+
+def get_osd_device_path(osd_lvs, device_type, dmcrypt_secret=None):
+ """
+ ``device_type`` can be one of ``db``, ``wal`` or ``block`` so that we can
+ query LVs on the system and fall back to querying the uuid if that is not
+ present.
+
+ Return a path if possible, or ``None`` failing that, since some of
+ these devices are optional.
+ """
+ osd_block_lv = None
+ for lv in osd_lvs:
+ if lv.tags.get('ceph.type') == 'block':
+ osd_block_lv = lv
+ break
+ if osd_block_lv:
+ is_encrypted = osd_block_lv.tags.get('ceph.encrypted', '0') == '1'
+ logger.debug('Found block device (%s) with encryption: %s', osd_block_lv.name, is_encrypted)
+ uuid_tag = 'ceph.%s_uuid' % device_type
+ device_uuid = osd_block_lv.tags.get(uuid_tag)
+ if not device_uuid:
+ return None
+
+ device_lv = None
+ for lv in osd_lvs:
+ if lv.tags.get('ceph.type') == device_type:
+ device_lv = lv
+ break
+ if device_lv:
+ if is_encrypted:
+ encryption_utils.luks_open(dmcrypt_secret, device_lv.lv_path, device_uuid)
+ return '/dev/mapper/%s' % device_uuid
+ return device_lv.lv_path
+
+ # this could be a regular device, so query it with blkid
+ physical_device = disk.get_device_from_partuuid(device_uuid)
+ if physical_device:
+ if is_encrypted:
+ encryption_utils.luks_open(dmcrypt_secret, physical_device, device_uuid)
+ return '/dev/mapper/%s' % device_uuid
+ return physical_device
+
+ raise RuntimeError('could not find %s with uuid %s' % (device_type, device_uuid))
+
+
+def activate_bluestore(osd_lvs, no_systemd=False, no_tmpfs=False):
+ for lv in osd_lvs:
+ if lv.tags.get('ceph.type') == 'block':
+ osd_block_lv = lv
+ break
+ else:
+ raise RuntimeError('could not find a bluestore OSD to activate')
+
+ is_encrypted = osd_block_lv.tags.get('ceph.encrypted', '0') == '1'
+ dmcrypt_secret = None
+ osd_id = osd_block_lv.tags['ceph.osd_id']
+ conf.cluster = osd_block_lv.tags['ceph.cluster_name']
+ osd_fsid = osd_block_lv.tags['ceph.osd_fsid']
+ configuration.load_ceph_conf_path(osd_block_lv.tags['ceph.cluster_name'])
+ configuration.load()
+
+ # mount on tmpfs the osd directory
+ osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
+ if not system.path_is_mounted(osd_path):
+ # mkdir -p and mount as tmpfs
+ prepare_utils.create_osd_path(osd_id, tmpfs=not no_tmpfs)
+ # XXX This needs to be removed once ceph-bluestore-tool can deal with
+ # symlinks that exist in the osd dir
+ for link_name in ['block', 'block.db', 'block.wal']:
+ link_path = os.path.join(osd_path, link_name)
+ if os.path.exists(link_path):
+ os.unlink(os.path.join(osd_path, link_name))
+ # encryption is handled here, before priming the OSD dir
+ if is_encrypted:
+ osd_lv_path = '/dev/mapper/%s' % osd_block_lv.lv_uuid
+ lockbox_secret = osd_block_lv.tags['ceph.cephx_lockbox_secret']
+ encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
+ dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
+ encryption_utils.luks_open(dmcrypt_secret, osd_block_lv.lv_path, osd_block_lv.lv_uuid)
+ else:
+ osd_lv_path = osd_block_lv.lv_path
+
+ db_device_path = get_osd_device_path(osd_lvs, 'db', dmcrypt_secret=dmcrypt_secret)
+ wal_device_path = get_osd_device_path(osd_lvs, 'wal', dmcrypt_secret=dmcrypt_secret)
+
+ # Once symlinks are removed, the osd dir can be primed again. chown first,
+ # regardless of what currently exists so that ``prime-osd-dir`` can succeed
+ # even if permissions are somehow messed up
+ system.chown(osd_path)
+ prime_command = [
+ 'ceph-bluestore-tool', '--cluster=%s' % conf.cluster,
+ 'prime-osd-dir', '--dev', osd_lv_path,
+ '--path', osd_path]
+
+ if __release__ != "luminous":
+ # mon-config changes are not available in Luminous
+ prime_command.append('--no-mon-config')
+
+ process.run(prime_command)
+ # always re-do the symlink regardless if it exists, so that the block,
+ # block.wal, and block.db devices that may have changed can be mapped
+ # correctly every time
+ process.run(['ln', '-snf', osd_lv_path, os.path.join(osd_path, 'block')])
+ system.chown(os.path.join(osd_path, 'block'))
+ system.chown(osd_path)
+ if db_device_path:
+ destination = os.path.join(osd_path, 'block.db')
+ process.run(['ln', '-snf', db_device_path, destination])
+ system.chown(db_device_path)
+ system.chown(destination)
+ if wal_device_path:
+ destination = os.path.join(osd_path, 'block.wal')
+ process.run(['ln', '-snf', wal_device_path, destination])
+ system.chown(wal_device_path)
+ system.chown(destination)
+
+ if no_systemd is False:
+ # enable the ceph-volume unit for this OSD
+ systemctl.enable_volume(osd_id, osd_fsid, 'lvm')
+
+ # enable the OSD
+ systemctl.enable_osd(osd_id)
+
+ # start the OSD
+ systemctl.start_osd(osd_id)
+ terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id)
+
+
+class Activate(object):
+
+ help = 'Discover and mount the LVM device associated with an OSD ID and start the Ceph OSD'
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ @decorators.needs_root
+ def activate_all(self, args):
+ listed_osds = direct_report()
+ osds = {}
+ for osd_id, devices in listed_osds.items():
+ # the metadata for all devices in each OSD will contain
+ # the FSID which is required for activation
+ for device in devices:
+ fsid = device.get('tags', {}).get('ceph.osd_fsid')
+ if fsid:
+ osds[fsid] = osd_id
+ break
+ if not osds:
+ terminal.warning('Was unable to find any OSDs to activate')
+ terminal.warning('Verify OSDs are present with "ceph-volume lvm list"')
+ return
+ for osd_fsid, osd_id in osds.items():
+ if not args.no_systemd and systemctl.osd_is_active(osd_id):
+ terminal.warning(
+ 'OSD ID %s FSID %s process is active. Skipping activation' % (osd_id, osd_fsid)
+ )
+ else:
+ terminal.info('Activating OSD ID %s FSID %s' % (osd_id, osd_fsid))
+ self.activate(args, osd_id=osd_id, osd_fsid=osd_fsid)
+
+ @decorators.needs_root
+ def activate(self, args, osd_id=None, osd_fsid=None):
+ """
+ :param args: The parsed arguments coming from the CLI
+ :param osd_id: When activating all, this gets populated with an
+ existing OSD ID
+ :param osd_fsid: When activating all, this gets populated with an
+ existing OSD FSID
+ """
+ osd_id = osd_id if osd_id else args.osd_id
+ osd_fsid = osd_fsid if osd_fsid else args.osd_fsid
+
+ if osd_id and osd_fsid:
+ tags = {'ceph.osd_id': osd_id, 'ceph.osd_fsid': osd_fsid}
+ elif not osd_id and osd_fsid:
+ tags = {'ceph.osd_fsid': osd_fsid}
+ elif osd_id and not osd_fsid:
+ raise RuntimeError('could not activate osd.{}, please provide the '
+ 'osd_fsid too'.format(osd_id))
+ else:
+ raise RuntimeError('Please provide both osd_id and osd_fsid')
+ lvs = api.get_lvs(tags=tags)
+ if not lvs:
+ raise RuntimeError('could not find osd.%s with osd_fsid %s' %
+ (osd_id, osd_fsid))
+
+ # This argument is only available when passed in directly or via
+ # systemd, not when ``create`` is being used
+ if getattr(args, 'auto_detect_objectstore', False):
+ logger.info('auto detecting objectstore')
+ # may get multiple lvs, so can't use get_single_lv() calls here
+ for lv in lvs:
+ has_journal = lv.tags.get('ceph.journal_uuid')
+ if has_journal:
+ logger.info('found a journal associated with the OSD, '
+ 'assuming filestore')
+ return activate_filestore(lvs, args.no_systemd)
+
+ logger.info('unable to find a journal associated with the OSD, '
+ 'assuming bluestore')
+
+ return activate_bluestore(lvs, args.no_systemd)
+
+ # explicit filestore/bluestore flags take precedence
+ if getattr(args, 'bluestore', False):
+ activate_bluestore(lvs, args.no_systemd, getattr(args, 'no_tmpfs', False))
+ elif getattr(args, 'filestore', False):
+ activate_filestore(lvs, args.no_systemd)
+ elif any('ceph.block_device' in lv.tags for lv in lvs):
+ activate_bluestore(lvs, args.no_systemd, getattr(args, 'no_tmpfs', False))
+ elif any('ceph.data_device' in lv.tags for lv in lvs):
+ activate_filestore(lvs, args.no_systemd)
+
+ def main(self):
+ sub_command_help = dedent("""
+ Activate OSDs by discovering them with LVM and mounting them in their
+ appropriate destination:
+
+ ceph-volume lvm activate {ID} {FSID}
+
+ The lvs associated with the OSD need to have been prepared previously,
+ so that all needed tags and metadata exist.
+
+ When migrating OSDs, or when a multiple-OSD activation is needed, the
+ ``--all`` flag can be used instead of the individual ID and FSID:
+
+ ceph-volume lvm activate --all
+
+ """)
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume lvm activate',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=sub_command_help,
+ )
+
+ parser.add_argument(
+ 'osd_id',
+ metavar='ID',
+ nargs='?',
+ help='The ID of the OSD, usually an integer, like 0'
+ )
+ parser.add_argument(
+ 'osd_fsid',
+ metavar='FSID',
+ nargs='?',
+ help='The FSID of the OSD, similar to a SHA1'
+ )
+ parser.add_argument(
+ '--auto-detect-objectstore',
+ action='store_true',
+ help='Autodetect the objectstore by inspecting the OSD',
+ )
+ parser.add_argument(
+ '--bluestore',
+ action='store_true',
+ help='force bluestore objectstore activation',
+ )
+ parser.add_argument(
+ '--filestore',
+ action='store_true',
+ help='force filestore objectstore activation',
+ )
+ parser.add_argument(
+ '--all',
+ dest='activate_all',
+ action='store_true',
+ help='Activate all OSDs found in the system',
+ )
+ parser.add_argument(
+ '--no-systemd',
+ dest='no_systemd',
+ action='store_true',
+ help='Skip creating and enabling systemd units and starting OSD services',
+ )
+ parser.add_argument(
+ '--no-tmpfs',
+ action='store_true',
+ help='Do not use a tmpfs mount for OSD data dir'
+ )
+ if len(self.argv) == 0:
+ print(sub_command_help)
+ return
+ args = parser.parse_args(self.argv)
+ if args.activate_all:
+ self.activate_all(args)
+ else:
+ self.activate(args)
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/batch.py b/src/ceph-volume/ceph_volume/devices/lvm/batch.py
new file mode 100644
index 000000000..c97d3a25b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/lvm/batch.py
@@ -0,0 +1,654 @@
+import argparse
+from collections import namedtuple
+import json
+import logging
+from textwrap import dedent
+from ceph_volume import terminal, decorators
+from ceph_volume.util import disk, prompt_bool, arg_validators, templates
+from ceph_volume.util import prepare
+from . import common
+from .create import Create
+from .prepare import Prepare
+
+mlogger = terminal.MultiLogger(__name__)
+logger = logging.getLogger(__name__)
+
+
+device_list_template = """
+ * {path: <25} {size: <10} {state}"""
+
+
+def device_formatter(devices):
+ lines = []
+ for path, details in devices:
+ lines.append(device_list_template.format(
+ path=path, size=details['human_readable_size'],
+ state='solid' if details['rotational'] == '0' else 'rotational')
+ )
+
+ return ''.join(lines)
+
+
+def ensure_disjoint_device_lists(data, db=[], wal=[], journal=[]):
+ # check that all device lists are disjoint with each other
+ if not all([set(data).isdisjoint(set(db)),
+ set(data).isdisjoint(set(wal)),
+ set(data).isdisjoint(set(journal)),
+ set(db).isdisjoint(set(wal))]):
+ raise Exception('Device lists are not disjoint')
+
+
+def separate_devices_from_lvs(devices):
+ phys = []
+ lvm = []
+ for d in devices:
+ phys.append(d) if d.is_device else lvm.append(d)
+ return phys, lvm
+
+
+def get_physical_osds(devices, args):
+ '''
+ Goes through passed physical devices and assigns OSDs
+ '''
+ data_slots = args.osds_per_device
+ if args.data_slots:
+ data_slots = max(args.data_slots, args.osds_per_device)
+ rel_data_size = 1.0 / data_slots
+ mlogger.debug('relative data size: {}'.format(rel_data_size))
+ ret = []
+ for dev in devices:
+ if dev.available_lvm:
+ dev_size = dev.vg_size[0]
+ abs_size = disk.Size(b=int(dev_size * rel_data_size))
+ free_size = dev.vg_free[0]
+ for _ in range(args.osds_per_device):
+ if abs_size > free_size:
+ break
+ free_size -= abs_size.b
+ osd_id = None
+ if args.osd_ids:
+ osd_id = args.osd_ids.pop()
+ ret.append(Batch.OSD(dev.path,
+ rel_data_size,
+ abs_size,
+ args.osds_per_device,
+ osd_id,
+ 'dmcrypt' if args.dmcrypt else None,
+ dev.symlink))
+ return ret
+
+
+def get_lvm_osds(lvs, args):
+ '''
+ Goes through passed LVs and assigns planned osds
+ '''
+ ret = []
+ for lv in lvs:
+ if lv.used_by_ceph:
+ continue
+ osd_id = None
+ if args.osd_ids:
+ osd_id = args.osd_ids.pop()
+ osd = Batch.OSD("{}/{}".format(lv.vg_name, lv.lv_name),
+ 100.0,
+ disk.Size(b=int(lv.lvs[0].lv_size)),
+ 1,
+ osd_id,
+ 'dmcrypt' if args.dmcrypt else None)
+ ret.append(osd)
+ return ret
+
+
+def get_physical_fast_allocs(devices, type_, fast_slots_per_device, new_osds, args):
+ requested_slots = getattr(args, '{}_slots'.format(type_))
+ if not requested_slots or requested_slots < fast_slots_per_device:
+ if requested_slots:
+ mlogger.info('{}_slots argument is too small, ignoring'.format(type_))
+ requested_slots = fast_slots_per_device
+
+ requested_size = getattr(args, '{}_size'.format(type_), 0)
+ if not requested_size or requested_size == 0:
+ # no size argument was specified, check ceph.conf
+ get_size_fct = getattr(prepare, 'get_{}_size'.format(type_))
+ requested_size = get_size_fct(lv_format=False)
+
+ ret = []
+ vg_device_map = group_devices_by_vg(devices)
+ for vg_devices in vg_device_map.values():
+ for dev in vg_devices:
+ if not dev.available_lvm:
+ continue
+ # any LV present is considered a taken slot
+ occupied_slots = len(dev.lvs)
+ dev_size = dev.vg_size[0]
+ # this only looks at the first vg on device, unsure if there is a better
+ # way
+ abs_size = disk.Size(b=int(dev_size / requested_slots))
+ free_size = dev.vg_free[0]
+ relative_size = int(abs_size) / dev_size
+ if requested_size:
+ if requested_size <= abs_size:
+ abs_size = requested_size
+ relative_size = int(abs_size) / dev_size
+ else:
+ mlogger.error(
+ '{} was requested for {}, but only {} can be fulfilled'.format(
+ requested_size,
+ '{}_size'.format(type_),
+ abs_size,
+ ))
+ exit(1)
+ while abs_size <= free_size and len(ret) < new_osds and occupied_slots < fast_slots_per_device:
+ free_size -= abs_size.b
+ occupied_slots += 1
+ ret.append((dev.path, relative_size, abs_size, requested_slots))
+ return ret
+
+def group_devices_by_vg(devices):
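+ '''
+ Groups the passed devices by the first VG they belong to, using the
+ key 'unused_devices' for devices that are not part of any VG.
+ '''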
+ result = dict()
+ result['unused_devices'] = []
+ for dev in devices:
+ if len(dev.vgs) > 0:
+ # already using the assumption that a PV only belongs to a single VG in other places
+ vg_name = dev.vgs[0].name
+ if vg_name in result:
+ result[vg_name].append(dev)
+ else:
+ result[vg_name] = [dev]
+ else:
+ result['unused_devices'].append(dev)
+ return result
+
+def get_lvm_fast_allocs(lvs):
+ return [("{}/{}".format(d.vg_name, d.lv_name), 100.0,
+ disk.Size(b=int(d.lvs[0].lv_size)), 1) for d in lvs if not
+ d.used_by_ceph]
+
+
+class Batch(object):
+
+ help = 'Automatically size devices for multi-OSD provisioning with minimal interaction'
+
+ _help = dedent("""
+ Automatically size devices ready for OSD provisioning based on default strategies.
+
+ Usage:
+
+ ceph-volume lvm batch [DEVICE...]
+
+ Devices can be physical block devices or LVs.
+ Optional reporting on possible outcomes is enabled with --report
+
+ ceph-volume lvm batch --report [DEVICE...]
+ """)
+
+ def __init__(self, argv):
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume lvm batch',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=self._help,
+ )
+
+ parser.add_argument(
+ 'devices',
+ metavar='DEVICES',
+ nargs='*',
+ type=arg_validators.ValidBatchDataDevice(),
+ default=[],
+ help='Devices to provision OSDs',
+ )
+ parser.add_argument(
+ '--db-devices',
+ nargs='*',
+ type=arg_validators.ValidBatchDevice(),
+ default=[],
+ help='Devices to provision OSDs db volumes',
+ )
+ parser.add_argument(
+ '--wal-devices',
+ nargs='*',
+ type=arg_validators.ValidBatchDevice(),
+ default=[],
+ help='Devices to provision OSDs wal volumes',
+ )
+ parser.add_argument(
+ '--journal-devices',
+ nargs='*',
+ type=arg_validators.ValidBatchDevice(),
+ default=[],
+ help='Devices to provision OSDs journal volumes',
+ )
+ parser.add_argument(
+ '--auto',
+ action='store_true',
+ help=('deploy multi-device OSDs if rotational and non-rotational drives '
+ 'are passed in DEVICES'),
+ default=True
+ )
+ parser.add_argument(
+ '--no-auto',
+ action='store_false',
+ dest='auto',
+ help=('deploy standalone OSDs if rotational and non-rotational drives '
+ 'are passed in DEVICES'),
+ )
+ parser.add_argument(
+ '--bluestore',
+ action='store_true',
+ help='bluestore objectstore (default)',
+ )
+ parser.add_argument(
+ '--filestore',
+ action='store_true',
+ help='filestore objectstore',
+ )
+ parser.add_argument(
+ '--report',
+ action='store_true',
+ help='Only report on OSDs that would be created and exit',
+ )
+ parser.add_argument(
+ '--yes',
+ action='store_true',
+ help='Avoid prompting for confirmation when provisioning',
+ )
+ parser.add_argument(
+ '--format',
+ help='output format, defaults to "pretty"',
+ default='pretty',
+ choices=['json', 'json-pretty', 'pretty'],
+ )
+ parser.add_argument(
+ '--dmcrypt',
+ action='store_true',
+ help='Enable device encryption via dm-crypt',
+ )
+ parser.add_argument(
+ '--crush-device-class',
+ dest='crush_device_class',
+ help='Crush device class to assign this OSD to',
+ default=""
+ )
+ parser.add_argument(
+ '--no-systemd',
+ dest='no_systemd',
+ action='store_true',
+ help='Skip creating and enabling systemd units and starting OSD services',
+ )
+ parser.add_argument(
+ '--osds-per-device',
+ type=int,
+ default=1,
+ help='Provision more than 1 (the default) OSD per device',
+ )
+ parser.add_argument(
+ '--data-slots',
+ type=int,
+ help=('Provision more than 1 (the default) OSD slot per device;'
+ ' if more slots than osds-per-device are specified, slots'
+ ' will stay unoccupied'),
+ )
+ parser.add_argument(
+ '--block-db-size',
+ type=disk.Size.parse,
+ help='Set (or override) the "bluestore_block_db_size" value, in bytes'
+ )
+ parser.add_argument(
+ '--block-db-slots',
+ type=int,
+ help='Provision slots on DB device, can remain unoccupied'
+ )
+ parser.add_argument(
+ '--block-wal-size',
+ type=disk.Size.parse,
+ help='Set (or override) the "bluestore_block_wal_size" value, in bytes'
+ )
+ parser.add_argument(
+ '--block-wal-slots',
+ type=int,
+ help='Provision slots on WAL device, can remain unoccupied'
+ )
+ def journal_size_in_mb_hack(size):
+ # TODO give user time to adjust, then remove this
+ if size and size[-1].isdigit():
+ mlogger.warning('DEPRECATION NOTICE')
+ mlogger.warning('--journal-size as integer is parsed as megabytes')
+ mlogger.warning('A future release will parse integers as bytes')
+ mlogger.warning('Add a "M" to explicitly pass a megabyte size')
+ size += 'M'
+ return disk.Size.parse(size)
+ parser.add_argument(
+ '--journal-size',
+ type=journal_size_in_mb_hack,
+ help='Override the "osd_journal_size" value, in megabytes'
+ )
+ parser.add_argument(
+ '--journal-slots',
+ type=int,
+ help='Provision slots on journal device, can remain unoccupied'
+ )
+ parser.add_argument(
+ '--prepare',
+ action='store_true',
+ help='Only prepare all OSDs, do not activate',
+ )
+ parser.add_argument(
+ '--osd-ids',
+ nargs='*',
+ default=[],
+ help='Reuse existing OSD ids',
+ type=arg_validators.valid_osd_id
+ )
+ self.args = parser.parse_args(argv)
+ self.parser = parser
+ for dev_list in ['', 'db_', 'wal_', 'journal_']:
+ setattr(self, '{}usable'.format(dev_list), [])
+
+ def report(self, plan):
+ report = self._create_report(plan)
+ print(report)
+
+ def _create_report(self, plan):
+ if self.args.format == 'pretty':
+ report = ''
+ report += templates.total_osds.format(total_osds=len(plan))
+
+ report += templates.osd_component_titles
+ for osd in plan:
+ report += templates.osd_header
+ report += osd.report()
+ return report
+ else:
+ json_report = []
+ for osd in plan:
+ json_report.append(osd.report_json())
+ if self.args.format == 'json':
+ return json.dumps(json_report)
+ elif self.args.format == 'json-pretty':
+ return json.dumps(json_report, indent=4,
+ sort_keys=True)
+
+ def _check_slot_args(self):
+ '''
+ Check that the *_slots args are consistent with the other arguments
+ '''
+ if self.args.data_slots and self.args.osds_per_device:
+ if self.args.data_slots < self.args.osds_per_device:
+ raise ValueError('data_slots is smaller than osds_per_device')
+
+ def _sort_rotational_disks(self):
+ '''
+ Helper for legacy auto behaviour.
+ Sorts drives into rotating and non-rotating, the latter being used for
+ db or journal.
+ '''
+ mlogger.warning('DEPRECATION NOTICE')
+ mlogger.warning('You are using the legacy automatic disk sorting behavior')
+ mlogger.warning('The Pacific release will change the default to --no-auto')
+ rotating = []
+ ssd = []
+ for d in self.args.devices:
+ rotating.append(d) if d.rotational else ssd.append(d)
+ if ssd and not rotating:
+ # no need for additional sorting, we'll only deploy standalone on ssds
+ return
+ self.args.devices = rotating
+ if self.args.filestore:
+ self.args.journal_devices = ssd
+ else:
+ self.args.db_devices = ssd
+
+ @decorators.needs_root
+ def main(self):
+ if not self.args.devices:
+ return self.parser.print_help()
+
+ # Default to bluestore here since defaulting it in add_argument may
+ # cause both to be True
+ if not self.args.bluestore and not self.args.filestore:
+ self.args.bluestore = True
+
+ if (self.args.auto and not self.args.db_devices and not
+ self.args.wal_devices and not self.args.journal_devices):
+ self._sort_rotational_disks()
+
+ self._check_slot_args()
+
+ ensure_disjoint_device_lists(self.args.devices,
+ self.args.db_devices,
+ self.args.wal_devices,
+ self.args.journal_devices)
+
+ plan = self.get_plan(self.args)
+
+ if self.args.report:
+ self.report(plan)
+ return 0
+
+ if not self.args.yes:
+ self.report(plan)
+ terminal.info('The above OSDs would be created if the operation continues')
+ if not prompt_bool('do you want to proceed? (yes/no)'):
+ terminal.error('aborting OSD provisioning')
+ raise SystemExit(0)
+
+ self._execute(plan)
+
+ def _execute(self, plan):
+ defaults = common.get_default_args()
+ global_args = [
+ 'bluestore',
+ 'filestore',
+ 'dmcrypt',
+ 'crush_device_class',
+ 'no_systemd',
+ ]
+ defaults.update({arg: getattr(self.args, arg) for arg in global_args})
+ for osd in plan:
+ args = osd.get_args(defaults)
+ if self.args.prepare:
+ p = Prepare([])
+ p.safe_prepare(argparse.Namespace(**args))
+ else:
+ c = Create([])
+ c.create(argparse.Namespace(**args))
+
+
+ def get_plan(self, args):
+ if args.bluestore:
+ plan = self.get_deployment_layout(args, args.devices, args.db_devices,
+ args.wal_devices)
+ elif args.filestore:
+ plan = self.get_deployment_layout(args, args.devices, args.journal_devices)
+ return plan
+
+ def get_deployment_layout(self, args, devices, fast_devices=[],
+ very_fast_devices=[]):
+ '''
+ The methods here are mostly just organization, error reporting and
+ setting up of (default) args. The heavy lifting code for the deployment
+ layout can be found in the static get_*_osds and get_*_fast_allocs
+ functions.
+ '''
+ plan = []
+ phys_devs, lvm_devs = separate_devices_from_lvs(devices)
+ mlogger.debug(('passed data devices: {} physical,'
+ ' {} LVM').format(len(phys_devs), len(lvm_devs)))
+
+ plan.extend(get_physical_osds(phys_devs, args))
+
+ plan.extend(get_lvm_osds(lvm_devs, args))
+
+ num_osds = len(plan)
+ if num_osds == 0:
+ mlogger.info('All data devices are unavailable')
+ return plan
+ requested_osds = args.osds_per_device * len(phys_devs) + len(lvm_devs)
+
+ fast_type = 'block_db' if args.bluestore else 'journal'
+ fast_allocations = self.fast_allocations(fast_devices,
+ requested_osds,
+ num_osds,
+ fast_type)
+ if fast_devices and not fast_allocations:
+ mlogger.info('{} fast devices were passed, but none are available'.format(len(fast_devices)))
+ return []
+ if fast_devices and not len(fast_allocations) == num_osds:
+ mlogger.error('{} fast allocations != {} num_osds'.format(
+ len(fast_allocations), num_osds))
+ exit(1)
+
+ very_fast_allocations = self.fast_allocations(very_fast_devices,
+ requested_osds,
+ num_osds,
+ 'block_wal')
+ if very_fast_devices and not very_fast_allocations:
+ mlogger.info('{} very fast devices were passed, but none are available'.format(len(very_fast_devices)))
+ return []
+ if very_fast_devices and not len(very_fast_allocations) == num_osds:
+ mlogger.error('{} very fast allocations != {} num_osds'.format(
+ len(very_fast_allocations), num_osds))
+ exit(1)
+
+ for osd in plan:
+ if fast_devices:
+ osd.add_fast_device(*fast_allocations.pop(),
+ type_=fast_type)
+ if very_fast_devices and args.bluestore:
+ osd.add_very_fast_device(*very_fast_allocations.pop())
+ return plan
+
+ def fast_allocations(self, devices, requested_osds, new_osds, type_):
+ ret = []
+ if not devices:
+ return ret
+ phys_devs, lvm_devs = separate_devices_from_lvs(devices)
+ mlogger.debug(('passed {} devices: {} physical,'
+ ' {} LVM').format(type_, len(phys_devs), len(lvm_devs)))
+
+ ret.extend(get_lvm_fast_allocs(lvm_devs))
+
+ # fill up uneven distributions across fast devices: 5 osds and 2 fast
+ # devices? create 3 slots on each device rather than deploying
+ # heterogeneous osds
+ slot_divider = max(1, len(phys_devs))
+ if (requested_osds - len(lvm_devs)) % slot_divider:
+ fast_slots_per_device = int((requested_osds - len(lvm_devs)) / slot_divider) + 1
+ else:
+ fast_slots_per_device = int((requested_osds - len(lvm_devs)) / slot_divider)
+
+ ret.extend(get_physical_fast_allocs(phys_devs,
+ type_,
+ fast_slots_per_device,
+ new_osds,
+ self.args))
+ return ret
+
+ class OSD(object):
+ '''
+ This class simply stores info about to-be-deployed OSDs and provides an
+ easy way to retrieve the necessary create arguments.
+ '''
+ VolSpec = namedtuple('VolSpec',
+ ['path',
+ 'rel_size',
+ 'abs_size',
+ 'slots',
+ 'type_'])
+
+ def __init__(self,
+ data_path,
+ rel_size,
+ abs_size,
+ slots,
+ id_,
+ encryption,
+ symlink=None):
+ self.id_ = id_
+ self.data = self.VolSpec(path=data_path,
+ rel_size=rel_size,
+ abs_size=abs_size,
+ slots=slots,
+ type_='data')
+ self.fast = None
+ self.very_fast = None
+ self.encryption = encryption
+ self.symlink = symlink
+
+ def add_fast_device(self, path, rel_size, abs_size, slots, type_):
+ self.fast = self.VolSpec(path=path,
+ rel_size=rel_size,
+ abs_size=abs_size,
+ slots=slots,
+ type_=type_)
+
+ def add_very_fast_device(self, path, rel_size, abs_size, slots):
+ self.very_fast = self.VolSpec(path=path,
+ rel_size=rel_size,
+ abs_size=abs_size,
+ slots=slots,
+ type_='block_wal')
+
+ def _get_osd_plan(self):
+ plan = {
+ 'data': self.data.path,
+ 'data_size': self.data.abs_size,
+ 'encryption': self.encryption,
+ }
+ if self.fast:
+ type_ = self.fast.type_.replace('.', '_')
+ plan.update(
+ {
+ type_: self.fast.path,
+ '{}_size'.format(type_): self.fast.abs_size,
+ })
+ if self.very_fast:
+ plan.update(
+ {
+ 'block_wal': self.very_fast.path,
+ 'block_wal_size': self.very_fast.abs_size,
+ })
+ if self.id_:
+ plan.update({'osd_id': self.id_})
+ return plan
+
+ def get_args(self, defaults):
+ my_defaults = defaults.copy()
+ my_defaults.update(self._get_osd_plan())
+ return my_defaults
+
+ def report(self):
+ report = ''
+ if self.id_:
+ report += templates.osd_reused_id.format(
+ id_=self.id_)
+ if self.encryption:
+ report += templates.osd_encryption.format(
+ enc=self.encryption)
+ path = self.data.path
+ if self.symlink:
+ path = f'{self.symlink} -> {self.data.path}'
+ report += templates.osd_component.format(
+ _type=self.data.type_,
+ path=path,
+ size=self.data.abs_size,
+ percent=self.data.rel_size)
+ if self.fast:
+ report += templates.osd_component.format(
+ _type=self.fast.type_,
+ path=self.fast.path,
+ size=self.fast.abs_size,
+ percent=self.fast.rel_size)
+ if self.very_fast:
+ report += templates.osd_component.format(
+ _type=self.very_fast.type_,
+ path=self.very_fast.path,
+ size=self.very_fast.abs_size,
+ percent=self.very_fast.rel_size)
+ return report
+
+ def report_json(self):
+ # cast all values to string so that the report can be dumped with
+ # json.dumps
+ return {k: str(v) for k, v in self._get_osd_plan().items()}
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/common.py b/src/ceph-volume/ceph_volume/devices/lvm/common.py
new file mode 100644
index 000000000..1134b1754
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/lvm/common.py
@@ -0,0 +1,190 @@
+from ceph_volume.util import arg_validators, disk
+from ceph_volume import process, conf
+from ceph_volume import terminal
+from ceph_volume.devices.lvm.zap import Zap
+import argparse
+
+
+def rollback_osd(args, osd_id=None):
+ """
+ When the process of creating or preparing fails, the OSD needs to be
+ destroyed so that the ID can be reused. This prevents leaving the ID
+ around as "used" on the monitor, which can cause confusion if expecting
+ sequential OSD IDs.
+
+ The usage of `purge-new` allows this to be done without requiring the
+ admin keyring (otherwise needed for destroy and purge commands)
+ """
+ if not osd_id:
+ # it means that it wasn't generated, so there is nothing to rollback here
+ return
+
+ # once here, this is an error condition that needs to be rolled back
+ terminal.error('Was unable to complete a new OSD, will rollback changes')
+ osd_name = 'osd.%s'
+ bootstrap_keyring = '/var/lib/ceph/bootstrap-osd/%s.keyring' % conf.cluster
+ cmd = [
+ 'ceph',
+ '--cluster', conf.cluster,
+ '--name', 'client.bootstrap-osd',
+ '--keyring', bootstrap_keyring,
+ 'osd', 'purge-new', osd_name % osd_id,
+ '--yes-i-really-mean-it',
+ ]
+
+ process.run(cmd)
+ Zap(['--destroy', '--osd-id', osd_id]).main()
+
+
+common_args = {
+ '--data': {
+ 'help': 'OSD data path. A physical device or logical volume',
+ 'required': True,
+ 'type': arg_validators.ValidDataDevice(as_string=True),
+ },
+ '--data-size': {
+ 'help': 'Size of data LV in case a device was passed in --data',
+ 'default': '0',
+ 'type': disk.Size.parse
+ },
+ '--data-slots': {
+ 'help': ('Intended number of slots on data device. The new OSD gets one'
+ ' of those slots or 1/nth of the available capacity'),
+ 'type': int,
+ 'default': 1,
+ },
+ '--osd-id': {
+ 'help': 'Reuse an existing OSD id',
+ 'default': None,
+ 'type': arg_validators.valid_osd_id,
+ },
+ '--osd-fsid': {
+ 'help': 'Reuse an existing OSD fsid',
+ 'default': None,
+ },
+ '--cluster-fsid': {
+ 'help': 'Specify the cluster fsid, useful when no ceph.conf is available',
+ 'default': None,
+ },
+ '--crush-device-class': {
+ 'dest': 'crush_device_class',
+ 'help': 'Crush device class to assign this OSD to',
+ 'default': "",
+ },
+ '--dmcrypt': {
+ 'action': 'store_true',
+ 'help': 'Enable device encryption via dm-crypt',
+ },
+ '--no-systemd': {
+ 'dest': 'no_systemd',
+ 'action': 'store_true',
+ 'help': 'Skip creating and enabling systemd units and starting OSD services when activating',
+ },
+}
+
+bluestore_args = {
+ '--bluestore': {
+ 'action': 'store_true',
+ 'help': 'Use the bluestore objectstore',
+ },
+ '--block.db': {
+ 'dest': 'block_db',
+ 'help': 'Path to bluestore block.db logical volume or device',
+ 'type': arg_validators.ValidDevice(as_string=True),
+ },
+ '--block.db-size': {
+ 'dest': 'block_db_size',
+ 'help': 'Size of block.db LV in case device was passed in --block.db',
+ 'default': '0',
+ 'type': disk.Size.parse
+ },
+ '--block.db-slots': {
+ 'dest': 'block_db_slots',
+ 'help': ('Intended number of slots on db device. The new OSD gets one'
+ ' of those slots or 1/nth of the available capacity'),
+ 'type': int,
+ 'default': 1,
+ },
+ '--block.wal': {
+ 'dest': 'block_wal',
+ 'help': 'Path to bluestore block.wal logical volume or device',
+ 'type': arg_validators.ValidDevice(as_string=True),
+ },
+ '--block.wal-size': {
+ 'dest': 'block_wal_size',
+ 'help': 'Size of block.wal LV in case device was passed in --block.wal',
+ 'default': '0',
+ 'type': disk.Size.parse
+ },
+ '--block.wal-slots': {
+ 'dest': 'block_wal_slots',
+ 'help': ('Intended number of slots on wal device. The new OSD gets one'
+ ' of those slots or 1/nth of the available capacity'),
+ 'type': int,
+ 'default': 1,
+ },
+}
+
+filestore_args = {
+ '--filestore': {
+ 'action': 'store_true',
+ 'help': 'Use the filestore objectstore',
+ },
+ '--journal': {
+ 'help': 'A logical volume (vg_name/lv_name), or path to a device',
+ 'type': arg_validators.ValidDevice(as_string=True),
+ },
+ '--journal-size': {
+ 'help': 'Size of journal LV in case a raw block device was passed in --journal',
+ 'default': '0',
+ 'type': disk.Size.parse
+ },
+ '--journal-slots': {
+ 'help': ('Intended number of slots on journal device. The new OSD gets one'
+ ' of those slots or 1/nth of the available capacity'),
+ 'type': int,
+ 'default': 1,
+ },
+}
+
+
+def get_default_args():
+ defaults = {}
+ def format_name(name):
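+ # e.g. '--block.db-size' becomes 'block_db_size'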
+ return name.strip('-').replace('-', '_').replace('.', '_')
+ for argset in (common_args, filestore_args, bluestore_args):
+ defaults.update({format_name(name): val.get('default', None) for name, val in argset.items()})
+ return defaults
+
+
+def common_parser(prog, description):
+ """
+ Both prepare and create share the same parser, those are defined here to
+ avoid duplication
+ """
+ parser = argparse.ArgumentParser(
+ prog=prog,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=description,
+ )
+
+ filestore_group = parser.add_argument_group('filestore')
+ bluestore_group = parser.add_argument_group('bluestore')
+
+ for name, kwargs in common_args.items():
+ parser.add_argument(name, **kwargs)
+
+ for name, kwargs in bluestore_args.items():
+ bluestore_group.add_argument(name, **kwargs)
+
+ for name, kwargs in filestore_args.items():
+ filestore_group.add_argument(name, **kwargs)
+
+ # Do not parse args here, so that consumers can do something before
+ # parsing triggers argparse behavior
+ return parser
+
+
+create_parser = common_parser # noqa
+prepare_parser = common_parser # noqa
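
The tables above feed both parser construction and get_default_args(). The
dest-name normalization is small enough to sketch standalone; the two-entry
table below is a toy stand-in for the real ones (which pull in
arg_validators and disk)::

    def format_name(name):
        return name.strip('-').replace('-', '_').replace('.', '_')

    toy_args = {
        '--data': {'default': None},
        '--block.db-size': {'default': '0'},
    }
    defaults = {format_name(n): v.get('default') for n, v in toy_args.items()}
    print(defaults)  # {'data': None, 'block_db_size': '0'}
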
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/create.py b/src/ceph-volume/ceph_volume/devices/lvm/create.py
new file mode 100644
index 000000000..af2cd96c0
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/lvm/create.py
@@ -0,0 +1,77 @@
+from __future__ import print_function
+from textwrap import dedent
+import logging
+from ceph_volume.util import system
+from ceph_volume.util.arg_validators import exclude_group_options
+from ceph_volume import decorators, terminal
+from .common import create_parser, rollback_osd
+from .prepare import Prepare
+from .activate import Activate
+
+logger = logging.getLogger(__name__)
+
+
+class Create(object):
+
+ help = 'Create a new OSD from an LVM device'
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ @decorators.needs_root
+ def create(self, args):
+ if not args.osd_fsid:
+ args.osd_fsid = system.generate_uuid()
+ prepare_step = Prepare([])
+ prepare_step.safe_prepare(args)
+ osd_id = prepare_step.osd_id
+ try:
+ # we try this for activate only when 'creating' an OSD, because a rollback should not
+ # happen when doing normal activation. For example when starting an OSD, systemd will call
+ # activate, which would never need to be rolled back.
+ Activate([]).activate(args)
+ except Exception:
+ logger.exception('lvm activate was unable to complete, while creating the OSD')
+ logger.info('will rollback OSD ID creation')
+ rollback_osd(args, osd_id)
+ raise
+ terminal.success("ceph-volume lvm create successful for: %s" % args.data)
+
+ def main(self):
+ sub_command_help = dedent("""
+ Create an OSD by assigning an ID and FSID, registering them with the
+ cluster, formatting and mounting the volume, adding
+ all the metadata to the logical volumes using LVM tags, and starting
+ the OSD daemon. This is a convenience command that combines the prepare
+ and activate steps.
+
+ Encryption is supported via dmcrypt and the --dmcrypt flag.
+
+ Existing logical volume (lv):
+
+ ceph-volume lvm create --data {vg/lv}
+
+ Existing block device (a logical volume will be created):
+
+ ceph-volume lvm create --data /path/to/device
+
+ Optionally, db and wal block devices, partitions or logical volumes can
+ be consumed. A device will get a logical volume; partitions and existing
+ logical volumes will be used as-is:
+
+ ceph-volume lvm create --data {vg/lv} --block.wal {partition} --block.db {/path/to/device}
+ """)
+ parser = create_parser(
+ prog='ceph-volume lvm create',
+ description=sub_command_help,
+ )
+ if len(self.argv) == 0:
+ print(sub_command_help)
+ return
+ exclude_group_options(parser, groups=['filestore', 'bluestore'], argv=self.argv)
+ args = parser.parse_args(self.argv)
+ # Default to bluestore here since defaulting it in add_argument may
+ # cause both to be True
+ if not args.bluestore and not args.filestore:
+ args.bluestore = True
+ self.create(args)
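
The create-time rollback above reduces to the following shape, sketched with
stand-in callables rather than the real Prepare/Activate/rollback_osd::

    import logging

    logger = logging.getLogger(__name__)

    def create_osd(prepare, activate, rollback, args):
        osd_id = prepare(args)  # prepare failures are handled by safe_prepare
        try:
            # only create() rolls activation back; plain activation
            # (e.g. invoked by systemd) never needs a rollback
            activate(args)
        except Exception:
            logger.exception('activate failed, rolling back OSD %s', osd_id)
            rollback(args, osd_id)
            raise
        return osd_id
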
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/deactivate.py b/src/ceph-volume/ceph_volume/devices/lvm/deactivate.py
new file mode 100644
index 000000000..46846a1dc
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/lvm/deactivate.py
@@ -0,0 +1,88 @@
+import argparse
+import logging
+import sys
+from textwrap import dedent
+from ceph_volume import conf
+from ceph_volume.util import encryption, system
+from ceph_volume.api.lvm import get_lvs_by_tag
+
+logger = logging.getLogger(__name__)
+
+
+def deactivate_osd(osd_id=None, osd_uuid=None):
+
+ lvs = []
+ if osd_uuid is not None:
+ lvs = get_lvs_by_tag('ceph.osd_fsid={}'.format(osd_uuid))
+ osd_id = next(lv.tags['ceph.osd_id'] for lv in lvs)
+ else:
+ lvs = get_lvs_by_tag('ceph.osd_id={}'.format(osd_id))
+
+ data_lv = next(lv for lv in lvs if lv.tags['ceph.type'] in ['data', 'block'])
+
+ conf.cluster = data_lv.tags['ceph.cluster_name']
+ logger.debug('Found cluster name {}'.format(conf.cluster))
+
+ tmpfs_path = '/var/lib/ceph/osd/{}-{}'.format(conf.cluster, osd_id)
+ system.unmount_tmpfs(tmpfs_path)
+
+ for lv in lvs:
+ if lv.tags.get('ceph.encrypted', '0') == '1':
+ encryption.dmcrypt_close(lv.lv_uuid)
+
+
+class Deactivate(object):
+
+ help = 'Deactivate OSDs'
+
+ def deactivate(self, args=None):
+ if args:
+ self.args = args
+ try:
+ deactivate_osd(self.args.osd_id, self.args.osd_uuid)
+ except StopIteration:
+ logger.error(('No data or block LV found for OSD '
+ '{}').format(self.args.osd_id))
+ sys.exit(1)
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ def main(self):
+ sub_command_help = dedent("""
+ Deactivate unmounts an OSD's tmpfs and closes any dm-crypt devices.
+
+ ceph-volume lvm deactivate {ID} {FSID}
+
+ """)
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume lvm deactivate',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=sub_command_help,
+ )
+
+ parser.add_argument(
+ 'osd_id',
+ nargs='?',
+ help='The ID of the OSD'
+ )
+ parser.add_argument(
+ 'osd_uuid',
+ nargs='?',
+ help='The UUID (fsid) of the OSD; takes precedence over osd_id'
+ )
+ # parser.add_argument(
+ # '--all',
+ # action='store_true',
+ # help='Deactivate all OSD volumes found in the system',
+ # )
+ if len(self.argv) == 0:
+ print(sub_command_help)
+ return
+ args = parser.parse_args(self.argv)
+ # at least one identifier is required to locate the OSD
+ if not args.osd_id and not args.osd_uuid:
+ raise ValueError('Cannot identify OSD, pass either osd_id or osd_uuid')
+ self.deactivate(args)
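
The StopIteration handler above exists because deactivate_osd() locates the
data/block LV with next() over a generator, which raises StopIteration when
nothing matches. A self-contained illustration, with plain dicts standing in
for LV objects::

    lvs = [{'ceph.type': 'wal'}]  # no data/block LV present
    try:
        data_lv = next(lv for lv in lvs
                       if lv['ceph.type'] in ('data', 'block'))
    except StopIteration:
        print('No data or block LV found for OSD')
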
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/listing.py b/src/ceph-volume/ceph_volume/devices/lvm/listing.py
new file mode 100644
index 000000000..c16afdaa7
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/lvm/listing.py
@@ -0,0 +1,223 @@
+from __future__ import print_function
+import argparse
+import json
+import logging
+from textwrap import dedent
+from ceph_volume import decorators
+from ceph_volume.api import lvm as api
+
+logger = logging.getLogger(__name__)
+
+
+osd_list_header_template = """\n
+{osd_id:=^20}"""
+
+
+osd_device_header_template = """
+
+ {type: <13} {path}
+"""
+
+device_metadata_item_template = """
+ {tag_name: <25} {value}"""
+
+
+def readable_tag(tag):
+ actual_name = tag.split('.')[-1]
+ return actual_name.replace('_', ' ')
+
+
+def pretty_report(report):
+ output = []
+ for osd_id, devices in sorted(report.items()):
+ output.append(
+ osd_list_header_template.format(osd_id=" osd.%s " % osd_id)
+ )
+ for device in devices:
+ output.append(
+ osd_device_header_template.format(
+ type='[%s]' % device['type'],
+ path=device['path']
+ )
+ )
+ for tag_name, value in sorted(device.get('tags', {}).items()):
+ output.append(
+ device_metadata_item_template.format(
+ tag_name=readable_tag(tag_name),
+ value=value
+ )
+ )
+ if device.get('devices'):
+ output.append(
+ device_metadata_item_template.format(
+ tag_name='devices',
+ value=','.join(device['devices'])
+ )
+ )
+
+ print(''.join(output))
+
+
+def direct_report():
+ """
+ Other non-cli consumers of listing information will want to consume the
+ report without the need to parse arguments or other flags. This helper
+ bypasses the need to deal with the class interface which is meant for cli
+ handling.
+ """
+ return List([]).full_report()
+
+
+# TODO: Perhaps, get rid of this class and simplify this module further?
+class List(object):
+
+ help = 'list logical volumes and devices associated with Ceph'
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ @decorators.needs_root
+ def list(self, args):
+ report = self.single_report(args.device) if args.device else \
+ self.full_report()
+ if args.format == 'json':
+ # If the report is empty, we don't return a non-zero exit status
+ # because it is assumed this is going to be consumed by automated
+ # systems like ceph-ansible which would be forced to ignore the
+ # non-zero exit status if all they need is the information in the
+ # JSON object
+ print(json.dumps(report, indent=4, sort_keys=True))
+ else:
+ if not report:
+ raise SystemExit('No valid Ceph lvm devices found')
+ pretty_report(report)
+
+ def create_report(self, lvs):
+ """
+ Create a report for LVM dev(s) passed. Returns '{}' to denote failure.
+ """
+
+ report = {}
+
+ pvs = api.get_pvs()
+
+ for lv in lvs:
+ if not api.is_ceph_device(lv):
+ continue
+
+ osd_id = lv.tags['ceph.osd_id']
+ report.setdefault(osd_id, [])
+ lv_report = lv.as_dict()
+
+ lv_report['devices'] = [pv.name for pv in pvs if pv.lv_uuid == lv.lv_uuid] if pvs else []
+ report[osd_id].append(lv_report)
+
+ phys_devs = self.create_report_non_lv_device(lv)
+ if phys_devs:
+ report[osd_id].append(phys_devs)
+
+ return report
+
+ def create_report_non_lv_device(self, lv):
+ report = {}
+ if lv.tags.get('ceph.type', '') in ['data', 'block']:
+ for dev_type in ['journal', 'wal', 'db']:
+ dev = lv.tags.get('ceph.{}_device'.format(dev_type), '')
+ # counting / in the device name seems brittle but should work,
+ # lvs will have 3
+ if dev and dev.count('/') == 2:
+ device_uuid = lv.tags.get('ceph.{}_uuid'.format(dev_type))
+ report = {'tags': {'PARTUUID': device_uuid},
+ 'type': dev_type,
+ 'path': dev}
+ return report
+
+ def full_report(self):
+ """
+ Create a report of all Ceph LVs. Returns '{}' to denote failure.
+ """
+ return self.create_report(api.get_lvs())
+
+ def single_report(self, arg):
+ """
+ Generate a report for a single device. This can be either a logical
+ volume in the form of vg/lv, a device with an absolute path like
+ /dev/sda1 or /dev/sda, or a list of devices under the same OSD ID.
+
+ Return value '{}' denotes failure.
+ """
+ if isinstance(arg, int) or arg.isdigit():
+ lv = api.get_lvs_from_osd_id(arg)
+ elif arg[0] == '/':
+ lv = api.get_lvs_from_path(arg)
+ else:
+ lv = [api.get_single_lv(filters={'lv_name': arg.split('/')[1]})]
+
+ report = self.create_report(lv)
+
+ if not report:
+ # check if the device is a non-lvm journal or wal/db
+ for dev_type in ['journal', 'wal', 'db']:
+ lvs = api.get_lvs(tags={
+ 'ceph.{}_device'.format(dev_type): arg})
+ if lvs:
+ # just taking the first lv here should work
+ lv = lvs[0]
+ phys_dev = self.create_report_non_lv_device(lv)
+ osd_id = lv.tags.get('ceph.osd_id')
+ if osd_id:
+ report[osd_id] = [phys_dev]
+
+ return report
+
+ def main(self):
+ sub_command_help = dedent("""
+ List devices or logical volumes associated with Ceph. An association is
+ determined if a device has information relating to an OSD. This is
+ verified by querying LVM's metadata and correlating it with devices.
+
+ The lvs associated with the OSD need to have been prepared previously,
+ so that all needed tags and metadata exist.
+
+ Full listing of all system devices associated with a cluster::
+
+ ceph-volume lvm list
+
+ List devices under the same OSD ID::
+
+ ceph-volume lvm list <OSD-ID>
+
+ List a particular device, reporting all metadata about it::
+
+ ceph-volume lvm list /dev/sda1
+
+ List a logical volume, along with all its metadata (vg is a volume
+ group, and lv the logical volume name)::
+
+ ceph-volume lvm list {vg/lv}
+ """)
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume lvm list',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=sub_command_help,
+ )
+
+ parser.add_argument(
+ 'device',
+ metavar='DEVICE',
+ nargs='?',
+ help='Path to an lv (as vg/lv) or to a device like /dev/sda1'
+ )
+
+ parser.add_argument(
+ '--format',
+ help='output format, defaults to "pretty"',
+ default='pretty',
+ choices=['json', 'pretty'],
+ )
+
+ args = parser.parse_args(self.argv)
+ self.list(args)
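
For reference, create_report() builds the mapping of OSD id to device
entries that both the JSON and pretty outputs consume; the values below are
illustrative only::

    report = {
        '0': [{
            'type': 'block',
            'path': '/dev/ceph-vg/osd-block-0',
            'tags': {'ceph.osd_id': '0', 'ceph.cluster_name': 'ceph'},
            'devices': ['/dev/sda'],
        }],
    }
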
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/main.py b/src/ceph-volume/ceph_volume/devices/lvm/main.py
new file mode 100644
index 000000000..39947454d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/lvm/main.py
@@ -0,0 +1,54 @@
+import argparse
+from textwrap import dedent
+from ceph_volume import terminal
+from . import activate
+from . import deactivate
+from . import prepare
+from . import create
+from . import trigger
+from . import listing
+from . import zap
+from . import batch
+from . import migrate
+
+
+class LVM(object):
+
+ help = 'Use LVM and LVM-based technologies to deploy OSDs'
+
+ _help = dedent("""
+ Use LVM and LVM-based technologies to deploy OSDs
+
+ {sub_help}
+ """)
+
+ mapper = {
+ 'activate': activate.Activate,
+ 'deactivate': deactivate.Deactivate,
+ 'batch': batch.Batch,
+ 'prepare': prepare.Prepare,
+ 'create': create.Create,
+ 'trigger': trigger.Trigger,
+ 'list': listing.List,
+ 'zap': zap.Zap,
+ 'migrate': migrate.Migrate,
+ 'new-wal': migrate.NewWAL,
+ 'new-db': migrate.NewDB,
+ }
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ def print_help(self, sub_help):
+ return self._help.format(sub_help=sub_help)
+
+ def main(self):
+ terminal.dispatch(self.mapper, self.argv)
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume lvm',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=self.print_help(terminal.subhelp(self.mapper)),
+ )
+ parser.parse_args(self.argv)
+ if len(self.argv) <= 1:
+ return parser.print_help()
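
terminal.dispatch() (defined elsewhere in this tree) pairs a CLI token with
the mapper above and hands the remaining argv to the matching subcommand;
roughly, and eliding its exact scanning and exit handling::

    def dispatch(mapper, argv):
        if argv and argv[0] in mapper:
            mapper[argv[0]](argv[1:]).main()
            raise SystemExit(0)
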
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/migrate.py b/src/ceph-volume/ceph_volume/devices/lvm/migrate.py
new file mode 100644
index 000000000..86159fd50
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/lvm/migrate.py
@@ -0,0 +1,693 @@
+from __future__ import print_function
+import argparse
+import logging
+import os
+from textwrap import dedent
+from ceph_volume.util import system, disk, merge_dict
+from ceph_volume.util.device import Device
+from ceph_volume.util.arg_validators import valid_osd_id
+from ceph_volume import decorators, terminal, process
+from ceph_volume.api import lvm as api
+from ceph_volume.systemd import systemctl
+
+
+logger = logging.getLogger(__name__)
+mlogger = terminal.MultiLogger(__name__)
+
+def get_cluster_name(osd_id, osd_fsid):
+ """
+ From an ``osd_id`` and/or an ``osd_fsid``, filter out all the LVs in the
+ system that match those tag values, then return cluster_name for the first
+ one.
+ """
+ lv_tags = {}
+ lv_tags['ceph.osd_id'] = osd_id
+ lv_tags['ceph.osd_fsid'] = osd_fsid
+
+ lvs = api.get_lvs(tags=lv_tags)
+ if not lvs:
+ mlogger.error(
+ 'Unable to find any LV for source OSD: id:{} fsid:{}'.format(
+ osd_id, osd_fsid) )
+ raise SystemExit('Unexpected error, terminating')
+ return next(iter(lvs)).tags["ceph.cluster_name"]
+
+def get_osd_path(osd_id, osd_fsid):
+ return '/var/lib/ceph/osd/{}-{}'.format(
+ get_cluster_name(osd_id, osd_fsid), osd_id)
+
+def find_associated_devices(osd_id, osd_fsid):
+ """
+ From an ``osd_id`` and/or an ``osd_fsid``, filter out all the LVs in the
+ system that match those tag values, further detect if any partitions are
+ part of the OSD, and then return the set of LVs and partitions (if any).
+ """
+ lv_tags = {}
+ lv_tags['ceph.osd_id'] = osd_id
+ lv_tags['ceph.osd_fsid'] = osd_fsid
+
+ lvs = api.get_lvs(tags=lv_tags)
+ if not lvs:
+ mlogger.error(
+ 'Unable to find any LV for source OSD: id:{} fsid:{}'.format(
+ osd_id, osd_fsid) )
+ raise SystemExit('Unexpected error, terminating')
+
+ devices = set(ensure_associated_lvs(lvs, lv_tags))
+ return [(Device(path), type) for path, type in devices if path]
+
+def ensure_associated_lvs(lvs, lv_tags):
+ """
+ Go through each LV and ensure if backing devices (journal, wal, block)
+ are LVs or partitions, so that they can be accurately reported.
+ """
+ # look for many LVs for each backing type, because it is possible to
+ # receive a filtering for osd.1, and have multiple failed deployments
+ # leaving many journals with osd.1 - usually, only a single LV will be
+ # returned
+
+ block_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'block'}))
+ db_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'db'}))
+ wal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'wal'}))
+ backing_devices = [(block_lvs, 'block'), (db_lvs, 'db'),
+ (wal_lvs, 'wal')]
+
+ verified_devices = []
+
+ for lv in lvs:
+ # go through each lv and append it, otherwise query `blkid` to find
+ # a physical device. Do this for each type (journal,db,wal) regardless
+ # if they have been processed in the previous LV, so that bad devices
+ # with the same ID can be caught
+ for ceph_lvs, type in backing_devices:
+
+ if ceph_lvs:
+ verified_devices.extend([(l.lv_path, type) for l in ceph_lvs])
+ continue
+
+ # must be a disk partition, by querying blkid by the uuid we are
+ # ensuring that the device path is always correct
+ try:
+ device_uuid = lv.tags['ceph.{}_uuid'.format(type)]
+ except KeyError:
+ # Bluestore will not have ceph.journal_uuid, and Filestore
+ # will not have ceph.db_uuid
+ continue
+
+ osd_device = disk.get_device_from_partuuid(device_uuid)
+ if not osd_device:
+ # if the osd_device is not found by the partuuid, then it is
+ # not possible to ensure this device exists anymore, so skip it
+ continue
+ verified_devices.append((osd_device, type))
+
+ return verified_devices
+
+class VolumeTagTracker(object):
+ def __init__(self, devices, target_lv):
+ self.target_lv = target_lv
+ self.data_device = self.db_device = self.wal_device = None
+ for device, type in devices:
+ if type == 'block':
+ self.data_device = device
+ elif type == 'db':
+ self.db_device = device
+ elif type == 'wal':
+ self.wal_device = device
+ if not self.data_device:
+ mlogger.error('Data device not found')
+ raise SystemExit(
+ "Unexpected error, terminating")
+ if not self.data_device.is_lv:
+ mlogger.error('Data device isn\'t LVM')
+ raise SystemExit(
+ "Unexpected error, terminating")
+
+ self.old_target_tags = self.target_lv.tags.copy()
+ self.old_data_tags = (
+ self.data_device.lv_api.tags.copy()
+ if self.data_device.is_lv else None)
+ self.old_db_tags = (
+ self.db_device.lv_api.tags.copy()
+ if self.db_device and self.db_device.is_lv else None)
+ self.old_wal_tags = (
+ self.wal_device.lv_api.tags.copy()
+ if self.wal_device and self.wal_device.is_lv else None)
+
+ def update_tags_when_lv_create(self, create_type):
+ tags = {}
+ if not self.data_device.is_lv:
+ mlogger.warning(
+ 'Data device is not LVM, wouldn\'t update LVM tags')
+ else:
+ tags["ceph.{}_uuid".format(create_type)] = self.target_lv.lv_uuid
+ tags["ceph.{}_device".format(create_type)] = self.target_lv.lv_path
+ self.data_device.lv_api.set_tags(tags)
+
+ tags = self.data_device.lv_api.tags.copy()
+ tags["ceph.type"] = create_type
+ self.target_lv.set_tags(tags)
+
+ aux_dev = None
+ if create_type == "db" and self.wal_device:
+ aux_dev = self.wal_device
+ elif create_type == "wal" and self.db_device:
+ aux_dev = self.db_device
+ else:
+ return
+ if not aux_dev.is_lv:
+ mlogger.warning(
+ '{} device is not LVM, wouldn\'t update LVM tags'.format(
+ create_type.upper()))
+ else:
+ tags = {}
+ tags["ceph.{}_uuid".format(create_type)] = self.target_lv.lv_uuid
+ tags["ceph.{}_device".format(create_type)] = self.target_lv.lv_path
+ aux_dev.lv_api.set_tags(tags)
+
+ def remove_lvs(self, source_devices, target_type):
+ remaining_devices = [self.data_device, self.db_device, self.wal_device]
+
+ outdated_tags = []
+ for device, type in source_devices:
+ if type == "block" or type == target_type:
+ continue
+ remaining_devices.remove(device)
+ if device.is_lv:
+ outdated_tags.append("ceph.{}_uuid".format(type))
+ outdated_tags.append("ceph.{}_device".format(type))
+ device.lv_api.clear_tags()
+ if len(outdated_tags) > 0:
+ for d in remaining_devices:
+ if d and d.is_lv:
+ d.lv_api.clear_tags(outdated_tags)
+
+ def replace_lvs(self, source_devices, target_type):
+ remaining_devices = [self.data_device]
+ if self.db_device:
+ remaining_devices.append(self.db_device)
+ if self.wal_device:
+ remaining_devices.append(self.wal_device)
+
+ outdated_tags = []
+ for device, type in source_devices:
+ if type == "block":
+ continue
+ remaining_devices.remove(device)
+ if device.is_lv:
+ outdated_tags.append("ceph.{}_uuid".format(type))
+ outdated_tags.append("ceph.{}_device".format(type))
+ device.lv_api.clear_tags()
+
+ new_tags = {}
+ new_tags["ceph.{}_uuid".format(target_type)] = self.target_lv.lv_uuid
+ new_tags["ceph.{}_device".format(target_type)] = self.target_lv.lv_path
+
+ for d in remaining_devices:
+ if d and d.is_lv:
+ if len(outdated_tags) > 0:
+ d.lv_api.clear_tags(outdated_tags)
+ d.lv_api.set_tags(new_tags)
+
+ if not self.data_device.is_lv:
+ mlogger.warning(
+ 'Data device is not LVM, wouldn\'t properly update target LVM tags')
+ else:
+ tags = self.data_device.lv_api.tags.copy()
+
+ tags["ceph.type"] = target_type
+ tags["ceph.{}_uuid".format(target_type)] = self.target_lv.lv_uuid
+ tags["ceph.{}_device".format(target_type)] = self.target_lv.lv_path
+ self.target_lv.set_tags(tags)
+
+ def undo(self):
+ mlogger.info(
+ 'Undoing lv tag set')
+ if self.data_device:
+ if self.old_data_tags:
+ self.data_device.lv_api.set_tags(self.old_data_tags)
+ else:
+ self.data_device.lv_api.clear_tags()
+ if self.db_device:
+ if self.old_db_tags:
+ self.db_device.lv_api.set_tags(self.old_db_tags)
+ else:
+ self.db_device.lv_api.clear_tags()
+ if self.wal_device:
+ if self.old_wal_tags:
+ self.wal_device.lv_api.set_tags(self.old_wal_tags)
+ else:
+ self.wal_device.lv_api.clear_tags()
+ if self.old_target_tags:
+ self.target_lv.set_tags(self.old_target_tags)
+ else:
+ self.target_lv.clear_tags()
+
+class Migrate(object):
+
+ help = 'Migrate BlueFS data from one LVM device to another'
+
+ def __init__(self, argv):
+ self.argv = argv
+ self.osd_id = None
+
+ def get_source_devices(self, devices, target_type=""):
+ ret = []
+ for device, type in devices:
+ if type == target_type:
+ continue
+ if type == 'block':
+ if 'data' not in self.args.from_:
+ continue
+ elif type == 'db':
+ if 'db' not in self.args.from_:
+ continue
+ elif type == 'wal':
+ if 'wal' not in self.args.from_:
+ continue
+ ret.append([device, type])
+ if ret == []:
+ mlogger.error('Source device list is empty')
+ raise SystemExit(
+ 'Unable to migrate to : {}'.format(self.args.target))
+ return ret
+
+ # ceph-bluestore-tool uses the following replacement rules
+ # (in the order of precedence, stop on the first match)
+ # if source list has DB volume - target device replaces it.
+ # if source list has WAL volume - target device replaces it.
+ # if source list has slow volume only - operation isn't permitted,
+ # requires explicit allocation via the new-db/new-wal command.
+ def get_target_type_by_source(self, devices):
+ ret = None
+ for device, type in devices:
+ if type == 'db':
+ return 'db'
+ elif type == 'wal':
+ ret = 'wal'
+ return ret
+
+ def get_filename_by_type(self, type):
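+ # maps a volume type to its symlink name inside the OSD directory:
+ # 'db' -> 'block.db', 'wal' -> 'block.wal', anything else -> 'block'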
+ filename = 'block'
+ if type == 'db' or type == 'wal':
+ filename += '.' + type
+ return filename
+
+ def get_source_args(self, osd_path, devices):
+ ret = []
+ for device, type in devices:
+ ret = ret + ["--devs-source", os.path.join(
+ osd_path, self.get_filename_by_type(type))]
+ return ret
+
+ @decorators.needs_root
+ def migrate_to_new(self, osd_id, osd_fsid, devices, target_lv):
+ source_devices = self.get_source_devices(devices)
+ target_type = self.get_target_type_by_source(source_devices)
+ if not target_type:
+ mlogger.error(
+ "Unable to determine new volume type,"
+ " please use new-db or new-wal command before.")
+ raise SystemExit(
+ "Unable to migrate to : {}".format(self.args.target))
+
+ target_path = target_lv.lv_path
+
+ try:
+ tag_tracker = VolumeTagTracker(devices, target_lv)
+ # we need to update lvm tags for all the remaining volumes
+ # and clear them for the ones being removed
+
+ # ceph-bluestore-tool removes source volume(s) other than block one
+ # and attaches target one after successful migration
+ tag_tracker.replace_lvs(source_devices, target_type)
+
+ osd_path = get_osd_path(osd_id, osd_fsid)
+ source_args = self.get_source_args(osd_path, source_devices)
+ mlogger.info("Migrate to new, Source: {} Target: {}".format(
+ source_args, target_path))
+ stdout, stderr, exit_code = process.call([
+ 'ceph-bluestore-tool',
+ '--path',
+ osd_path,
+ '--dev-target',
+ target_path,
+ '--command',
+ 'bluefs-bdev-migrate'] +
+ source_args)
+ if exit_code != 0:
+ mlogger.error(
+ 'Failed to migrate device, error code:{}'.format(exit_code))
+ raise SystemExit(
+ 'Failed to migrate to : {}'.format(self.args.target))
+ else:
+ system.chown(os.path.join(osd_path, "block.{}".format(
+ target_type)))
+ terminal.success('Migration successful.')
+ except Exception:
+ tag_tracker.undo()
+ raise
+
+ return
+
+ @decorators.needs_root
+ def migrate_to_existing(self, osd_id, osd_fsid, devices, target_lv):
+ target_type = target_lv.tags["ceph.type"]
+ if target_type == "wal":
+ mlogger.error("Migrate to WAL is not supported")
+ raise SystemExit(
+ "Unable to migrate to : {}".format(self.args.target))
+ target_filename = self.get_filename_by_type(target_type)
+ if (target_filename == ""):
+ mlogger.error(
+ "Target Logical Volume doesn't have proper volume type "
+ "(ceph.type LVM tag): {}".format(target_type))
+ raise SystemExit(
+ "Unable to migrate to : {}".format(self.args.target))
+
+ osd_path = get_osd_path(osd_id, osd_fsid)
+ source_devices = self.get_source_devices(devices, target_type)
+ target_path = os.path.join(osd_path, target_filename)
+ tag_tracker = VolumeTagTracker(devices, target_lv)
+
+ try:
+ # ceph-bluestore-tool removes source volume(s) other than
+ # block and target ones after successful migration
+ tag_tracker.remove_lvs(source_devices, target_type)
+ source_args = self.get_source_args(osd_path, source_devices)
+ mlogger.info("Migrate to existing, Source: {} Target: {}".format(
+ source_args, target_path))
+ stdout, stderr, exit_code = process.call([
+ 'ceph-bluestore-tool',
+ '--path',
+ osd_path,
+ '--dev-target',
+ target_path,
+ '--command',
+ 'bluefs-bdev-migrate'] +
+ source_args)
+ if exit_code != 0:
+ mlogger.error(
+ 'Failed to migrate device, error code:{}'.format(exit_code))
+ raise SystemExit(
+ 'Failed to migrate to : {}'.format(self.args.target))
+ else:
+ terminal.success('Migration successful.')
+ except Exception:
+ tag_tracker.undo()
+ raise
+
+ return
+
+ @decorators.needs_root
+ def migrate_osd(self):
+ if self.args.osd_id and not self.args.no_systemd:
+ osd_is_running = systemctl.osd_is_active(self.args.osd_id)
+ if osd_is_running:
+ mlogger.error('OSD is running, stop it with: '
+ 'systemctl stop ceph-osd@{}'.format(
+ self.args.osd_id))
+ raise SystemExit(
+ 'Unable to migrate devices associated with OSD ID: {}'
+ .format(self.args.osd_id))
+
+ target_lv = api.get_lv_by_fullname(self.args.target)
+ if not target_lv:
+ mlogger.error(
+ 'Target path "{}" is not a Logical Volume'.format(
+ self.args.target))
+ raise SystemExit(
+ 'Unable to migrate to : {}'.format(self.args.target))
+ devices = find_associated_devices(self.args.osd_id, self.args.osd_fsid)
+ if (not target_lv.used_by_ceph):
+ self.migrate_to_new(self.args.osd_id, self.args.osd_fsid,
+ devices,
+ target_lv)
+ else:
+ if (target_lv.tags['ceph.osd_id'] != self.args.osd_id or
+ target_lv.tags['ceph.osd_fsid'] != self.args.osd_fsid):
+ mlogger.error(
+ 'Target Logical Volume isn\'t used by the specified OSD: '
+ '{} FSID: {}'.format(self.args.osd_id,
+ self.args.osd_fsid))
+ raise SystemExit(
+ 'Unable to migrate to : {}'.format(self.args.target))
+
+ self.migrate_to_existing(self.args.osd_id, self.args.osd_fsid,
+ devices,
+ target_lv)
+
+ def make_parser(self, prog, sub_command_help):
+ parser = argparse.ArgumentParser(
+ prog=prog,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=sub_command_help,
+ )
+
+ parser.add_argument(
+ '--osd-id',
+ required=True,
+ help='Specify the ID of the OSD whose associated devices will be migrated',
+ type=valid_osd_id
+ )
+
+ parser.add_argument(
+ '--osd-fsid',
+ required=True,
+ help='Specify the FSID of the OSD whose associated devices will be migrated',
+ )
+ parser.add_argument(
+ '--target',
+ required=True,
+ help='Specify target Logical Volume (LV) to migrate data to',
+ )
+ parser.add_argument(
+ '--from',
+ nargs='*',
+ dest='from_',
+ required=True,
+ choices=['data', 'db', 'wal'],
+ help='Source volume type(s) to copy BlueFS data from: data, db and/or wal',
+ )
+ parser.add_argument(
+ '--no-systemd',
+ dest='no_systemd',
+ action='store_true',
+ help='Skip checking OSD systemd unit',
+ )
+ return parser
+
+ def main(self):
+ sub_command_help = dedent("""
+ Moves BlueFS data from source volume(s) to the target volume; source
+ volumes (except the main, i.e. data or block, one) are removed on
+ success. Only LVM volumes are permitted as the target, either one
+ already attached to the OSD or a new logical volume. In the latter case
+ it is attached to the OSD, replacing one of the source devices. The
+ following replacement rules apply (in order of precedence, stopping at
+ the first match):
+ * if source list has DB volume - target device replaces it.
+ * if source list has WAL volume - target device replaces it.
+ * if source list has slow volume only - operation is not permitted,
+ requires explicit allocation via new-db/new-wal command.
+
+ Example calls for supported scenarios:
+
+ Moves BlueFS data from main device to LV already attached as DB:
+
+ ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data --target vgname/db
+
+ Moves BlueFS data from shared main device to LV which will be attached
+ as a new DB:
+
+ ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data --target vgname/new_db
+
+ Moves BlueFS data from DB device to new LV, DB is replaced:
+
+ ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from db --target vgname/new_db
+
+ Moves BlueFS data from main and DB devices to new LV, DB is replaced:
+
+ ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data db --target vgname/new_db
+
+ Moves BlueFS data from main, DB and WAL devices to new LV, WAL is
+ removed and DB is replaced:
+
+ ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data db wal --target vgname/new_db
+
+ Moves BlueFS data from main, DB and WAL devices to main device, WAL
+ and DB are removed:
+
+ ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from db wal --target vgname/data
+
+ """)
+
+ parser = self.make_parser('ceph-volume lvm migrate', sub_command_help)
+
+ if len(self.argv) == 0:
+ print(sub_command_help)
+ return
+
+ self.args = parser.parse_args(self.argv)
+
+ self.migrate_osd()
+
+class NewVolume(object):
+ def __init__(self, create_type, argv):
+ self.create_type = create_type
+ self.argv = argv
+
+ def make_parser(self, prog, sub_command_help):
+ parser = argparse.ArgumentParser(
+ prog=prog,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=sub_command_help,
+ )
+
+ parser.add_argument(
+ '--osd-id',
+ required=True,
+ help='Specify an OSD ID to attach new volume to',
+ type=valid_osd_id,
+ )
+
+ parser.add_argument(
+ '--osd-fsid',
+ required=True,
+ help='Specify an OSD FSID to attach new volume to',
+ )
+ parser.add_argument(
+ '--target',
+ required=True,
+ help='Specify target Logical Volume (LV) to attach',
+ )
+ parser.add_argument(
+ '--no-systemd',
+ dest='no_systemd',
+ action='store_true',
+ help='Skip checking OSD systemd unit',
+ )
+ return parser
+
+ @decorators.needs_root
+ def make_new_volume(self, osd_id, osd_fsid, devices, target_lv):
+ osd_path = get_osd_path(osd_id, osd_fsid)
+ mlogger.info(
+ 'Making new volume at {} for OSD: {} ({})'.format(
+ target_lv.lv_path, osd_id, osd_path))
+ tag_tracker = VolumeTagTracker(devices, target_lv)
+
+ try:
+ tag_tracker.update_tags_when_lv_create(self.create_type)
+
+ stdout, stderr, exit_code = process.call([
+ 'ceph-bluestore-tool',
+ '--path',
+ osd_path,
+ '--dev-target',
+ target_lv.lv_path,
+ '--command',
+ 'bluefs-bdev-new-{}'.format(self.create_type)
+ ])
+ if exit_code != 0:
+ mlogger.error(
+ 'failed to attach new volume, error code:{}'.format(
+ exit_code))
+ raise SystemExit(
+ "Failed to attach new volume: {}".format(
+ self.args.target))
+ else:
+ system.chown(os.path.join(osd_path, "block.{}".format(
+ self.create_type)))
+ terminal.success('New volume attached.')
+ except Exception:
+ tag_tracker.undo()
+ raise
+ return
+
+ @decorators.needs_root
+ def new_volume(self):
+ if self.args.osd_id and not self.args.no_systemd:
+ osd_is_running = systemctl.osd_is_active(self.args.osd_id)
+ if osd_is_running:
+ mlogger.error('OSD is running, stop it with:'
+ ' systemctl stop ceph-osd@{}'.format(self.args.osd_id))
+ raise SystemExit(
+ 'Unable to attach new volume for OSD: {}'.format(
+ self.args.osd_id))
+
+ target_lv = api.get_lv_by_fullname(self.args.target)
+ if not target_lv:
+ mlogger.error(
+ 'Target path {} is not a Logical Volume'.format(
+ self.args.target))
+ raise SystemExit(
+ 'Unable to attach new volume : {}'.format(self.args.target))
+ if target_lv.used_by_ceph:
+ mlogger.error(
+ 'Target Logical Volume is already used by ceph: {}'.format(
+ self.args.target))
+ raise SystemExit(
+ 'Unable to attach new volume : {}'.format(self.args.target))
+ else:
+ devices = find_associated_devices(self.args.osd_id,
+ self.args.osd_fsid)
+ self.make_new_volume(
+ self.args.osd_id,
+ self.args.osd_fsid,
+ devices,
+ target_lv)
+
+class NewWAL(NewVolume):
+
+ help = 'Allocate new WAL volume for OSD at specified Logical Volume'
+
+ def __init__(self, argv):
+ super(NewWAL, self).__init__("wal", argv)
+
+ def main(self):
+ sub_command_help = dedent("""
+ Attaches the given logical volume to the given OSD as a WAL volume.
+ Logical volume format is vg/lv. Fails if the OSD already has a WAL volume attached.
+
+ Example:
+
+ Attach vgname/lvname as a WAL volume to OSD 1
+
+ ceph-volume lvm new-wal --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D --target vgname/new_wal
+ """)
+ parser = self.make_parser('ceph-volume lvm new-wal', sub_command_help)
+
+ if len(self.argv) == 0:
+ print(sub_command_help)
+ return
+
+ self.args = parser.parse_args(self.argv)
+
+ self.new_volume()
+
+class NewDB(NewVolume):
+
+ help = 'Allocate new DB volume for OSD at specified Logical Volume'
+
+ def __init__(self, argv):
+ super(NewDB, self).__init__("db", argv)
+
+ def main(self):
+ sub_command_help = dedent("""
+ Attaches the given logical volume to the given OSD as a DB volume.
+ Logical volume format is vg/lv. Fails if the OSD already has a DB volume attached.
+
+ Example:
+
+ Attach vgname/lvname as a DB volume to OSD 1
+
+ ceph-volume lvm new-db --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D --target vgname/new_db
+ """)
+
+ parser = self.make_parser('ceph-volume lvm new-db', sub_command_help)
+ if len(self.argv) == 0:
+ print(sub_command_help)
+ return
+ self.args = parser.parse_args(self.argv)
+
+ self.new_volume()
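
The target-type precedence described in the comments above (a DB source
always wins, then WAL, while a lone slow volume is rejected) reduces to the
following, shown here with bare type strings::

    def target_type(source_types):
        if 'db' in source_types:
            return 'db'
        if 'wal' in source_types:
            return 'wal'
        return None  # data/block only: use new-db/new-wal first

    assert target_type(['data', 'wal', 'db']) == 'db'
    assert target_type(['data', 'wal']) == 'wal'
    assert target_type(['data']) is None
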
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/prepare.py b/src/ceph-volume/ceph_volume/devices/lvm/prepare.py
new file mode 100644
index 000000000..2f715fdba
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/lvm/prepare.py
@@ -0,0 +1,441 @@
+from __future__ import print_function
+import json
+import logging
+from textwrap import dedent
+from ceph_volume.util import prepare as prepare_utils
+from ceph_volume.util import encryption as encryption_utils
+from ceph_volume.util import system, disk
+from ceph_volume.util.arg_validators import exclude_group_options
+from ceph_volume import conf, decorators, terminal
+from ceph_volume.api import lvm as api
+from .common import prepare_parser, rollback_osd
+
+
+logger = logging.getLogger(__name__)
+
+
+def prepare_dmcrypt(key, device, device_type, tags):
+ """
+ Helper for devices that are encrypted. The operations needed for
+ block, db, wal, or data/journal devices are all the same
+ """
+ if not device:
+ return ''
+ tag_name = 'ceph.%s_uuid' % device_type
+ uuid = tags[tag_name]
+ # format data device
+ encryption_utils.luks_format(
+ key,
+ device
+ )
+ encryption_utils.luks_open(
+ key,
+ device,
+ uuid
+ )
+
+ return '/dev/mapper/%s' % uuid
+
+
+def prepare_filestore(device, journal, secrets, tags, osd_id, fsid):
+ """
+ :param device: The name of the logical volume to work with
+ :param journal: similar to device but can also be a regular/plain disk
+ :param secrets: A dict with the secrets needed to create the osd (e.g. cephx)
+ :param osd_id: The OSD id
+ :param fsid: The OSD fsid, also known as the OSD UUID
+ """
+ cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key())
+
+ # encryption-only operations
+ if secrets.get('dmcrypt_key'):
+ # format and open ('decrypt' devices) and re-assign the device and journal
+ # variables so that the rest of the process can use the mapper paths
+ key = secrets['dmcrypt_key']
+ device = prepare_dmcrypt(key, device, 'data', tags)
+ journal = prepare_dmcrypt(key, journal, 'journal', tags)
+
+ # vdo detection
+ is_vdo = api.is_vdo(device)
+ # create the directory
+ prepare_utils.create_osd_path(osd_id)
+ # format the device
+ prepare_utils.format_device(device)
+ # mount the data device
+ prepare_utils.mount_osd(device, osd_id, is_vdo=is_vdo)
+ # symlink the journal
+ prepare_utils.link_journal(journal, osd_id)
+ # get the latest monmap
+ prepare_utils.get_monmap(osd_id)
+ # prepare the osd filesystem
+ prepare_utils.osd_mkfs_filestore(osd_id, fsid, cephx_secret)
+ # write the OSD keyring if it doesn't exist already
+ prepare_utils.write_keyring(osd_id, cephx_secret)
+ if secrets.get('dmcrypt_key'):
+ # if the device is going to get activated right away, this can be done
+ # here, otherwise it will be recreated
+ encryption_utils.write_lockbox_keyring(
+ osd_id,
+ fsid,
+ tags['ceph.cephx_lockbox_secret']
+ )
+
+
+def prepare_bluestore(block, wal, db, secrets, tags, osd_id, fsid):
+ """
+ :param block: The name of the logical volume for the bluestore data
+ :param wal: a regular/plain disk or logical volume, to be used for block.wal
+ :param db: a regular/plain disk or logical volume, to be used for block.db
+ :param secrets: A dict with the secrets needed to create the osd (e.g. cephx)
+ :param osd_id: The OSD id
+ :param fsid: The OSD fsid, also known as the OSD UUID
+ """
+ cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key())
+ # encryption-only operations
+ if secrets.get('dmcrypt_key'):
+ # If encrypted, there is no need to create the lockbox keyring file because
+ # bluestore re-creates the files and does not have support for other files
+ # like the custom lockbox one. This will need to be done on activation.
+ # format and open ('decrypt' devices) and re-assign the device and journal
+ # variables so that the rest of the process can use the mapper paths
+ key = secrets['dmcrypt_key']
+ block = prepare_dmcrypt(key, block, 'block', tags)
+ wal = prepare_dmcrypt(key, wal, 'wal', tags)
+ db = prepare_dmcrypt(key, db, 'db', tags)
+
+ # create the directory
+ prepare_utils.create_osd_path(osd_id, tmpfs=True)
+ # symlink the block
+ prepare_utils.link_block(block, osd_id)
+ # get the latest monmap
+ prepare_utils.get_monmap(osd_id)
+ # write the OSD keyring if it doesn't exist already
+ prepare_utils.write_keyring(osd_id, cephx_secret)
+ # prepare the osd filesystem
+ prepare_utils.osd_mkfs_bluestore(
+ osd_id, fsid,
+ keyring=cephx_secret,
+ wal=wal,
+ db=db
+ )
+
+
+class Prepare(object):
+
+ help = 'Format an LVM device and associate it with an OSD'
+
+ def __init__(self, argv):
+ self.argv = argv
+ self.osd_id = None
+
+ def get_ptuuid(self, argument):
+ uuid = disk.get_partuuid(argument)
+ if not uuid:
+ terminal.error('blkid could not detect a PARTUUID for device: %s' % argument)
+ raise RuntimeError('unable to use device')
+ return uuid
+
+ def setup_device(self, device_type, device_name, tags, size, slots):
+ """
+ Check if ``device`` is an lv, if so, set the tags, making sure to
+ update the tags with the lv_uuid and lv_path which the incoming tags
+ will not have.
+
+ If the device is not a logical volume, then retrieve the partition UUID
+ by querying ``blkid``
+ """
+ if device_name is None:
+ return '', '', tags
+ tags['ceph.type'] = device_type
+ tags['ceph.vdo'] = api.is_vdo(device_name)
+
+ try:
+ vg_name, lv_name = device_name.split('/')
+ lv = api.get_single_lv(filters={'lv_name': lv_name,
+ 'vg_name': vg_name})
+ except ValueError:
+ lv = None
+
+ if lv:
+ lv_uuid = lv.lv_uuid
+ path = lv.lv_path
+ tags['ceph.%s_uuid' % device_type] = lv_uuid
+ tags['ceph.%s_device' % device_type] = path
+ lv.set_tags(tags)
+ elif disk.is_device(device_name):
+ # We got a disk, create an lv
+ lv_type = "osd-{}".format(device_type)
+ name_uuid = system.generate_uuid()
+ kwargs = {
+ 'device': device_name,
+ 'tags': tags,
+ 'slots': slots
+ }
+ #TODO use get_block_db_size and co here to get configured size in
+ #conf file
+ if size != 0:
+ kwargs['size'] = size
+ lv = api.create_lv(
+ lv_type,
+ name_uuid,
+ **kwargs)
+ path = lv.lv_path
+ tags['ceph.{}_device'.format(device_type)] = path
+ tags['ceph.{}_uuid'.format(device_type)] = lv.lv_uuid
+ lv_uuid = lv.lv_uuid
+ lv.set_tags(tags)
+ else:
+ # otherwise assume this is a regular disk partition
+ name_uuid = self.get_ptuuid(device_name)
+ path = device_name
+ tags['ceph.%s_uuid' % device_type] = name_uuid
+ tags['ceph.%s_device' % device_type] = path
+ lv_uuid = name_uuid
+ return path, lv_uuid, tags
+
+ def prepare_data_device(self, device_type, osd_uuid):
+ """
+ Check if ``self.args.data`` is a device or partition, and if so create
+ an LV out of it with a distinct volume group name, assigning LV tags on
+ it and ultimately returning the logical volume object. Failing to
+ detect a device or partition will result in an error.
+
+ :param device_type: Usually, either ``data`` or ``block`` (filestore vs. bluestore)
+ :param osd_uuid: The OSD uuid
+ """
+ device = self.args.data
+ if disk.is_partition(device) or disk.is_device(device):
+ # we must create a vg, and then a single lv
+ lv_name_prefix = "osd-{}".format(device_type)
+ kwargs = {'device': device,
+ 'tags': {'ceph.type': device_type},
+ 'slots': self.args.data_slots,
+ }
+ logger.debug('data device size: {}'.format(self.args.data_size))
+ if self.args.data_size != 0:
+ kwargs['size'] = self.args.data_size
+ return api.create_lv(
+ lv_name_prefix,
+ osd_uuid,
+ **kwargs)
+ else:
+ error = [
+ 'Cannot use device ({}).'.format(device),
+ 'A vg/lv path or an existing device is needed']
+ raise RuntimeError(' '.join(error))
+
+ raise RuntimeError('no data logical volume found with: {}'.format(device))
+
+ def safe_prepare(self, args=None):
+ """
+ An intermediate step between `main()` and `prepare()` so that we can
+ capture the `self.osd_id` in case we need to rollback
+
+ :param args: Injected args, usually from `lvm create` which compounds
+ both `prepare` and `create`
+ """
+ if args is not None:
+ self.args = args
+
+ try:
+ vgname, lvname = self.args.data.split('/')
+ lv = api.get_single_lv(filters={'lv_name': lvname,
+ 'vg_name': vgname})
+ except ValueError:
+ lv = None
+
+ if api.is_ceph_device(lv):
+ logger.info("device {} is already used".format(self.args.data))
+ raise RuntimeError("skipping {}, it is already prepared".format(self.args.data))
+ try:
+ self.prepare()
+ except Exception:
+ logger.exception('lvm prepare was unable to complete')
+ logger.info('will rollback OSD ID creation')
+ rollback_osd(self.args, self.osd_id)
+ raise
+ terminal.success("ceph-volume lvm prepare successful for: %s" % self.args.data)
+
+ def get_cluster_fsid(self):
+ """
+ Allows using --cluster-fsid as an argument, but can fallback to reading
+ from ceph.conf if that is unset (the default behavior).
+ """
+ if self.args.cluster_fsid:
+ return self.args.cluster_fsid
+ else:
+ return conf.ceph.get('global', 'fsid')
+
+ @decorators.needs_root
+ def prepare(self):
+ # FIXME we don't allow re-using a keyring, we always generate one for the
+ # OSD, this needs to be fixed. This could either be a file (!) or a string
+ # (!!) or some flags that we would need to compound into a dict so that we
+ # can convert to JSON (!!!)
+ secrets = {'cephx_secret': prepare_utils.create_key()}
+ encrypted = 1 if self.args.dmcrypt else 0
+ cephx_lockbox_secret = '' if not encrypted else prepare_utils.create_key()
+
+ if encrypted:
+ secrets['dmcrypt_key'] = encryption_utils.create_dmcrypt_key()
+ secrets['cephx_lockbox_secret'] = cephx_lockbox_secret
+
+ cluster_fsid = self.get_cluster_fsid()
+
+ osd_fsid = self.args.osd_fsid or system.generate_uuid()
+ crush_device_class = self.args.crush_device_class
+ if crush_device_class:
+ secrets['crush_device_class'] = crush_device_class
+ # reuse a given ID if it exists, otherwise create a new ID
+ self.osd_id = prepare_utils.create_id(osd_fsid, json.dumps(secrets), osd_id=self.args.osd_id)
+ tags = {
+ 'ceph.osd_fsid': osd_fsid,
+ 'ceph.osd_id': self.osd_id,
+ 'ceph.cluster_fsid': cluster_fsid,
+ 'ceph.cluster_name': conf.cluster,
+ 'ceph.crush_device_class': crush_device_class,
+ 'ceph.osdspec_affinity': prepare_utils.get_osdspec_affinity()
+ }
+ if self.args.filestore:
+ if not self.args.journal:
+ logger.info(('no journal was specified, creating journal lv '
+ 'on {}').format(self.args.data))
+ self.args.journal = self.args.data
+ self.args.journal_size = disk.Size(g=5)
+ # need to adjust data size/slots for colocated journal
+ if self.args.data_size:
+ self.args.data_size -= self.args.journal_size
+ if self.args.data_slots == 1:
+ self.args.data_slots = 0
+ else:
+ raise RuntimeError('Can\'t handle multiple filestore OSDs '
+ 'with colocated journals yet. Please '
+ 'create journal LVs manually')
+ tags['ceph.cephx_lockbox_secret'] = cephx_lockbox_secret
+ tags['ceph.encrypted'] = encrypted
+
+ journal_device, journal_uuid, tags = self.setup_device(
+ 'journal',
+ self.args.journal,
+ tags,
+ self.args.journal_size,
+ self.args.journal_slots)
+
+ try:
+ vg_name, lv_name = self.args.data.split('/')
+ data_lv = api.get_single_lv(filters={'lv_name': lv_name,
+ 'vg_name': vg_name})
+ except ValueError:
+ data_lv = None
+
+ if not data_lv:
+ data_lv = self.prepare_data_device('data', osd_fsid)
+
+ tags['ceph.data_device'] = data_lv.lv_path
+ tags['ceph.data_uuid'] = data_lv.lv_uuid
+ tags['ceph.vdo'] = api.is_vdo(data_lv.lv_path)
+ tags['ceph.type'] = 'data'
+ data_lv.set_tags(tags)
+ if not journal_device.startswith('/'):
+ # we got a journal lv, set rest of the tags
+ api.get_single_lv(filters={'lv_name': lv_name,
+ 'vg_name': vg_name}).set_tags(tags)
+
+ prepare_filestore(
+ data_lv.lv_path,
+ journal_device,
+ secrets,
+ tags,
+ self.osd_id,
+ osd_fsid,
+ )
+ elif self.args.bluestore:
+ try:
+ vg_name, lv_name = self.args.data.split('/')
+ block_lv = api.get_single_lv(filters={'lv_name': lv_name,
+ 'vg_name': vg_name})
+ except ValueError:
+ block_lv = None
+
+ if not block_lv:
+ block_lv = self.prepare_data_device('block', osd_fsid)
+
+ tags['ceph.block_device'] = block_lv.lv_path
+ tags['ceph.block_uuid'] = block_lv.lv_uuid
+ tags['ceph.cephx_lockbox_secret'] = cephx_lockbox_secret
+ tags['ceph.encrypted'] = encrypted
+ tags['ceph.vdo'] = api.is_vdo(block_lv.lv_path)
+
+ wal_device, wal_uuid, tags = self.setup_device(
+ 'wal',
+ self.args.block_wal,
+ tags,
+ self.args.block_wal_size,
+ self.args.block_wal_slots)
+ db_device, db_uuid, tags = self.setup_device(
+ 'db',
+ self.args.block_db,
+ tags,
+ self.args.block_db_size,
+ self.args.block_db_slots)
+
+ tags['ceph.type'] = 'block'
+ block_lv.set_tags(tags)
+
+ prepare_bluestore(
+ block_lv.lv_path,
+ wal_device,
+ db_device,
+ secrets,
+ tags,
+ self.osd_id,
+ osd_fsid,
+ )
+
+ def main(self):
+ sub_command_help = dedent("""
+ Prepare an OSD by assigning an ID and FSID, registering them with the
+ cluster, formatting and mounting the volume, and
+ finally by adding all the metadata to the logical volumes using LVM
+ tags, so that it can later be discovered.
+
+ Once the OSD is ready, an ad-hoc systemd unit will be enabled so that
+ it can later get activated and the OSD daemon can get started.
+
+ Encryption is supported via dmcrypt and the --dmcrypt flag.
+
+ Existing logical volume (lv):
+
+ ceph-volume lvm prepare --data {vg/lv}
+
+ Existing block device (a logical volume will be created):
+
+ ceph-volume lvm prepare --data /path/to/device
+
+ Optionally, db and wal devices, partitions or logical volumes can be
+ consumed. A device will get a logical volume; partitions and existing
+ logical volumes will be used as-is:
+
+ ceph-volume lvm prepare --data {vg/lv} --block.wal {partition} --block.db {/path/to/device}
+ """)
+ parser = prepare_parser(
+ prog='ceph-volume lvm prepare',
+ description=sub_command_help,
+ )
+ if len(self.argv) == 0:
+ print(sub_command_help)
+ return
+ exclude_group_options(parser, argv=self.argv, groups=['filestore', 'bluestore'])
+ self.args = parser.parse_args(self.argv)
+ # the unfortunate mix of one superset for both filestore and bluestore
+ # makes this validation cumbersome
+ if self.args.filestore:
+ if not self.args.journal:
+ raise SystemExit('--journal is required when using --filestore')
+ # Default to bluestore here since defaulting it in add_argument may
+ # cause both to be True
+ if not self.args.bluestore and not self.args.filestore:
+ self.args.bluestore = True
+ self.safe_prepare()
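
Prepare leans repeatedly on one parsing idiom: a vg/lv argument splits into
exactly two fields, while a device path does not, so the ValueError doubles
as the "not an LV" signal. A standalone sketch::

    def parse_vg_lv(arg):
        try:
            vg_name, lv_name = arg.split('/')
            return vg_name, lv_name
        except ValueError:  # e.g. '/dev/sda' splits into three fields
            return None

    assert parse_vg_lv('ceph-vg/osd-block-0') == ('ceph-vg', 'osd-block-0')
    assert parse_vg_lv('/dev/sda') is None
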
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/trigger.py b/src/ceph-volume/ceph_volume/devices/lvm/trigger.py
new file mode 100644
index 000000000..dc57011df
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/lvm/trigger.py
@@ -0,0 +1,70 @@
+from __future__ import print_function
+import argparse
+from textwrap import dedent
+from ceph_volume.exceptions import SuffixParsingError
+from ceph_volume import decorators
+from .activate import Activate
+
+
+def parse_osd_id(string):
+ osd_id = string.split('-', 1)[0]
+ if not osd_id:
+ raise SuffixParsingError('OSD id', string)
+ if osd_id.isdigit():
+ return osd_id
+ raise SuffixParsingError('OSD id', string)
+
+
+def parse_osd_uuid(string):
+ osd_id = '%s-' % parse_osd_id(string)
+ # remove the id first
+ osd_uuid = string.split(osd_id, 1)[-1]
+ if not osd_uuid:
+ raise SuffixParsingError('OSD uuid', string)
+ return osd_uuid
+
+
+class Trigger(object):
+
+ help = 'systemd helper to activate an OSD'
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ @decorators.needs_root
+ def main(self):
+ sub_command_help = dedent("""
+ ** DO NOT USE DIRECTLY **
+ This tool is meant to help the systemd unit that knows about OSDs.
+
+ Proxy OSD activation to ``ceph-volume lvm activate`` by parsing the
+ input from systemd, detecting the UUID and ID associated with an OSD::
+
+ ceph-volume lvm trigger {SYSTEMD-DATA}
+
+ The systemd "data" is expected to be in the format of::
+
+ {OSD ID}-{OSD UUID}
+
+ The lvs associated with the OSD need to have been prepared previously,
+ so that all needed tags and metadata exist.
+ """)
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume lvm trigger',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=sub_command_help,
+ )
+
+ parser.add_argument(
+ 'systemd_data',
+ metavar='SYSTEMD_DATA',
+ nargs='?',
+ help='Data from a systemd unit containing ID and UUID of the OSD, like 0-asdf-lkjh'
+ )
+ if len(self.argv) == 0:
+ print(sub_command_help)
+ return
+ args = parser.parse_args(self.argv)
+ osd_id = parse_osd_id(args.systemd_data)
+ osd_uuid = parse_osd_uuid(args.systemd_data)
+ Activate(['--auto-detect-objectstore', osd_id, osd_uuid]).main()
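
The two parsers above split the systemd instance data on the first dash
only, so UUIDs containing dashes survive intact. A runnable condensation of
both helpers (using ValueError in place of SuffixParsingError)::

    def parse(systemd_data):
        osd_id, _, osd_uuid = systemd_data.partition('-')
        if not osd_id.isdigit() or not osd_uuid:
            raise ValueError('invalid systemd data: %s' % systemd_data)
        return osd_id, osd_uuid

    print(parse('0-8715BEB4-15C5-49DE-BA6F-401086EC7B41'))
    # ('0', '8715BEB4-15C5-49DE-BA6F-401086EC7B41')
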
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/zap.py b/src/ceph-volume/ceph_volume/devices/lvm/zap.py
new file mode 100644
index 000000000..d6d778d16
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/lvm/zap.py
@@ -0,0 +1,406 @@
+import argparse
+import os
+import logging
+import time
+
+from textwrap import dedent
+
+from ceph_volume import decorators, terminal, process
+from ceph_volume.api import lvm as api
+from ceph_volume.util import system, encryption, disk, arg_validators, str_to_int, merge_dict
+from ceph_volume.util.device import Device
+from ceph_volume.systemd import systemctl
+
+logger = logging.getLogger(__name__)
+mlogger = terminal.MultiLogger(__name__)
+
+
+def wipefs(path):
+ """
+ Removes the filesystem from an lv or partition.
+
+ Environment variables supported::
+
+ * ``CEPH_VOLUME_WIPEFS_TRIES``: Defaults to 8
+ * ``CEPH_VOLUME_WIPEFS_INTERVAL``: Defaults to 5
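+
+ For example, ``CEPH_VOLUME_WIPEFS_TRIES=12 ceph-volume lvm zap /dev/sdb``
+ would retry the wipe up to 12 times before giving up.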
+
+ """
+ tries = str_to_int(
+ os.environ.get('CEPH_VOLUME_WIPEFS_TRIES', 8)
+ )
+ interval = str_to_int(
+ os.environ.get('CEPH_VOLUME_WIPEFS_INTERVAL', 5)
+ )
+
+ for trying in range(tries):
+ stdout, stderr, exit_code = process.call([
+ 'wipefs',
+ '--all',
+ path
+ ])
+ if exit_code != 0:
+ # this could narrow the retry by poking in the stderr of the output
+ # to verify that 'probing initialization failed' appears, but
+ # better to be broad in this retry to prevent missing on
+ # a different message that needs to be retried as well
+ terminal.warning(
+ 'failed to wipefs device, will try again to work around a probable race condition'
+ )
+ time.sleep(interval)
+ else:
+ return
+ raise RuntimeError("could not complete wipefs on device: %s" % path)
+
+
+def zap_data(path):
+ """
+ Clears all data from the given path. Path should be
+ an absolute path to an lv or partition.
+
+ 10M of data is written to the path to make sure that
+ there is no trace left of any previous filesystem.
+ """
+ process.run([
+ 'dd',
+ 'if=/dev/zero',
+ 'of={path}'.format(path=path),
+ 'bs=1M',
+ 'count=10',
+ 'conv=fsync'
+ ])
+
+
+def find_associated_devices(osd_id=None, osd_fsid=None):
+ """
+ From an ``osd_id`` and/or an ``osd_fsid``, filter out all the LVs in the
+ system that match those tag values, further detect if any partitions are
+ part of the OSD, and then return the set of LVs and partitions (if any).
+ """
+ lv_tags = {}
+ if osd_id:
+ lv_tags['ceph.osd_id'] = osd_id
+ if osd_fsid:
+ lv_tags['ceph.osd_fsid'] = osd_fsid
+
+ lvs = api.get_lvs(tags=lv_tags)
+ if not lvs:
+ raise RuntimeError('Unable to find any LV for zapping OSD: '
+ '%s' % (osd_id or osd_fsid))
+
+ devices_to_zap = ensure_associated_lvs(lvs, lv_tags)
+ return [Device(path) for path in set(devices_to_zap) if path]
+
+
+def ensure_associated_lvs(lvs, lv_tags={}):
+ """
+ Go through each LV and check whether its backing devices (journal, wal,
+ block) are LVs or partitions, so that they can be accurately reported.
+ """
+ # look for many LVs for each backing type, because it is possible to
+ # receive a filtering for osd.1, and have multiple failed deployments
+ # leaving many journals with osd.1 - usually, only a single LV will be
+ # returned
+
+ journal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'journal'}))
+ db_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'db'}))
+ wal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'wal'}))
+ backing_devices = [(journal_lvs, 'journal'), (db_lvs, 'db'),
+ (wal_lvs, 'wal')]
+
+ verified_devices = []
+
+ for lv in lvs:
+ # go through each lv and append it, otherwise query `blkid` to find
+ # a physical device. Do this for each type (journal,db,wal) regardless
+ # if they have been processed in the previous LV, so that bad devices
+ # with the same ID can be caught
+ for ceph_lvs, _type in backing_devices:
+ if ceph_lvs:
+ verified_devices.extend([l.lv_path for l in ceph_lvs])
+ continue
+
+ # must be a disk partition, by querying blkid by the uuid we are
+ # ensuring that the device path is always correct
+ try:
+ device_uuid = lv.tags['ceph.%s_uuid' % _type]
+ except KeyError:
+ # Bluestore will not have ceph.journal_uuid, and Filestore
+ # will not have ceph.db_uuid
+ continue
+
+ osd_device = disk.get_device_from_partuuid(device_uuid)
+ if not osd_device:
+ # if the osd_device is not found by the partuuid, then it is
+ # not possible to ensure this device exists anymore, so skip it
+ continue
+ verified_devices.append(osd_device)
+
+ verified_devices.append(lv.lv_path)
+
+ # reduce the list from all the duplicates that were added
+ return list(set(verified_devices))
+
+
+class Zap(object):
+
+ help = 'Removes all data and filesystems from a logical volume or partition.'
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ def unmount_lv(self, lv):
+ if lv.tags.get('ceph.cluster_name') and lv.tags.get('ceph.osd_id'):
+ lv_path = "/var/lib/ceph/osd/{}-{}".format(lv.tags['ceph.cluster_name'], lv.tags['ceph.osd_id'])
+ else:
+ lv_path = lv.lv_path
+ dmcrypt_uuid = lv.lv_uuid
+ dmcrypt = lv.encrypted
+ if system.path_is_mounted(lv_path):
+ mlogger.info("Unmounting %s", lv_path)
+ system.unmount(lv_path)
+ if dmcrypt and dmcrypt_uuid:
+ self.dmcrypt_close(dmcrypt_uuid)
+
+ def zap_lv(self, device):
+ """
+ Device examples: vg-name/lv-name, /dev/vg-name/lv-name
+ Requirements: Must be a logical volume (LV)
+ """
+ lv = api.get_single_lv(filters={'lv_name': device.lv_name,
+ 'vg_name': device.vg_name})
+ pv = api.get_single_pv(filters={'lv_uuid': lv.lv_uuid})
+ self.unmount_lv(lv)
+
+ wipefs(device.path)
+ zap_data(device.path)
+
+ if self.args.destroy:
+ lvs = api.get_lvs(filters={'vg_name': device.vg_name})
+ if lvs == []:
+ mlogger.info('No LVs left in VG %s, exiting', device.vg_name)
+ return
+ elif len(lvs) <= 1:
+ mlogger.info('Only 1 LV left in VG, will proceed to destroy '
+ 'volume group %s', device.vg_name)
+ api.remove_vg(device.vg_name)
+ api.remove_pv(pv.pv_name)
+ else:
+ mlogger.info('More than 1 LV left in VG, will proceed to '
+ 'destroy LV only')
+ mlogger.info('Removing LV because --destroy was given: %s',
+ device.path)
+ api.remove_lv(device.path)
+ elif lv:
+ # just remove all lvm metadata, leaving the LV around
+ lv.clear_tags()
+
+ def zap_partition(self, device):
+ """
+ Device example: /dev/sda1
+ Requirements: Must be a partition
+ """
+ if device.is_encrypted:
+ # find the holder
+ holders = [
+ '/dev/%s' % holder for holder in device.sys_api.get('holders', [])
+ ]
+ for mapper_uuid in os.listdir('/dev/mapper'):
+ mapper_path = os.path.join('/dev/mapper', mapper_uuid)
+ if os.path.realpath(mapper_path) in holders:
+ self.dmcrypt_close(mapper_uuid)
+
+ if system.device_is_mounted(device.path):
+ mlogger.info("Unmounting %s", device.path)
+ system.unmount(device.path)
+
+ wipefs(device.path)
+ zap_data(device.path)
+
+ if self.args.destroy:
+ mlogger.info("Destroying partition since --destroy was used: %s" % device.path)
+ disk.remove_partition(device)
+
+ def zap_lvm_member(self, device):
+ """
+ An LVM member may have more than one LV and/or VG, for example if it is
+ a raw device with multiple partitions each belonging to a different LV
+
+ Device example: /dev/sda
+ Requirements: An LV or VG present in the device, making it an LVM member
+ """
+ for lv in device.lvs:
+ if lv.lv_name:
+ mlogger.info('Zapping lvm member {}. lv_path is {}'.format(device.path, lv.lv_path))
+ self.zap_lv(Device(lv.lv_path))
+ else:
+ vg = api.get_single_vg(filters={'vg_name': lv.vg_name})
+ if vg:
+ mlogger.info('Found empty VG {}, removing'.format(vg.vg_name))
+ api.remove_vg(vg.vg_name)
+
+ def zap_raw_device(self, device):
+ """
+ Any whole (raw) device passed in as input will be processed here,
+ checking for LVM membership and partitions (if any).
+
+ Device example: /dev/sda
+ Requirements: None
+ """
+ if not self.args.destroy:
+ # the use of dd on a raw device causes the partition table to be
+ # destroyed
+ mlogger.warning(
+ '--destroy was not specified, but zapping a whole device will remove the partition table'
+ )
+
+ # look for partitions and zap those
+ for part_name in device.sys_api.get('partitions', {}).keys():
+ self.zap_partition(Device('/dev/%s' % part_name))
+
+ wipefs(device.path)
+ zap_data(device.path)
+
+ @decorators.needs_root
+ def zap(self, devices=None):
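+ """
+ Zap the given Device objects, refusing mapper devices that are not
+ multipath, and dispatching to the LVM-member, LV, partition, or raw
+ device handlers as appropriate.
+ """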
+ devices = devices or self.args.devices
+
+ for device in devices:
+ mlogger.info("Zapping: %s", device.path)
+ if device.is_mapper and not device.is_mpath:
+ terminal.error("Refusing to zap the mapper device: {}".format(device))
+ raise SystemExit(1)
+ if device.is_lvm_member:
+ self.zap_lvm_member(device)
+ if device.is_lv:
+ self.zap_lv(device)
+ if device.is_partition:
+ self.zap_partition(device)
+ if device.is_device:
+ self.zap_raw_device(device)
+
+ if self.args.devices:
+ terminal.success(
+ "Zapping successful for: %s" % ", ".join([str(d) for d in self.args.devices])
+ )
+ else:
+ identifier = self.args.osd_id or self.args.osd_fsid
+ terminal.success(
+ "Zapping successful for OSD: %s" % identifier
+ )
+
+ @decorators.needs_root
+ def zap_osd(self):
+ if self.args.osd_id and not self.args.no_systemd:
+ osd_is_running = systemctl.osd_is_active(self.args.osd_id)
+ if osd_is_running:
+ mlogger.error("OSD ID %s is running, stop it with:" % self.args.osd_id)
+ mlogger.error("systemctl stop ceph-osd@%s" % self.args.osd_id)
+ raise SystemExit("Unable to zap devices associated with OSD ID: %s" % self.args.osd_id)
+ devices = find_associated_devices(self.args.osd_id, self.args.osd_fsid)
+ self.zap(devices)
+
+ def dmcrypt_close(self, dmcrypt_uuid):
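+ """
+ Close the dm-crypt mapping expected at /dev/mapper/<dmcrypt_uuid>.
+ """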
+ dmcrypt_path = "/dev/mapper/{}".format(dmcrypt_uuid)
+ mlogger.info("Closing encrypted path %s", dmcrypt_path)
+ encryption.dmcrypt_close(dmcrypt_path)
+
+ def main(self):
+ sub_command_help = dedent("""
+ Zaps the given logical volume(s), raw device(s) or partition(s) for reuse by ceph-volume.
+ If given a path to a logical volume it must be in the format of vg/lv. Any
+ filesystems present on the given device, vg/lv, or partition will be removed and
+ all data will be purged.
+
+ If the logical volume, raw device or partition is being used for any ceph related
+ mount points they will be unmounted.
+
+ However, the lv or partition will be kept intact.
+
+ Example calls for supported scenarios:
+
+ Zapping a logical volume:
+
+ ceph-volume lvm zap {vg name/lv name}
+
+ Zapping a partition:
+
+ ceph-volume lvm zap /dev/sdc1
+
+ Zapping many raw devices:
+
+ ceph-volume lvm zap /dev/sda /dev/sdb /dev/sdc
+
+ Zapping devices associated with an OSD ID:
+
+ ceph-volume lvm zap --osd-id 1
+
+ Optionally include the OSD FSID
+
+ ceph-volume lvm zap --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D
+
+ If the --destroy flag is given and you are zapping a raw device or partition
+ then all vgs and lvs that exist on that raw device or partition will be destroyed.
+
+ This is especially useful if a raw device or partition was used by ceph-volume lvm create
+ or ceph-volume lvm prepare commands previously and now you want to reuse that device.
+
+ For example:
+
+ ceph-volume lvm zap /dev/sda --destroy
+
+ If the --destroy flag is omitted when zapping an lv, the lv is wiped but
+ kept intact for reuse.
+
+ """)
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume lvm zap',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=sub_command_help,
+ )
+
+ parser.add_argument(
+ 'devices',
+ metavar='DEVICES',
+ nargs='*',
+ type=arg_validators.ValidZapDevice(gpt_ok=True),
+ default=[],
+ help='Path to one or many lv (as vg/lv), partition (as /dev/sda1) or device (as /dev/sda)'
+ )
+
+ parser.add_argument(
+ '--destroy',
+ action='store_true',
+ default=False,
+ help='Destroy all volume groups and logical volumes if you are zapping a raw device or partition',
+ )
+
+ parser.add_argument(
+ '--osd-id',
+ type=arg_validators.valid_osd_id,
+ help='Specify an OSD ID to detect associated devices for zapping',
+ )
+
+ parser.add_argument(
+ '--osd-fsid',
+ help='Specify an OSD FSID to detect associated devices for zapping',
+ )
+
+ parser.add_argument(
+ '--no-systemd',
+ dest='no_systemd',
+ action='store_true',
+ help='Skip systemd unit checks',
+ )
+
+ if len(self.argv) == 0:
+ print(sub_command_help)
+ return
+
+ self.args = parser.parse_args(self.argv)
+
+ if self.args.osd_id or self.args.osd_fsid:
+ self.zap_osd()
+ else:
+ self.zap()
diff --git a/src/ceph-volume/ceph_volume/devices/raw/__init__.py b/src/ceph-volume/ceph_volume/devices/raw/__init__.py
new file mode 100644
index 000000000..dd0a6534c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/raw/__init__.py
@@ -0,0 +1 @@
+from .main import Raw # noqa
diff --git a/src/ceph-volume/ceph_volume/devices/raw/activate.py b/src/ceph-volume/ceph_volume/devices/raw/activate.py
new file mode 100644
index 000000000..17be57dfe
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/raw/activate.py
@@ -0,0 +1,166 @@
+from __future__ import print_function
+import argparse
+import logging
+import os
+from textwrap import dedent
+from ceph_volume import process, conf, decorators, terminal
+from ceph_volume.util import system
+from ceph_volume.util import prepare as prepare_utils
+from .list import direct_report
+
+
+logger = logging.getLogger(__name__)
+
+def activate_bluestore(meta, tmpfs, systemd):
+ # find the osd
+ osd_id = meta['osd_id']
+ osd_uuid = meta['osd_uuid']
+
+ # mount on tmpfs the osd directory
+ osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
+ if not system.path_is_mounted(osd_path):
+ # mkdir -p and mount as tmpfs
+ prepare_utils.create_osd_path(osd_id, tmpfs=tmpfs)
+
+ # XXX This needs to be removed once ceph-bluestore-tool can deal with
+ # symlinks that exist in the osd dir
+ for link_name in ['block', 'block.db', 'block.wal']:
+ link_path = os.path.join(osd_path, link_name)
+ if os.path.exists(link_path):
+ os.unlink(os.path.join(osd_path, link_name))
+
+ # Once symlinks are removed, the osd dir can be 'primed' again. chown first,
+ # regardless of what currently exists so that ``prime-osd-dir`` can succeed
+ # even if permissions are somehow messed up
+ system.chown(osd_path)
+ prime_command = [
+ 'ceph-bluestore-tool',
+ 'prime-osd-dir',
+ '--path', osd_path,
+ '--no-mon-config',
+ '--dev', meta['device'],
+ ]
+ process.run(prime_command)
+
+ # always re-do the symlink regardless if it exists, so that the block,
+ # block.wal, and block.db devices that may have changed can be mapped
+ # correctly every time
+ prepare_utils.link_block(meta['device'], osd_id)
+
+ if 'device_db' in meta:
+ prepare_utils.link_db(meta['device_db'], osd_id, osd_uuid)
+
+ if 'device_wal' in meta:
+ prepare_utils.link_wal(meta['device_wal'], osd_id, osd_uuid)
+
+ system.chown(osd_path)
+ terminal.success("ceph-volume raw activate successful for osd ID: %s" % osd_id)
+
+
+class Activate(object):
+
+ help = 'Discover and prepare a data directory for a (BlueStore) OSD on a raw device'
+
+ def __init__(self, argv):
+ self.argv = argv
+ self.args = None
+
+ @decorators.needs_root
+ def activate(self, devs, start_osd_id, start_osd_uuid,
+ tmpfs, systemd):
+ """
+ :param devs: list of device paths to inspect for OSD labels
+ :param start_osd_id: if set, only activate the OSD with this ID
+ :param start_osd_uuid: if set, only activate the OSD with this UUID
+ :param tmpfs: whether to mount the OSD data directory on tmpfs
+ :param systemd: whether to handle systemd units (currently unused)
+ """
+ assert devs or start_osd_id or start_osd_uuid
+ found = direct_report(devs)
+
+ activated_any = False
+ for osd_uuid, meta in found.items():
+ osd_id = meta['osd_id']
+ if start_osd_id is not None and str(osd_id) != str(start_osd_id):
+ continue
+ if start_osd_uuid is not None and osd_uuid != start_osd_uuid:
+ continue
+ logger.info('Activating osd.%s uuid %s cluster %s' % (
+ osd_id, osd_uuid, meta['ceph_fsid']))
+ activate_bluestore(meta,
+ tmpfs=tmpfs,
+ systemd=systemd)
+ activated_any = True
+
+ if not activated_any:
+ raise RuntimeError('did not find any matching OSD to activate')
+
+ def main(self):
+ sub_command_help = dedent("""
+ Activate a (BlueStore) OSD on raw block device(s) based on the
+ device label (normally the first block of the device).
+
+ ceph-volume raw activate [/dev/sdb2 ...]
+
+ or
+
+ ceph-volume raw activate --osd-id NUM --osd-uuid UUID
+
+ The device(s) associated with the OSD need to have been prepared
+ previously, so that all needed tags and metadata exist.
+ """)
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume raw activate',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=sub_command_help,
+ )
+ parser.add_argument(
+ '--device',
+ help='The device for the OSD to start'
+ )
+ parser.add_argument(
+ '--osd-id',
+ help='OSD ID to activate'
+ )
+ parser.add_argument(
+ '--osd-uuid',
+ help='OSD UUID to activate'
+ )
+ parser.add_argument(
+ '--no-systemd',
+ dest='no_systemd',
+ action='store_true',
+ help='Skip creating and enabling systemd units and starting OSD services'
+ )
+ parser.add_argument(
+ '--block.db',
+ dest='block_db',
+ help='Path to bluestore block.db block device'
+ )
+ parser.add_argument(
+ '--block.wal',
+ dest='block_wal',
+ help='Path to bluestore block.wal block device'
+ )
+ parser.add_argument(
+ '--no-tmpfs',
+ action='store_true',
+ help='Do not use a tmpfs mount for OSD data dir'
+ )
+
+ if not self.argv:
+ print(sub_command_help)
+ return
+ args = parser.parse_args(self.argv)
+ self.args = args
+ if not args.no_systemd:
+ terminal.error('systemd support not yet implemented')
+ raise SystemExit(1)
+
+ devs = [args.device]
+ if args.block_wal:
+ devs.append(args.block_wal)
+ if args.block_db:
+ devs.append(args.block_db)
+
+ self.activate(devs=devs,
+ start_osd_id=args.osd_id,
+ start_osd_uuid=args.osd_uuid,
+ tmpfs=not args.no_tmpfs,
+ systemd=not self.args.no_systemd)
diff --git a/src/ceph-volume/ceph_volume/devices/raw/common.py b/src/ceph-volume/ceph_volume/devices/raw/common.py
new file mode 100644
index 000000000..19de81fe5
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/raw/common.py
@@ -0,0 +1,52 @@
+import argparse
+from ceph_volume.util import arg_validators
+
+def create_parser(prog, description):
+ """
+ Both prepare and create share the same parser; it is defined here to
+ avoid duplication
+ """
+ parser = argparse.ArgumentParser(
+ prog=prog,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=description,
+ )
+ parser.add_argument(
+ '--data',
+ required=True,
+ type=arg_validators.ValidRawDevice(as_string=True),
+ help='a raw device to use for the OSD',
+ )
+ parser.add_argument(
+ '--bluestore',
+ action='store_true',
+ help='Use BlueStore backend')
+ parser.add_argument(
+ '--crush-device-class',
+ dest='crush_device_class',
+ help='Crush device class to assign this OSD to',
+ default=""
+ )
+ parser.add_argument(
+ '--no-tmpfs',
+ action='store_true',
+ help='Do not use a tmpfs mount for OSD data dir'
+ )
+ parser.add_argument(
+ '--block.db',
+ dest='block_db',
+ help='Path to bluestore block.db block device',
+ type=arg_validators.ValidRawDevice(as_string=True)
+ )
+ parser.add_argument(
+ '--block.wal',
+ dest='block_wal',
+ help='Path to bluestore block.wal block device',
+ type=arg_validators.ValidRawDevice(as_string=True)
+ )
+ parser.add_argument(
+ '--dmcrypt',
+ action='store_true',
+ help='Enable device encryption via dm-crypt',
+ )
+ return parser
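+
+# Illustrative usage, mirroring how the prepare subcommand builds its parser
+# (the parse_args values here are hypothetical):
+#   parser = create_parser(prog='ceph-volume raw prepare', description=sub_command_help)
+#   args = parser.parse_args(['--bluestore', '--data', '/dev/sdb'])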
diff --git a/src/ceph-volume/ceph_volume/devices/raw/list.py b/src/ceph-volume/ceph_volume/devices/raw/list.py
new file mode 100644
index 000000000..06a2b3c22
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/raw/list.py
@@ -0,0 +1,163 @@
+from __future__ import print_function
+import argparse
+import json
+import logging
+from textwrap import dedent
+from ceph_volume import decorators, process
+from ceph_volume.util import disk
+
+
+logger = logging.getLogger(__name__)
+
+
+def direct_report(devices):
+ """
+ Other non-cli consumers of listing information will want to consume the
+ report without the need to parse arguments or other flags. This helper
+ bypasses the need to deal with the class interface which is meant for cli
+ handling.
+ """
+ _list = List([])
+ return _list.generate(devices)
+
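+# `ceph-bluestore-tool show-label --dev <dev>` prints JSON keyed by device
+# path; an abridged, illustrative example for a 'main' device:
+#   {"/dev/sdb": {"osd_uuid": "...", "description": "main",
+#                 "whoami": "0", "ceph_fsid": "..."}}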
+def _get_bluestore_info(dev):
+ out, err, rc = process.call([
+ 'ceph-bluestore-tool', 'show-label',
+ '--dev', dev], verbose_on_failure=False)
+ if rc:
+ # ceph-bluestore-tool returns an error (below) if device is not bluestore OSD
+ # > unable to read label for <device>: (2) No such file or directory
+ # but it's possible the error could be for a different reason (like if the disk fails)
+ logger.debug('assuming device {} is not BlueStore; ceph-bluestore-tool failed to get info from device: {}\n{}'.format(dev, out, err))
+ return None
+ oj = json.loads(''.join(out))
+ if dev not in oj:
+ # should be impossible, so warn
+ logger.warning('skipping device {} because it is not reported in ceph-bluestore-tool output: {}'.format(dev, out))
+ return None
+ try:
+ r = {
+ 'osd_uuid': oj[dev]['osd_uuid'],
+ }
+ if oj[dev]['description'] == 'main':
+ whoami = oj[dev]['whoami']
+ r.update({
+ 'type': 'bluestore',
+ 'osd_id': int(whoami),
+ 'ceph_fsid': oj[dev]['ceph_fsid'],
+ 'device': dev,
+ })
+ elif oj[dev]['description'] == 'bluefs db':
+ r['device_db'] = dev
+ elif oj[dev]['description'] == 'bluefs wal':
+ r['device_wal'] = dev
+ return r
+ except KeyError as e:
+ # this will appear for devices that have a bluestore header but aren't valid OSDs
+ # for example, due to incomplete rollback of OSDs: https://tracker.ceph.com/issues/51869
+ logger.error('device {} does not have all BlueStore data needed to be a valid OSD: {}\n{}'.format(dev, out, e))
+ return None
+
+
+class List(object):
+
+ help = 'list BlueStore OSDs on raw devices'
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ def generate(self, devs=None):
+ logger.debug('Listing block devices via lsblk...')
+ info_devices = disk.lsblk_all(abspath=True)
+ if devs is None or devs == []:
+ # If no devs are given initially, we want to list ALL devices including children and
+ # parents. Parent disks with child partitions may be the appropriate device to return if
+ # the parent disk has a bluestore header, but children may be the most appropriate
+ # devices to return if the parent disk does not have a bluestore header.
+ devs = [device['NAME'] for device in info_devices if device.get('NAME')]
+
+ result = {}
+ logger.debug('inspecting devices: {}'.format(devs))
+ for dev in devs:
+ # Linux kernels built with CONFIG_ATARI_PARTITION enabled can falsely interpret
+ # bluestore's on-disk format as an Atari partition table. These false Atari partitions
+ # can be interpreted as real OSDs if a bluestore OSD was previously created on the false
+ # partition. See https://tracker.ceph.com/issues/52060 for more info. If a device has a
+ # parent, it is a child. If the parent is a valid bluestore OSD, the child will only
+ # exist if it is a phantom Atari partition, and the child should be ignored. If the
+ # parent isn't bluestore, then the child could be a valid bluestore OSD. If we fail to
+ # determine whether a parent is bluestore, we should err on the side of not reporting
+ # the child so as not to give a false positive.
+ # find the lsblk entry for this device so its parent can be checked
+ info_device = next((info for info in info_devices if info.get('NAME') == dev), None)
+ if info_device is not None and info_device.get('PKNAME'):
+ parent = info_device['PKNAME']
+ try:
+ if disk.has_bluestore_label(parent):
+ logger.warning('ignoring child device {} whose parent {} is a BlueStore OSD. '
+ 'device is likely a phantom Atari partition. device info: {}'.format(dev, parent, info_device))
+ continue
+ except OSError as e:
+ logger.error('ignoring child device {} to avoid reporting invalid BlueStore data from phantom Atari partitions. '
+ 'failed to determine if parent device {} is BlueStore. err: {}'.format(dev, parent, e))
+ continue
+
+ bs_info = _get_bluestore_info(dev)
+ if bs_info is None:
+ # None is also returned in the rare event that there is an issue reading info from
+ # a BlueStore disk, so be sure to log our assumption that it isn't bluestore
+ logger.info('device {} does not have BlueStore information'.format(dev))
+ continue
+ uuid = bs_info['osd_uuid']
+ if uuid not in result:
+ result[uuid] = {}
+ result[uuid].update(bs_info)
+
+ return result
+
+ @decorators.needs_root
+ def list(self, args):
+ report = self.generate(args.device)
+ if args.format == 'json':
+ print(json.dumps(report, indent=4, sort_keys=True))
+ else:
+ if not report:
+ raise SystemExit('No valid Ceph devices found')
+ raise RuntimeError('not implemented yet')
+
+ def main(self):
+ sub_command_help = dedent("""
+ List OSDs on raw devices with raw device labels (usually the first
+ block of the device).
+
+ Full listing of all identifiable (currently, BlueStore) OSDs
+ on raw devices:
+
+ ceph-volume raw list
+
+ List a particular device, reporting all metadata about it::
+
+ ceph-volume raw list /dev/sda1
+
+ """)
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume raw list',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=sub_command_help,
+ )
+
+ parser.add_argument(
+ 'device',
+ metavar='DEVICE',
+ nargs='*',
+ help='Path to a device like /dev/sda1'
+ )
+
+ parser.add_argument(
+ '--format',
+ help='output format, defaults to "json"',
+ default='json',
+ choices=['json', 'pretty'],
+ )
+
+ args = parser.parse_args(self.argv)
+ self.list(args)
diff --git a/src/ceph-volume/ceph_volume/devices/raw/main.py b/src/ceph-volume/ceph_volume/devices/raw/main.py
new file mode 100644
index 000000000..efa251090
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/raw/main.py
@@ -0,0 +1,40 @@
+import argparse
+from textwrap import dedent
+from ceph_volume import terminal
+from . import list
+from . import prepare
+from . import activate
+
+class Raw(object):
+
+ help = 'Manage single-device OSDs on raw block devices'
+
+ _help = dedent("""
+ Manage a single-device OSD on a raw block device. Rely on
+ the existing device labels to store any needed metadata.
+
+ {sub_help}
+ """)
+
+ mapper = {
+ 'list': list.List,
+ 'prepare': prepare.Prepare,
+ 'activate': activate.Activate,
+ }
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ def print_help(self, sub_help):
+ return self._help.format(sub_help=sub_help)
+
+ def main(self):
+ terminal.dispatch(self.mapper, self.argv)
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume raw',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=self.print_help(terminal.subhelp(self.mapper)),
+ )
+ parser.parse_args(self.argv)
+ if len(self.argv) <= 1:
+ return parser.print_help()
diff --git a/src/ceph-volume/ceph_volume/devices/raw/prepare.py b/src/ceph-volume/ceph_volume/devices/raw/prepare.py
new file mode 100644
index 000000000..3c96eedac
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/raw/prepare.py
@@ -0,0 +1,169 @@
+from __future__ import print_function
+import json
+import logging
+import os
+from textwrap import dedent
+from ceph_volume.util import prepare as prepare_utils
+from ceph_volume.util import encryption as encryption_utils
+from ceph_volume.util import disk
+from ceph_volume.util import system
+from ceph_volume import decorators, terminal
+from ceph_volume.devices.lvm.common import rollback_osd
+from .common import create_parser
+
+logger = logging.getLogger(__name__)
+
+def prepare_dmcrypt(key, device, device_type, fsid):
+ """
+ Helper for devices that are encrypted. The operations needed for
+ block, db, wal, or data/journal devices are all the same
+ """
+ if not device:
+ return ''
+ kname = disk.lsblk(device)['KNAME']
+ mapping = 'ceph-{}-{}-{}-dmcrypt'.format(fsid, kname, device_type)
+ # format data device
+ encryption_utils.luks_format(
+ key,
+ device
+ )
+ encryption_utils.luks_open(
+ key,
+ device,
+ mapping
+ )
+
+ return '/dev/mapper/{}'.format(mapping)
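+
+# e.g. a 'block' device /dev/sdb (kname 'sdb') prepared for fsid <fsid> ends
+# up opened at /dev/mapper/ceph-<fsid>-sdb-block-dmcrypt (illustrative path)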
+
+def prepare_bluestore(block, wal, db, secrets, osd_id, fsid, tmpfs):
+ """
+ :param block: The name of the logical volume for the bluestore data
+ :param wal: a regular/plain disk or logical volume, to be used for block.wal
+ :param db: a regular/plain disk or logical volume, to be used for block.db
+ :param secrets: A dict with the secrets needed to create the osd (e.g. cephx)
+ :param osd_id: The OSD id
+ :param fsid: The OSD fsid, also known as the OSD UUID
+ :param tmpfs: Whether to mount the OSD data directory on tmpfs
+ """
+ cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key())
+
+ if secrets.get('dmcrypt_key'):
+ key = secrets['dmcrypt_key']
+ block = prepare_dmcrypt(key, block, 'block', fsid)
+ wal = prepare_dmcrypt(key, wal, 'wal', fsid)
+ db = prepare_dmcrypt(key, db, 'db', fsid)
+
+ # create the directory
+ prepare_utils.create_osd_path(osd_id, tmpfs=tmpfs)
+ # symlink the block
+ prepare_utils.link_block(block, osd_id)
+ # get the latest monmap
+ prepare_utils.get_monmap(osd_id)
+ # write the OSD keyring if it doesn't exist already
+ prepare_utils.write_keyring(osd_id, cephx_secret)
+ # prepare the osd filesystem
+ prepare_utils.osd_mkfs_bluestore(
+ osd_id, fsid,
+ keyring=cephx_secret,
+ wal=wal,
+ db=db
+ )
+
+
+class Prepare(object):
+
+ help = 'Format a raw device and associate it with a (BlueStore) OSD'
+
+ def __init__(self, argv):
+ self.argv = argv
+ self.osd_id = None
+
+ def safe_prepare(self, args=None):
+ """
+ An intermediate step between `main()` and `prepare()` so that we can
+ capture the `self.osd_id` in case we need to rollback
+
+ :param args: Injected args, usually from `raw create` which compounds
+ both `prepare` and `create`
+ """
+ if args is not None:
+ self.args = args
+ try:
+ self.prepare()
+ except Exception:
+ logger.exception('raw prepare was unable to complete')
+ logger.info('will rollback OSD ID creation')
+ rollback_osd(self.args, self.osd_id)
+ raise
+ dmcrypt_log = 'dmcrypt' if args.dmcrypt else 'clear'
+ terminal.success("ceph-volume raw {} prepare successful for: {}".format(dmcrypt_log, self.args.data))
+
+
+ @decorators.needs_root
+ def prepare(self):
+ secrets = {'cephx_secret': prepare_utils.create_key()}
+ encrypted = 1 if self.args.dmcrypt else 0
+ cephx_lockbox_secret = '' if not encrypted else prepare_utils.create_key()
+
+ if encrypted:
+ secrets['dmcrypt_key'] = os.getenv('CEPH_VOLUME_DMCRYPT_SECRET')
+ secrets['cephx_lockbox_secret'] = cephx_lockbox_secret # dummy value to make `ceph osd new` not complain
+
+ osd_fsid = system.generate_uuid()
+ crush_device_class = self.args.crush_device_class
+ if crush_device_class:
+ secrets['crush_device_class'] = crush_device_class
+ tmpfs = not self.args.no_tmpfs
+ wal = ""
+ db = ""
+ if self.args.block_wal:
+ wal = self.args.block_wal
+ if self.args.block_db:
+ db = self.args.block_db
+
+ # reuse a given ID if it exists, otherwise create a new ID
+ self.osd_id = prepare_utils.create_id(
+ osd_fsid, json.dumps(secrets))
+
+ prepare_bluestore(
+ self.args.data,
+ wal,
+ db,
+ secrets,
+ self.osd_id,
+ osd_fsid,
+ tmpfs,
+ )
+
+ def main(self):
+ sub_command_help = dedent("""
+ Prepare an OSD by assigning an ID and FSID, registering them with the
+ cluster, and formatting the volume.
+
+ Once the OSD is ready, an ad-hoc systemd unit will be enabled so that
+ it can later get activated and the OSD daemon can get started.
+
+ ceph-volume raw prepare --bluestore --data {device}
+
+ DB and WAL devices are supported.
+
+ ceph-volume raw prepare --bluestore --data {device} --block.db {device} --block.wal {device}
+
+ """)
+ parser = create_parser(
+ prog='ceph-volume raw prepare',
+ description=sub_command_help,
+ )
+ if not self.argv:
+ print(sub_command_help)
+ return
+ self.args = parser.parse_args(self.argv)
+ if not self.args.bluestore:
+ terminal.error('must specify --bluestore (currently the only supported backend)')
+ raise SystemExit(1)
+ if self.args.dmcrypt and not os.getenv('CEPH_VOLUME_DMCRYPT_SECRET'):
+ terminal.error('encryption was requested (--dmcrypt) but environment variable ' \
+ 'CEPH_VOLUME_DMCRYPT_SECRET is not set, you must set ' \
+ 'this variable to provide a dmcrypt secret.')
+ raise SystemExit(1)
+
+ self.safe_prepare(self.args)
diff --git a/src/ceph-volume/ceph_volume/devices/simple/__init__.py b/src/ceph-volume/ceph_volume/devices/simple/__init__.py
new file mode 100644
index 000000000..280e130ed
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/simple/__init__.py
@@ -0,0 +1 @@
+from .main import Simple # noqa
diff --git a/src/ceph-volume/ceph_volume/devices/simple/activate.py b/src/ceph-volume/ceph_volume/devices/simple/activate.py
new file mode 100644
index 000000000..7439141c0
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/simple/activate.py
@@ -0,0 +1,302 @@
+from __future__ import print_function
+import argparse
+import base64
+import glob
+import json
+import logging
+import os
+from textwrap import dedent
+from ceph_volume import process, decorators, terminal, conf
+from ceph_volume.util import system, disk
+from ceph_volume.util import encryption as encryption_utils
+from ceph_volume.util import prepare as prepare_utils
+from ceph_volume.systemd import systemctl
+
+
+logger = logging.getLogger(__name__)
+mlogger = terminal.MultiLogger(__name__)
+
+
+class Activate(object):
+
+ help = 'Enable systemd units to mount configured devices and start a Ceph OSD'
+
+ def __init__(self, argv, from_trigger=False):
+ self.argv = argv
+ self.from_trigger = from_trigger
+ self.skip_systemd = False
+
+ def validate_devices(self, json_config):
+ """
+ ``json_config`` is the loaded dictionary coming from the JSON file. It is usually mixed with
+ other non-device items, but for the sake of comparison it doesn't really matter. This method
+ just makes sure that the needed keys exist
+ """
+ devices = json_config.keys()
+ try:
+ objectstore = json_config['type']
+ except KeyError:
+ if {'data', 'journal'}.issubset(set(devices)):
+ logger.warning(
+ '"type" key not found, assuming "filestore" since journal key is present'
+ )
+ objectstore = 'filestore'
+ else:
+ logger.warning(
+ '"type" key not found, assuming "bluestore" since journal key is not present'
+ )
+ objectstore = 'bluestore'
+
+ # Go through all the device combinations that are absolutely required,
+ # raise an error describing what was expected and what was found
+ # otherwise.
+ if objectstore == 'filestore':
+ if {'data', 'journal'}.issubset(set(devices)):
+ return True
+ else:
+ found = [i for i in devices if i in ['data', 'journal']]
+ mlogger.error("Required devices (data, and journal) not present for filestore")
+ mlogger.error('filestore devices found: %s', found)
+ raise RuntimeError('Unable to activate filestore OSD due to missing devices')
+ else:
+ # This is a bit tricky, with newer bluestore we don't need data, older implementations
+ # do (e.g. with ceph-disk). ceph-volume just uses a tmpfs that doesn't require data.
+ if {'block', 'data'}.issubset(set(devices)):
+ return True
+ else:
+ bluestore_devices = ['block.db', 'block.wal', 'block', 'data']
+ found = [i for i in devices if i in bluestore_devices]
+ mlogger.error("Required devices (block and data) not present for bluestore")
+ mlogger.error('bluestore devices found: %s', found)
+ raise RuntimeError('Unable to activate bluestore OSD due to missing devices')
+
+ def get_device(self, uuid):
+ """
+ If a device is encrypted, it will decrypt/open and return the mapper
+ path, if it isn't encrypted it will just return the device found that
+ is mapped to the uuid. This will make it easier for the caller to
+ avoid if/else to check if devices need decrypting
+
+ :param uuid: The partition uuid of the device (PARTUUID)
+ """
+ device = disk.get_device_from_partuuid(uuid)
+
+ # If device is not found, it is fine to return an empty string from the
+ # helper that finds `device`. If it finds anything and it is not
+ # encrypted, just return what was found
+ if not self.is_encrypted or not device:
+ return device
+
+ if self.encryption_type == 'luks':
+ encryption_utils.luks_open(self.dmcrypt_secret, device, uuid)
+ else:
+ encryption_utils.plain_open(self.dmcrypt_secret, device, uuid)
+
+ return '/dev/mapper/%s' % uuid
+
+ def enable_systemd_units(self, osd_id, osd_fsid):
+ """
+ * disables the ceph-disk systemd units to prevent them from running when
+ a UDEV event matches Ceph rules
+ * creates the ``simple`` systemd units to handle the activation and
+ startup of the OSD with ``osd_id`` and ``osd_fsid``
+ * enables the OSD systemd unit and finally starts the OSD.
+ """
+ if not self.from_trigger and not self.skip_systemd:
+ # means it was scanned and now activated directly, so ensure that
+ # ceph-disk units are disabled, and that the `simple` systemd unit
+ # is created and enabled
+
+ # enable the ceph-volume unit for this OSD
+ systemctl.enable_volume(osd_id, osd_fsid, 'simple')
+
+ # disable any/all ceph-disk units
+ systemctl.mask_ceph_disk()
+ terminal.warning(
+ ('All ceph-disk systemd units have been disabled to '
+ 'prevent OSDs getting triggered by UDEV events')
+ )
+ else:
+ terminal.info('Skipping enabling of `simple` systemd unit')
+ terminal.info('Skipping masking of ceph-disk systemd units')
+
+ if not self.skip_systemd:
+ # enable the OSD
+ systemctl.enable_osd(osd_id)
+
+ # start the OSD
+ systemctl.start_osd(osd_id)
+ else:
+ terminal.info(
+ 'Skipping enabling and starting OSD simple systemd unit because --no-systemd was used'
+ )
+
+ @decorators.needs_root
+ def activate(self, args):
+ with open(args.json_config, 'r') as fp:
+ osd_metadata = json.load(fp)
+
+ # Make sure that required devices are configured
+ self.validate_devices(osd_metadata)
+
+ osd_id = osd_metadata.get('whoami', args.osd_id)
+ osd_fsid = osd_metadata.get('fsid', args.osd_fsid)
+ data_uuid = osd_metadata.get('data', {}).get('uuid')
+ conf.cluster = osd_metadata.get('cluster_name', 'ceph')
+ if not data_uuid:
+ raise RuntimeError(
+ 'Unable to activate OSD %s - no "uuid" key found for data' % args.osd_id
+ )
+
+ # Encryption detection, and capturing of the keys to decrypt
+ self.is_encrypted = osd_metadata.get('encrypted', False)
+ self.encryption_type = osd_metadata.get('encryption_type')
+ if self.is_encrypted:
+ lockbox_secret = osd_metadata.get('lockbox.keyring')
+ # write the keyring always so that we can unlock
+ encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
+ # Store the secret around so that the decrypt method can reuse
+ raw_dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
+ # Note how both these calls need b64decode. For some reason, the
+ # way ceph-disk creates these keys, it stores them in the monitor
+ # *undecoded*, requiring this decode call again. The lvm side of
+ # encryption doesn't need it, so we are assuming here that anything
+ # that `simple` scans, will come from ceph-disk and will need this
+ # extra decode call here
+ self.dmcrypt_secret = base64.b64decode(raw_dmcrypt_secret)
+
+ cluster_name = osd_metadata.get('cluster_name', 'ceph')
+ osd_dir = '/var/lib/ceph/osd/%s-%s' % (cluster_name, osd_id)
+
+ # XXX there is no support for LVM here
+ data_device = self.get_device(data_uuid)
+
+ if not data_device:
+ raise RuntimeError("osd fsid {} doesn't exist, this file will "
+ "be skipped, consider cleaning legacy "
+ "json file {}".format(osd_metadata['fsid'], args.json_config))
+
+ journal_device = self.get_device(osd_metadata.get('journal', {}).get('uuid'))
+ block_device = self.get_device(osd_metadata.get('block', {}).get('uuid'))
+ block_db_device = self.get_device(osd_metadata.get('block.db', {}).get('uuid'))
+ block_wal_device = self.get_device(osd_metadata.get('block.wal', {}).get('uuid'))
+
+ if not system.device_is_mounted(data_device, destination=osd_dir):
+ if osd_metadata.get('type') == 'filestore':
+ prepare_utils.mount_osd(data_device, osd_id)
+ else:
+ process.run(['mount', '-v', data_device, osd_dir])
+
+ device_map = {
+ 'journal': journal_device,
+ 'block': block_device,
+ 'block.db': block_db_device,
+ 'block.wal': block_wal_device
+ }
+
+ for name, device in device_map.items():
+ if not device:
+ continue
+ # always re-do the symlink regardless if it exists, so that the journal
+ # device path that may have changed can be mapped correctly every time
+ destination = os.path.join(osd_dir, name)
+ process.run(['ln', '-snf', device, destination])
+
+ # make sure that the journal has proper permissions
+ system.chown(device)
+
+ self.enable_systemd_units(osd_id, osd_fsid)
+
+ terminal.success('Successfully activated OSD %s with FSID %s' % (osd_id, osd_fsid))
+
+ def main(self):
+ sub_command_help = dedent("""
+ Activate OSDs by mounting devices previously configured to their
+ appropriate destination::
+
+ ceph-volume simple activate {ID} {FSID}
+
+ Or using a JSON file directly::
+
+ ceph-volume simple activate --file /etc/ceph/osd/{ID}-{FSID}.json
+
+ The OSD must have been "scanned" previously (see ``ceph-volume simple
+ scan``), so that all needed OSD device information and metadata exist.
+
+ A previously scanned OSD would exist like::
+
+ /etc/ceph/osd/{ID}-{FSID}.json
+
+
+ Environment variables supported:
+
+ CEPH_VOLUME_SIMPLE_JSON_DIR: Directory location for scanned OSD JSON configs
+ """)
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume simple activate',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=sub_command_help,
+ )
+ parser.add_argument(
+ 'osd_id',
+ metavar='ID',
+ nargs='?',
+ help='The ID of the OSD, usually an integer, like 0'
+ )
+ parser.add_argument(
+ 'osd_fsid',
+ metavar='FSID',
+ nargs='?',
+ help='The FSID of the OSD, similar to a SHA1'
+ )
+ parser.add_argument(
+ '--all',
+ help='Activate all OSDs with an OSD JSON config',
+ action='store_true',
+ default=False,
+ )
+ parser.add_argument(
+ '--file',
+ help='The path to a JSON file, from a scanned OSD'
+ )
+ parser.add_argument(
+ '--no-systemd',
+ dest='skip_systemd',
+ action='store_true',
+ help='Skip creating and enabling systemd units and starting OSD services',
+ )
+ if len(self.argv) == 0:
+ print(sub_command_help)
+ return
+ args = parser.parse_args(self.argv)
+ if not args.file and not args.all:
+ if not args.osd_id and not args.osd_fsid:
+ terminal.error('ID and FSID are required to find the right OSD to activate')
+ terminal.error('from a scanned OSD location in /etc/ceph/osd/')
+ raise RuntimeError('Unable to activate without both ID and FSID')
+ # don't allow a CLI flag to specify the JSON dir, because that might
+ # implicitly indicate that it would be possible to activate a json file
+ # at a non-default location which would not work at boot time if the
+ # custom location is not exposed through an ENV var
+ self.skip_systemd = args.skip_systemd
+ json_dir = os.environ.get('CEPH_VOLUME_SIMPLE_JSON_DIR', '/etc/ceph/osd/')
+ if args.all:
+ if args.file or args.osd_id:
+ mlogger.warn('--all was passed, ignoring --file and ID/FSID arguments')
+ json_configs = glob.glob('{}/*.json'.format(json_dir))
+ for json_config in json_configs:
+ mlogger.info('activating OSD specified in {}'.format(json_config))
+ args.json_config = json_config
+ try:
+ self.activate(args)
+ except RuntimeError as e:
+ terminal.warning(str(e))
+ else:
+ if args.file:
+ json_config = args.file
+ else:
+ json_config = os.path.join(json_dir, '%s-%s.json' % (args.osd_id, args.osd_fsid))
+ if not os.path.exists(json_config):
+ raise RuntimeError('Expected JSON config path not found: %s' % json_config)
+ args.json_config = json_config
+ self.activate(args)
diff --git a/src/ceph-volume/ceph_volume/devices/simple/main.py b/src/ceph-volume/ceph_volume/devices/simple/main.py
new file mode 100644
index 000000000..2119963f8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/simple/main.py
@@ -0,0 +1,41 @@
+import argparse
+from textwrap import dedent
+from ceph_volume import terminal
+from . import scan
+from . import activate
+from . import trigger
+
+
+class Simple(object):
+
+ help = 'Manage already deployed OSDs with ceph-volume'
+
+ _help = dedent("""
+ Take over a deployed OSD, persisting its metadata in /etc/ceph/osd/ so that it can be managed
+ with ceph-volume directly. Avoids UDEV and ceph-disk handling.
+
+ {sub_help}
+ """)
+
+ mapper = {
+ 'scan': scan.Scan,
+ 'activate': activate.Activate,
+ 'trigger': trigger.Trigger,
+ }
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ def print_help(self, sub_help):
+ return self._help.format(sub_help=sub_help)
+
+ def main(self):
+ terminal.dispatch(self.mapper, self.argv)
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume simple',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=self.print_help(terminal.subhelp(self.mapper)),
+ )
+ parser.parse_args(self.argv)
+ if len(self.argv) <= 1:
+ return parser.print_help()
diff --git a/src/ceph-volume/ceph_volume/devices/simple/scan.py b/src/ceph-volume/ceph_volume/devices/simple/scan.py
new file mode 100644
index 000000000..ff7040beb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/simple/scan.py
@@ -0,0 +1,385 @@
+from __future__ import print_function
+import argparse
+import base64
+import json
+import logging
+import os
+from textwrap import dedent
+from ceph_volume import decorators, terminal, conf
+from ceph_volume.api import lvm
+from ceph_volume.systemd import systemctl
+from ceph_volume.util import arg_validators, system, disk, encryption
+from ceph_volume.util.device import Device
+
+
+logger = logging.getLogger(__name__)
+
+
+def parse_keyring(file_contents):
+ """
+ Extract the actual key from a string. Usually from a keyring file, where
+ the keyring will be in a client section. In the case of a lockbox, it is
+ something like::
+
+ [client.osd-lockbox.8d7a8ab2-5db0-4f83-a785-2809aba403d5]\n\tkey = AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==\n
+
+ From the above case, it would return::
+
+ AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==
+ """
+ # remove newlines that might be trailing
+ keyring = file_contents.strip('\n')
+
+ # Now split on spaces
+ keyring = keyring.split(' ')[-1]
+
+ # Split on newlines
+ keyring = keyring.split('\n')[-1]
+
+ return keyring.strip()
+
+
+class Scan(object):
+
+ help = 'Capture metadata from all running ceph-disk OSDs, an OSD data partition, or a directory'
+
+ def __init__(self, argv):
+ self.argv = argv
+ self._etc_path = '/etc/ceph/osd/'
+
+ @property
+ def etc_path(self):
+ if os.path.isdir(self._etc_path):
+ return self._etc_path
+
+ if not os.path.exists(self._etc_path):
+ os.mkdir(self._etc_path)
+ return self._etc_path
+
+ error = "OSD Configuration path (%s) needs to be a directory" % self._etc_path
+ raise RuntimeError(error)
+
+ def get_contents(self, path):
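+ """
+ Return the raw contents of ``path``; single-line files are stripped of
+ surrounding whitespace and newlines.
+ """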
+ with open(path, 'r') as fp:
+ contents = fp.readlines()
+ if len(contents) > 1:
+ return ''.join(contents)
+ return ''.join(contents).strip().strip('\n')
+
+ def scan_device(self, path):
+ device_metadata = {'path': None, 'uuid': None}
+ if not path:
+ return device_metadata
+ if self.is_encrypted:
+ encryption_metadata = encryption.legacy_encrypted(path)
+ device_metadata['path'] = encryption_metadata['device']
+ device_metadata['uuid'] = disk.get_partuuid(encryption_metadata['device'])
+ return device_metadata
+ # cannot read the symlink if this is tmpfs
+ if os.path.islink(path):
+ device = os.readlink(path)
+ else:
+ device = path
+ lvm_device = lvm.get_single_lv(filters={'lv_path': device})
+ if lvm_device:
+ device_uuid = lvm_device.lv_uuid
+ else:
+ device_uuid = disk.get_partuuid(device)
+
+ device_metadata['uuid'] = device_uuid
+ device_metadata['path'] = device
+
+ return device_metadata
+
+ def scan_directory(self, path):
+ osd_metadata = {'cluster_name': conf.cluster}
+ directory_files = os.listdir(path)
+ if 'keyring' not in directory_files:
+ raise RuntimeError(
+ 'OSD files not found, required "keyring" file is not present at: %s' % path
+ )
+ for file_ in os.listdir(path):
+ file_path = os.path.join(path, file_)
+ file_json_key = file_
+ if file_.endswith('_dmcrypt'):
+ file_json_key = file_[:-len('_dmcrypt')]
+ logger.info(
+ 'reading file {}, stripping _dmcrypt suffix'.format(file_)
+ )
+ if os.path.islink(file_path):
+ if os.path.exists(file_path):
+ osd_metadata[file_json_key] = self.scan_device(file_path)
+ else:
+ msg = 'broken symlink found %s -> %s' % (file_path, os.path.realpath(file_path))
+ terminal.warning(msg)
+ logger.warning(msg)
+
+ if os.path.isdir(file_path):
+ continue
+
+ # the check for binary needs to go before the file, to avoid
+ # capturing data from binary files but still be able to capture
+ # contents from actual files later
+ try:
+ if system.is_binary(file_path):
+ logger.info('skipping binary file: %s' % file_path)
+ continue
+ except IOError:
+ logger.exception('skipping due to IOError on file: %s' % file_path)
+ continue
+ if os.path.isfile(file_path):
+ content = self.get_contents(file_path)
+ if 'keyring' in file_path:
+ content = parse_keyring(content)
+ try:
+ osd_metadata[file_json_key] = int(content)
+ except ValueError:
+ osd_metadata[file_json_key] = content
+
+ # we must scan the paths again because this might be a temporary mount
+ path_mounts = system.Mounts(paths=True)
+ device = path_mounts.get_mounts().get(path)
+
+ # it is possible to have more than one device mounted at the path; pick
+ # the first one, since only a single 'data' device is expected
+ if not device:
+ terminal.error('Unable to detect device mounted for path: %s' % path)
+ raise RuntimeError('Cannot activate OSD')
+ osd_metadata['data'] = self.scan_device(device[0] if len(device) else None)
+
+ return osd_metadata
+
+ def scan_encrypted(self, directory=None):
+ device = self.encryption_metadata['device']
+ lockbox = self.encryption_metadata['lockbox']
+ encryption_type = self.encryption_metadata['type']
+ osd_metadata = {}
+ # Get the PARTUUID of the device to make sure have the right one and
+ # that maps to the data device
+ device_uuid = disk.get_partuuid(device)
+ dm_path = '/dev/mapper/%s' % device_uuid
+ # check if this partition is already mapped
+ device_status = encryption.status(device_uuid)
+
+ # capture all the information from the lockbox first, reusing the
+ # directory scan method
+ if self.device_mounts.get(lockbox):
+ lockbox_path = self.device_mounts.get(lockbox)[0]
+ lockbox_metadata = self.scan_directory(lockbox_path)
+ # ceph-disk stores the fsid as osd-uuid in the lockbox, thanks ceph-disk
+ dmcrypt_secret = encryption.get_dmcrypt_key(
+ None, # There is no ID stored in the lockbox
+ lockbox_metadata['osd-uuid'],
+ os.path.join(lockbox_path, 'keyring')
+ )
+ else:
+ with system.tmp_mount(lockbox) as lockbox_path:
+ lockbox_metadata = self.scan_directory(lockbox_path)
+ # ceph-disk stores the fsid as osd-uuid in the lockbox, thanks ceph-disk
+ dmcrypt_secret = encryption.get_dmcrypt_key(
+ None, # There is no ID stored in the lockbox
+ lockbox_metadata['osd-uuid'],
+ os.path.join(lockbox_path, 'keyring')
+ )
+
+ if not device_status:
+ # Note how both these calls need b64decode. For some reason, the
+ # way ceph-disk creates these keys, it stores them in the monitor
+ # *undecoded*, requiring this decode call again. The lvm side of
+ # encryption doesn't need it, so we are assuming here that anything
+ # that `simple` scans, will come from ceph-disk and will need this
+ # extra decode call here
+ dmcrypt_secret = base64.b64decode(dmcrypt_secret)
+ if encryption_type == 'luks':
+ encryption.luks_open(dmcrypt_secret, device, device_uuid)
+ else:
+ encryption.plain_open(dmcrypt_secret, device, device_uuid)
+
+ # If we have a directory, use that instead of checking for mounts
+ if directory:
+ osd_metadata = self.scan_directory(directory)
+ else:
+ # Now check if that mapper is mounted already, to avoid remounting and
+ # decrypting the device
+ dm_path_mount = self.device_mounts.get(dm_path)
+ if dm_path_mount:
+ osd_metadata = self.scan_directory(dm_path_mount[0])
+ else:
+ with system.tmp_mount(dm_path, encrypted=True) as device_path:
+ osd_metadata = self.scan_directory(device_path)
+
+ osd_metadata['encrypted'] = True
+ osd_metadata['encryption_type'] = encryption_type
+ osd_metadata['lockbox.keyring'] = parse_keyring(lockbox_metadata['keyring'])
+ return osd_metadata
+
+ @decorators.needs_root
+ def scan(self, args):
+ osd_metadata = {'cluster_name': conf.cluster}
+ osd_path = None
+ logger.info('detecting if argument is a device or a directory: %s', args.osd_path)
+ if os.path.isdir(args.osd_path):
+ logger.info('will scan directly, path is a directory')
+ osd_path = args.osd_path
+ else:
+ # assume this is a device, check if it is mounted and use that path
+ logger.info('path is not a directory, will check if mounted')
+ if system.device_is_mounted(args.osd_path):
+ logger.info('argument is a device, which is mounted')
+ mounted_osd_paths = self.device_mounts.get(args.osd_path)
+ osd_path = mounted_osd_paths[0] if len(mounted_osd_paths) else None
+
+ # argument is not a directory, and it is not a device that is mounted
+ # somewhere so temporarily mount it to poke inside, otherwise, scan
+ # directly
+ if not osd_path:
+ # check if we have an encrypted device first, so that we can poke at
+ # the lockbox instead
+ if self.is_encrypted:
+ if not self.encryption_metadata.get('lockbox'):
+ raise RuntimeError(
+ 'Lockbox partition was not found for device: %s' % args.osd_path
+ )
+ osd_metadata = self.scan_encrypted()
+ else:
+ logger.info('device is not mounted, will mount it temporarily to scan')
+ with system.tmp_mount(args.osd_path) as osd_path:
+ osd_metadata = self.scan_directory(osd_path)
+ else:
+ if self.is_encrypted:
+ logger.info('will scan encrypted OSD directory at path: %s', osd_path)
+ osd_metadata = self.scan_encrypted(osd_path)
+ else:
+ logger.info('will scan OSD directory at path: %s', osd_path)
+ osd_metadata = self.scan_directory(osd_path)
+
+ osd_id = osd_metadata['whoami']
+ osd_fsid = osd_metadata['fsid']
+ filename = '%s-%s.json' % (osd_id, osd_fsid)
+ json_path = os.path.join(self.etc_path, filename)
+
+ if os.path.exists(json_path) and not args.stdout:
+ if not args.force:
+ raise RuntimeError(
+ '--force was not used and OSD metadata file exists: %s' % json_path
+ )
+
+ if args.stdout:
+ print(json.dumps(osd_metadata, indent=4, sort_keys=True, ensure_ascii=False))
+ else:
+ with open(json_path, 'w') as fp:
+ json.dump(osd_metadata, fp, indent=4, sort_keys=True, ensure_ascii=False)
+ fp.write(os.linesep)
+ terminal.success(
+ 'OSD %s got scanned and metadata persisted to file: %s' % (
+ osd_id,
+ json_path
+ )
+ )
+ terminal.success(
+ 'To take over management of this scanned OSD, and disable ceph-disk and udev, run:'
+ )
+ terminal.success(' ceph-volume simple activate %s %s' % (osd_id, osd_fsid))
+
+ if not osd_metadata.get('data'):
+ msg = 'Unable to determine device mounted on %s' % args.osd_path
+ logger.warning(msg)
+ terminal.warning(msg)
+ terminal.warning('OSD will not be able to start without this information:')
+ terminal.warning(' "data": "/path/to/device",')
+
+ def main(self):
+ sub_command_help = dedent("""
+ Scan running OSDs, an OSD directory (or data device) for files and configurations
+ that will allow to take over the management of the OSD.
+
+ Scanned OSDs will get their configurations stored in
+ /etc/ceph/osd/<id>-<fsid>.json
+
+ For an OSD ID of 0 with fsid of ``a9d50838-e823-43d6-b01f-2f8d0a77afc2``
+ that could mean a scan command that looks like::
+
+ ceph-volume simple scan /var/lib/ceph/osd/ceph-0
+
+ Which would store the metadata in a JSON file at::
+
+ /etc/ceph/osd/0-a9d50838-e823-43d6-b01f-2f8d0a77afc2.json
+
+ To scan all running OSDs:
+
+ ceph-volume simple scan
+
+ To scan a specific running OSD:
+
+ ceph-volume simple scan /var/lib/ceph/osd/{cluster}-{osd id}
+
+ And to scan a device (mounted or unmounted) that has OSD data in it, for example /dev/sda1
+
+ ceph-volume simple scan /dev/sda1
+
+ A device or directory that does not belong to a ceph-disk created OSD will be ignored.
+ """)
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume simple scan',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=sub_command_help,
+ )
+
+ parser.add_argument(
+ '-f', '--force',
+ action='store_true',
+ help='If OSD has already been scanned, the JSON file will be overwritten'
+ )
+
+ parser.add_argument(
+ '--stdout',
+ action='store_true',
+ help='Do not save to a file, output metadata to stdout'
+ )
+
+ parser.add_argument(
+ 'osd_path',
+ metavar='OSD_PATH',
+ type=arg_validators.OSDPath(),
+ nargs='?',
+ default=None,
+ help='Path to an existing OSD directory or OSD data partition'
+ )
+
+ args = parser.parse_args(self.argv)
+ paths = []
+ if args.osd_path:
+ paths.append(args.osd_path)
+ else:
+ osd_ids = systemctl.get_running_osd_ids()
+ for osd_id in osd_ids:
+ paths.append("/var/lib/ceph/osd/{}-{}".format(
+ conf.cluster,
+ osd_id,
+ ))
+
+ # Capture some environment status, so that it can be reused all over
+ self.device_mounts = system.Mounts(devices=True).get_mounts()
+ self.path_mounts = system.Mounts(paths=True).get_mounts()
+
+ for path in paths:
+ args.osd_path = path
+ device = Device(args.osd_path)
+ if device.is_partition:
+ if device.ceph_disk.type != 'data':
+ label = device.ceph_disk.partlabel
+ msg = 'Device must be the ceph data partition, but PARTLABEL reported: "%s"' % label
+ raise RuntimeError(msg)
+
+ self.encryption_metadata = encryption.legacy_encrypted(args.osd_path)
+ self.is_encrypted = self.encryption_metadata['encrypted']
+
+ if self.encryption_metadata['device'] != "tmpfs":
+ device = Device(self.encryption_metadata['device'])
+ if not device.is_ceph_disk_member:
+ terminal.warning("Ignoring %s because it's not a ceph-disk created osd." % path)
+ else:
+ self.scan(args)
+ else:
+ terminal.warning("Ignoring %s because it's not a ceph-disk created osd." % path)
diff --git a/src/ceph-volume/ceph_volume/devices/simple/trigger.py b/src/ceph-volume/ceph_volume/devices/simple/trigger.py
new file mode 100644
index 000000000..c01d9ae2a
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/simple/trigger.py
@@ -0,0 +1,70 @@
+from __future__ import print_function
+import argparse
+from textwrap import dedent
+from ceph_volume.exceptions import SuffixParsingError
+from ceph_volume import decorators
+from .activate import Activate
+
+
+def parse_osd_id(string):
+ osd_id = string.split('-', 1)[0]
+ if not osd_id:
+ raise SuffixParsingError('OSD id', string)
+ if osd_id.isdigit():
+ return osd_id
+ raise SuffixParsingError('OSD id', string)
+
+
+def parse_osd_uuid(string):
+ osd_id = '%s-' % parse_osd_id(string)
+ # remove the id first
+ osd_uuid = string.split(osd_id, 1)[-1]
+ if not osd_uuid:
+ raise SuffixParsingError('OSD uuid', string)
+ return osd_uuid
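+
+# Illustrative parse of systemd data '0-8715BEB4-15C5-49DE-BA6F-401086EC7B41':
+#   parse_osd_id(...)   -> '0'
+#   parse_osd_uuid(...) -> '8715BEB4-15C5-49DE-BA6F-401086EC7B41'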
+
+
+class Trigger(object):
+
+ help = 'systemd helper to activate an OSD'
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ @decorators.needs_root
+ def main(self):
+ sub_command_help = dedent("""
+ ** DO NOT USE DIRECTLY **
+ This tool is meant to help the systemd unit that knows about OSDs.
+
+ Proxy OSD activation to ``ceph-volume simple activate`` by parsing the
+ input from systemd, detecting the UUID and ID associated with an OSD::
+
+ ceph-volume simple trigger {SYSTEMD-DATA}
+
+ The systemd "data" is expected to be in the format of::
+
+ {OSD ID}-{OSD UUID}
+
+ The devices associated with the OSD need to have been scanned previously,
+ so that all needed metadata can be used for starting the OSD process.
+ """)
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume simple trigger',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=sub_command_help,
+ )
+
+ parser.add_argument(
+ 'systemd_data',
+ metavar='SYSTEMD_DATA',
+ nargs='?',
+ help='Data from a systemd unit containing ID and UUID of the OSD, like 0-asdf-lkjh'
+ )
+ if len(self.argv) == 0:
+ print(sub_command_help)
+ return
+ args = parser.parse_args(self.argv)
+ osd_id = parse_osd_id(args.systemd_data)
+ osd_uuid = parse_osd_uuid(args.systemd_data)
+ Activate([osd_id, osd_uuid], from_trigger=True).main()
diff --git a/src/ceph-volume/ceph_volume/drive_group/__init__.py b/src/ceph-volume/ceph_volume/drive_group/__init__.py
new file mode 100644
index 000000000..14a0fd721
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/drive_group/__init__.py
@@ -0,0 +1 @@
+from .main import Deploy # noqa
diff --git a/src/ceph-volume/ceph_volume/drive_group/main.py b/src/ceph-volume/ceph_volume/drive_group/main.py
new file mode 100644
index 000000000..9e93bc759
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/drive_group/main.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+
+import argparse
+import json
+import logging
+import sys
+
+from ceph.deployment.drive_group import DriveGroupSpec
+from ceph.deployment.drive_selection.selector import DriveSelection
+from ceph.deployment.translate import to_ceph_volume
+from ceph.deployment.inventory import Device
+from ceph_volume.inventory import Inventory
+from ceph_volume.devices.lvm.batch import Batch
+
+logger = logging.getLogger(__name__)
+
+class Deploy(object):
+
+ help = '''
+ Deploy OSDs according to a drive groups specification.
+
+ The DriveGroup specification must be passed as JSON.
+ It can either be (preference in this order)
+ - in a file, path passed as a positional argument
+ - read from stdin, pass "-" as a positional argument
+ - a JSON string passed via the --spec argument
+
+ Either the path positional argument or --spec must be specified.
+ '''
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ def main(self):
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume drive-group',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=self.help,
+ )
+ parser.add_argument(
+ 'path',
+ nargs='?',
+ default=None,
+ help=('Path to file containing drive group spec or "-" to read from stdin'),
+ )
+ parser.add_argument(
+ '--spec',
+ default='',
+ nargs='?',
+ help=('drive-group json string')
+ )
+ parser.add_argument(
+ '--dry-run',
+ default=False,
+ action='store_true',
+ help=('dry run, only print the batch command that would be run'),
+ )
+ self.args = parser.parse_args(self.argv)
+ if self.args.path:
+ if self.args.path == "-":
+ commands = self.from_json(sys.stdin)
+ else:
+ with open(self.args.path, 'r') as f:
+ commands = self.from_json(f)
+ elif self.args.spec:
+ dg = json.loads(self.args.spec)
+ commands = self.get_dg_spec(dg)
+ else:
+ # either --spec or path arg must be specified
+ parser.print_help(sys.stderr)
+ sys.exit(1)
+ cmd = commands.run()
+ if not cmd:
+ logger.error('DriveGroup didn\'t produce any commands')
+ return
+ if self.args.dry_run:
+ logger.info('Returning ceph-volume command (--dry-run was passed): {}'.format(cmd))
+ print(cmd)
+ else:
+ logger.info('Running ceph-volume command: {}'.format(cmd))
+ batch_args = cmd.split(' ')[2:]
+ b = Batch(batch_args)
+ b.main()
+
+ def from_json(self, file_):
+ dg = {}
+ dg = json.load(file_)
+ return self.get_dg_spec(dg)
+
+ def get_dg_spec(self, dg):
+ dg_spec = DriveGroupSpec._from_json_impl(dg)
+ dg_spec.validate()
+ i = Inventory(['--filter-for-batch'])
+ i.main()
+ inventory = i.get_report()
+ devices = [Device.from_json(i) for i in inventory]
+ selection = DriveSelection(dg_spec, devices)
+ return to_ceph_volume(selection)
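+
+# Illustrative usage only (not part of this module); assuming spec.json
+# holds a valid DriveGroup spec, all three input styles are equivalent:
+#
+#   ceph-volume drive-group spec.json --dry-run
+#   cat spec.json | ceph-volume drive-group - --dry-run
+#   ceph-volume drive-group --spec "$(cat spec.json)" --dry-run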
diff --git a/src/ceph-volume/ceph_volume/exceptions.py b/src/ceph-volume/ceph_volume/exceptions.py
new file mode 100644
index 000000000..5c6429483
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/exceptions.py
@@ -0,0 +1,63 @@
+import os
+
+
+class ConfigurationError(Exception):
+
+ def __init__(self, cluster_name='ceph', path='/etc/ceph', abspath=None):
+ self.cluster_name = cluster_name
+ self.path = path
+ self.abspath = abspath or "%s.conf" % os.path.join(self.path, self.cluster_name)
+
+ def __str__(self):
+ return 'Unable to load expected Ceph config at: %s' % self.abspath
+
+
+class ConfigurationSectionError(Exception):
+
+ def __init__(self, section):
+ self.section = section
+
+ def __str__(self):
+ return 'Unable to find expected configuration section: "%s"' % self.section
+
+
+class ConfigurationKeyError(Exception):
+
+ def __init__(self, section, key):
+ self.section = section
+ self.key = key
+
+ def __str__(self):
+ return 'Unable to find expected configuration key: "%s" from section "%s"' % (
+ self.key,
+ self.section
+ )
+
+
+class SuffixParsingError(Exception):
+
+ def __init__(self, suffix, part=None):
+ self.suffix = suffix
+ self.part = part
+
+ def __str__(self):
+ return 'Unable to parse the %s from systemd suffix: %s' % (self.part, self.suffix)
+
+
+class SuperUserError(Exception):
+
+ def __str__(self):
+ return 'This command needs to be executed with sudo or as root'
+
+
+class SizeAllocationError(Exception):
+
+ def __init__(self, requested, available):
+ self.requested = requested
+ self.available = available
+
+ def __str__(self):
+ msg = 'Unable to allocate size (%s), not enough free space (%s)' % (
+ self.requested, self.available
+ )
+ return msg
diff --git a/src/ceph-volume/ceph_volume/inventory/__init__.py b/src/ceph-volume/ceph_volume/inventory/__init__.py
new file mode 100644
index 000000000..c9e0c0ccc
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/inventory/__init__.py
@@ -0,0 +1 @@
+from .main import Inventory # noqa
diff --git a/src/ceph-volume/ceph_volume/inventory/main.py b/src/ceph-volume/ceph_volume/inventory/main.py
new file mode 100644
index 000000000..aa70e92f1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/inventory/main.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+
+import argparse
+import json
+
+from ceph_volume.util.device import Devices, Device
+
+
+class Inventory(object):
+
+ help = "Get this nodes available disk inventory"
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ def main(self):
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume inventory',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=self.help,
+ )
+ parser.add_argument(
+ 'path',
+ nargs='?',
+ default=None,
+ help=('Report on specific disk'),
+ )
+ parser.add_argument(
+ '--format',
+ choices=['plain', 'json', 'json-pretty'],
+ default='plain',
+ help='Output format',
+ )
+ parser.add_argument(
+ '--filter-for-batch',
+ action='store_true',
+ help=('Filter devices unsuitable to pass to an OSD service spec, '
+ 'no effect when <path> is passed'),
+ default=False,
+ )
+ parser.add_argument(
+ '--with-lsm',
+ action='store_true',
+ help=('Attempt to retrieve additional health and metadata through '
+ 'libstoragemgmt'),
+ default=False,
+ )
+ self.args = parser.parse_args(self.argv)
+ if self.args.path:
+ self.format_report(Device(self.args.path, with_lsm=self.args.with_lsm))
+ else:
+ self.format_report(Devices(filter_for_batch=self.args.filter_for_batch,
+ with_lsm=self.args.with_lsm))
+
+ def get_report(self):
+ if self.args.path:
+ return Device(self.args.path, with_lsm=self.args.with_lsm).json_report()
+ else:
+ return Devices(filter_for_batch=self.args.filter_for_batch, with_lsm=self.args.with_lsm).json_report()
+
+ def format_report(self, inventory):
+ if self.args.format == 'json':
+ print(json.dumps(inventory.json_report()))
+ elif self.args.format == 'json-pretty':
+ print(json.dumps(inventory.json_report(), indent=4, sort_keys=True))
+ else:
+ print(inventory.pretty_report())
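+
+# A minimal programmatic sketch (illustrative, not part of the source),
+# mirroring the CLI flow above:
+#
+#   inv = Inventory(['--format', 'json-pretty'])
+#   inv.main()                 # parses args and prints the report
+#   report = inv.get_report()  # same data as Python objects; this is what
+#                              # the drive-group subcommand consumes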
diff --git a/src/ceph-volume/ceph_volume/log.py b/src/ceph-volume/ceph_volume/log.py
new file mode 100644
index 000000000..b283bedbb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/log.py
@@ -0,0 +1,49 @@
+import logging
+import os
+from ceph_volume import terminal
+from ceph_volume import conf
+
+BASE_FORMAT = "[%(name)s][%(levelname)-6s] %(message)s"
+FILE_FORMAT = "[%(asctime)s]" + BASE_FORMAT
+
+
+def setup(name='ceph-volume.log', log_path=None, log_level=None):
+ log_path = log_path or conf.log_path
+ # if a non-root user calls help or another no-sudo-required command the
+ # logger will fail to write to /var/log/ceph/ so this /tmp/ path is used
+ # as a fallback
+ tmp_log_file = os.path.join('/tmp/', name)
+ root_logger = logging.getLogger()
+ # The default path is where all ceph log files are, and will get rotated by
+ # Ceph's logrotate rules.
+ log_level = log_level or "DEBUG"
+ log_level = getattr(logging, log_level.upper())
+ root_logger.setLevel(log_level)
+
+ try:
+ fh = logging.FileHandler(log_path)
+ except (OSError, IOError) as err:
+ terminal.warning("Falling back to /tmp/ for logging. Can't use %s" % log_path)
+ terminal.warning(str(err))
+ conf.log_path = tmp_log_file
+ fh = logging.FileHandler(tmp_log_file)
+
+ fh.setLevel(log_level)
+ fh.setFormatter(logging.Formatter(FILE_FORMAT))
+
+ root_logger.addHandler(fh)
+
+
+def setup_console():
+ # TODO: At some point ceph-volume should stop using the custom logger
+ # interface that exists in terminal.py and use the logging module to
+ # produce output for the terminal
+ # Console Logger
+ sh = logging.StreamHandler()
+ sh.setFormatter(logging.Formatter('[terminal] %(message)s'))
+ sh.setLevel(logging.DEBUG)
+
+ terminal_logger = logging.getLogger('terminal')
+
+ # allow all levels at root_logger, handlers control individual levels
+ terminal_logger.addHandler(sh)
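+
+# Typical bootstrap sequence (illustrative, as done by the ceph-volume entry
+# point): set conf.log_path first, then wire up both handlers:
+#
+#   conf.log_path = '/var/log/ceph/ceph-volume.log'
+#   setup(log_level='debug')  # file handler; falls back to /tmp/ if unwritable
+#   setup_console()           # stream handler for the 'terminal' logger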
diff --git a/src/ceph-volume/ceph_volume/main.py b/src/ceph-volume/ceph_volume/main.py
new file mode 100644
index 000000000..7868665ce
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/main.py
@@ -0,0 +1,183 @@
+from __future__ import print_function
+import argparse
+import os
+import pkg_resources
+import sys
+import logging
+
+from ceph_volume.decorators import catches
+from ceph_volume import log, devices, configuration, conf, exceptions, terminal, inventory, drive_group, activate
+
+
+class Volume(object):
+ _help = """
+ceph-volume: Deploy Ceph OSDs using different device technologies like lvm or
+physical disks.
+
+Log Path: {log_path}
+Ceph Conf: {ceph_path}
+
+{sub_help}
+{plugins}
+{environ_vars}
+{warning}
+ """
+
+ def __init__(self, argv=None, parse=True):
+ self.mapper = {
+ 'lvm': devices.lvm.LVM,
+ 'simple': devices.simple.Simple,
+ 'raw': devices.raw.Raw,
+ 'inventory': inventory.Inventory,
+ 'activate': activate.Activate,
+ 'drive-group': drive_group.Deploy,
+ }
+ self.plugin_help = "No plugins found/loaded"
+ if argv is None:
+ self.argv = sys.argv
+ else:
+ self.argv = argv
+ if parse:
+ self.main(self.argv)
+
+ def help(self, warning=False):
+ warning = 'See "ceph-volume --help" for full list of options.' if warning else ''
+ return self._help.format(
+ warning=warning,
+ log_path=conf.log_path,
+ ceph_path=self.stat_ceph_conf(),
+ plugins=self.plugin_help,
+ sub_help=terminal.subhelp(self.mapper),
+ environ_vars=self.get_environ_vars()
+ )
+
+ def get_environ_vars(self):
+ environ_vars = []
+ for key, value in os.environ.items():
+ if key.startswith('CEPH_'):
+ environ_vars.append("%s=%s" % (key, value))
+ if not environ_vars:
+ return ''
+ else:
+ environ_vars.insert(0, '\nEnviron Variables:')
+ return '\n'.join(environ_vars)
+
+ def enable_plugins(self):
+ """
+ Load all plugins available, add them to the mapper and extend the help
+ string with the information from each one
+ """
+ plugins = _load_library_extensions()
+ for plugin in plugins:
+ self.mapper[plugin._ceph_volume_name_] = plugin
+ self.plugin_help = '\n'.join(['%-19s %s\n' % (
+ plugin.name, getattr(plugin, 'help_menu', ''))
+ for plugin in plugins])
+ if self.plugin_help:
+ self.plugin_help = '\nPlugins:\n' + self.plugin_help
+
+ def load_log_path(self):
+ conf.log_path = os.getenv('CEPH_VOLUME_LOG_PATH', '/var/log/ceph')
+
+ def stat_ceph_conf(self):
+ try:
+ configuration.load(conf.path)
+ return terminal.green(conf.path)
+ except exceptions.ConfigurationError as error:
+ return terminal.red(error)
+
+ def _get_split_args(self):
+ subcommands = self.mapper.keys()
+ slice_on_index = len(self.argv) + 1
+ pruned_args = self.argv[1:]
+ for count, arg in enumerate(pruned_args):
+ if arg in subcommands:
+ slice_on_index = count
+ break
+ return pruned_args[:slice_on_index], pruned_args[slice_on_index:]
+
+ @catches()
+ def main(self, argv):
+ # these need to be available for the help, which gets parsed super
+ # early
+ configuration.load_ceph_conf_path()
+ self.load_log_path()
+ self.enable_plugins()
+ main_args, subcommand_args = self._get_split_args()
+ # no flags were passed in, return the help menu instead of waiting for
+ # argparse which will end up complaining that there are no args
+ if len(argv) <= 1:
+ print(self.help(warning=True))
+ raise SystemExit(0)
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=self.help(),
+ )
+ parser.add_argument(
+ '--cluster',
+ default='ceph',
+ help='Cluster name (defaults to "ceph")',
+ )
+ parser.add_argument(
+ '--log-level',
+ default='debug',
+ choices=['debug', 'info', 'warning', 'error', 'critical'],
+ help='Change the file log level (defaults to debug)',
+ )
+ parser.add_argument(
+ '--log-path',
+ default='/var/log/ceph/',
+ help='Change the log path (defaults to /var/log/ceph)',
+ )
+ args = parser.parse_args(main_args)
+ conf.log_path = args.log_path
+ if os.path.isdir(conf.log_path):
+ conf.log_path = os.path.join(args.log_path, 'ceph-volume.log')
+ log.setup(log_level=args.log_level)
+ log.setup_console()
+ logger = logging.getLogger(__name__)
+ logger.info("Running command: ceph-volume %s %s", " ".join(main_args), " ".join(subcommand_args))
+ # set all variables from args and load everything needed according to
+ # them
+ configuration.load_ceph_conf_path(cluster_name=args.cluster)
+ try:
+ conf.ceph = configuration.load(conf.path)
+ except exceptions.ConfigurationError as error:
+ # we warn only here, because it is possible that the configuration
+ # file is not needed, or that it will be loaded by some other means
+ # (like reading from lvm tags)
+ logger.warning('ignoring inability to load ceph.conf', exc_info=1)
+ terminal.warning(str(error))
+ # dispatch to sub-commands
+ terminal.dispatch(self.mapper, subcommand_args)
+
+
+def _load_library_extensions():
+ """
+ Locate all setuptools entry points by the name 'ceph_volume_handlers'
+ and initialize them.
+ Any third-party library may register an entry point by adding the
+ following to their setup.py::
+
+ entry_points = {
+ 'ceph_volume_handlers': [
+ 'plugin_name = mylib.mymodule:Handler_Class',
+ ],
+ },
+
+ `plugin_name` will be used to load it as a sub command.
+ """
+ logger = logging.getLogger('ceph_volume.plugins')
+ group = 'ceph_volume_handlers'
+ entry_points = pkg_resources.iter_entry_points(group=group)
+ plugins = []
+ for ep in entry_points:
+ try:
+ logger.debug('loading %s' % ep.name)
+ plugin = ep.load()
+ plugin._ceph_volume_name_ = ep.name
+ plugins.append(plugin)
+ except Exception as error:
+ logger.exception("Error initializing plugin %s: %s" % (ep, error))
+ return plugins
diff --git a/src/ceph-volume/ceph_volume/process.py b/src/ceph-volume/ceph_volume/process.py
new file mode 100644
index 000000000..10ee0318e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/process.py
@@ -0,0 +1,229 @@
+from fcntl import fcntl, F_GETFL, F_SETFL
+from os import O_NONBLOCK, read, path
+import subprocess
+from select import select
+from ceph_volume import terminal
+from ceph_volume.util import as_bytes
+from ceph_volume.util.system import which, run_host_cmd, host_rootfs
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+def log_output(descriptor, message, terminal_logging, logfile_logging):
+ """
+ log output to both the logger and the terminal if terminal_logging is
+ enabled
+ """
+ if not message:
+ return
+ message = message.strip()
+ line = '%s %s' % (descriptor, message)
+ if terminal_logging:
+ getattr(terminal, descriptor)(message)
+ if logfile_logging:
+ logger.info(line)
+
+
+def log_descriptors(reads, process, terminal_logging):
+ """
+ Helper to send output to the terminal while polling the subprocess
+ """
+ # these fcntl calls set O_NONBLOCK on the file descriptors coming from the
+ # subprocess so that logging does not block. Without them, a prompt in the
+ # subprocess output would hang and nothing would get printed. Note that
+ # these are set only when logging the subprocess, not globally.
+ stdout_flags = fcntl(process.stdout, F_GETFL) # get current p.stdout flags
+ stderr_flags = fcntl(process.stderr, F_GETFL) # get current p.stderr flags
+ fcntl(process.stdout, F_SETFL, stdout_flags | O_NONBLOCK)
+ fcntl(process.stderr, F_SETFL, stderr_flags | O_NONBLOCK)
+ descriptor_names = {
+ process.stdout.fileno(): 'stdout',
+ process.stderr.fileno(): 'stderr'
+ }
+ for descriptor in reads:
+ descriptor_name = descriptor_names[descriptor]
+ try:
+ message = read(descriptor, 1024)
+ if not isinstance(message, str):
+ message = message.decode('utf-8')
+ log_output(descriptor_name, message, terminal_logging, True)
+ except (IOError, OSError):
+ # nothing else to log
+ pass
+
+
+def obfuscate(command_, on=None):
+ """
+ Certain commands that are useful to log might contain information that
+ should be replaced by '*' like when creating OSDs and the keyrings are
+ being passed, which should not be logged.
+
+ :param on: A string (will match a flag) or an integer (will match an index)
+
+ If matching on a flag (when ``on`` is a string) it will obfuscate the
+ value for that flag. That is, for a command like ['ls', '-l', '/'],
+ calling `obfuscate(command, on='-l')` will obfuscate '/', which is the
+ value for `-l`.
+
+ `on` allows either a string or an integer because it is easier for
+ ``run`` and ``call`` to just pop a single value to obfuscate (vs.
+ handling an index and a flag separately)
+ """
+ command = command_[:]
+ msg = "Running command: %s" % ' '.join(command)
+ if on in [None, False]:
+ return msg
+
+ if isinstance(on, int):
+ index = on
+
+ else:
+ try:
+ index = command.index(on) + 1
+ except ValueError:
+ # if the flag just doesn't exist then it doesn't matter, just return
+ # the base msg
+ return msg
+
+ try:
+ command[index] = '*' * len(command[index])
+ except IndexError: # the index was completely out of range
+ return msg
+
+ return "Running command: %s" % ' '.join(command)
+
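+# Behavior sketch (illustrative, derived from the docstring above):
+#
+#   obfuscate(['ls', '-l', '/'], on='-l')
+#   # -> "Running command: ls -l *"
+#   obfuscate(['ceph', '--keyring', 'SECRET'], on=2)
+#   # -> "Running command: ceph --keyring ******"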
+
+def run(command, run_on_host=False, **kw):
+ """
+ A real-time-logging implementation of a subprocess.Popen call where
+ a command is simply executed and no other handling is done.
+
+ :param command: The command to pass in to subprocess.Popen as a list
+ :param stop_on_error: If a nonzero exit status is returned, it raises a ``RuntimeError``
+ :param fail_msg: If a nonzero exit status is returned this message will be included in the log
+ """
+ executable = which(command.pop(0), run_on_host)
+ command.insert(0, executable)
+ if run_on_host and path.isdir(host_rootfs):
+ command = run_host_cmd + command
+ stop_on_error = kw.pop('stop_on_error', True)
+ command_msg = obfuscate(command, kw.pop('obfuscate', None))
+ fail_msg = kw.pop('fail_msg', None)
+ logger.info(command_msg)
+ terminal.write(command_msg)
+ terminal_logging = kw.pop('terminal_logging', True)
+
+ process = subprocess.Popen(
+ command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ close_fds=True,
+ **kw
+ )
+
+ while True:
+ reads, _, _ = select(
+ [process.stdout.fileno(), process.stderr.fileno()],
+ [], []
+ )
+ log_descriptors(reads, process, terminal_logging)
+
+ if process.poll() is not None:
+ # ensure we do not have anything pending in stdout or stderr
+ log_descriptors(reads, process, terminal_logging)
+
+ break
+
+ returncode = process.wait()
+ if returncode != 0:
+ msg = "command returned non-zero exit status: %s" % returncode
+ if fail_msg:
+ logger.warning(fail_msg)
+ if terminal_logging:
+ terminal.warning(fail_msg)
+ if stop_on_error:
+ raise RuntimeError(msg)
+ else:
+ if terminal_logging:
+ terminal.warning(msg)
+ logger.warning(msg)
+
+
+def call(command, run_on_host=False, **kw):
+ """
+ Similar to ``subprocess.Popen`` with the following changes:
+
+ * returns stdout, stderr, and exit code (vs. just the exit code)
+ * logs the full contents of stderr and stdout (separately) to the file log
+
+ By default, no terminal output is given, not even the command that is going
+ to run.
+
+ Useful when system calls are needed to act on output, and that same output
+ shouldn't get displayed on the terminal.
+
+ Optionally, the command can be displayed on the terminal and the log file,
+ and log file output can be turned off. This is useful to prevent sensitive
+ output going to stderr/stdout and being captured on a log file.
+
+ :param terminal_verbose: Log command output to terminal, defaults to False, and
+ it is forcefully set to True if a return code is non-zero
+ :param logfile_verbose: Log stderr/stdout output to log file. Defaults to True
+ :param verbose_on_failure: On a non-zero exit status, it will forcefully set logging ON for
+ the terminal. Defaults to True
+ """
+ executable = which(command.pop(0), run_on_host)
+ command.insert(0, executable)
+ if run_on_host and path.isdir(host_rootfs):
+ command = run_host_cmd + command
+ terminal_verbose = kw.pop('terminal_verbose', False)
+ logfile_verbose = kw.pop('logfile_verbose', True)
+ verbose_on_failure = kw.pop('verbose_on_failure', True)
+ show_command = kw.pop('show_command', False)
+ command_msg = "Running command: %s" % ' '.join(command)
+ stdin = kw.pop('stdin', None)
+ logger.info(command_msg)
+ if show_command:
+ terminal.write(command_msg)
+
+ process = subprocess.Popen(
+ command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ close_fds=True,
+ **kw
+ )
+
+ if stdin:
+ stdout_stream, stderr_stream = process.communicate(as_bytes(stdin))
+ else:
+ stdout_stream = process.stdout.read()
+ stderr_stream = process.stderr.read()
+ returncode = process.wait()
+ if not isinstance(stdout_stream, str):
+ stdout_stream = stdout_stream.decode('utf-8')
+ if not isinstance(stderr_stream, str):
+ stderr_stream = stderr_stream.decode('utf-8')
+ stdout = stdout_stream.splitlines()
+ stderr = stderr_stream.splitlines()
+
+ if returncode != 0:
+ # set to true so that we can log the stderr/stdout that callers would
+ # do anyway as long as verbose_on_failure is set (defaults to True)
+ if verbose_on_failure:
+ terminal_verbose = True
+ # logfiles aren't disruptive visually, unlike the terminal, so this
+ # should always be on when there is a failure
+ logfile_verbose = True
+
+ # the following can get a messed up order in the log if the system call
+ # returns output with both stderr and stdout intermingled. This separates
+ # that.
+ for line in stdout:
+ log_output('stdout', line, terminal_verbose, logfile_verbose)
+ for line in stderr:
+ log_output('stderr', line, terminal_verbose, logfile_verbose)
+ return stdout, stderr, returncode
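+
+# Illustrative contrast between the two helpers (not part of the source):
+#
+#   process.run(['vgs'])                  # streams output to log/terminal and
+#                                         # raises RuntimeError on non-zero exit
+#   out, err, rc = process.call(['vgs'])  # quiet by default; returns the split
+#                                         # stdout/stderr lines and exit code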
diff --git a/src/ceph-volume/ceph_volume/systemd/__init__.py b/src/ceph-volume/ceph_volume/systemd/__init__.py
new file mode 100644
index 000000000..493b8814b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/systemd/__init__.py
@@ -0,0 +1 @@
+from .main import main # noqa
diff --git a/src/ceph-volume/ceph_volume/systemd/main.py b/src/ceph-volume/ceph_volume/systemd/main.py
new file mode 100644
index 000000000..2cb1d1b80
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/systemd/main.py
@@ -0,0 +1,108 @@
+"""
+This file is used only by systemd units that are passing their instance suffix
+as arguments to this script so that it can parse the suffix into arguments that
+``ceph-volume <sub command>`` can consume
+"""
+import os
+import sys
+import time
+import logging
+from ceph_volume import log, process
+from ceph_volume.exceptions import SuffixParsingError
+
+
+def parse_subcommand(string):
+ subcommand = string.split('-', 1)[0]
+ if not subcommand:
+ raise SuffixParsingError('subcommand', string)
+ return subcommand
+
+
+def parse_extra_data(string):
+ # get the subcommand to split on that
+ sub_command = parse_subcommand(string)
+
+ # the split will leave data with a dash, so remove that
+ data = string.split(sub_command)[-1]
+ if not data:
+ raise SuffixParsingError('data', string)
+ return data.lstrip('-')
+
+
+def parse_osd_id(string):
+ osd_id = string.split('-', 1)[0]
+ if not osd_id:
+ raise SuffixParsingError('OSD id', string)
+ if osd_id.isdigit():
+ return osd_id
+ raise SuffixParsingError('OSD id', string)
+
+
+def parse_osd_uuid(string):
+ osd_id = '%s-' % parse_osd_id(string)
+ osd_subcommand = '-%s' % parse_subcommand(string)
+ # remove the id first
+ trimmed_suffix = string.split(osd_id)[-1]
+ # now remove the sub command
+ osd_uuid = trimmed_suffix.split(osd_subcommand)[0]
+ if not osd_uuid:
+ raise SuffixParsingError('OSD uuid', string)
+ return osd_uuid
+
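+# A walk-through (illustrative) of how a suffix such as
+# 'lvm-0-8715BEB4-15C5-49DE-BA6F-401086EC7B41' is decomposed:
+#
+#   parse_subcommand(suffix)   # -> 'lvm'
+#   data = parse_extra_data(suffix)
+#                              # -> '0-8715BEB4-15C5-49DE-BA6F-401086EC7B41'
+#   parse_osd_id(data)         # -> '0'
+#   parse_osd_uuid(data)       # -> '8715BEB4-15C5-49DE-BA6F-401086EC7B41'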
+
+def main(args=None):
+ """
+ Main entry point for the ``ceph-volume-systemd`` executable. ``args`` are
+ optional for easier testing of arguments.
+
+ Expected input is similar to::
+
+ ['/path/to/ceph-volume-systemd', '<type>-<extra metadata>']
+
+ For example::
+
+ [
+ '/usr/bin/ceph-volume-systemd',
+ 'lvm-0-8715BEB4-15C5-49DE-BA6F-401086EC7B41'
+ ]
+
+ The first part of the argument is the only interesting bit, which contains
+ the metadata needed to proxy the call to ``ceph-volume`` itself.
+
+ Reusing the example, the proxy call to ``ceph-volume`` would look like::
+
+ ceph-volume lvm trigger 0-8715BEB4-15C5-49DE-BA6F-401086EC7B41
+
+ That means that ``lvm`` is used as the subcommand and it is **expected**
+ that a ``trigger`` sub-command will be present to make sense of the extra
+ piece of the string.
+
+ """
+ log.setup(name='ceph-volume-systemd.log', log_path='/var/log/ceph/ceph-volume-systemd.log')
+ logger = logging.getLogger('systemd')
+
+ args = args if args is not None else sys.argv
+ try:
+ suffix = args[-1]
+ except IndexError:
+ raise RuntimeError('no arguments supplied')
+ sub_command = parse_subcommand(suffix)
+ extra_data = parse_extra_data(suffix)
+ logger.info('raw systemd input received: %s', suffix)
+ logger.info('parsed sub-command: %s, extra data: %s', sub_command, extra_data)
+ command = ['ceph-volume', sub_command, 'trigger', extra_data]
+
+ tries = int(os.environ.get('CEPH_VOLUME_SYSTEMD_TRIES', 30))
+ interval = int(os.environ.get('CEPH_VOLUME_SYSTEMD_INTERVAL', 5))
+ while tries > 0:
+ try:
+ # don't log any output to the terminal, just rely on stderr/stdout
+ # going to logging
+ process.run(command, terminal_logging=False)
+ logger.info('successfully triggered activation for: %s', extra_data)
+ break
+ except RuntimeError as error:
+ logger.warning(error)
+ logger.warning('failed activating OSD, retries left: %s', tries)
+ tries -= 1
+ time.sleep(interval)
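+
+# The retry budget is tunable via the environment (illustrative):
+#
+#   CEPH_VOLUME_SYSTEMD_TRIES=10 CEPH_VOLUME_SYSTEMD_INTERVAL=2 \
+#       ceph-volume-systemd lvm-0-8715BEB4-15C5-49DE-BA6F-401086EC7B41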
diff --git a/src/ceph-volume/ceph_volume/systemd/systemctl.py b/src/ceph-volume/ceph_volume/systemd/systemctl.py
new file mode 100644
index 000000000..778ad1479
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/systemd/systemctl.py
@@ -0,0 +1,101 @@
+"""
+Utilities to control systemd units
+"""
+import logging
+
+from ceph_volume import process
+
+logger = logging.getLogger(__name__)
+
+def start(unit):
+ process.run(['systemctl', 'start', unit])
+
+
+def stop(unit):
+ process.run(['systemctl', 'stop', unit])
+
+
+def enable(unit, runtime=False):
+ if runtime:
+ process.run(['systemctl', 'enable', '--runtime', unit])
+ else:
+ process.run(['systemctl', 'enable', unit])
+
+
+def disable(unit):
+ process.run(['systemctl', 'disable', unit])
+
+
+def mask(unit):
+ process.run(['systemctl', 'mask', unit])
+
+
+def is_active(unit):
+ out, err, rc = process.call(
+ ['systemctl', 'is-active', unit],
+ verbose_on_failure=False
+ )
+ return rc == 0
+
+def get_running_osd_ids():
+ out, err, rc = process.call([
+ 'systemctl',
+ 'show',
+ '--no-pager',
+ '--property=Id',
+ '--state=running',
+ 'ceph-osd@*',
+ ])
+ osd_ids = []
+ if rc == 0:
+ for line in out:
+ if line:
+ # example line looks like: Id=ceph-osd@1.service
+ try:
+ osd_id = line.split("@")[1].split(".service")[0]
+ osd_ids.append(osd_id)
+ except (IndexError, TypeError):
+ logger.warning("Failed to parse output from systemctl: %s", line)
+ return osd_ids
+
+def start_osd(id_):
+ return start(osd_unit % id_)
+
+
+def stop_osd(id_):
+ return stop(osd_unit % id_)
+
+
+def enable_osd(id_):
+ return enable(osd_unit % id_, runtime=True)
+
+
+def disable_osd(id_):
+ return disable(osd_unit % id_)
+
+
+def osd_is_active(id_):
+ return is_active(osd_unit % id_)
+
+
+def enable_volume(id_, fsid, device_type='lvm'):
+ return enable(volume_unit % (device_type, id_, fsid))
+
+
+def mask_ceph_disk():
+ # systemctl allows using a glob like '*' for masking, but there was a bug
+ # in that it wouldn't allow this for service templates. This means that
+ # masking ceph-disk@* will not work, so we must link the service directly.
+ # /etc/systemd takes precedence regardless of the location of the unit
+ process.run(
+ ['ln', '-sf', '/dev/null', '/etc/systemd/system/ceph-disk@.service']
+ )
+
+
+#
+# templates
+#
+
+osd_unit = "ceph-osd@%s"
+ceph_disk_unit = "ceph-disk@%s"
+volume_unit = "ceph-volume@%s-%s-%s"
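+
+# Template expansion examples (illustrative):
+#
+#   osd_unit % '1'                     # -> 'ceph-osd@1'
+#   volume_unit % ('lvm', '1', 'abc')  # -> 'ceph-volume@lvm-1-abc'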
diff --git a/src/ceph-volume/ceph_volume/terminal.py b/src/ceph-volume/ceph_volume/terminal.py
new file mode 100644
index 000000000..a34946f92
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/terminal.py
@@ -0,0 +1,214 @@
+import logging
+import sys
+
+
+terminal_logger = logging.getLogger('terminal')
+
+
+class colorize(str):
+ """
+ Pretty simple to use::
+
+ colorize.make('foo').bold
+ colorize.make('foo').green
+ colorize.make('foo').yellow
+ colorize.make('foo').red
+ colorize.make('foo').blue
+
+ Otherwise you could go the long way (for example if you are
+ testing this class)::
+
+ string = colorize('foo')
+ string._set_attributes()
+ string.red
+
+ """
+
+ def __init__(self, string):
+ self.appends = ''
+ self.prepends = ''
+ self.isatty = sys.__stderr__.isatty()
+
+ def _set_attributes(self):
+ """
+ Sets the attributes here because the str class does not
+ allow passing in anything other than a string to the constructor,
+ so we can't really mess with the other attributes.
+ """
+ for k, v in self.__colors__.items():
+ setattr(self, k, self.make_color(v))
+
+ def make_color(self, color):
+ if not self.isatty:
+ return self
+ return color + self + '\033[0m' + self.appends
+
+ @property
+ def __colors__(self):
+ return dict(
+ blue='\033[34m',
+ green='\033[92m',
+ yellow='\033[33m',
+ red='\033[91m',
+ bold='\033[1m',
+ ends='\033[0m'
+ )
+
+ @classmethod
+ def make(cls, string):
+ """
+ A helper method to return itself and work around the fact that
+ the str object doesn't allow extra arguments passed in to the
+ constructor
+ """
+ obj = cls(string)
+ obj._set_attributes()
+ return obj
+
+#
+# Common string manipulations
+#
+yellow = lambda x: colorize.make(x).yellow # noqa
+blue = lambda x: colorize.make(x).blue # noqa
+green = lambda x: colorize.make(x).green # noqa
+red = lambda x: colorize.make(x).red # noqa
+bold = lambda x: colorize.make(x).bold # noqa
+red_arrow = red('--> ')
+blue_arrow = blue('--> ')
+green_arrow = green('--> ')
+yellow_arrow = yellow('--> ')
+
+
+class _Write(object):
+
+ def __init__(self, _writer=None, prefix='', suffix='', flush=False):
+ # we can't set sys.stderr as the default for _writer. otherwise
+ # pytest's capturing gets confused
+ self._writer = _writer or sys.stderr
+ self.suffix = suffix
+ self.prefix = prefix
+ self.flush = flush
+
+ def bold(self, string):
+ self.write(bold(string))
+
+ def raw(self, string):
+ if not string.endswith('\n'):
+ string = '%s\n' % string
+ self.write(string)
+
+ def write(self, line):
+ entry = self.prefix + line + self.suffix
+
+ try:
+ self._writer.write(entry)
+ if self.flush:
+ self._writer.flush()
+ except (UnicodeDecodeError, UnicodeEncodeError):
+ try:
+ terminal_logger.info(entry.strip('\n'))
+ except (AttributeError, TypeError):
+ terminal_logger.info(entry)
+
+
+def stdout(msg):
+ return _Write(prefix=blue(' stdout: ')).raw(msg)
+
+
+def stderr(msg):
+ return _Write(prefix=yellow(' stderr: ')).raw(msg)
+
+
+def write(msg):
+ return _Write().raw(msg)
+
+
+def error(msg):
+ return _Write(prefix=red_arrow).raw(msg)
+
+
+def info(msg):
+ return _Write(prefix=blue_arrow).raw(msg)
+
+
+def debug(msg):
+ return _Write(prefix=blue_arrow).raw(msg)
+
+
+def warning(msg):
+ return _Write(prefix=yellow_arrow).raw(msg)
+
+
+def success(msg):
+ return _Write(prefix=green_arrow).raw(msg)
+
+
+class MultiLogger(object):
+ """
+ Proxy class to be able to report on both logger instances and terminal
+ messages avoiding the issue of having to call them both separately
+
+ Initialize it in the same way a logger object::
+
+ logger = terminal.MultiLogger(__name__)
+ """
+
+ def __init__(self, name):
+ self.logger = logging.getLogger(name)
+
+ def _make_record(self, msg, *args):
+ if args:
+ try:
+ return msg % args
+ except TypeError:
+ self.logger.exception('unable to produce log record: %s' % msg)
+ return msg
+
+ def warning(self, msg, *args):
+ record = self._make_record(msg, *args)
+ warning(record)
+ self.logger.warning(record)
+
+ def debug(self, msg, *args):
+ record = self._make_record(msg, *args)
+ debug(record)
+ self.logger.debug(record)
+
+ def info(self, msg, *args):
+ record = self._make_record(msg, *args)
+ info(record)
+ self.logger.info(record)
+
+ def error(self, msg, *args):
+ record = self._make_record(msg, *args)
+ error(record)
+ self.logger.error(record)
+
+
+def dispatch(mapper, argv=None):
+ argv = argv or sys.argv
+ for count, arg in enumerate(argv, 1):
+ if arg in mapper.keys():
+ instance = mapper.get(arg)(argv[count:])
+ if hasattr(instance, 'main'):
+ instance.main()
+ raise SystemExit(0)
+
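+# Illustrative: with mapper = {'lvm': LVM}, ``dispatch(mapper, ['lvm', 'list'])``
+# instantiates LVM(['list']), calls its main(), and raises SystemExit(0).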
+
+def subhelp(mapper):
+ """
+ Look at every value of every key in the mapper and output any
+ ``class.help`` found, returning it as a string that will be sent to
+ stderr.
+ """
+ help_text_lines = []
+ for key, value in mapper.items():
+ try:
+ help_text = value.help
+ except AttributeError:
+ continue
+ help_text_lines.append("%-24s %s" % (key, help_text))
+
+ if help_text_lines:
+ return "Available subcommands:\n\n%s" % '\n'.join(help_text_lines)
+ return ''
diff --git a/src/ceph-volume/ceph_volume/tests/__init__.py b/src/ceph-volume/ceph_volume/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/__init__.py
diff --git a/src/ceph-volume/ceph_volume/tests/api/test_lvm.py b/src/ceph-volume/ceph_volume/tests/api/test_lvm.py
new file mode 100644
index 000000000..0bfc34075
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/api/test_lvm.py
@@ -0,0 +1,885 @@
+import os
+import pytest
+from mock.mock import patch
+from ceph_volume import process, exceptions
+from ceph_volume.api import lvm as api
+
+
+class TestParseTags(object):
+
+ def test_no_tags_means_empty_dict(self):
+ result = api.parse_tags('')
+ assert result == {}
+
+ def test_single_tag_gets_parsed(self):
+ result = api.parse_tags('ceph.osd_something=1')
+ assert result == {'ceph.osd_something': '1'}
+
+ def test_non_ceph_tags_are_skipped(self):
+ result = api.parse_tags('foo')
+ assert result == {}
+
+ def test_mixed_non_ceph_tags(self):
+ result = api.parse_tags('foo,ceph.bar=1')
+ assert result == {'ceph.bar': '1'}
+
+ def test_multiple_csv_expands_in_dict(self):
+ result = api.parse_tags('ceph.osd_something=1,ceph.foo=2,ceph.fsid=0000')
+ # assert them piecemeal to avoid the unordered dict nature
+ assert result['ceph.osd_something'] == '1'
+ assert result['ceph.foo'] == '2'
+ assert result['ceph.fsid'] == '0000'
+
+
+class TestVolume(object):
+
+ def test_is_ceph_device(self):
+ lv_tags = "ceph.type=data,ceph.osd_id=0"
+ osd = api.Volume(lv_name='osd/volume', lv_tags=lv_tags)
+ assert api.is_ceph_device(osd)
+
+ @pytest.mark.parametrize('dev',[
+ '/dev/sdb',
+ api.VolumeGroup(vg_name='foo'),
+ api.Volume(lv_name='vg/no_osd', lv_tags='', lv_path='lv/path'),
+ api.Volume(lv_name='vg/no_osd', lv_tags='ceph.osd_id=null', lv_path='lv/path'),
+ None,
+ ])
+ def test_is_not_ceph_device(self, dev):
+ assert not api.is_ceph_device(dev)
+
+ def test_no_empty_lv_name(self):
+ with pytest.raises(ValueError):
+ api.Volume(lv_name='', lv_tags='')
+
+
+class TestVolumeGroup(object):
+
+ def test_volume_group_no_empty_name(self):
+ with pytest.raises(ValueError):
+ api.VolumeGroup(vg_name='')
+
+
+class TestVolumeGroupFree(object):
+
+ def test_integer_gets_produced(self):
+ vg = api.VolumeGroup(vg_name='nosize', vg_free_count=100, vg_extent_size=4194304)
+ assert vg.free == 100 * 4194304
+
+
+class TestCreateLVs(object):
+
+ def setup(self):
+ self.vg = api.VolumeGroup(vg_name='ceph',
+ vg_extent_size=1073741824,
+ vg_extent_count=99999999,
+ vg_free_count=999)
+
+ def test_creates_correct_lv_number_from_parts(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
+ lvs = api.create_lvs(self.vg, parts=4)
+ assert len(lvs) == 4
+
+ def test_suffixes_the_size_arg(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
+ lvs = api.create_lvs(self.vg, parts=4)
+ assert lvs[0][1]['extents'] == 249
+
+ def test_only_uses_free_size(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
+ vg = api.VolumeGroup(vg_name='ceph',
+ vg_extent_size=1073741824,
+ vg_extent_count=99999999,
+ vg_free_count=1000)
+ lvs = api.create_lvs(vg, parts=4)
+ assert lvs[0][1]['extents'] == 250
+
+ def test_null_tags_are_set_by_default(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
+ kwargs = api.create_lvs(self.vg, parts=4)[0][1]
+ assert list(kwargs['tags'].values()) == ['null', 'null', 'null', 'null']
+
+ def test_fallback_to_one_part(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
+ lvs = api.create_lvs(self.vg)
+ assert len(lvs) == 1
+
+
+class TestVolumeGroupSizing(object):
+
+ def setup(self):
+ self.vg = api.VolumeGroup(vg_name='ceph',
+ vg_extent_size=1073741824,
+ vg_free_count=1024)
+
+ def test_parts_and_size_errors(self):
+ with pytest.raises(ValueError) as error:
+ self.vg.sizing(parts=4, size=10)
+ assert "Cannot process sizing" in str(error.value)
+
+ def test_zero_parts_produces_100_percent(self):
+ result = self.vg.sizing(parts=0)
+ assert result['percentages'] == 100
+
+ def test_two_parts_produces_50_percent(self):
+ result = self.vg.sizing(parts=2)
+ assert result['percentages'] == 50
+
+ def test_two_parts_produces_half_size(self):
+ result = self.vg.sizing(parts=2)
+ assert result['sizes'] == 512
+
+ def test_half_size_produces_round_sizes(self):
+ result = self.vg.sizing(size=512)
+ assert result['sizes'] == 512
+ assert result['percentages'] == 50
+ assert result['parts'] == 2
+
+ def test_bit_more_than_half_size_allocates_full_size(self):
+ # 513 can't allocate more than 1, so it just fallsback to using the
+ # whole device
+ result = self.vg.sizing(size=513)
+ assert result['sizes'] == 1024
+ assert result['percentages'] == 100
+ assert result['parts'] == 1
+
+ def test_extents_are_halfed_rounded_down(self):
+ result = self.vg.sizing(size=512)
+ assert result['extents'] == 512
+
+ def test_bit_less_size_rounds_down(self):
+ result = self.vg.sizing(size=129)
+ assert result['sizes'] == 146
+ assert result['percentages'] == 14
+ assert result['parts'] == 7
+
+ def test_unable_to_allocate_past_free_size(self):
+ with pytest.raises(exceptions.SizeAllocationError):
+ self.vg.sizing(size=2048)
+
+
+class TestRemoveLV(object):
+
+ def test_removes_lv(self, monkeypatch):
+ def mock_call(cmd, **kw):
+ return ('', '', 0)
+ monkeypatch.setattr(process, 'call', mock_call)
+ assert api.remove_lv("vg/lv")
+
+ def test_removes_lv_object(self, fake_call):
+ foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='')
+ api.remove_lv(foo_volume)
+ # last argument from the list passed to process.call
+ assert fake_call.calls[0]['args'][0][-1] == '/path'
+
+ def test_fails_to_remove_lv(self, monkeypatch):
+ def mock_call(cmd, **kw):
+ return ('', '', 1)
+ monkeypatch.setattr(process, 'call', mock_call)
+ with pytest.raises(RuntimeError):
+ api.remove_lv("vg/lv")
+
+
+class TestCreateLV(object):
+
+ def setup(self):
+ self.foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='')
+ self.foo_group = api.VolumeGroup(vg_name='foo_group',
+ vg_extent_size="4194304",
+ vg_extent_count="100",
+ vg_free_count="100")
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_uses_size(self, m_get_single_lv, m_call, m_run, monkeypatch):
+ m_get_single_lv.return_value = self.foo_volume
+ api.create_lv('foo', 0, vg=self.foo_group, size=419430400, tags={'ceph.type': 'data'})
+ expected = (['lvcreate', '--yes', '-l', '100', '-n', 'foo-0', 'foo_group'])
+ m_run.assert_called_with(expected, run_on_host=True)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_uses_size_adjust_if_1percent_over(self, m_get_single_lv, m_call, m_run, monkeypatch):
+ foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='')
+ foo_group = api.VolumeGroup(vg_name='foo_group',
+ vg_extent_size="4194304",
+ vg_extent_count="1000",
+ vg_free_count="1000")
+ m_get_single_lv.return_value = foo_volume
+ # 4232052736 is just under 1% over the available size 4194304000
+ api.create_lv('foo', 0, vg=foo_group, size=4232052736, tags={'ceph.type': 'data'})
+ expected = ['lvcreate', '--yes', '-l', '1000', '-n', 'foo-0', 'foo_group']
+ m_run.assert_called_with(expected, run_on_host=True)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_uses_size_too_large(self, m_get_single_lv, m_call, m_run, monkeypatch):
+ m_get_single_lv.return_value = self.foo_volume
+ with pytest.raises(RuntimeError):
+ api.create_lv('foo', 0, vg=self.foo_group, size=5368709120, tags={'ceph.type': 'data'})
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_uses_extents(self, m_get_single_lv, m_call, m_run, monkeypatch):
+ m_get_single_lv.return_value = self.foo_volume
+ api.create_lv('foo', 0, vg=self.foo_group, extents='50', tags={'ceph.type': 'data'})
+ expected = ['lvcreate', '--yes', '-l', '50', '-n', 'foo-0', 'foo_group']
+ m_run.assert_called_with(expected, run_on_host=True)
+
+ @pytest.mark.parametrize("test_input,expected",
+ [(2, 50),
+ (3, 33),])
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_uses_slots(self, m_get_single_lv, m_call, m_run, monkeypatch, test_input, expected):
+ m_get_single_lv.return_value = self.foo_volume
+ api.create_lv('foo', 0, vg=self.foo_group, slots=test_input, tags={'ceph.type': 'data'})
+ expected = ['lvcreate', '--yes', '-l', str(expected), '-n', 'foo-0', 'foo_group']
+ m_run.assert_called_with(expected, run_on_host=True)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_uses_all(self, m_get_single_lv, m_call, m_run, monkeypatch):
+ m_get_single_lv.return_value = self.foo_volume
+ api.create_lv('foo', 0, vg=self.foo_group, tags={'ceph.type': 'data'})
+ expected = ['lvcreate', '--yes', '-l', '100%FREE', '-n', 'foo-0', 'foo_group']
+ m_run.assert_called_with(expected, run_on_host=True)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.Volume.set_tags')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_calls_to_set_tags_default(self, m_get_single_lv, m_set_tags, m_call, m_run, monkeypatch):
+ m_get_single_lv.return_value = self.foo_volume
+ api.create_lv('foo', 0, vg=self.foo_group)
+ tags = {
+ "ceph.osd_id": "null",
+ "ceph.type": "null",
+ "ceph.cluster_fsid": "null",
+ "ceph.osd_fsid": "null",
+ }
+ m_set_tags.assert_called_with(tags)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.Volume.set_tags')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_calls_to_set_tags_arg(self, m_get_single_lv, m_set_tags, m_call, m_run, monkeypatch):
+ m_get_single_lv.return_value = self.foo_volume
+ api.create_lv('foo', 0, vg=self.foo_group, tags={'ceph.type': 'data'})
+ tags = {
+ "ceph.type": "data",
+ "ceph.data_device": "/path"
+ }
+ m_set_tags.assert_called_with(tags)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_device_vgs')
+ @patch('ceph_volume.api.lvm.create_vg')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_create_vg(self, m_get_single_lv, m_create_vg, m_get_device_vgs, m_call,
+ m_run, monkeypatch):
+ m_get_single_lv.return_value = self.foo_volume
+ m_get_device_vgs.return_value = []
+ api.create_lv('foo', 0, device='dev/foo', size='5G', tags={'ceph.type': 'data'})
+ m_create_vg.assert_called_with('dev/foo', name_prefix='ceph')
+
+
+class TestTags(object):
+
+ def setup(self):
+ self.foo_volume_clean = api.Volume(lv_name='foo_clean', lv_path='/pathclean',
+ vg_name='foo_group',
+ lv_tags='')
+ self.foo_volume = api.Volume(lv_name='foo', lv_path='/path',
+ vg_name='foo_group',
+ lv_tags='ceph.foo0=bar0,ceph.foo1=bar1,ceph.foo2=bar2')
+
+ def test_set_tag(self, monkeypatch, capture):
+ monkeypatch.setattr(process, 'run', capture)
+ monkeypatch.setattr(process, 'call', capture)
+ self.foo_volume_clean.set_tag('foo', 'bar')
+ expected = ['lvchange', '--addtag', 'foo=bar', '/pathclean']
+ assert capture.calls[0]['args'][0] == expected
+ assert self.foo_volume_clean.tags == {'foo': 'bar'}
+
+ def test_set_clear_tag(self, monkeypatch, capture):
+ monkeypatch.setattr(process, 'run', capture)
+ monkeypatch.setattr(process, 'call', capture)
+ self.foo_volume_clean.set_tag('foo', 'bar')
+ assert self.foo_volume_clean.tags == {'foo': 'bar'}
+ self.foo_volume_clean.clear_tag('foo')
+ expected = ['lvchange', '--deltag', 'foo=bar', '/pathclean']
+ assert self.foo_volume_clean.tags == {}
+ assert capture.calls[1]['args'][0] == expected
+
+ def test_set_tags(self, monkeypatch, capture):
+ monkeypatch.setattr(process, 'run', capture)
+ monkeypatch.setattr(process, 'call', capture)
+ tags = {'ceph.foo0': 'bar0', 'ceph.foo1': 'bar1', 'ceph.foo2': 'bar2'}
+ assert self.foo_volume.tags == tags
+
+ tags = {'ceph.foo0': 'bar0', 'ceph.foo1': 'baz1', 'ceph.foo2': 'baz2'}
+ self.foo_volume.set_tags(tags)
+ assert self.foo_volume.tags == tags
+
+ self.foo_volume.set_tag('ceph.foo1', 'other1')
+ tags['ceph.foo1'] = 'other1'
+ assert self.foo_volume.tags == tags
+
+ expected = [
+ sorted(['lvchange', '--deltag', 'ceph.foo0=bar0', '--deltag',
+ 'ceph.foo1=bar1', '--deltag', 'ceph.foo2=bar2', '/path']),
+ sorted(['lvchange', '--deltag', 'ceph.foo1=baz1', '/path']),
+ sorted(['lvchange', '--addtag', 'ceph.foo0=bar0', '--addtag',
+ 'ceph.foo1=baz1', '--addtag', 'ceph.foo2=baz2', '/path']),
+ sorted(['lvchange', '--addtag', 'ceph.foo1=other1', '/path']),
+ ]
+ # The order isn't guaranteed
+ for call in capture.calls:
+ assert sorted(call['args'][0]) in expected
+ assert len(capture.calls) == len(expected)
+
+ def test_clear_tags(self, monkeypatch, capture):
+ monkeypatch.setattr(process, 'run', capture)
+ monkeypatch.setattr(process, 'call', capture)
+ tags = {'ceph.foo0': 'bar0', 'ceph.foo1': 'bar1', 'ceph.foo2': 'bar2'}
+
+ self.foo_volume_clean.set_tags(tags)
+ assert self.foo_volume_clean.tags == tags
+ self.foo_volume_clean.clear_tags()
+ assert self.foo_volume_clean.tags == {}
+
+ expected = [
+ sorted(['lvchange', '--addtag', 'ceph.foo0=bar0', '--addtag',
+ 'ceph.foo1=bar1', '--addtag', 'ceph.foo2=bar2',
+ '/pathclean']),
+ sorted(['lvchange', '--deltag', 'ceph.foo0=bar0', '--deltag',
+ 'ceph.foo1=bar1', '--deltag', 'ceph.foo2=bar2',
+ '/pathclean']),
+ ]
+ # The order isn't guaranteed
+ for call in capture.calls:
+ assert sorted(call['args'][0]) in expected
+ assert len(capture.calls) == len(expected)
+
+
+class TestExtendVG(object):
+
+ def setup(self):
+ self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='')
+
+ def test_uses_single_device_in_list(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.extend_vg(self.foo_volume, ['/dev/sda'])
+ expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda']
+ assert fake_run.calls[0]['args'][0] == expected
+
+ def test_uses_single_device(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.extend_vg(self.foo_volume, '/dev/sda')
+ expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda']
+ assert fake_run.calls[0]['args'][0] == expected
+
+ def test_uses_multiple_devices(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.extend_vg(self.foo_volume, ['/dev/sda', '/dev/sdb'])
+ expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda', '/dev/sdb']
+ assert fake_run.calls[0]['args'][0] == expected
+
+
+class TestReduceVG(object):
+
+ def setup(self):
+ self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='')
+
+ def test_uses_single_device_in_list(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.reduce_vg(self.foo_volume, ['/dev/sda'])
+ expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda']
+ assert fake_run.calls[0]['args'][0] == expected
+
+ def test_uses_single_device(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.reduce_vg(self.foo_volume, '/dev/sda')
+ expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda']
+ assert fake_run.calls[0]['args'][0] == expected
+
+ def test_uses_multiple_devices(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.reduce_vg(self.foo_volume, ['/dev/sda', '/dev/sdb'])
+ expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda', '/dev/sdb']
+ assert fake_run.calls[0]['args'][0] == expected
+
+
+class TestCreateVG(object):
+
+ def setup(self):
+ self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='')
+
+ def test_no_name(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.create_vg('/dev/sda')
+ result = fake_run.calls[0]['args'][0]
+ assert '/dev/sda' in result
+ assert result[-2].startswith('ceph-')
+
+ def test_devices_list(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.create_vg(['/dev/sda', '/dev/sdb'], name='ceph')
+ result = fake_run.calls[0]['args'][0]
+ expected = ['vgcreate', '--force', '--yes', 'ceph', '/dev/sda', '/dev/sdb']
+ assert result == expected
+
+ def test_name_prefix(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.create_vg('/dev/sda', name_prefix='master')
+ result = fake_run.calls[0]['args'][0]
+ assert '/dev/sda' in result
+ assert result[-2].startswith('master-')
+
+ def test_specific_name(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.create_vg('/dev/sda', name='master')
+ result = fake_run.calls[0]['args'][0]
+ assert '/dev/sda' in result
+ assert result[-2] == 'master'
+
+#
+# The following tests are pretty gnarly. VDO detection is very convoluted and
+# involves correlating information from device mappers, realpaths, slaves of
+# those mappers, and parents or related mappers. This makes it very hard to
+# patch nicely or keep tests short and readable. These tests are trying to
+# ensure correctness; the better approach will be to do some functional
+# testing with VDO.
+#
+
+
+@pytest.fixture
+def disable_kvdo_path(monkeypatch):
+ monkeypatch.setattr('os.path.isdir', lambda x, **kw: False)
+
+
+@pytest.fixture
+def enable_kvdo_path(monkeypatch):
+ monkeypatch.setattr('os.path.isdir', lambda x, **kw: True)
+
+
+# Stub for os.listdir
+
+
+class ListDir(object):
+
+ def __init__(self, paths):
+ self.paths = paths
+ self._normalize_paths()
+ self.listdir = os.listdir
+
+ def _normalize_paths(self):
+ for k, v in self.paths.items():
+ self.paths[k.rstrip('/')] = v.rstrip('/')
+
+ def add(self, original, fake):
+ self.paths[original.rstrip('/')] = fake.rstrip('/')
+
+ def __call__(self, path):
+ return self.listdir(self.paths[path.rstrip('/')])
+
+
+@pytest.fixture(scope='function')
+def listdir(monkeypatch):
+ def apply(paths=None, stub=None):
+ if not stub:
+ stub = ListDir(paths)
+ if paths:
+ for original, fake in paths.items():
+ stub.add(original, fake)
+
+ monkeypatch.setattr('os.listdir', stub)
+ return apply
+
+
+@pytest.fixture(scope='function')
+def makedirs(tmpdir):
+ def create(directory):
+ path = os.path.join(str(tmpdir), directory)
+ os.makedirs(path)
+ return path
+ create.base = str(tmpdir)
+ return create
+
+
+class TestIsVdo(object):
+
+ def test_no_vdo_dir(self, disable_kvdo_path):
+ assert api._is_vdo('/path') is False
+
+ def test_exceptions_return_false(self, monkeypatch):
+ def throw():
+ raise Exception()
+ monkeypatch.setattr('ceph_volume.api.lvm._is_vdo', throw)
+ assert api.is_vdo('/path') == '0'
+
+ def test_is_vdo_returns_a_string(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.api.lvm._is_vdo', lambda x, **kw: True)
+ assert api.is_vdo('/path') == '1'
+
+ def test_kvdo_dir_no_devices(self, makedirs, enable_kvdo_path, listdir, monkeypatch):
+ kvdo_path = makedirs('sys/kvdo')
+ listdir(paths={'/sys/kvdo': kvdo_path})
+ monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: [])
+ monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: [])
+ assert api._is_vdo('/dev/mapper/vdo0') is False
+
+ def test_vdo_slaves_found_and_matched(self, makedirs, enable_kvdo_path, listdir, monkeypatch):
+ kvdo_path = makedirs('sys/kvdo')
+ listdir(paths={'/sys/kvdo': kvdo_path})
+ monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: ['/dev/dm-3'])
+ monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: [])
+ assert api._is_vdo('/dev/dm-3') is True
+
+ def test_vdo_parents_found_and_matched(self, makedirs, enable_kvdo_path, listdir, monkeypatch):
+ kvdo_path = makedirs('sys/kvdo')
+ listdir(paths={'/sys/kvdo': kvdo_path})
+ monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: [])
+ monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: ['/dev/dm-4'])
+ assert api._is_vdo('/dev/dm-4') is True
+
+
+class TestVdoSlaves(object):
+
+ def test_slaves_are_not_found(self, makedirs, listdir, monkeypatch):
+ slaves_path = makedirs('sys/block/vdo0/slaves')
+ listdir(paths={'/sys/block/vdo0/slaves': slaves_path})
+ monkeypatch.setattr('ceph_volume.api.lvm.os.path.exists', lambda x, **kw: True)
+ result = sorted(api._vdo_slaves(['vdo0']))
+ assert '/dev/mapper/vdo0' in result
+ assert 'vdo0' in result
+
+ def test_slaves_are_found(self, makedirs, listdir, monkeypatch):
+ slaves_path = makedirs('sys/block/vdo0/slaves')
+ makedirs('sys/block/vdo0/slaves/dm-4')
+ makedirs('dev/mapper/vdo0')
+ listdir(paths={'/sys/block/vdo0/slaves': slaves_path})
+ monkeypatch.setattr('ceph_volume.api.lvm.os.path.exists', lambda x, **kw: True)
+ result = sorted(api._vdo_slaves(['vdo0']))
+ assert '/dev/dm-4' in result
+ assert 'dm-4' in result
+
+
+class TestVDOParents(object):
+
+ def test_parents_are_found(self, makedirs, listdir):
+ block_path = makedirs('sys/block')
+ slaves_path = makedirs('sys/block/dm-4/slaves')
+ makedirs('sys/block/dm-4/slaves/dm-3')
+ listdir(paths={
+ '/sys/block/dm-4/slaves': slaves_path,
+ '/sys/block': block_path})
+ result = api._vdo_parents(['dm-3'])
+ assert '/dev/dm-4' in result
+ assert 'dm-4' in result
+
+ def test_parents_are_not_found(self, makedirs, listdir):
+ block_path = makedirs('sys/block')
+ slaves_path = makedirs('sys/block/dm-4/slaves')
+ makedirs('sys/block/dm-4/slaves/dm-5')
+ listdir(paths={
+ '/sys/block/dm-4/slaves': slaves_path,
+ '/sys/block': block_path})
+ result = api._vdo_parents(['dm-3'])
+ assert result == []
+
+
+class TestSplitNameParser(object):
+
+ def test_keys_are_parsed_without_prefix(self):
+ line = ["DM_VG_NAME='/dev/mapper/vg';DM_LV_NAME='lv';DM_LV_LAYER=''"]
+ result = api._splitname_parser(line)
+ assert result['VG_NAME'] == 'vg'
+ assert result['LV_NAME'] == 'lv'
+ assert result['LV_LAYER'] == ''
+
+ def test_vg_name_sans_mapper(self):
+ line = ["DM_VG_NAME='/dev/mapper/vg';DM_LV_NAME='lv';DM_LV_LAYER=''"]
+ result = api._splitname_parser(line)
+ assert '/dev/mapper' not in result['VG_NAME']
+
+
+class TestGetDeviceVgs(object):
+
+ @patch('ceph_volume.process.call')
+ @patch('ceph_volume.api.lvm._output_parser')
+ def test_get_device_vgs_with_empty_pv(self, patched_output_parser, pcall):
+ patched_output_parser.return_value = [{'vg_name': ''}]
+ pcall.return_value = ('', '', '')
+ vgs = api.get_device_vgs('/dev/foo')
+ assert vgs == []
+
+class TestGetDeviceLvs(object):
+
+ @patch('ceph_volume.process.call')
+ @patch('ceph_volume.api.lvm._output_parser')
+ def test_get_device_lvs_with_empty_vg(self, patched_output_parser, pcall):
+ patched_output_parser.return_value = [{'lv_name': ''}]
+ pcall.return_value = ('', '', '')
+ vgs = api.get_device_lvs('/dev/foo')
+ assert vgs == []
+
+
+# NOTE: api.convert_filters_to_str() and api.convert_tags_to_str() should get
+# tested automatically while testing api.make_filters_lvmcmd_ready()
+class TestMakeFiltersLVMCMDReady(object):
+
+ def test_with_no_filters_and_no_tags(self):
+ retval = api.make_filters_lvmcmd_ready(None, None)
+
+ assert isinstance(retval, str)
+ assert retval == ''
+
+ def test_with_filters_and_no_tags(self):
+ filters = {'lv_name': 'lv1', 'lv_path': '/dev/sda'}
+
+ retval = api.make_filters_lvmcmd_ready(filters, None)
+
+ assert isinstance(retval, str)
+ for k, v in filters.items():
+ assert k in retval
+ assert v in retval
+
+ def test_with_no_filters_and_with_tags(self):
+ tags = {'ceph.type': 'data', 'ceph.osd_id': '0'}
+
+ retval = api.make_filters_lvmcmd_ready(None, tags)
+
+ assert isinstance(retval, str)
+ assert 'tags' in retval
+ for k, v in tags.items():
+ assert k in retval
+ assert v in retval
+ assert retval.find('tags') < retval.find(k) < retval.find(v)
+
+ def test_with_filters_and_tags(self):
+ filters = {'lv_name': 'lv1', 'lv_path': '/dev/sda'}
+ tags = {'ceph.type': 'data', 'ceph.osd_id': '0'}
+
+ retval = api.make_filters_lvmcmd_ready(filters, tags)
+
+ assert isinstance(retval, str)
+ for f, t in zip(filters.items(), tags.items()):
+ assert f[0] in retval
+ assert f[1] in retval
+ assert t[0] in retval
+ assert t[1] in retval
+ assert retval.find(f[0]) < retval.find(f[1]) < \
+ retval.find('tags') < retval.find(t[0]) < retval.find(t[1])
+
+
+class TestGetPVs(object):
+
+ def test_get_pvs(self, monkeypatch):
+ pv1 = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={},
+ vg_name='vg1')
+ pv2 = api.PVolume(pv_name='/dev/sdb', pv_uuid='0001', pv_tags={},
+ vg_name='vg2')
+ pvs = [pv1, pv2]
+ stdout = ['{};{};{};{};;'.format(pv1.pv_name, pv1.pv_tags, pv1.pv_uuid, pv1.vg_name),
+ '{};{};{};{};;'.format(pv2.pv_name, pv2.pv_tags, pv2.pv_uuid, pv2.vg_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ pvs_ = api.get_pvs()
+ assert len(pvs_) == len(pvs)
+ for pv, pv_ in zip(pvs, pvs_):
+ assert pv_.pv_name == pv.pv_name
+
+ def test_get_pvs_single_pv(self, monkeypatch):
+ pv1 = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={},
+ vg_name='vg1')
+ pvs = [pv1]
+ stdout = ['{};;;;;;'.format(pv1.pv_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ pvs_ = api.get_pvs()
+ assert len(pvs_) == 1
+ assert pvs_[0].pv_name == pvs[0].pv_name
+
+ def test_get_pvs_empty(self, monkeypatch):
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0))
+ assert api.get_pvs() == []
+
+
+class TestGetVGs(object):
+
+ def test_get_vgs(self, monkeypatch):
+ vg1 = api.VolumeGroup(vg_name='vg1')
+ vg2 = api.VolumeGroup(vg_name='vg2')
+ vgs = [vg1, vg2]
+ stdout = ['{};;;;;;'.format(vg1.vg_name),
+ '{};;;;;;'.format(vg2.vg_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ vgs_ = api.get_vgs()
+ assert len(vgs_) == len(vgs)
+ for vg, vg_ in zip(vgs, vgs_):
+ assert vg_.vg_name == vg.vg_name
+
+ def test_get_vgs_single_vg(self, monkeypatch):
+        vg1 = api.VolumeGroup(vg_name='vg')
+        vgs = [vg1]
+ stdout = ['{};;;;;;'.format(vg1.vg_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ vgs_ = api.get_vgs()
+ assert len(vgs_) == 1
+ assert vgs_[0].vg_name == vgs[0].vg_name
+
+ def test_get_vgs_empty(self, monkeypatch):
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0))
+ assert api.get_vgs() == []
+
+
+class TestGetLVs(object):
+
+ def test_get_lvs(self, monkeypatch):
+ lv1 = api.Volume(lv_tags='ceph.type=data', lv_path='/dev/vg1/lv1',
+ lv_name='lv1', vg_name='vg1')
+ lv2 = api.Volume(lv_tags='ceph.type=data', lv_path='/dev/vg2/lv2',
+ lv_name='lv2', vg_name='vg2')
+ lvs = [lv1, lv2]
+ stdout = ['{};{};{};{}'.format(lv1.lv_tags, lv1.lv_path, lv1.lv_name,
+ lv1.vg_name),
+ '{};{};{};{}'.format(lv2.lv_tags, lv2.lv_path, lv2.lv_name,
+ lv2.vg_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ lvs_ = api.get_lvs()
+ assert len(lvs_) == len(lvs)
+ for lv, lv_ in zip(lvs, lvs_):
+ assert lv.__dict__ == lv_.__dict__
+
+ def test_get_lvs_single_lv(self, monkeypatch):
+ stdout = ['ceph.type=data;/dev/vg/lv;lv;vg']
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+ lvs = []
+        lvs.append(api.Volume(lv_tags='ceph.type=data',
+                              lv_path='/dev/vg/lv',
+                              lv_name='lv', vg_name='vg'))
+
+ lvs_ = api.get_lvs()
+ assert len(lvs_) == len(lvs)
+ assert lvs[0].__dict__ == lvs_[0].__dict__
+
+ def test_get_lvs_empty(self, monkeypatch):
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0))
+ assert api.get_lvs() == []
+
+
+class TestGetSinglePV(object):
+
+ @patch('ceph_volume.devices.lvm.prepare.api.get_pvs')
+ def test_get_single_pv_multiple_matches_raises_runtimeerror(self, m_get_pvs):
+ fake_pvs = []
+ fake_pvs.append(api.PVolume(pv_name='/dev/sda', pv_tags={}))
+ fake_pvs.append(api.PVolume(pv_name='/dev/sdb', pv_tags={}))
+
+ m_get_pvs.return_value = fake_pvs
+
+ with pytest.raises(RuntimeError) as e:
+ api.get_single_pv()
+ assert "matched more than 1 PV present on this host." in str(e.value)
+
+ @patch('ceph_volume.devices.lvm.prepare.api.get_pvs')
+ def test_get_single_pv_no_match_returns_none(self, m_get_pvs):
+ m_get_pvs.return_value = []
+
+ pv = api.get_single_pv()
+        assert pv is None
+
+ @patch('ceph_volume.devices.lvm.prepare.api.get_pvs')
+ def test_get_single_pv_one_match(self, m_get_pvs):
+ fake_pvs = []
+ fake_pvs.append(api.PVolume(pv_name='/dev/sda', pv_tags={}))
+ m_get_pvs.return_value = fake_pvs
+
+ pv = api.get_single_pv()
+
+ assert isinstance(pv, api.PVolume)
+ assert pv.name == '/dev/sda'
+
+
+class TestGetSingleVG(object):
+
+ @patch('ceph_volume.devices.lvm.prepare.api.get_vgs')
+ def test_get_single_vg_multiple_matches_raises_runtimeerror(self, m_get_vgs):
+ fake_vgs = []
+ fake_vgs.append(api.VolumeGroup(vg_name='vg1'))
+ fake_vgs.append(api.VolumeGroup(vg_name='vg2'))
+
+ m_get_vgs.return_value = fake_vgs
+
+ with pytest.raises(RuntimeError) as e:
+ api.get_single_vg()
+ assert "matched more than 1 VG present on this host." in str(e.value)
+
+ @patch('ceph_volume.devices.lvm.prepare.api.get_vgs')
+ def test_get_single_vg_no_match_returns_none(self, m_get_vgs):
+ m_get_vgs.return_value = []
+
+ vg = api.get_single_vg()
+        assert vg is None
+
+ @patch('ceph_volume.devices.lvm.prepare.api.get_vgs')
+ def test_get_single_vg_one_match(self, m_get_vgs):
+ fake_vgs = []
+ fake_vgs.append(api.VolumeGroup(vg_name='vg1'))
+ m_get_vgs.return_value = fake_vgs
+
+ vg = api.get_single_vg()
+
+ assert isinstance(vg, api.VolumeGroup)
+ assert vg.name == 'vg1'
+
+class TestGetSingleLV(object):
+
+ @patch('ceph_volume.devices.lvm.prepare.api.get_lvs')
+ def test_get_single_lv_multiple_matches_raises_runtimeerror(self, m_get_lvs):
+ fake_lvs = []
+ fake_lvs.append(api.Volume(lv_name='lv1',
+ lv_path='/dev/vg1/lv1',
+ vg_name='vg1',
+ lv_tags='',
+ lv_uuid='fake-uuid'))
+ fake_lvs.append(api.Volume(lv_name='lv1',
+ lv_path='/dev/vg2/lv1',
+ vg_name='vg2',
+ lv_tags='',
+ lv_uuid='fake-uuid'))
+ m_get_lvs.return_value = fake_lvs
+
+ with pytest.raises(RuntimeError) as e:
+ api.get_single_lv()
+ assert "matched more than 1 LV present on this host" in str(e.value)
+
+ @patch('ceph_volume.devices.lvm.prepare.api.get_lvs')
+ def test_get_single_lv_no_match_returns_none(self, m_get_lvs):
+ m_get_lvs.return_value = []
+
+ lv = api.get_single_lv()
+        assert lv is None
+
+ @patch('ceph_volume.devices.lvm.prepare.api.get_lvs')
+ def test_get_single_lv_one_match(self, m_get_lvs):
+ fake_lvs = []
+ fake_lvs.append(api.Volume(lv_name='lv1', lv_path='/dev/vg1/lv1', vg_name='vg1', lv_tags='', lv_uuid='fake-uuid'))
+ m_get_lvs.return_value = fake_lvs
+
+ lv_ = api.get_single_lv()
+
+ assert isinstance(lv_, api.Volume)
+ assert lv_.name == 'lv1'
diff --git a/src/ceph-volume/ceph_volume/tests/conftest.py b/src/ceph-volume/ceph_volume/tests/conftest.py
new file mode 100644
index 000000000..2cedd68ef
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/conftest.py
@@ -0,0 +1,317 @@
+import os
+import pytest
+from mock.mock import patch, PropertyMock, create_autospec
+from ceph_volume.api import lvm
+from ceph_volume.util import disk
+from ceph_volume.util import device
+from ceph_volume.util.constants import ceph_disk_guids
+from ceph_volume import conf, configuration
+
+
+class Capture(object):
+
+ def __init__(self, *a, **kw):
+ self.a = a
+ self.kw = kw
+ self.calls = []
+ self.return_values = kw.get('return_values', False)
+ self.always_returns = kw.get('always_returns', False)
+
+ def __call__(self, *a, **kw):
+ self.calls.append({'args': a, 'kwargs': kw})
+ if self.always_returns:
+ return self.always_returns
+ if self.return_values:
+ return self.return_values.pop()
+
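+# Illustrative usage (not part of the test suite): Capture is a callable stub
+# that records every invocation, e.g.
+#
+#   cap = Capture(always_returns=([], [], 0))
+#   cap('lvs', run_as_root=True)
+#   assert cap.calls[0]['args'] == ('lvs',)
+#   assert cap.calls[0]['kwargs'] == {'run_as_root': True}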
+
+class Factory(object):
+
+ def __init__(self, **kw):
+ for k, v in kw.items():
+ setattr(self, k, v)
+
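+# Illustrative usage: Factory turns keyword arguments into attributes, so
+# Factory(path='/dev/sda', available=True).path == '/dev/sda'; the ``factory``
+# fixture below hands the class itself to tests for building ad-hoc stubs.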
+
+@pytest.fixture
+def factory():
+ return Factory
+
+
+@pytest.fixture
+def capture():
+ return Capture()
+
+@pytest.fixture
+def mock_lv_device_generator():
+ def mock_lv():
+        size = 21474836480  # 20 GiB
+ dev = create_autospec(device.Device)
+ dev.lv_name = 'lv'
+ dev.vg_name = 'vg'
+ dev.path = '{}/{}'.format(dev.vg_name, dev.lv_name)
+ dev.used_by_ceph = False
+ dev.vg_size = [size]
+ dev.vg_free = dev.vg_size
+ dev.available_lvm = True
+ dev.is_device = False
+ dev.lvs = [lvm.Volume(vg_name=dev.vg_name, lv_name=dev.lv_name, lv_size=size, lv_tags='')]
+ return dev
+ return mock_lv
+
+def mock_device():
+ dev = create_autospec(device.Device)
+ dev.path = '/dev/foo'
+ dev.vg_name = 'vg_foo'
+ dev.lv_name = 'lv_foo'
+ dev.symlink = None
+ dev.vgs = [lvm.VolumeGroup(vg_name=dev.vg_name, lv_name=dev.lv_name)]
+ dev.available_lvm = True
+ dev.vg_size = [21474836480]
+ dev.vg_free = dev.vg_size
+ dev.lvs = []
+ return dev
+
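+# mock_devices_available hands a test one or two of the stub devices built by
+# mock_device() above, exercising both the single- and multi-device code
+# paths in the batch-planning tests.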
+@pytest.fixture(params=range(1,3))
+def mock_devices_available(request):
+ ret = []
+ for _ in range(request.param):
+ ret.append(mock_device())
+ return ret
+
+@pytest.fixture
+def mock_device_generator():
+ return mock_device
+
+
+@pytest.fixture(params=range(1,11))
+def osds_per_device(request):
+ return request.param
+
+
+@pytest.fixture
+def fake_run(monkeypatch):
+ fake_run = Capture()
+ monkeypatch.setattr('ceph_volume.process.run', fake_run)
+ return fake_run
+
+
+@pytest.fixture
+def fake_call(monkeypatch):
+ fake_call = Capture(always_returns=([], [], 0))
+ monkeypatch.setattr('ceph_volume.process.call', fake_call)
+ return fake_call
+
+
+@pytest.fixture
+def fakedevice(factory):
+ def apply(**kw):
+ params = dict(
+ path='/dev/sda',
+ abspath='/dev/sda',
+ lv_api=None,
+ pvs_api=[],
+ disk_api={},
+ sys_api={},
+ exists=True,
+ is_lvm_member=True,
+ )
+ params.update(dict(kw))
+ params['lvm_size'] = disk.Size(b=params['sys_api'].get("size", 0))
+ return factory(**params)
+ return apply
+
+
+@pytest.fixture
+def stub_call(monkeypatch):
+ """
+ Monkeypatches process.call, so that a caller can add behavior to the response
+ """
+ def apply(return_values):
+ if isinstance(return_values, tuple):
+ return_values = [return_values]
+ stubbed_call = Capture(return_values=return_values)
+ monkeypatch.setattr('ceph_volume.process.call', stubbed_call)
+ return stubbed_call
+
+ return apply
+
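+# Illustrative usage (not part of the suite): a test can stub a single call,
+#
+#   def test_something(stub_call, ...):
+#       stub_call((['some output'], [], 0))
+#       ...exercise code that invokes process.call once...
+#
+# or queue several return tuples by passing a list, which Capture pops from
+# the end on each successive call.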
+
+@pytest.fixture(autouse=True)
+def reset_cluster_name(request, monkeypatch):
+ """
+    The globally available ``ceph_volume.conf.cluster`` might get mangled in
+    tests; make sure that it gets reset after every test, preventing pollution
+    from leaking into other tests later.
+ """
+ def fin():
+ conf.cluster = None
+ try:
+ os.environ.pop('CEPH_CONF')
+ except KeyError:
+ pass
+ request.addfinalizer(fin)
+
+
+@pytest.fixture
+def conf_ceph(monkeypatch):
+ """
+ Monkeypatches ceph_volume.conf.ceph, which is meant to parse/read
+    a ceph.conf. The patching is naive: it allows one to set return values for
+ specific method calls.
+ """
+ def apply(**kw):
+ stub = Factory(**kw)
+ monkeypatch.setattr(conf, 'ceph', stub)
+ return stub
+ return apply
+
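+# Illustrative usage (assuming the parser exposes a ``get_safe`` accessor):
+#
+#   conf_ceph(get_safe=lambda *a: '/var/lib/ceph/osd')
+#
+# makes any code that calls conf.ceph.get_safe(...) receive that value.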
+
+@pytest.fixture
+def conf_ceph_stub(monkeypatch, tmpfile):
+ """
+    Monkeypatches ceph_volume.conf.ceph with contents from a string that is
+    written to a temporary file and then fed through the same ceph.conf
+    loading mechanisms for testing. Unlike ``conf_ceph``, which is just a fake,
+    we are actually loading values as seen in a ceph.conf file.
+
+    This is useful when more complex ceph.conf files are needed. When just
+    trying to validate key/value behavior, ``conf_ceph`` is better suited.
+ """
+ def apply(contents):
+ conf_path = tmpfile(contents=contents)
+ parser = configuration.load(conf_path)
+ monkeypatch.setattr(conf, 'ceph', parser)
+ return parser
+ return apply
+
+
+@pytest.fixture
+def is_root(monkeypatch):
+ """
+    Patch ``os.getuid()`` so that ceph-volume's decorators that ensure a user
+    is root (or is sudoing to superuser) can continue as-is.
+ """
+ monkeypatch.setattr('os.getuid', lambda: 0)
+
+
+@pytest.fixture
+def tmpfile(tmpdir):
+ """
+    Create a temporary file, optionally filling it with contents; returns an
+    absolute path to the file when called.
+ """
+ def generate_file(name='file', contents='', directory=None):
+ directory = directory or str(tmpdir)
+ path = os.path.join(directory, name)
+ with open(path, 'w') as fp:
+ fp.write(contents)
+ return path
+ return generate_file
+
+
+@pytest.fixture
+def disable_kernel_queries(monkeypatch):
+ '''
+    This speeds up calls to Device and Disk by stubbing out kernel queries.
+ '''
+ monkeypatch.setattr("ceph_volume.util.device.disk.get_devices", lambda device='': {})
+ monkeypatch.setattr("ceph_volume.util.disk.udevadm_property", lambda *a, **kw: {})
+
+
+@pytest.fixture(params=[
+ '', 'ceph data', 'ceph journal', 'ceph block',
+ 'ceph block.wal', 'ceph block.db', 'ceph lockbox'])
+def ceph_partlabel(request):
+ return request.param
+
+
+@pytest.fixture(params=list(ceph_disk_guids.keys()))
+def ceph_parttype(request):
+ return request.param
+
+
+@pytest.fixture
+def lsblk_ceph_disk_member(monkeypatch, request, ceph_partlabel, ceph_parttype):
+ monkeypatch.setattr("ceph_volume.util.device.disk.lsblk",
+ lambda path: {'TYPE': 'disk',
+ 'NAME': 'sda',
+ 'PARTLABEL': ceph_partlabel,
+ 'PARTTYPE': ceph_parttype})
+ monkeypatch.setattr("ceph_volume.util.device.disk.lsblk_all",
+ lambda: [{'TYPE': 'disk',
+ 'NAME': 'sda',
+ 'PARTLABEL': ceph_partlabel,
+ 'PARTTYPE': ceph_parttype}])
+
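+# Together with the two parametrized fixtures above, the *_ceph_disk_member
+# fixtures run a dependent test once per ceph-disk partition label/GUID
+# combination.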
+@pytest.fixture
+def blkid_ceph_disk_member(monkeypatch, request, ceph_partlabel, ceph_parttype):
+ monkeypatch.setattr("ceph_volume.util.device.disk.blkid",
+ lambda path: {'TYPE': 'disk',
+ 'PARTLABEL': ceph_partlabel,
+ 'PARTTYPE': ceph_parttype})
+
+
+@pytest.fixture(params=[
+ ('gluster partition', 'gluster partition'),
+ # falls back to blkid
+ ('', 'gluster partition'),
+ ('gluster partition', ''),
+])
+def device_info_not_ceph_disk_member(monkeypatch, request):
+ monkeypatch.setattr("ceph_volume.util.device.disk.lsblk",
+ lambda path: {'TYPE': 'disk',
+ 'NAME': 'sda',
+ 'PARTLABEL': request.param[0]})
+ monkeypatch.setattr("ceph_volume.util.device.disk.lsblk_all",
+ lambda: [{'TYPE': 'disk',
+ 'NAME': 'sda',
+ 'PARTLABEL': request.param[0]}])
+ monkeypatch.setattr("ceph_volume.util.device.disk.blkid",
+ lambda path: {'TYPE': 'disk',
+ 'PARTLABEL': request.param[1]})
+
+@pytest.fixture
+def patched_get_block_devs_sysfs():
+ with patch('ceph_volume.util.disk.get_block_devs_sysfs') as p:
+ yield p
+
+@pytest.fixture
+def patch_bluestore_label():
+ with patch('ceph_volume.util.device.Device.has_bluestore_label',
+ new_callable=PropertyMock) as p:
+ p.return_value = False
+ yield p
+
+@pytest.fixture
+def device_info(monkeypatch, patch_bluestore_label):
+ def apply(devices=None, lsblk=None, lv=None, blkid=None, udevadm=None,
+ has_bluestore_label=False):
+ if devices:
+ for dev in devices.keys():
+ devices[dev]['device_nodes'] = os.path.basename(dev)
+ else:
+ devices = {}
+ lsblk = lsblk if lsblk else {}
+ blkid = blkid if blkid else {}
+ udevadm = udevadm if udevadm else {}
+ lv = Factory(**lv) if lv else None
+ monkeypatch.setattr("ceph_volume.sys_info.devices", {})
+ monkeypatch.setattr("ceph_volume.util.device.disk.get_devices", lambda device='': devices)
+ if not devices:
+ monkeypatch.setattr("ceph_volume.util.device.lvm.get_single_lv", lambda filters: lv)
+ else:
+ monkeypatch.setattr("ceph_volume.util.device.lvm.get_device_lvs",
+ lambda path: [lv])
+ monkeypatch.setattr("ceph_volume.util.device.disk.lsblk", lambda path: lsblk)
+ monkeypatch.setattr("ceph_volume.util.device.disk.blkid", lambda path: blkid)
+ monkeypatch.setattr("ceph_volume.util.disk.udevadm_property", lambda *a, **kw: udevadm)
+ return apply
+
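+# Illustrative usage: device_info(lsblk={'TYPE': 'disk'}) stubs every disk
+# query a Device object performs (get_devices, lsblk, blkid, udevadm), so
+# tests can fabricate device metadata without touching the host.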
+@pytest.fixture
+def fake_filesystem(fs):
+
+ fs.create_dir('/sys/block/sda/slaves')
+ fs.create_dir('/sys/block/sda/queue')
+ fs.create_dir('/sys/block/rbd0')
+ yield fs
diff --git a/src/ceph-volume/ceph_volume/tests/devices/__init__.py b/src/ceph-volume/ceph_volume/tests/devices/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/__init__.py
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py
new file mode 100644
index 000000000..2237f259e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py
@@ -0,0 +1,442 @@
+import pytest
+from copy import deepcopy
+from ceph_volume.devices.lvm import activate
+from ceph_volume.api import lvm as api
+from ceph_volume.tests.conftest import Capture
+
+
+class Args(object):
+
+ def __init__(self, **kw):
+ # default flags
+ self.bluestore = False
+ self.filestore = False
+ self.no_systemd = False
+ self.auto_detect_objectstore = None
+ for k, v in kw.items():
+ setattr(self, k, v)
+
+
+class TestActivate(object):
+
+    # These tests are very functional, hence the heavy patching; it is hard to
+    # test the negative side effects with an actual functional run, so we must
+    # set up a perfect scenario for these tests to check they can really work
+    # with/without an osd_id.
+ def test_no_osd_id_matches_fsid(self, is_root, monkeypatch, capture):
+ FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(FooVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: volumes)
+ monkeypatch.setattr(activate, 'activate_filestore', capture)
+ args = Args(osd_id=None, osd_fsid='1234', filestore=True)
+ activate.Activate([]).activate(args)
+ assert capture.calls[0]['args'][0] == [FooVolume]
+
+ def test_no_osd_id_matches_fsid_bluestore(self, is_root, monkeypatch, capture):
+ FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(FooVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: volumes)
+ monkeypatch.setattr(activate, 'activate_bluestore', capture)
+ args = Args(osd_id=None, osd_fsid='1234', bluestore=True)
+ activate.Activate([]).activate(args)
+ assert capture.calls[0]['args'][0] == [FooVolume]
+
+ def test_no_osd_id_no_matching_fsid(self, is_root, monkeypatch, capture):
+ FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_fsid=1111")
+ volumes = []
+ volumes.append(FooVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: [])
+ monkeypatch.setattr(api, 'get_single_lv', lambda **kwargs: [])
+ monkeypatch.setattr(activate, 'activate_filestore', capture)
+
+ args = Args(osd_id=None, osd_fsid='2222')
+ with pytest.raises(RuntimeError):
+ activate.Activate([]).activate(args)
+
+ def test_osd_id_no_osd_fsid(self, is_root):
+ args = Args(osd_id=42, osd_fsid=None)
+ with pytest.raises(RuntimeError) as result:
+ activate.Activate([]).activate(args)
+ assert result.value.args[0] == 'could not activate osd.42, please provide the osd_fsid too'
+
+ def test_no_osd_id_no_osd_fsid(self, is_root):
+ args = Args(osd_id=None, osd_fsid=None)
+ with pytest.raises(RuntimeError) as result:
+ activate.Activate([]).activate(args)
+ assert result.value.args[0] == 'Please provide both osd_id and osd_fsid'
+
+ def test_filestore_no_systemd(self, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ JournalVolume = api.Volume(
+ lv_name='journal',
+ lv_path='/dev/vg/journal',
+ lv_uuid='000',
+ lv_tags=','.join([
+ "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
+ "ceph.journal_uuid=000", "ceph.type=journal",
+ "ceph.osd_id=0", "ceph.osd_fsid=1234"])
+ )
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_uuid='001',
+ lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
+ "journal,ceph.journal_uuid=000,ceph.type=data," + \
+ "ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ volumes.append(JournalVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=True, filestore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls == []
+ assert fake_start_osd.calls == []
+
+ def test_filestore_no_systemd_autodetect(self, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ JournalVolume = api.Volume(
+ lv_name='journal',
+ lv_path='/dev/vg/journal',
+ lv_uuid='000',
+ lv_tags=','.join([
+ "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
+ "ceph.journal_uuid=000", "ceph.type=journal",
+ "ceph.osd_id=0", "ceph.osd_fsid=1234"])
+ )
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_uuid='001',
+ lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
+ "journal,ceph.journal_uuid=000,ceph.type=data," + \
+ "ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ volumes.append(JournalVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=True,
+ filestore=True, auto_detect_objectstore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls == []
+ assert fake_start_osd.calls == []
+
+ def test_filestore_systemd_autodetect(self, is_root, monkeypatch, capture):
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ JournalVolume = api.Volume(
+ lv_name='journal',
+ lv_path='/dev/vg/journal',
+ lv_uuid='000',
+ lv_tags=','.join([
+ "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
+ "ceph.journal_uuid=000", "ceph.type=journal",
+ "ceph.osd_id=0","ceph.osd_fsid=1234"])
+ )
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_uuid='001',
+ lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
+ "journal,ceph.journal_uuid=000,ceph.type=data," + \
+ "ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ volumes.append(JournalVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
+ filestore=True, auto_detect_objectstore=False)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls != []
+ assert fake_start_osd.calls != []
+
+ def test_filestore_systemd(self, is_root, monkeypatch, capture):
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ JournalVolume = api.Volume(
+ lv_name='journal',
+ lv_path='/dev/vg/journal',
+ lv_uuid='000',
+ lv_tags=','.join([
+ "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
+ "ceph.journal_uuid=000", "ceph.type=journal",
+ "ceph.osd_id=0","ceph.osd_fsid=1234"])
+ )
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_uuid='001',
+ lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
+ "journal,ceph.journal_uuid=000,ceph.type=data," + \
+ "ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ volumes.append(JournalVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
+ filestore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls != []
+ assert fake_start_osd.calls != []
+
+ def test_bluestore_no_systemd(self, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000," + \
+ "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=True, bluestore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls == []
+ assert fake_start_osd.calls == []
+
+ def test_bluestore_systemd(self, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000," + \
+ "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
+ bluestore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls != []
+ assert fake_start_osd.calls != []
+
+ def test_bluestore_no_systemd_autodetect(self, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_tags="ceph.cluster_name=ceph,,ceph.block_uuid=000," + \
+ "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=True,
+ bluestore=True, auto_detect_objectstore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls == []
+ assert fake_start_osd.calls == []
+
+ def test_bluestore_systemd_autodetect(self, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.path_is_mounted',
+ lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw:
+ True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000," + \
+ "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
+ bluestore=True, auto_detect_objectstore=False)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls != []
+ assert fake_start_osd.calls != []
+
+class TestActivateFlags(object):
+
+ def test_default_objectstore(self, capture):
+ args = ['0', 'asdf-ljh-asdf']
+ activation = activate.Activate(args)
+ activation.activate = capture
+ activation.main()
+ parsed_args = capture.calls[0]['args'][0]
+ assert parsed_args.filestore is False
+ assert parsed_args.bluestore is False
+
+ def test_uses_filestore(self, capture):
+ args = ['--filestore', '0', 'asdf-ljh-asdf']
+ activation = activate.Activate(args)
+ activation.activate = capture
+ activation.main()
+ parsed_args = capture.calls[0]['args'][0]
+ assert parsed_args.filestore is True
+ assert parsed_args.bluestore is False
+
+ def test_uses_bluestore(self, capture):
+ args = ['--bluestore', '0', 'asdf-ljh-asdf']
+ activation = activate.Activate(args)
+ activation.activate = capture
+ activation.main()
+ parsed_args = capture.calls[0]['args'][0]
+ assert parsed_args.filestore is False
+ assert parsed_args.bluestore is True
+
+
+class TestActivateAll(object):
+
+ def test_does_not_detect_osds(self, capsys, is_root, capture, monkeypatch):
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: {})
+ args = ['--all']
+ activation = activate.Activate(args)
+ activation.main()
+ out, err = capsys.readouterr()
+ assert 'Was unable to find any OSDs to activate' in err
+ assert 'Verify OSDs are present with ' in err
+
+ def test_detects_running_osds(self, capsys, is_root, capture, monkeypatch):
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report)
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.systemctl.osd_is_active', lambda x: True)
+ args = ['--all']
+ activation = activate.Activate(args)
+ activation.main()
+ out, err = capsys.readouterr()
+ assert 'a8789a96ce8b process is active. Skipping activation' in err
+ assert 'b8218eaa1634 process is active. Skipping activation' in err
+
+ def test_detects_osds_to_activate_systemd(self, is_root, capture, monkeypatch):
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report)
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.systemctl.osd_is_active', lambda x: False)
+ args = ['--all']
+ activation = activate.Activate(args)
+ activation.activate = capture
+ activation.main()
+ calls = sorted(capture.calls, key=lambda x: x['kwargs']['osd_id'])
+ assert calls[0]['kwargs']['osd_id'] == '0'
+ assert calls[0]['kwargs']['osd_fsid'] == '957d22b7-24ce-466a-9883-b8218eaa1634'
+ assert calls[1]['kwargs']['osd_id'] == '1'
+ assert calls[1]['kwargs']['osd_fsid'] == 'd0f3e4ad-e52a-4520-afc0-a8789a96ce8b'
+
+ def test_detects_osds_to_activate_no_systemd(self, is_root, capture, monkeypatch):
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report)
+ args = ['--all', '--no-systemd']
+ activation = activate.Activate(args)
+ activation.activate = capture
+ activation.main()
+ calls = sorted(capture.calls, key=lambda x: x['kwargs']['osd_id'])
+ assert calls[0]['kwargs']['osd_id'] == '0'
+ assert calls[0]['kwargs']['osd_fsid'] == '957d22b7-24ce-466a-9883-b8218eaa1634'
+ assert calls[1]['kwargs']['osd_id'] == '1'
+ assert calls[1]['kwargs']['osd_fsid'] == 'd0f3e4ad-e52a-4520-afc0-a8789a96ce8b'
+
+#
+# Activate All fixture
+#
+
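+# direct_report mirrors the mapping produced by the lvm listing code: each
+# osd_id maps to the list of LVs (with their ceph tags) that make up that OSD.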
+direct_report = {
+ "0": [
+ {
+ "lv_name": "osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "lv_path": "/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "lv_tags": "ceph.block_device=/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634,ceph.block_uuid=6MixOd-2Q1I-f8K3-PPOq-UJGV-L3A0-0XwUm4,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d4962338-46ff-4cd5-8ea6-c033dbdc5b44,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=957d22b7-24ce-466a-9883-b8218eaa1634,ceph.osd_id=0,ceph.type=block",
+ "lv_uuid": "6MixOd-2Q1I-f8K3-PPOq-UJGV-L3A0-0XwUm4",
+ "name": "osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "path": "/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "tags": {
+ "ceph.block_device": "/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "ceph.block_uuid": "6MixOd-2Q1I-f8K3-PPOq-UJGV-L3A0-0XwUm4",
+ "ceph.cephx_lockbox_secret": "",
+ "ceph.cluster_fsid": "d4962338-46ff-4cd5-8ea6-c033dbdc5b44",
+ "ceph.cluster_name": "ceph",
+ "ceph.crush_device_class": "",
+ "ceph.encrypted": "0",
+ "ceph.osd_fsid": "957d22b7-24ce-466a-9883-b8218eaa1634",
+ "ceph.osd_id": "0",
+ "ceph.type": "block"
+ },
+ "type": "block",
+ "vg_name": "ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44"
+ }
+ ],
+ "1": [
+ {
+ "lv_name": "osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "lv_path": "/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "lv_tags": "ceph.block_device=/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b,ceph.block_uuid=1igwLb-ZlmV-eLgp-hapx-c1Hr-M5gz-sHjnyW,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d4962338-46ff-4cd5-8ea6-c033dbdc5b44,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=d0f3e4ad-e52a-4520-afc0-a8789a96ce8b,ceph.osd_id=1,ceph.type=block",
+ "lv_uuid": "1igwLb-ZlmV-eLgp-hapx-c1Hr-M5gz-sHjnyW",
+ "name": "osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "path": "/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "tags": {
+ "ceph.block_device": "/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "ceph.block_uuid": "1igwLb-ZlmV-eLgp-hapx-c1Hr-M5gz-sHjnyW",
+ "ceph.cephx_lockbox_secret": "",
+ "ceph.cluster_fsid": "d4962338-46ff-4cd5-8ea6-c033dbdc5b44",
+ "ceph.cluster_name": "ceph",
+ "ceph.crush_device_class": "",
+ "ceph.encrypted": "0",
+ "ceph.osd_fsid": "d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "ceph.osd_id": "1",
+ "ceph.type": "block"
+ },
+ "type": "block",
+ "vg_name": "ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532"
+ }
+ ]
+}
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py
new file mode 100644
index 000000000..25c8a990c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py
@@ -0,0 +1,306 @@
+import pytest
+import json
+import random
+
+from argparse import ArgumentError
+from mock import MagicMock, patch
+
+from ceph_volume.devices.lvm import batch
+from ceph_volume.util import arg_validators
+
+
+class TestBatch(object):
+
+ def test_batch_instance(self, is_root):
+ b = batch.Batch([])
+ b.main()
+
+ def test_invalid_osd_ids_passed(self):
+ with pytest.raises(SystemExit):
+ batch.Batch(argv=['--osd-ids', '1', 'foo']).main()
+
+ def test_disjoint_device_lists(self, factory):
+ device1 = factory(used_by_ceph=False, available=True, abspath="/dev/sda")
+ device2 = factory(used_by_ceph=False, available=True, abspath="/dev/sdb")
+ devices = [device1, device2]
+ db_devices = [device2]
+ with pytest.raises(Exception) as disjoint_ex:
+ batch.ensure_disjoint_device_lists(devices, db_devices)
+ assert 'Device lists are not disjoint' in str(disjoint_ex.value)
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ def test_reject_partition(self, mocked_device):
+ mocked_device.return_value = MagicMock(
+ is_partition=True,
+ has_fs=False,
+ is_lvm_member=False,
+ has_gpt_headers=False,
+ has_partitions=False,
+ )
+ with pytest.raises(ArgumentError):
+ arg_validators.ValidBatchDevice()('foo')
+
+ @pytest.mark.parametrize('format_', ['pretty', 'json', 'json-pretty'])
+ def test_report(self, format_, factory, conf_ceph_stub, mock_device_generator):
+ # just ensure reporting works
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ devs = [mock_device_generator() for _ in range(5)]
+ args = factory(data_slots=1,
+ osds_per_device=1,
+ osd_ids=[],
+ report=True,
+ format=format_,
+ devices=devs,
+ db_devices=[],
+ wal_devices=[],
+ bluestore=True,
+ block_db_size="1G",
+ dmcrypt=True,
+ )
+ b = batch.Batch([])
+ plan = b.get_plan(args)
+ b.args = args
+ b.report(plan)
+
+ @pytest.mark.parametrize('format_', ['json', 'json-pretty'])
+ def test_json_report_valid_empty(self, format_, factory, conf_ceph_stub, mock_device_generator):
+ # ensure json reports are valid when empty
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ devs = []
+ args = factory(data_slots=1,
+ osds_per_device=1,
+ osd_ids=[],
+ report=True,
+ format=format_,
+ devices=devs,
+ db_devices=[],
+ wal_devices=[],
+ bluestore=True,
+ block_db_size="1G",
+ dmcrypt=True,
+ )
+ b = batch.Batch([])
+ plan = b.get_plan(args)
+ b.args = args
+ report = b._create_report(plan)
+ json.loads(report)
+
+ @pytest.mark.parametrize('format_', ['json', 'json-pretty'])
+ def test_json_report_valid_empty_unavailable_fast(self, format_, factory, conf_ceph_stub, mock_device_generator):
+        # ensure json reports are valid when a fast device is unavailable
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ devs = [mock_device_generator() for _ in range(5)]
+ fast_devs = [mock_device_generator()]
+ fast_devs[0].available_lvm = False
+ args = factory(data_slots=1,
+ osds_per_device=1,
+ osd_ids=[],
+ report=True,
+ format=format_,
+ devices=devs,
+ db_devices=fast_devs,
+ wal_devices=[],
+ bluestore=True,
+ block_db_size="1G",
+ dmcrypt=True,
+ )
+ b = batch.Batch([])
+ plan = b.get_plan(args)
+ b.args = args
+ report = b._create_report(plan)
+ json.loads(report)
+
+
+ @pytest.mark.parametrize('format_', ['json', 'json-pretty'])
+ def test_json_report_valid_empty_unavailable_very_fast(self, format_, factory, conf_ceph_stub, mock_device_generator):
+        # ensure json reports are valid when a very fast device is unavailable
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ devs = [mock_device_generator() for _ in range(5)]
+ fast_devs = [mock_device_generator()]
+ very_fast_devs = [mock_device_generator()]
+ very_fast_devs[0].available_lvm = False
+ args = factory(data_slots=1,
+ osds_per_device=1,
+ osd_ids=[],
+ report=True,
+ format=format_,
+ devices=devs,
+ db_devices=fast_devs,
+ wal_devices=very_fast_devs,
+ bluestore=True,
+ block_db_size="1G",
+ dmcrypt=True,
+ )
+ b = batch.Batch([])
+ plan = b.get_plan(args)
+ b.args = args
+ report = b._create_report(plan)
+ json.loads(report)
+
+ @pytest.mark.parametrize('rota', [0, 1])
+ def test_batch_sort_full(self, factory, rota):
+ device1 = factory(used_by_ceph=False, available=True, rotational=rota, abspath="/dev/sda")
+ device2 = factory(used_by_ceph=False, available=True, rotational=rota, abspath="/dev/sdb")
+ device3 = factory(used_by_ceph=False, available=True, rotational=rota, abspath="/dev/sdc")
+ devices = [device1, device2, device3]
+ args = factory(report=True,
+ devices=devices,
+ filestore=False,
+ )
+ b = batch.Batch([])
+ b.args = args
+ b._sort_rotational_disks()
+ assert len(b.args.devices) == 3
+
+ @pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
+ def test_batch_sort_mixed(self, factory, objectstore):
+ device1 = factory(used_by_ceph=False, available=True, rotational=1, abspath="/dev/sda")
+ device2 = factory(used_by_ceph=False, available=True, rotational=1, abspath="/dev/sdb")
+ device3 = factory(used_by_ceph=False, available=True, rotational=0, abspath="/dev/sdc")
+ devices = [device1, device2, device3]
+ args = factory(report=True,
+ devices=devices,
+ filestore=False if objectstore == 'bluestore' else True,
+ )
+ b = batch.Batch([])
+ b.args = args
+ b._sort_rotational_disks()
+ assert len(b.args.devices) == 2
+ if objectstore == 'bluestore':
+ assert len(b.args.db_devices) == 1
+ else:
+ assert len(b.args.journal_devices) == 1
+
+ def test_get_physical_osds_return_len(self, factory,
+ mock_devices_available,
+ conf_ceph_stub,
+ osds_per_device):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ args = factory(data_slots=1, osds_per_device=osds_per_device,
+ osd_ids=[], dmcrypt=False)
+ osds = batch.get_physical_osds(mock_devices_available, args)
+ assert len(osds) == len(mock_devices_available) * osds_per_device
+
+ def test_get_physical_osds_rel_size(self, factory,
+ mock_devices_available,
+ conf_ceph_stub,
+ osds_per_device):
+ args = factory(data_slots=1, osds_per_device=osds_per_device,
+ osd_ids=[], dmcrypt=False)
+ osds = batch.get_physical_osds(mock_devices_available, args)
+ for osd in osds:
+ assert osd.data[1] == 1.0 / osds_per_device
+
+ def test_get_physical_osds_abs_size(self, factory,
+ mock_devices_available,
+ conf_ceph_stub,
+ osds_per_device):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ args = factory(data_slots=1, osds_per_device=osds_per_device,
+ osd_ids=[], dmcrypt=False)
+ osds = batch.get_physical_osds(mock_devices_available, args)
+ for osd, dev in zip(osds, mock_devices_available):
+ assert osd.data[2] == int(dev.vg_size[0] / osds_per_device)
+
+ def test_get_physical_osds_osd_ids(self, factory,
+ mock_devices_available,
+ osds_per_device):
+ pass
+
+ def test_get_physical_fast_allocs_length(self, factory,
+ conf_ceph_stub,
+ mock_devices_available):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ args = factory(block_db_slots=None, get_block_db_size=None)
+ fast = batch.get_physical_fast_allocs(mock_devices_available,
+ 'block_db', 2, 2, args)
+ assert len(fast) == 2
+
+ def test_get_physical_fast_allocs_abs_size(self, factory,
+ conf_ceph_stub,
+ mock_devices_available):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ args = factory(block_db_slots=None, get_block_db_size=None)
+ fasts = batch.get_physical_fast_allocs(mock_devices_available,
+ 'block_db', 2, 2, args)
+ for fast, dev in zip(fasts, mock_devices_available):
+ assert fast[2] == int(dev.vg_size[0] / 2)
+
+ def test_batch_fast_allocations_one_block_db_length(self, factory, conf_ceph_stub,
+ mock_lv_device_generator):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+
+ b = batch.Batch([])
+ db_lv_devices = [mock_lv_device_generator()]
+ fast = b.fast_allocations(db_lv_devices, 1, 0, 'block_db')
+ assert len(fast) == 1
+
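+    # The parametrization below yields (slots, num_devs) pairs where, for each
+    # slots value x in 1..6, num_devs runs from 1 to x; combined with
+    # occupied_prior in 0..6 this covers partially pre-populated fast devices.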
+ @pytest.mark.parametrize('occupied_prior', range(7))
+ @pytest.mark.parametrize('slots,num_devs',
+                             [pair for sub in [list(zip([x]*x, range(1, x + 1))) for x in range(1, 7)] for pair in sub])
+ def test_get_physical_fast_allocs_length_existing(self,
+ num_devs,
+ slots,
+ occupied_prior,
+ factory,
+ conf_ceph_stub,
+ mock_device_generator):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ occupied_prior = min(occupied_prior, slots)
+ devs = [mock_device_generator() for _ in range(num_devs)]
+ already_assigned = 0
+ while already_assigned < occupied_prior:
+ dev_i = random.randint(0, num_devs - 1)
+ dev = devs[dev_i]
+ if len(dev.lvs) < occupied_prior:
+ dev.lvs.append('foo')
+ dev.path = '/dev/bar'
+ already_assigned = sum([len(d.lvs) for d in devs])
+ args = factory(block_db_slots=None, get_block_db_size=None)
+ expected_num_osds = max(len(devs) * slots - occupied_prior, 0)
+ fast = batch.get_physical_fast_allocs(devs,
+ 'block_db', slots,
+ expected_num_osds, args)
+ assert len(fast) == expected_num_osds
+ expected_assignment_on_used_devices = sum([slots - len(d.lvs) for d in devs if len(d.lvs) > 0])
+ assert len([f for f in fast if f[0] == '/dev/bar']) == expected_assignment_on_used_devices
+ assert len([f for f in fast if f[0] != '/dev/bar']) == expected_num_osds - expected_assignment_on_used_devices
+
+ def test_get_lvm_osds_return_len(self, factory,
+ mock_lv_device_generator,
+ conf_ceph_stub,
+ osds_per_device):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ args = factory(data_slots=1, osds_per_device=osds_per_device,
+ osd_ids=[], dmcrypt=False)
+ mock_lvs = [mock_lv_device_generator()]
+ osds = batch.get_lvm_osds(mock_lvs, args)
+ assert len(osds) == 1
+
+
+class TestBatchOsd(object):
+
+ def test_osd_class_ctor(self):
+ osd = batch.Batch.OSD('/dev/data', 1, '5G', 1, 1, None)
+ assert osd.data == batch.Batch.OSD.VolSpec('/dev/data',
+ 1,
+ '5G',
+ 1,
+ 'data')
+
+    def test_add_fast(self):
+ osd = batch.Batch.OSD('/dev/data', 1, '5G', 1, 1, None)
+ osd.add_fast_device('/dev/db', 1, '5G', 1, 'block_db')
+ assert osd.fast == batch.Batch.OSD.VolSpec('/dev/db',
+ 1,
+ '5G',
+ 1,
+ 'block_db')
+
+ def test_add_very_fast(self):
+ osd = batch.Batch.OSD('/dev/data', 1, '5G', 1, 1, None)
+ osd.add_very_fast_device('/dev/wal', 1, '5G', 1)
+ assert osd.very_fast == batch.Batch.OSD.VolSpec('/dev/wal',
+ 1,
+ '5G',
+ 1,
+ 'block_wal')
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py
new file mode 100644
index 000000000..fe792d5ab
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py
@@ -0,0 +1,8 @@
+from ceph_volume.devices.lvm import common
+
+
+class TestCommon(object):
+
+ def test_get_default_args_smoke(self):
+ default_args = common.get_default_args()
+ assert default_args
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py
new file mode 100644
index 000000000..1665d76c3
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py
@@ -0,0 +1,52 @@
+import pytest
+from ceph_volume.devices import lvm
+from mock import patch
+
+
+class TestCreate(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.create.Create([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Create an OSD by assigning an ID and FSID' in stdout
+
+ def test_main_shows_full_help(self, capsys):
+ with pytest.raises(SystemExit):
+ lvm.create.Create(argv=['--help']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Use the filestore objectstore' in stdout
+ assert 'Use the bluestore objectstore' in stdout
+ assert 'A physical device or logical' in stdout
+
+ @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
+ def test_excludes_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.create.Create(argv=['--data', '/dev/sdfoo', '--filestore', '--bluestore']).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --filestore (filestore) with --bluestore (bluestore)'
+ assert expected in stderr
+
+ @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
+ def test_excludes_other_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.create.Create(argv=[
+ '--bluestore', '--data', '/dev/sdfoo',
+ '--journal', '/dev/sf14',
+ ]).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --bluestore (bluestore) with --journal (filestore)'
+ assert expected in stderr
+
+ @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
+ def test_excludes_block_and_journal_flags(self, m_has_bs_label, fake_call, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.create.Create(argv=[
+ '--bluestore', '--data', '/dev/sdfoo', '--block.db', 'vg/ceph1',
+ '--journal', '/dev/sf14',
+ ]).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --block.db (bluestore) with --journal (filestore)'
+ assert expected in stderr
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py
new file mode 100644
index 000000000..4b8304ce6
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py
@@ -0,0 +1,59 @@
+import pytest
+from mock.mock import patch
+from ceph_volume.api import lvm
+from ceph_volume.devices.lvm import deactivate
+
+class TestDeactivate(object):
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ def test_no_osd(self, p_get_lvs):
+ p_get_lvs.return_value = []
+ with pytest.raises(StopIteration):
+ deactivate.deactivate_osd(0)
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ @patch("ceph_volume.util.system.unmount_tmpfs")
+ def test_unmount_tmpfs_called_osd_id(self, p_u_tmpfs, p_get_lvs):
+ FooVolume = lvm.Volume(
+ lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_id=0,ceph.cluster_name=foo,ceph.type=data")
+ p_get_lvs.return_value = [FooVolume]
+
+ deactivate.deactivate_osd(0)
+ p_u_tmpfs.assert_called_with(
+ '/var/lib/ceph/osd/{}-{}'.format('foo', 0))
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ @patch("ceph_volume.util.system.unmount_tmpfs")
+ def test_unmount_tmpfs_called_osd_uuid(self, p_u_tmpfs, p_get_lvs):
+ FooVolume = lvm.Volume(
+ lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_fsid=0,ceph.osd_id=1,ceph.cluster_name=foo,ceph.type=data")
+ p_get_lvs.return_value = [FooVolume]
+
+ deactivate.deactivate_osd(None, 0)
+ p_u_tmpfs.assert_called_with(
+ '/var/lib/ceph/osd/{}-{}'.format('foo', 1))
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ @patch("ceph_volume.util.system.unmount_tmpfs")
+ @patch("ceph_volume.util.encryption.dmcrypt_close")
+ def test_no_crypt_no_dmclose(self, p_dm_close, p_u_tmpfs, p_get_lvs):
+ FooVolume = lvm.Volume(
+ lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_id=0,ceph.cluster_name=foo,ceph.type=data")
+ p_get_lvs.return_value = [FooVolume]
+
+ deactivate.deactivate_osd(0)
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ @patch("ceph_volume.util.system.unmount_tmpfs")
+ @patch("ceph_volume.util.encryption.dmcrypt_close")
+ def test_crypt_dmclose(self, p_dm_close, p_u_tmpfs, p_get_lvs):
+ FooVolume = lvm.Volume(
+ lv_name='foo', lv_path='/dev/vg/foo', lv_uuid='123',
+ lv_tags="ceph.osd_id=0,ceph.encrypted=1,ceph.cluster_name=foo,ceph.type=data")
+ p_get_lvs.return_value = [FooVolume]
+
+ deactivate.deactivate_osd(0)
+ p_dm_close.assert_called_with('123')
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py
new file mode 100644
index 000000000..7e4d963c8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py
@@ -0,0 +1,352 @@
+import pytest
+from ceph_volume.devices import lvm
+from ceph_volume.api import lvm as api
+
+# TODO: add tests for the following commands:
+# ceph-volume list
+# ceph-volume list <path-to-pv>
+# ceph-volume list <path-to-vg>
+# ceph-volume list <path-to-lv>
+
+class TestReadableTag(object):
+
+ def test_dots_get_replaced(self):
+ result = lvm.listing.readable_tag('ceph.foo')
+ assert result == 'foo'
+
+ def test_underscores_are_replaced_with_spaces(self):
+ result = lvm.listing.readable_tag('ceph.long_tag')
+ assert result == 'long tag'
+
+
+class TestPrettyReport(object):
+
+ def test_is_empty(self, capsys):
+ lvm.listing.pretty_report({})
+ stdout, stderr = capsys.readouterr()
+ assert stdout == '\n'
+
+ def test_type_and_path_are_reported(self, capsys):
+ lvm.listing.pretty_report({0: [
+ {'type': 'data', 'path': '/dev/sda1', 'devices': ['/dev/sda']}
+ ]})
+ stdout, stderr = capsys.readouterr()
+ assert '[data] /dev/sda1' in stdout
+
+ def test_osd_id_header_is_reported(self, capsys):
+ lvm.listing.pretty_report({0: [
+ {'type': 'data', 'path': '/dev/sda1', 'devices': ['/dev/sda']}
+ ]})
+ stdout, stderr = capsys.readouterr()
+ assert '====== osd.0 =======' in stdout
+
+ def test_tags_are_included(self, capsys):
+ lvm.listing.pretty_report(
+ {0: [{
+ 'type': 'data',
+ 'path': '/dev/sda1',
+ 'tags': {'ceph.osd_id': '0'},
+ 'devices': ['/dev/sda'],
+ }]}
+ )
+ stdout, stderr = capsys.readouterr()
+ assert 'osd id' in stdout
+
+ def test_devices_are_comma_separated(self, capsys):
+ lvm.listing.pretty_report({0: [
+ {'type': 'data', 'path': '/dev/sda1', 'devices': ['/dev/sda', '/dev/sdb1']}
+ ]})
+ stdout, stderr = capsys.readouterr()
+ assert '/dev/sda,/dev/sdb1' in stdout
+
+
+class TestList(object):
+
+ def test_empty_full_json_zero_exit_status(self, fake_call, is_root, factory, capsys):
+ args = factory(format='json', device=None)
+ lvm.listing.List([]).list(args)
+ stdout, stderr = capsys.readouterr()
+ assert stdout == '{}\n'
+
+    def test_empty_device_json_zero_exit_status(self, is_root, factory, capsys):
+ args = factory(format='json', device='/dev/sda1')
+ lvm.listing.List([]).list(args)
+ stdout, stderr = capsys.readouterr()
+ assert stdout == '{}\n'
+
+ def test_empty_full_zero_exit_status(self, fake_call, is_root, factory):
+ args = factory(format='pretty', device=None)
+ with pytest.raises(SystemExit):
+ lvm.listing.List([]).list(args)
+
+ def test_empty_device_zero_exit_status(self, is_root, factory):
+ args = factory(format='pretty', device='/dev/sda1')
+ with pytest.raises(SystemExit):
+ lvm.listing.List([]).list(args)
+
+class TestFullReport(object):
+
+ def test_no_ceph_lvs(self, monkeypatch):
+        # ceph lvs are detected by looking into their tags
+ osd = api.Volume(lv_name='volume1', lv_path='/dev/VolGroup/lv',
+ lv_tags={})
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).full_report()
+ assert result == {}
+
+ def test_ceph_data_lv_reported(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ pv = api.PVolume(pv_name='/dev/sda1', pv_tags={}, pv_uuid="0000",
+ vg_name='VolGroup', lv_uuid="aaaa")
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(lvm.listing.api, 'get_single_pv', lambda **kwargs: pv)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).full_report()
+ assert result['0'][0]['name'] == 'volume1'
+
+ def test_ceph_journal_lv_reported(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ journal_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=journal'
+ pv = api.PVolume(pv_name='/dev/sda1', pv_tags={}, pv_uuid="0000",
+ vg_name="VolGroup", lv_uuid="aaaa")
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
+ journal = api.Volume(
+ lv_name='journal', lv_uuid='x', lv_tags=journal_tags,
+ lv_path='/dev/VolGroup/journal', vg_name='VolGroup')
+ volumes = []
+ volumes.append(osd)
+ volumes.append(journal)
+        monkeypatch.setattr(lvm.listing.api, 'get_single_pv', lambda **kwargs: pv)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).full_report()
+ assert result['0'][0]['name'] == 'volume1'
+ assert result['0'][1]['name'] == 'journal'
+
+ def test_ceph_wal_lv_reported(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.wal_uuid=x,ceph.type=data'
+ wal_tags = 'ceph.osd_id=0,ceph.wal_uuid=x,ceph.type=wal'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
+ wal = api.Volume(lv_name='wal', lv_uuid='x', lv_tags=wal_tags,
+ lv_path='/dev/VolGroup/wal', vg_name='VolGroup')
+ volumes = []
+ volumes.append(osd)
+ volumes.append(wal)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).full_report()
+ assert result['0'][0]['name'] == 'volume1'
+ assert result['0'][1]['name'] == 'wal'
+
+ @pytest.mark.parametrize('type_', ['journal', 'db', 'wal'])
+ def test_physical_2nd_device_gets_reported(self, type_, monkeypatch):
+ tags = ('ceph.osd_id=0,ceph.{t}_uuid=x,ceph.type=data,'
+ 'ceph.{t}_device=/dev/sda1').format(t=type_)
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ vg_name='VolGroup', lv_path='/dev/VolGroup/lv')
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ [osd])
+
+ result = lvm.listing.List([]).full_report()
+ assert result['0'][1]['path'] == '/dev/sda1'
+ assert result['0'][1]['tags'] == {'PARTUUID': 'x'}
+ assert result['0'][1]['type'] == type_
+
+
+class TestSingleReport(object):
+
+ def test_not_a_ceph_lv(self, monkeypatch):
+        # ceph lvs are detected by looking into their tags
+ lv = api.Volume(lv_name='lv', lv_tags={}, lv_path='/dev/VolGroup/lv',
+ vg_name='VolGroup')
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ [lv])
+
+ result = lvm.listing.List([]).single_report('VolGroup/lv')
+ assert result == {}
+
+ def test_report_a_ceph_lv(self, monkeypatch):
+        # ceph lvs are detected by looking into their tags
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ lv = api.Volume(lv_name='lv', vg_name='VolGroup', lv_uuid='aaaa',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(lv)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).single_report('VolGroup/lv')
+ assert result['0'][0]['name'] == 'lv'
+ assert result['0'][0]['lv_tags'] == tags
+ assert result['0'][0]['path'] == '/dev/VolGroup/lv'
+ assert result['0'][0]['devices'] == []
+
+ def test_report_a_ceph_journal_device(self, monkeypatch):
+        # ceph lvs are detected by looking at their tags
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,' + \
+ 'ceph.journal_device=/dev/sda1'
+ lv = api.Volume(lv_name='lv', lv_uuid='aaa', lv_tags=tags,
+ lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ [lv] if 'tags' in kwargs else [])
+
+ result = lvm.listing.List([]).single_report('/dev/sda1')
+ assert result['0'][0]['tags'] == {'PARTUUID': 'x'}
+ assert result['0'][0]['type'] == 'journal'
+ assert result['0'][0]['path'] == '/dev/sda1'
+
+ def test_report_a_ceph_lv_with_devices(self, monkeypatch):
+ pvolumes = []
+
+ tags = 'ceph.osd_id=0,ceph.type=data'
+ pv1 = api.PVolume(vg_name="VolGroup", pv_name='/dev/sda1',
+ pv_uuid='', pv_tags={}, lv_uuid="aaaa")
+ pv2 = api.PVolume(vg_name="VolGroup", pv_name='/dev/sdb1',
+ pv_uuid='', pv_tags={}, lv_uuid="aaaa")
+ pvolumes.append(pv1)
+ pvolumes.append(pv2)
+
+ volumes = []
+        lv = api.Volume(lv_name='lv', vg_name='VolGroup', lv_uuid='aaaa',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes.append(lv)
+
+ monkeypatch.setattr(lvm.listing.api, 'get_pvs', lambda **kwargs:
+ pvolumes)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ listing = lvm.listing.List([])
+ listing._pvs = [
+ {'lv_uuid': 'aaaa', 'pv_name': '/dev/sda1', 'pv_tags': '', 'pv_uuid': ''},
+ {'lv_uuid': 'aaaa', 'pv_name': '/dev/sdb1', 'pv_tags': '', 'pv_uuid': ''},
+ ]
+
+ result = listing.single_report('VolGroup/lv')
+ assert result['0'][0]['name'] == 'lv'
+ assert result['0'][0]['lv_tags'] == tags
+ assert result['0'][0]['path'] == '/dev/VolGroup/lv'
+ assert result['0'][0]['devices'] == ['/dev/sda1', '/dev/sdb1']
+
+ def test_report_by_osd_id_for_just_block_dev(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=block'
+ lvs = [ api.Volume(lv_name='lv1', lv_tags=tags, lv_path='/dev/vg/lv1',
+ lv_uuid='aaaa', vg_name='vg')
+ ]
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: lvs)
+
+ listing = lvm.listing.List([])
+ result = listing.single_report(0)
+ assert result['0'][0]['name'] == 'lv1'
+ assert result['0'][0]['lv_tags'] == tags
+ assert result['0'][0]['lv_path'] == '/dev/vg/lv1'
+ assert result['0'][0]['vg_name'] == 'vg'
+
+ def test_report_by_osd_id_for_just_data_dev(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ lvs = [ api.Volume(lv_name='lv1', lv_tags=tags, lv_path='/dev/vg/lv1',
+ lv_uuid='bbbb', vg_name='vg'),
+ ]
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: lvs)
+
+ listing = lvm.listing.List([])
+ result = listing.single_report(0)
+ assert result['0'][0]['name'] == 'lv1'
+ assert result['0'][0]['lv_tags'] == tags
+ assert result['0'][0]['lv_path'] == '/dev/vg/lv1'
+ assert result['0'][0]['vg_name'] == 'vg'
+
+ def test_report_by_osd_id_for_just_block_wal_and_db_dev(self, monkeypatch):
+ tags1 = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=block'
+ tags2 = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=wal'
+ tags3 = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=db'
+ lvs = [ api.Volume(lv_name='lv1', lv_tags=tags1, lv_path='/dev/vg/lv1',
+ lv_uuid='aaaa', vg_name='vg'),
+ api.Volume(lv_name='lv2', lv_tags=tags2, lv_path='/dev/vg/lv2',
+ lv_uuid='bbbb', vg_name='vg'),
+ api.Volume(lv_name='lv3', lv_tags=tags3, lv_path='/dev/vg/lv3',
+ lv_uuid='cccc', vg_name='vg'),
+ ]
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: lvs)
+
+ listing = lvm.listing.List([])
+ result = listing.single_report(0)
+ assert result['0'][0]['name'] == 'lv1'
+ assert result['0'][0]['lv_tags'] == tags1
+ assert result['0'][0]['lv_path'] == '/dev/vg/lv1'
+ assert result['0'][0]['vg_name'] == 'vg'
+ assert result['0'][1]['name'] == 'lv2'
+ assert result['0'][1]['lv_tags'] == tags2
+ assert result['0'][1]['lv_path'] == '/dev/vg/lv2'
+ assert result['0'][1]['vg_name'] == 'vg'
+ assert result['0'][2]['name'] == 'lv3'
+ assert result['0'][2]['lv_tags'] == tags3
+ assert result['0'][2]['lv_path'] == '/dev/vg/lv3'
+ assert result['0'][2]['vg_name'] == 'vg'
+
+ def test_report_by_osd_id_for_data_and_journal_dev(self, monkeypatch):
+ tags1 = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ tags2 = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=journal'
+ lvs = [ api.Volume(lv_name='lv1', lv_tags=tags1, lv_path='/dev/vg/lv1',
+ lv_uuid='aaaa', vg_name='vg'),
+ api.Volume(lv_name='lv2', lv_tags=tags2, lv_path='/dev/vg/lv2',
+ lv_uuid='bbbb', vg_name='vg'),
+ ]
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: lvs)
+
+ listing = lvm.listing.List([])
+ result = listing.single_report(0)
+ assert result['0'][0]['name'] == 'lv1'
+ assert result['0'][0]['lv_tags'] == tags1
+ assert result['0'][0]['lv_path'] == '/dev/vg/lv1'
+ assert result['0'][0]['vg_name'] == 'vg'
+ assert result['0'][1]['name'] == 'lv2'
+ assert result['0'][1]['lv_tags'] == tags2
+ assert result['0'][1]['lv_path'] == '/dev/vg/lv2'
+ assert result['0'][1]['vg_name'] == 'vg'
+
+ def test_report_by_nonexistent_osd_id(self, monkeypatch):
+ lv = api.Volume(lv_name='lv', lv_tags={}, lv_path='/dev/VolGroup/lv',
+ vg_name='VolGroup')
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ [lv])
+
+ result = lvm.listing.List([]).single_report('1')
+ assert result == {}
+
+ def test_report_a_ceph_lv_with_no_matching_devices(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.type=data'
+ lv = api.Volume(lv_name='lv', vg_name='VolGroup', lv_uuid='aaaa',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(lv)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ listing = lvm.listing.List([])
+ listing._pvs = [
+ {'lv_uuid': 'ffff', 'pv_name': '/dev/sda1', 'pv_tags': '',
+ 'pv_uuid': ''},
+ {'lv_uuid': 'ffff', 'pv_name': '/dev/sdb1', 'pv_tags': '',
+ 'pv_uuid': ''}]
+
+ result = listing.single_report('VolGroup/lv')
+ assert result['0'][0]['name'] == 'lv'
+ assert result['0'][0]['lv_tags'] == tags
+ assert result['0'][0]['path'] == '/dev/VolGroup/lv'
+ assert result['0'][0]['devices'] == []
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py
new file mode 100644
index 000000000..4c86d0ca1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py
@@ -0,0 +1,2299 @@
+import pytest
+from mock.mock import patch
+from ceph_volume import process
+from ceph_volume.api import lvm as api
+from ceph_volume.devices.lvm import migrate
+from ceph_volume.util.device import Device
+from ceph_volume.util import system
+
+class TestGetClusterName(object):
+
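+    # get_lvs() is stubbed with a FIFO of canned results: each call pops the
+    # next prepared list, so one test can drive several successive queries.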
+ mock_volumes = []
+ def mock_get_lvs(self, *args, **kwargs):
+ return self.mock_volumes.pop(0)
+
+ def test_cluster_found(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234,ceph.cluster_name=name_of_the_cluster'
+ vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
+ lv_path='/dev/VolGroup/lv1', lv_tags=tags)
+ self.mock_volumes = []
+ self.mock_volumes.append([vol])
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = migrate.get_cluster_name(osd_id='0', osd_fsid='1234')
+ assert "name_of_the_cluster" == result
+
+ def test_cluster_not_found(self, monkeypatch, capsys):
+ self.mock_volumes = []
+ self.mock_volumes.append([])
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ with pytest.raises(SystemExit) as error:
+ migrate.get_cluster_name(osd_id='0', osd_fsid='1234')
+ stdout, stderr = capsys.readouterr()
+ expected = 'Unexpected error, terminating'
+ assert expected in str(error.value)
+ expected = 'Unable to find any LV for source OSD: id:0 fsid:1234'
+ assert expected in stderr
+
+class TestFindAssociatedDevices(object):
+
+ mock_volumes = []
+ def mock_get_lvs(self, *args, **kwargs):
+ return self.mock_volumes.pop(0)
+
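+    # get_single_lv() is keyed on the lv_path filter, letting tests map each
+    # device path to the Volume the mocked API should return.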
+ mock_single_volumes = {}
+ def mock_get_single_lv(self, *args, **kwargs):
+ p = kwargs['filters']['lv_path']
+ return self.mock_single_volumes[p]
+
+ def test_lv_is_matched_id(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234'
+ vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
+ lv_path='/dev/VolGroup/lv1', lv_tags=tags)
+ self.mock_volumes = []
+ self.mock_volumes.append([vol])
+ self.mock_volumes.append([vol])
+ self.mock_volumes.append([])
+ self.mock_volumes.append([])
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': vol}
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = migrate.find_associated_devices(osd_id='0', osd_fsid='1234')
+ assert len(result) == 1
+ assert result[0][0].path == '/dev/VolGroup/lv1'
+ assert result[0][0].lvs == [vol]
+ assert result[0][1] == 'block'
+
+ def test_lv_is_matched_id2(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234'
+ vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=tags)
+ tags2 = 'ceph.osd_id=0,ceph.journal_uuid=xx,ceph.type=wal,ceph.osd_fsid=1234'
+ vol2 = api.Volume(lv_name='volume2', lv_uuid='z', vg_name='vg',
+ lv_path='/dev/VolGroup/lv2', lv_tags=tags2)
+ self.mock_volumes = []
+ self.mock_volumes.append([vol])
+ self.mock_volumes.append([vol])
+ self.mock_volumes.append([])
+ self.mock_volumes.append([vol2])
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': vol, '/dev/VolGroup/lv2': vol2}
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = migrate.find_associated_devices(osd_id='0', osd_fsid='1234')
+ assert len(result) == 2
+ for d in result:
+ if d[1] == 'block':
+ assert d[0].path == '/dev/VolGroup/lv1'
+ assert d[0].lvs == [vol]
+ elif d[1] == 'wal':
+ assert d[0].path == '/dev/VolGroup/lv2'
+ assert d[0].lvs == [vol2]
+ else:
+ assert False
+
+ def test_lv_is_matched_id3(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234'
+ vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=tags)
+ tags2 = 'ceph.osd_id=0,ceph.journal_uuid=xx,ceph.type=wal,ceph.osd_fsid=1234'
+ vol2 = api.Volume(lv_name='volume2', lv_uuid='z', vg_name='vg',
+ lv_path='/dev/VolGroup/lv2', lv_tags=tags2)
+ tags3 = 'ceph.osd_id=0,ceph.journal_uuid=xx,ceph.type=db,ceph.osd_fsid=1234'
+ vol3 = api.Volume(lv_name='volume3', lv_uuid='z', vg_name='vg',
+ lv_path='/dev/VolGroup/lv3', lv_tags=tags3)
+
+ self.mock_volumes = []
+ self.mock_volumes.append([vol])
+ self.mock_volumes.append([vol])
+ self.mock_volumes.append([vol3])
+ self.mock_volumes.append([vol2])
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': vol,
+ '/dev/VolGroup/lv2': vol2,
+ '/dev/VolGroup/lv3': vol3}
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = migrate.find_associated_devices(osd_id='0', osd_fsid='1234')
+ assert len(result) == 3
+ for d in result:
+ if d[1] == 'block':
+ assert d[0].path == '/dev/VolGroup/lv1'
+ assert d[0].lvs == [vol]
+ elif d[1] == 'wal':
+ assert d[0].path == '/dev/VolGroup/lv2'
+ assert d[0].lvs == [vol2]
+ elif d[1] == 'db':
+ assert d[0].path == '/dev/VolGroup/lv3'
+ assert d[0].lvs == [vol3]
+ else:
+ assert False
+
+ def test_lv_is_not_matched(self, monkeypatch, capsys):
+ self.mock_volumes = [None]
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ with pytest.raises(SystemExit) as error:
+ migrate.find_associated_devices(osd_id='1', osd_fsid='1234')
+ stdout, stderr = capsys.readouterr()
+ expected = 'Unexpected error, terminating'
+ assert expected in str(error.value)
+ expected = 'Unable to find any LV for source OSD: id:1 fsid:1234'
+ assert expected in stderr
+
+class TestVolumeTagTracker(object):
+ mock_single_volumes = {}
+ def mock_get_single_lv(self, *args, **kwargs):
+ p = kwargs['filters']['lv_path']
+ return self.mock_single_volumes[p]
+
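+    # process.call is replaced by mock_process, which records every argv list
+    # (e.g. the lvchange invocations) for later assertions.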
+ mock_process_input = []
+ def mock_process(self, *args, **kwargs):
+        self.mock_process_input.append(args[0])
+ return ('', '', 0)
+
+ def test_init(self, monkeypatch):
+ source_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234'
+ source_db_tags = 'ceph.osd_id=0,journal_uuid=x,ceph.type=db, osd_fsid=1234'
+ source_wal_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=wal'
+ target_tags="ceph.a=1,ceph.b=2,c=3,ceph.d=4" # 'c' to be bypassed
+ devices=[]
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags)
+ wal_vol = api.Volume(lv_name='volume3', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol}
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+        data_device = Device(path='/dev/VolGroup/lv1')
+        db_device = Device(path='/dev/VolGroup/lv2')
+        wal_device = Device(path='/dev/VolGroup/lv3')
+ devices.append([data_device, 'block'])
+ devices.append([db_device, 'db'])
+ devices.append([wal_device, 'wal'])
+
+ target = api.Volume(lv_name='target_name', lv_tags=target_tags,
+ lv_path='/dev/VolGroup/lv_target')
+        t = migrate.VolumeTagTracker(devices, target)
+
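+        # The tracker snapshots only ceph.*-prefixed tags, so 'c=3' on the
+        # target and the unprefixed tags on the db LV are not counted below.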
+ assert 3 == len(t.old_target_tags)
+
+ assert data_device == t.data_device
+ assert 4 == len(t.old_data_tags)
+ assert 'data' == t.old_data_tags['ceph.type']
+
+ assert db_device == t.db_device
+ assert 2 == len(t.old_db_tags)
+ assert 'db' == t.old_db_tags['ceph.type']
+
+ assert wal_device == t.wal_device
+ assert 3 == len(t.old_wal_tags)
+ assert 'wal' == t.old_wal_tags['ceph.type']
+
+ def test_update_tags_when_lv_create(self, monkeypatch):
+ source_tags = \
+ 'ceph.osd_id=0,ceph.journal_uuid=x,' \
+ 'ceph.type=data,ceph.osd_fsid=1234'
+ source_db_tags = \
+ 'ceph.osd_id=0,journal_uuid=x,ceph.type=db,' \
+ 'osd_fsid=1234'
+
+        devices = []
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+        data_device = Device(path='/dev/VolGroup/lv1')
+        db_device = Device(path='/dev/VolGroup/lv2')
+ devices.append([data_device, 'block'])
+ devices.append([db_device, 'db'])
+
+ target = api.Volume(lv_name='target_name', lv_tags='',
+ lv_uuid='wal_uuid',
+ lv_path='/dev/VolGroup/lv_target')
+        t = migrate.VolumeTagTracker(devices, target)
+
+ self.mock_process_input = []
+ t.update_tags_when_lv_create('wal')
+
+ assert 3 == len(self.mock_process_input)
+
+ assert ['lvchange',
+ '--addtag', 'ceph.wal_uuid=wal_uuid',
+ '--addtag', 'ceph.wal_device=/dev/VolGroup/lv_target',
+ '/dev/VolGroup/lv1'] == self.mock_process_input[0]
+
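+        # Tag order in the composed lvchange call is not guaranteed, so
+        # compare sorted copies of the argv lists.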
+        assert sorted(self.mock_process_input[1]) == sorted([
+            'lvchange',
+            '--addtag', 'ceph.osd_id=0',
+            '--addtag', 'ceph.journal_uuid=x',
+            '--addtag', 'ceph.type=wal',
+            '--addtag', 'ceph.osd_fsid=1234',
+            '--addtag', 'ceph.wal_uuid=wal_uuid',
+            '--addtag', 'ceph.wal_device=/dev/VolGroup/lv_target',
+            '/dev/VolGroup/lv_target'])
+
+ assert ['lvchange',
+ '--addtag', 'ceph.wal_uuid=wal_uuid',
+ '--addtag', 'ceph.wal_device=/dev/VolGroup/lv_target',
+ '/dev/VolGroup/lv2'] == self.mock_process_input[2]
+
+ def test_remove_lvs(self, monkeypatch):
+ source_tags = \
+ 'ceph.osd_id=0,ceph.journal_uuid=x,' \
+ 'ceph.type=data,ceph.osd_fsid=1234,ceph.wal_uuid=aaaaa'
+ source_db_tags = \
+ 'ceph.osd_id=0,journal_uuid=x,ceph.type=db,' \
+ 'osd_fsid=1234,ceph.wal_device=aaaaa'
+ source_wal_tags = \
+ 'ceph.wal_uuid=uuid,ceph.wal_device=device,' \
+ 'ceph.osd_id=0,ceph.type=wal'
+
+        devices = []
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags)
+ wal_vol = api.Volume(lv_name='volume3', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+        data_device = Device(path='/dev/VolGroup/lv1')
+        db_device = Device(path='/dev/VolGroup/lv2')
+        wal_device = Device(path='/dev/VolGroup/lv3')
+ devices.append([data_device, 'block'])
+ devices.append([db_device, 'db'])
+ devices.append([wal_device, 'wal'])
+
+ target = api.Volume(lv_name='target_name', lv_tags='',
+ lv_path='/dev/VolGroup/lv_target')
+        t = migrate.VolumeTagTracker(devices, target)
+
+ device_to_remove = devices.copy()
+
+ self.mock_process_input = []
+ t.remove_lvs(device_to_remove, 'db')
+
+ assert 3 == len(self.mock_process_input)
+ assert ['lvchange',
+ '--deltag', 'ceph.wal_uuid=uuid',
+ '--deltag', 'ceph.wal_device=device',
+ '--deltag', 'ceph.osd_id=0',
+ '--deltag', 'ceph.type=wal',
+ '/dev/VolGroup/lv3'] == self.mock_process_input[0]
+ assert ['lvchange',
+ '--deltag', 'ceph.wal_uuid=aaaaa',
+ '/dev/VolGroup/lv1'] == self.mock_process_input[1]
+ assert ['lvchange',
+ '--deltag', 'ceph.wal_device=aaaaa',
+ '/dev/VolGroup/lv2'] == self.mock_process_input[2]
+
+ def test_replace_lvs(self, monkeypatch):
+ source_tags = \
+ 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234,'\
+ 'ceph.wal_uuid=wal_uuid,ceph.db_device=/dbdevice'
+ source_db_tags = \
+ 'ceph.osd_id=0,ceph.type=db,ceph.osd_fsid=1234'
+ source_wal_tags = \
+ 'ceph.wal_uuid=uuid,ceph.wal_device=device,' \
+ 'ceph.osd_id=0,ceph.type=wal'
+
+        devices = []
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2', lv_uuid='dbuuid', vg_name='vg',
+ lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags)
+ wal_vol = api.Volume(lv_name='volume3', lv_uuid='waluuid', vg_name='vg',
+ lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+        data_device = Device(path='/dev/VolGroup/lv1')
+        db_device = Device(path='/dev/VolGroup/lv2')
+        wal_device = Device(path='/dev/VolGroup/lv3')
+ devices.append([data_device, 'block'])
+ devices.append([db_device, 'db'])
+ devices.append([wal_device, 'wal'])
+
+ target = api.Volume(lv_name='target_name',
+ lv_uuid='ttt',
+ lv_tags='ceph.tag_to_remove=aaa',
+ lv_path='/dev/VolGroup/lv_target')
+        t = migrate.VolumeTagTracker(devices, target)
+
+ self.mock_process_input = []
+ t.replace_lvs(devices, 'db')
+
+ assert 5 == len(self.mock_process_input)
+
+ assert ['lvchange',
+ '--deltag', 'ceph.osd_id=0',
+ '--deltag', 'ceph.type=db',
+ '--deltag', 'ceph.osd_fsid=1234',
+ '/dev/VolGroup/lv2'] == self.mock_process_input[0]
+ assert ['lvchange',
+ '--deltag', 'ceph.wal_uuid=uuid',
+ '--deltag', 'ceph.wal_device=device',
+ '--deltag', 'ceph.osd_id=0',
+ '--deltag', 'ceph.type=wal',
+ '/dev/VolGroup/lv3'] == self.mock_process_input[1]
+ assert ['lvchange',
+ '--deltag', 'ceph.db_device=/dbdevice',
+ '--deltag', 'ceph.wal_uuid=wal_uuid',
+ '/dev/VolGroup/lv1'] == self.mock_process_input[2]
+
+ assert ['lvchange',
+ '--addtag', 'ceph.db_uuid=ttt',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv_target',
+ '/dev/VolGroup/lv1'] == self.mock_process_input[3]
+
+        assert sorted(self.mock_process_input[4]) == sorted([
+            'lvchange',
+            '--addtag', 'ceph.osd_id=0',
+            '--addtag', 'ceph.osd_fsid=1234',
+            '--addtag', 'ceph.type=db',
+            '--addtag', 'ceph.db_uuid=ttt',
+            '--addtag', 'ceph.db_device=/dev/VolGroup/lv_target',
+            '/dev/VolGroup/lv_target'])
+
+ def test_undo(self, monkeypatch):
+ source_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234'
+ source_db_tags = 'ceph.osd_id=0,journal_uuid=x,ceph.type=db, osd_fsid=1234'
+ source_wal_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=wal'
+ target_tags=""
+ devices=[]
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags)
+ wal_vol = api.Volume(lv_name='volume3', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+        data_device = Device(path='/dev/VolGroup/lv1')
+        db_device = Device(path='/dev/VolGroup/lv2')
+        wal_device = Device(path='/dev/VolGroup/lv3')
+ devices.append([data_device, 'block'])
+ devices.append([db_device, 'db'])
+ devices.append([wal_device, 'wal'])
+
+ target = api.Volume(lv_name='target_name', lv_tags=target_tags,
+ lv_path='/dev/VolGroup/lv_target')
+        t = migrate.VolumeTagTracker(devices, target)
+
+        target.tags['ceph.a'] = 'aa'
+        target.tags['ceph.b'] = 'bb'
+
+        data_vol.tags['ceph.journal_uuid'] = 'z'
+
+ db_vol.tags.pop('ceph.type')
+
+ wal_vol.tags.clear()
+
+ assert 2 == len(target.tags)
+ assert 4 == len(data_vol.tags)
+ assert 1 == len(db_vol.tags)
+
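+        # undo() must restore the snapshot taken at construction: re-add the
+        # mutated/removed tags and strip everything added to the target.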
+ self.mock_process_input = []
+ t.undo()
+
+ assert 0 == len(target.tags)
+ assert 4 == len(data_vol.tags)
+ assert 'x' == data_vol.tags['ceph.journal_uuid']
+
+ assert 2 == len(db_vol.tags)
+ assert 'db' == db_vol.tags['ceph.type']
+
+ assert 3 == len(wal_vol.tags)
+ assert 'wal' == wal_vol.tags['ceph.type']
+
+ assert 6 == len(self.mock_process_input)
+ assert 'lvchange' in self.mock_process_input[0]
+ assert '--deltag' in self.mock_process_input[0]
+ assert 'ceph.journal_uuid=z' in self.mock_process_input[0]
+ assert '/dev/VolGroup/lv1' in self.mock_process_input[0]
+
+ assert 'lvchange' in self.mock_process_input[1]
+ assert '--addtag' in self.mock_process_input[1]
+ assert 'ceph.journal_uuid=x' in self.mock_process_input[1]
+ assert '/dev/VolGroup/lv1' in self.mock_process_input[1]
+
+ assert 'lvchange' in self.mock_process_input[2]
+ assert '--deltag' in self.mock_process_input[2]
+ assert 'ceph.osd_id=0' in self.mock_process_input[2]
+ assert '/dev/VolGroup/lv2' in self.mock_process_input[2]
+
+ assert 'lvchange' in self.mock_process_input[3]
+ assert '--addtag' in self.mock_process_input[3]
+ assert 'ceph.type=db' in self.mock_process_input[3]
+ assert '/dev/VolGroup/lv2' in self.mock_process_input[3]
+
+ assert 'lvchange' in self.mock_process_input[4]
+ assert '--addtag' in self.mock_process_input[4]
+ assert 'ceph.type=wal' in self.mock_process_input[4]
+ assert '/dev/VolGroup/lv3' in self.mock_process_input[4]
+
+ assert 'lvchange' in self.mock_process_input[5]
+ assert '--deltag' in self.mock_process_input[5]
+ assert 'ceph.a=aa' in self.mock_process_input[5]
+ assert 'ceph.b=bb' in self.mock_process_input[5]
+ assert '/dev/VolGroup/lv_target' in self.mock_process_input[5]
+
+class TestNew(object):
+
+ mock_volume = None
+ def mock_get_lv_by_fullname(self, *args, **kwargs):
+ return self.mock_volume
+
+ mock_process_input = []
+ def mock_process(self, *args, **kwargs):
+        self.mock_process_input.append(args[0])
+ return ('', '', 0)
+
+ mock_single_volumes = {}
+ def mock_get_single_lv(self, *args, **kwargs):
+ p = kwargs['filters']['lv_path']
+ return self.mock_single_volumes[p]
+
+ mock_volumes = []
+ def mock_get_lvs(self, *args, **kwargs):
+ return self.mock_volumes.pop(0)
+
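+    # The NewDB/NewWAL tests share the helpers above: a fixed LV returned by
+    # get_lv_by_fullname, path-keyed single-LV lookups, a FIFO of get_lvs()
+    # results, and a recorder for every process.call argv list.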
+ def test_newdb_non_root(self):
+ with pytest.raises(Exception) as error:
+ migrate.NewDB(argv=[
+ '--osd-id', '1',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--target', 'vgname/new_db']).main()
+ expected = 'This command needs to be executed with sudo or as root'
+ assert expected in str(error.value)
+
+ @patch('os.getuid')
+ def test_newdb_not_target_lvm(self, m_getuid, capsys):
+ m_getuid.return_value = 0
+ with pytest.raises(SystemExit) as error:
+ migrate.NewDB(argv=[
+ '--osd-id', '1',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--target', 'vgname/new_db']).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Unable to attach new volume : vgname/new_db'
+ assert expected in str(error.value)
+ expected = 'Target path vgname/new_db is not a Logical Volume'
+ assert expected in stderr
+
+ @patch('os.getuid')
+ def test_newdb_already_in_use(self, m_getuid, monkeypatch, capsys):
+ m_getuid.return_value = 0
+
+ self.mock_volume = api.Volume(lv_name='volume1',
+ lv_uuid='y',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+                                      lv_tags='ceph.osd_id=5')  # an osd_id tag marks the LV as used by ceph
+ monkeypatch.setattr(api, 'get_lv_by_fullname', self.mock_get_lv_by_fullname)
+
+ with pytest.raises(SystemExit) as error:
+ migrate.NewDB(argv=[
+ '--osd-id', '1',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--target', 'vgname/new_db']).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Unable to attach new volume : vgname/new_db'
+ assert expected in str(error.value)
+ expected = 'Target Logical Volume is already used by ceph: vgname/new_db'
+ assert expected in stderr
+
+ @patch('os.getuid')
+ def test_newdb(self, m_getuid, monkeypatch, capsys):
+ m_getuid.return_value = 0
+
+ source_tags = \
+ 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234,'\
+ 'ceph.wal_uuid=wal_uuid,ceph.db_device=/dbdevice'
+ source_wal_tags = \
+ 'ceph.wal_uuid=uuid,ceph.wal_device=device,' \
+ 'ceph.osd_id=0,ceph.type=wal'
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv3': wal_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/target_volume',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: False)
+
+        # find_associated_devices will call get_lvs() 4 times and this test
+        # needs the results arranged accordingly
+ self.mock_volumes = []
+ self.mock_volumes.append([data_vol, wal_vol])
+ self.mock_volumes.append([data_vol])
+ self.mock_volumes.append([])
+ self.mock_volumes.append([wal_vol])
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph_cluster')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+
+ migrate.NewDB(argv=[
+ '--osd-id', '1',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--target', 'vgname/new_db']).main()
+
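+        # Only the last five recorded calls matter here: re-tagging of the
+        # source LVs followed by ceph-bluestore-tool bluefs-bdev-new-db.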
+ n = len(self.mock_process_input)
+ assert n >= 5
+
+ assert self.mock_process_input[n - 5] == [
+ 'lvchange',
+ '--deltag', 'ceph.db_device=/dbdevice',
+ '/dev/VolGroup/lv1']
+ assert self.mock_process_input[n - 4] == [
+ 'lvchange',
+ '--addtag', 'ceph.db_uuid=y',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
+ '/dev/VolGroup/lv1']
+
+        assert sorted(self.mock_process_input[n - 3]) == sorted([
+            'lvchange',
+            '--addtag', 'ceph.wal_uuid=uuid',
+            '--addtag', 'ceph.osd_id=0',
+            '--addtag', 'ceph.type=db',
+            '--addtag', 'ceph.osd_fsid=1234',
+            '--addtag', 'ceph.db_uuid=y',
+            '--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
+            '/dev/VolGroup/target_volume'])
+
+ assert self.mock_process_input[n - 2] == [
+ 'lvchange',
+ '--addtag', 'ceph.db_uuid=y',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
+ '/dev/VolGroup/lv3']
+
+ assert self.mock_process_input[n - 1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph_cluster-1',
+ '--dev-target', '/dev/VolGroup/target_volume',
+ '--command', 'bluefs-bdev-new-db']
+
+ def test_newdb_active_systemd(self, is_root, monkeypatch, capsys):
+ source_tags = \
+ 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234,'\
+ 'ceph.wal_uuid=wal_uuid,ceph.db_device=/dbdevice'
+ source_wal_tags = \
+ 'ceph.wal_uuid=uuid,ceph.wal_device=device,' \
+ 'ceph.osd_id=0,ceph.type=wal'
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv3': wal_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/target_volume',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: True)
+
+        # find_associated_devices will call get_lvs() 4 times and this test
+        # needs the results arranged accordingly
+ self.mock_volumes = []
+ self.mock_volumes.append([data_vol, wal_vol])
+ self.mock_volumes.append([data_vol])
+ self.mock_volumes.append([])
+ self.mock_volumes.append([wal_vol])
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph_cluster')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+
+ m = migrate.NewDB(argv=[
+ '--osd-id', '1',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--target', 'vgname/new_db'])
+
+ with pytest.raises(SystemExit) as error:
+ m.main()
+
+ stdout, stderr = capsys.readouterr()
+
+ assert 'Unable to attach new volume for OSD: 1' == str(error.value)
+ assert '--> OSD ID is running, stop it with: systemctl stop ceph-osd@1' == stderr.rstrip()
+ assert not stdout
+
+ def test_newdb_no_systemd(self, is_root, monkeypatch):
+ source_tags = \
+ 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234,'\
+ 'ceph.wal_uuid=wal_uuid,ceph.db_device=/dbdevice'
+ source_wal_tags = \
+ 'ceph.wal_uuid=uuid,ceph.wal_device=device,' \
+ 'ceph.osd_id=0,ceph.type=wal'
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv3': wal_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/target_volume',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+        # find_associated_devices will call get_lvs() 4 times and this test
+        # needs the results arranged accordingly
+ self.mock_volumes = []
+ self.mock_volumes.append([data_vol, wal_vol])
+ self.mock_volumes.append([data_vol])
+ self.mock_volumes.append([])
+ self.mock_volumes.append([wal_vol])
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph_cluster')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+
+ migrate.NewDB(argv=[
+ '--osd-id', '1',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--target', 'vgname/new_db',
+ '--no-systemd']).main()
+
+ n = len(self.mock_process_input)
+ assert n >= 5
+
+ assert self.mock_process_input[n - 5] == [
+ 'lvchange',
+ '--deltag', 'ceph.db_device=/dbdevice',
+ '/dev/VolGroup/lv1']
+ assert self.mock_process_input[n - 4] == [
+ 'lvchange',
+ '--addtag', 'ceph.db_uuid=y',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
+ '/dev/VolGroup/lv1']
+
+        assert sorted(self.mock_process_input[n - 3]) == sorted([
+            'lvchange',
+            '--addtag', 'ceph.wal_uuid=uuid',
+            '--addtag', 'ceph.osd_id=0',
+            '--addtag', 'ceph.type=db',
+            '--addtag', 'ceph.osd_fsid=1234',
+            '--addtag', 'ceph.db_uuid=y',
+            '--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
+            '/dev/VolGroup/target_volume'])
+
+ assert self.mock_process_input[n - 2] == [
+ 'lvchange',
+ '--addtag', 'ceph.db_uuid=y',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
+ '/dev/VolGroup/lv3']
+
+ assert self.mock_process_input[n - 1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph_cluster-1',
+ '--dev-target', '/dev/VolGroup/target_volume',
+ '--command', 'bluefs-bdev-new-db']
+
+ @patch('os.getuid')
+ def test_newwal(self, m_getuid, monkeypatch, capsys):
+ m_getuid.return_value = 0
+
+ source_tags = \
+ 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234'
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/target_volume',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname', self.mock_get_lv_by_fullname)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", lambda id: False)
+
+        # find_associated_devices will call get_lvs() 4 times and this test
+        # needs the results arranged accordingly
+ self.mock_volumes = []
+ self.mock_volumes.append([data_vol])
+ self.mock_volumes.append([data_vol])
+ self.mock_volumes.append([])
+ self.mock_volumes.append([])
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name', lambda osd_id, osd_fsid: 'cluster')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+
+ migrate.NewWAL(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--target', 'vgname/new_wal']).main()
+
+ n = len(self.mock_process_input)
+ assert n >= 3
+
+ assert self.mock_process_input[n - 3] == [
+ 'lvchange',
+ '--addtag', 'ceph.wal_uuid=y',
+ '--addtag', 'ceph.wal_device=/dev/VolGroup/target_volume',
+ '/dev/VolGroup/lv1']
+
+        assert sorted(self.mock_process_input[n - 2]) == sorted([
+            'lvchange',
+            '--addtag', 'ceph.osd_id=0',
+            '--addtag', 'ceph.type=wal',
+            '--addtag', 'ceph.osd_fsid=1234',
+            '--addtag', 'ceph.wal_uuid=y',
+            '--addtag', 'ceph.wal_device=/dev/VolGroup/target_volume',
+            '/dev/VolGroup/target_volume'])
+
+ assert self.mock_process_input[n - 1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/cluster-2',
+ '--dev-target', '/dev/VolGroup/target_volume',
+ '--command', 'bluefs-bdev-new-wal']
+
+ def test_newwal_active_systemd(self, is_root, monkeypatch, capsys):
+ source_tags = \
+ 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234'
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/target_volume',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname', self.mock_get_lv_by_fullname)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", lambda id: True)
+
+        # find_associated_devices will call get_lvs() 4 times and this test
+        # needs the results arranged accordingly
+ self.mock_volumes = []
+ self.mock_volumes.append([data_vol])
+ self.mock_volumes.append([data_vol])
+ self.mock_volumes.append([])
+ self.mock_volumes.append([])
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name', lambda osd_id, osd_fsid: 'cluster')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+
+ m = migrate.NewWAL(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--target', 'vgname/new_wal'])
+
+ with pytest.raises(SystemExit) as error:
+ m.main()
+
+ stdout, stderr = capsys.readouterr()
+
+ assert 'Unable to attach new volume for OSD: 2' == str(error.value)
+ assert '--> OSD ID is running, stop it with: systemctl stop ceph-osd@2' == stderr.rstrip()
+ assert not stdout
+
+ def test_newwal_no_systemd(self, is_root, monkeypatch):
+ source_tags = \
+ 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234'
+
+ data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid', vg_name='vg',
+ lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
+
+ self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol}
+
+ monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y', vg_name='vg',
+ lv_path='/dev/VolGroup/target_volume',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname', self.mock_get_lv_by_fullname)
+
+        # find_associated_devices will call get_lvs() 4 times and this test
+        # needs the results arranged accordingly
+ self.mock_volumes = []
+ self.mock_volumes.append([data_vol])
+ self.mock_volumes.append([data_vol])
+ self.mock_volumes.append([])
+ self.mock_volumes.append([])
+
+ monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name', lambda osd_id, osd_fsid: 'cluster')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+
+ migrate.NewWAL(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--target', 'vgname/new_wal',
+ '--no-systemd']).main()
+
+ n = len(self.mock_process_input)
+ assert n >= 3
+
+ assert self.mock_process_input[n - 3] == [
+ 'lvchange',
+ '--addtag', 'ceph.wal_uuid=y',
+ '--addtag', 'ceph.wal_device=/dev/VolGroup/target_volume',
+ '/dev/VolGroup/lv1']
+
+        assert sorted(self.mock_process_input[n - 2]) == sorted([
+            'lvchange',
+            '--addtag', 'ceph.osd_id=0',
+            '--addtag', 'ceph.type=wal',
+            '--addtag', 'ceph.osd_fsid=1234',
+            '--addtag', 'ceph.wal_uuid=y',
+            '--addtag', 'ceph.wal_device=/dev/VolGroup/target_volume',
+            '/dev/VolGroup/target_volume'])
+
+ assert self.mock_process_input[n - 1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/cluster-2',
+ '--dev-target', '/dev/VolGroup/target_volume',
+ '--command', 'bluefs-bdev-new-wal']
+
+class TestMigrate(object):
+
+ def test_invalid_osd_id_passed(self, is_root):
+ with pytest.raises(SystemExit):
+ migrate.Migrate(argv=['--osd-fsid', '123', '--from', 'data', '--target', 'foo', '--osd-id', 'foo']).main()
+
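+    # Shared mocks, mirroring TestNew: canned LV lookups plus a recorder for
+    # every process.call argv list.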
+ mock_volume = None
+ def mock_get_lv_by_fullname(self, *args, **kwargs):
+ return self.mock_volume
+
+ mock_process_input = []
+ def mock_process(self, *args, **kwargs):
+ self.mock_process_input.append(args[0])
+ return ('', '', 0)
+
+ mock_single_volumes = {}
+ def mock_get_single_lv(self, *args, **kwargs):
+ p = kwargs['filters']['lv_path']
+ return self.mock_single_volumes[p]
+
+ mock_volumes = []
+ def mock_get_lvs(self, *args, **kwargs):
+ return self.mock_volumes.pop(0)
+
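+    # get_source_devices() should filter the associated devices down to the
+    # types requested via --from; exercised below with two different sets.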
+ def test_get_source_devices(self, monkeypatch):
+
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234'
+ source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = api.Volume(lv_name='volume2', lv_uuid='y',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags='ceph.osd_id=5,ceph.osd_type=db')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ argv = [
+ '--osd-id', '2',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--from', 'data', 'wal',
+ '--target', 'vgname/new_wal'
+ ]
+ m = migrate.Migrate(argv=argv)
+        m.args = m.make_parser('ceph-volume lvm migration', 'help').parse_args(argv)
+ res_devices = m.get_source_devices(devices)
+
+ assert 2 == len(res_devices)
+ assert devices[0] == res_devices[0]
+ assert devices[2] == res_devices[1]
+
+ argv = [
+ '--osd-id', '2',
+ '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
+ '--from', 'db', 'wal', 'data',
+ '--target', 'vgname/new_wal'
+ ]
+ m = migrate.Migrate(argv=argv)
+        m.args = m.make_parser('ceph-volume lvm migration', 'help').parse_args(argv)
+ res_devices = m.get_source_devices(devices)
+
+ assert 3 == len(res_devices)
+ assert devices[0] == res_devices[0]
+ assert devices[1] == res_devices[1]
+ assert devices[2] == res_devices[2]
+
+ def test_migrate_without_args(self, capsys):
+ help_msg = """
+Moves BlueFS data from source volume(s) to the target one, source
+volumes (except the main (i.e. data or block) one) are removed on
+success. LVM volumes are permitted for Target only, both already
+attached or new logical one. In the latter case it is attached to OSD
+replacing one of the source devices. Following replacement rules apply
+(in the order of precedence, stop on the first match):
+* if source list has DB volume - target device replaces it.
+* if source list has WAL volume - target device replace it.
+* if source list has slow volume only - operation is not permitted,
+ requires explicit allocation via new-db/new-wal command.
+
+Example calls for supported scenarios:
+
+ Moves BlueFS data from main device to LV already attached as DB:
+
+ ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data --target vgname/db
+
+ Moves BlueFS data from shared main device to LV which will be attached
+ as a new DB:
+
+ ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data --target vgname/new_db
+
+ Moves BlueFS data from DB device to new LV, DB is replaced:
+
+ ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from db --target vgname/new_db
+
+ Moves BlueFS data from main and DB devices to new LV, DB is replaced:
+
+ ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data db --target vgname/new_db
+
+ Moves BlueFS data from main, DB and WAL devices to new LV, WAL is
+ removed and DB is replaced:
+
+ ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data db wal --target vgname/new_db
+
+ Moves BlueFS data from main, DB and WAL devices to main device, WAL
+ and DB are removed:
+
+ ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from db wal --target vgname/data
+
+"""
+ m = migrate.Migrate(argv=[])
+ m.main()
+ stdout, stderr = capsys.readouterr()
+ assert help_msg in stdout
+ assert not stderr
+
+ @patch('os.getuid')
+ def test_migrate_data_db_to_new_db(self, m_getuid, monkeypatch):
+ m_getuid.return_value = 0
+
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2_new',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: False)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'data', 'db', 'wal',
+ '--target', 'vgname/new_wal'])
+ m.main()
+
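+        # Expected tail of the recorded calls: strip the old tags from the
+        # replaced DB LV, retarget the data LV's db_* tags, tag the new LV,
+        # then invoke ceph-bluestore-tool to migrate the BlueFS data.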
+ n = len(self.mock_process_input)
+ assert n >= 5
+
+        assert self.mock_process_input[n-5] == [
+ 'lvchange',
+ '--deltag', 'ceph.osd_id=2',
+ '--deltag', 'ceph.type=db',
+ '--deltag', 'ceph.osd_fsid=1234',
+ '--deltag', 'ceph.cluster_name=ceph',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '/dev/VolGroup/lv2']
+
+        assert self.mock_process_input[n-4] == [
+ 'lvchange',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '/dev/VolGroup/lv1']
+
+        assert self.mock_process_input[n-3] == [
+ 'lvchange',
+ '--addtag', 'ceph.db_uuid=new-db-uuid',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+ '/dev/VolGroup/lv1']
+
+        assert self.mock_process_input[n-2] == [
+ 'lvchange',
+ '--addtag', 'ceph.osd_id=2',
+ '--addtag', 'ceph.type=db',
+ '--addtag', 'ceph.osd_fsid=1234',
+ '--addtag', 'ceph.cluster_name=ceph',
+ '--addtag', 'ceph.db_uuid=new-db-uuid',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+ '/dev/VolGroup/lv2_new']
+
+        assert self.mock_process_input[n-1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph-2',
+ '--dev-target', '/dev/VolGroup/lv2_new',
+ '--command', 'bluefs-bdev-migrate',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block.db']
+
+ def test_migrate_data_db_to_new_db_active_systemd(self, is_root, monkeypatch, capsys):
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2_new',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: True)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'data', 'db', 'wal',
+ '--target', 'vgname/new_wal'])
+
+ with pytest.raises(SystemExit) as error:
+ m.main()
+
+ stdout, stderr = capsys.readouterr()
+
+ assert 'Unable to migrate devices associated with OSD ID: 2' == str(error.value)
+ assert '--> OSD is running, stop it with: systemctl stop ceph-osd@2' == stderr.rstrip()
+ assert not stdout
+
+ def test_migrate_data_db_to_new_db_no_systemd(self, is_root, monkeypatch):
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2_new',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'data', 'db', 'wal',
+ '--target', 'vgname/new_wal',
+ '--no-systemd'])
+ m.main()
+
+ n = len(self.mock_process_input)
+ assert n >= 5
+
+        assert self.mock_process_input[n-5] == [
+ 'lvchange',
+ '--deltag', 'ceph.osd_id=2',
+ '--deltag', 'ceph.type=db',
+ '--deltag', 'ceph.osd_fsid=1234',
+ '--deltag', 'ceph.cluster_name=ceph',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '/dev/VolGroup/lv2']
+
+        assert self.mock_process_input[n-4] == [
+ 'lvchange',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '/dev/VolGroup/lv1']
+
+        assert self.mock_process_input[n-3] == [
+ 'lvchange',
+ '--addtag', 'ceph.db_uuid=new-db-uuid',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+ '/dev/VolGroup/lv1']
+
+        assert self.mock_process_input[n-2] == [
+ 'lvchange',
+ '--addtag', 'ceph.osd_id=2',
+ '--addtag', 'ceph.type=db',
+ '--addtag', 'ceph.osd_fsid=1234',
+ '--addtag', 'ceph.cluster_name=ceph',
+ '--addtag', 'ceph.db_uuid=new-db-uuid',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+ '/dev/VolGroup/lv2_new']
+
+        assert self.mock_process_input[n-1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph-2',
+ '--dev-target', '/dev/VolGroup/lv2_new',
+ '--command', 'bluefs-bdev-migrate',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block.db']
+
+ @patch('os.getuid')
+ def test_migrate_data_db_to_new_db_skip_wal(self, m_getuid, monkeypatch):
+ m_getuid.return_value = 0
+
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+        source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+            'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2_new',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: False)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'data', 'db',
+ '--target', 'vgname/new_wal'])
+ m.main()
+
+ n = len(self.mock_process_input)
+ assert n >= 7
+
+        assert self.mock_process_input[n-7] == [
+ 'lvchange',
+ '--deltag', 'ceph.osd_id=2',
+ '--deltag', 'ceph.type=db',
+ '--deltag', 'ceph.osd_fsid=1234',
+ '--deltag', 'ceph.cluster_name=ceph',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '/dev/VolGroup/lv2']
+
+        assert self.mock_process_input[n-6] == [
+ 'lvchange',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '/dev/VolGroup/lv1']
+
+        assert self.mock_process_input[n-5] == [
+ 'lvchange',
+ '--addtag', 'ceph.db_uuid=new-db-uuid',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+ '/dev/VolGroup/lv1']
+
+        assert self.mock_process_input[n-4] == [
+ 'lvchange',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '/dev/VolGroup/lv3']
+
+        assert self.mock_process_input[n-3] == [
+ 'lvchange',
+ '--addtag', 'ceph.db_uuid=new-db-uuid',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+ '/dev/VolGroup/lv3']
+
+ assert self.mock_process_input[n-2] == [
+ 'lvchange',
+ '--addtag', 'ceph.osd_id=2',
+ '--addtag', 'ceph.type=db',
+ '--addtag', 'ceph.osd_fsid=1234',
+ '--addtag', 'ceph.cluster_name=ceph',
+ '--addtag', 'ceph.db_uuid=new-db-uuid',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+ '/dev/VolGroup/lv2_new']
+
+ assert self.mock_process_input[n-1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph-2',
+ '--dev-target', '/dev/VolGroup/lv2_new',
+ '--command', 'bluefs-bdev-migrate',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block.db']
+
+ @patch('os.getuid')
+ def test_migrate_data_db_wal_to_new_db(self, m_getuid, monkeypatch):
+ m_getuid.return_value = 0
+
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_wal_tags = 'ceph.osd_id=0,ceph.type=wal,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2_new',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: False)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'data', 'db', 'wal',
+ '--target', 'vgname/new_wal'])
+ m.main()
+
+ n = len(self.mock_process_input)
+ assert n >= 6
+
+ assert self.mock_process_input[n-6] == [
+ 'lvchange',
+ '--deltag', 'ceph.osd_id=2',
+ '--deltag', 'ceph.type=db',
+ '--deltag', 'ceph.osd_fsid=1234',
+ '--deltag', 'ceph.cluster_name=ceph',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '/dev/VolGroup/lv2']
+
+ assert self.mock_process_input[n-5] == [
+ 'lvchange',
+ '--deltag', 'ceph.osd_id=0',
+ '--deltag', 'ceph.type=wal',
+ '--deltag', 'ceph.osd_fsid=1234',
+ '--deltag', 'ceph.cluster_name=ceph',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '--deltag', 'ceph.wal_uuid=waluuid',
+ '--deltag', 'ceph.wal_device=wal_dev',
+ '/dev/VolGroup/lv3']
+
+ assert self.mock_process_input[n-4] == [
+ 'lvchange',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '--deltag', 'ceph.wal_uuid=waluuid',
+ '--deltag', 'ceph.wal_device=wal_dev',
+ '/dev/VolGroup/lv1']
+
+ assert self.mock_process_input[n-3] == [
+ 'lvchange',
+ '--addtag', 'ceph.db_uuid=new-db-uuid',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+ '/dev/VolGroup/lv1']
+
+ assert self.mock_process_input[n-2] == [
+ 'lvchange',
+ '--addtag', 'ceph.osd_id=2',
+ '--addtag', 'ceph.type=db',
+ '--addtag', 'ceph.osd_fsid=1234',
+ '--addtag', 'ceph.cluster_name=ceph',
+ '--addtag', 'ceph.db_uuid=new-db-uuid',
+ '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
+ '/dev/VolGroup/lv2_new']
+
+ assert self.mock_process_input[n-1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph-2',
+ '--dev-target', '/dev/VolGroup/lv2_new',
+ '--command', 'bluefs-bdev-migrate',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block.db',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block.wal']
+
+ @patch('os.getuid')
+ def test_dont_migrate_data_db_wal_to_new_data(self,
+ m_getuid,
+ monkeypatch,
+ capsys):
+ m_getuid.return_value = 0
+
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2_new',
+ lv_tags='')
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: False)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'data',
+ '--target', 'vgname/new_data'])
+
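+ # Migrating onto a plain data LV must fail: the target type has to be
+ # declared via new-db or new-wal first.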
+ with pytest.raises(SystemExit) as error:
+ m.main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Unable to migrate to : vgname/new_data'
+ assert expected in str(error.value)
+ expected = 'Unable to determine new volume type,' \
+ ' please use new-db or new-wal command before.'
+ assert expected in stderr
+
+ @patch('os.getuid')
+ def test_dont_migrate_db_to_wal(self,
+ m_getuid,
+ monkeypatch,
+ capsys):
+ m_getuid.return_value = 0
+
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = wal_vol
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: False)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'db',
+ '--target', 'vgname/wal'])
+
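+ # Reusing an existing WAL volume as the migration target is not supported.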
+ with pytest.raises(SystemExit) as error:
+ m.main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Unable to migrate to : vgname/wal'
+ assert expected in str(error.value)
+ expected = 'Migrate to WAL is not supported'
+ assert expected in stderr
+
+ @patch('os.getuid')
+ def test_migrate_data_db_to_db(self,
+ m_getuid,
+ monkeypatch,
+ capsys):
+ m_getuid.return_value = 0
+
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = db_vol
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: False)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'db', 'data',
+ '--target', 'vgname/db'])
+
+ m.main()
+
+ n = len(self.mock_process_input)
+ assert n >= 1
+ for s in self.mock_process_input:
+ print(s)
+
+ assert self.mock_process_input[n-1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph-2',
+ '--dev-target', '/var/lib/ceph/osd/ceph-2/block.db',
+ '--command', 'bluefs-bdev-migrate',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block']
+
+ def test_migrate_data_db_to_db_active_systemd(self, is_root, monkeypatch, capsys):
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = db_vol
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: True)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'db', 'data',
+ '--target', 'vgname/db'])
+
+ with pytest.raises(SystemExit) as error:
+ m.main()
+
+ stdout, stderr = capsys.readouterr()
+
+ assert 'Unable to migrate devices associated with OSD ID: 2' == str(error.value)
+ assert '--> OSD is running, stop it with: systemctl stop ceph-osd@2' == stderr.rstrip()
+ assert not stdout
+
+ def test_migrate_data_db_to_db_no_systemd(self, is_root, monkeypatch):
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
+ source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = db_vol
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'db', 'data',
+ '--target', 'vgname/db',
+ '--no-systemd'])
+
+ m.main()
+
+ n = len(self.mock_process_input)
+ assert n >= 1
+ for s in self.mock_process_input:
+ print(s)
+
+ assert self.mock_process_input[n-1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph-2',
+ '--dev-target', '/var/lib/ceph/osd/ceph-2/block.db',
+ '--command', 'bluefs-bdev-migrate',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block']
+
+ @patch('os.getuid')
+ def test_migrate_data_wal_to_db(self,
+ m_getuid,
+ monkeypatch,
+ capsys):
+ m_getuid.return_value = 0
+
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = db_vol
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: False)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'db', 'data', 'wal',
+ '--target', 'vgname/db'])
+
+ m.main()
+
+ n = len(self.mock_process_input)
+ assert n >= 4
+ for s in self.mock_process_input:
+ print(s)
+
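+ # The wal tags should be removed from all three LVs before the final
+ # bluefs-bdev-migrate call folds block and block.wal into the db volume.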
+ assert self.mock_process_input[n-4] == [
+ 'lvchange',
+ '--deltag', 'ceph.osd_id=2',
+ '--deltag', 'ceph.type=wal',
+ '--deltag', 'ceph.osd_fsid=1234',
+ '--deltag', 'ceph.cluster_name=ceph',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '--deltag', 'ceph.wal_uuid=waluuid',
+ '--deltag', 'ceph.wal_device=wal_dev',
+ '/dev/VolGroup/lv3']
+ assert self.mock_process_input[n-3] == [
+ 'lvchange',
+ '--deltag', 'ceph.wal_uuid=waluuid',
+ '--deltag', 'ceph.wal_device=wal_dev',
+ '/dev/VolGroup/lv1']
+ assert self.mock_process_input[n-2] == [
+ 'lvchange',
+ '--deltag', 'ceph.wal_uuid=waluuid',
+ '--deltag', 'ceph.wal_device=wal_dev',
+ '/dev/VolGroup/lv2']
+ assert self.mock_process_input[n-1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph-2',
+ '--dev-target', '/var/lib/ceph/osd/ceph-2/block.db',
+ '--command', 'bluefs-bdev-migrate',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block.wal']
+
+ def test_migrate_data_wal_to_db_active_systemd(self, is_root, monkeypatch, capsys):
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = db_vol
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
+ lambda id: True)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'db', 'data', 'wal',
+ '--target', 'vgname/db'])
+
+ with pytest.raises(SystemExit) as error:
+ m.main()
+
+ stdout, stderr = capsys.readouterr()
+
+ assert 'Unable to migrate devices associated with OSD ID: 2' == str(error.value)
+ assert '--> OSD is running, stop it with: systemctl stop ceph-osd@2' == stderr.rstrip()
+ assert not stdout
+
+ def test_migrate_data_wal_to_db_no_systemd(self, is_root, monkeypatch):
+ source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+ source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
+ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
+ 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
+
+ data_vol = api.Volume(lv_name='volume1',
+ lv_uuid='datauuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv1',
+ lv_tags=source_tags)
+ db_vol = api.Volume(lv_name='volume2',
+ lv_uuid='dbuuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv2',
+ lv_tags=source_db_tags)
+
+ wal_vol = api.Volume(lv_name='volume3',
+ lv_uuid='waluuid',
+ vg_name='vg',
+ lv_path='/dev/VolGroup/lv3',
+ lv_tags=source_wal_tags)
+
+ self.mock_single_volumes = {
+ '/dev/VolGroup/lv1': data_vol,
+ '/dev/VolGroup/lv2': db_vol,
+ '/dev/VolGroup/lv3': wal_vol,
+ }
+ monkeypatch.setattr(migrate.api, 'get_single_lv',
+ self.mock_get_single_lv)
+
+ self.mock_volume = db_vol
+ monkeypatch.setattr(api, 'get_lv_by_fullname',
+ self.mock_get_lv_by_fullname)
+
+ self.mock_process_input = []
+ monkeypatch.setattr(process, 'call', self.mock_process)
+
+ devices = []
+ devices.append([Device('/dev/VolGroup/lv1'), 'block'])
+ devices.append([Device('/dev/VolGroup/lv2'), 'db'])
+ devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
+
+ monkeypatch.setattr(migrate, 'find_associated_devices',
+ lambda osd_id, osd_fsid: devices)
+
+ monkeypatch.setattr(migrate, 'get_cluster_name',
+ lambda osd_id, osd_fsid: 'ceph')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ m = migrate.Migrate(argv=[
+ '--osd-id', '2',
+ '--osd-fsid', '1234',
+ '--from', 'db', 'data', 'wal',
+ '--target', 'vgname/db',
+ '--no-systemd'])
+
+ m.main()
+
+ n = len(self.mock_process_input)
+ assert n >= 4
+ for s in self.mock_process_input:
+ print(s)
+
+ assert self.mock_process_input[n-4] == [
+ 'lvchange',
+ '--deltag', 'ceph.osd_id=2',
+ '--deltag', 'ceph.type=wal',
+ '--deltag', 'ceph.osd_fsid=1234',
+ '--deltag', 'ceph.cluster_name=ceph',
+ '--deltag', 'ceph.db_uuid=dbuuid',
+ '--deltag', 'ceph.db_device=db_dev',
+ '--deltag', 'ceph.wal_uuid=waluuid',
+ '--deltag', 'ceph.wal_device=wal_dev',
+ '/dev/VolGroup/lv3']
+ assert self.mock_process_input[n-3] == [
+ 'lvchange',
+ '--deltag', 'ceph.wal_uuid=waluuid',
+ '--deltag', 'ceph.wal_device=wal_dev',
+ '/dev/VolGroup/lv1']
+ assert self.mock_process_input[n-2] == [
+ 'lvchange',
+ '--deltag', 'ceph.wal_uuid=waluuid',
+ '--deltag', 'ceph.wal_device=wal_dev',
+ '/dev/VolGroup/lv2']
+ assert self.mock_process_input[n-1] == [
+ 'ceph-bluestore-tool',
+ '--path', '/var/lib/ceph/osd/ceph-2',
+ '--dev-target', '/var/lib/ceph/osd/ceph-2/block.db',
+ '--command', 'bluefs-bdev-migrate',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block',
+ '--devs-source', '/var/lib/ceph/osd/ceph-2/block.wal']
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py
new file mode 100644
index 000000000..9f0a5e0bb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py
@@ -0,0 +1,189 @@
+import pytest
+from ceph_volume.devices import lvm
+from ceph_volume.api import lvm as api
+from mock.mock import patch, Mock, MagicMock
+
+
+class TestLVM(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.main.LVM([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Use LVM and LVM-based technologies to deploy' in stdout
+
+ def test_main_shows_activate_subcommands(self, capsys):
+ lvm.main.LVM([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'activate ' in stdout
+ assert 'Discover and mount' in stdout
+
+ def test_main_shows_prepare_subcommands(self, capsys):
+ lvm.main.LVM([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'prepare ' in stdout
+ assert 'Format an LVM device' in stdout
+
+
+class TestPrepareDevice(object):
+
+ def test_cannot_use_device(self, factory):
+ args = factory(data='/dev/var/foo')
+ with pytest.raises(RuntimeError) as error:
+ p = lvm.prepare.Prepare([])
+ p.args = args
+ p.prepare_data_device('data', '0')
+ assert 'Cannot use device (/dev/var/foo)' in str(error.value)
+ assert 'A vg/lv path or an existing device is needed' in str(error.value)
+
+
+class TestGetClusterFsid(object):
+
+ def test_fsid_is_passed_in(self, factory):
+ args = factory(cluster_fsid='aaaa-1111')
+ prepare_obj = lvm.prepare.Prepare([])
+ prepare_obj.args = args
+ assert prepare_obj.get_cluster_fsid() == 'aaaa-1111'
+
+ def test_fsid_is_read_from_ceph_conf(self, factory, conf_ceph_stub):
+ conf_ceph_stub('[global]\nfsid = bbbb-2222')
+ prepare_obj = lvm.prepare.Prepare([])
+ prepare_obj.args = factory(cluster_fsid=None)
+ assert prepare_obj.get_cluster_fsid() == 'bbbb-2222'
+
+
+class TestPrepare(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.prepare.Prepare([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Prepare an OSD by assigning an ID and FSID' in stdout
+
+ def test_main_shows_full_help(self, capsys):
+ with pytest.raises(SystemExit):
+ lvm.prepare.Prepare(argv=['--help']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Use the filestore objectstore' in stdout
+ assert 'Use the bluestore objectstore' in stdout
+ assert 'A physical device or logical' in stdout
+
+ @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
+ def test_excludes_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.prepare.Prepare(argv=['--data', '/dev/sdfoo', '--filestore', '--bluestore']).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --filestore (filestore) with --bluestore (bluestore)'
+ assert expected in stderr
+
+ @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
+ def test_excludes_other_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.prepare.Prepare(argv=[
+ '--bluestore', '--data', '/dev/sdfoo',
+ '--journal', '/dev/sf14',
+ ]).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --bluestore (bluestore) with --journal (filestore)'
+ assert expected in stderr
+
+ @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
+ def test_excludes_block_and_journal_flags(self, m_has_bs_label, fake_call, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.prepare.Prepare(argv=[
+ '--bluestore', '--data', '/dev/sdfoo', '--block.db', 'vg/ceph1',
+ '--journal', '/dev/sf14',
+ ]).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --block.db (bluestore) with --journal (filestore)'
+ assert expected in stderr
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
+ def test_journal_is_required_with_filestore(self, m_has_bs_label, m_device, is_root, monkeypatch, device_info):
+ m_device.return_value = MagicMock(exists=True,
+ has_fs=False,
+ used_by_ceph=False,
+ has_partitions=False,
+ has_gpt_headers=False)
+ monkeypatch.setattr("os.path.exists", lambda path: True)
+ with pytest.raises(SystemExit) as error:
+ lvm.prepare.Prepare(argv=['--filestore', '--data', '/dev/sdfoo']).main()
+ expected = '--journal is required when using --filestore'
+ assert expected in str(error.value)
+
+ @patch('ceph_volume.devices.lvm.prepare.api.is_ceph_device')
+ def test_safe_prepare_osd_already_created(self, m_is_ceph_device):
+ m_is_ceph_device.return_value = True
+ with pytest.raises(RuntimeError) as error:
+ prepare = lvm.prepare.Prepare(argv=[])
+ prepare.args = Mock()
+ prepare.args.data = '/dev/sdfoo'
+ prepare.get_lv = Mock()
+ prepare.safe_prepare()
+ expected = 'skipping {}, it is already prepared'.format('/dev/sdfoo')
+ assert expected in str(error.value)
+
+ def test_setup_device_device_name_is_none(self):
+ result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name=None, tags={'ceph.type': 'data'}, size=0, slots=None)
+ assert result == ('', '', {'ceph.type': 'data'})
+
+ @patch('ceph_volume.api.lvm.Volume.set_tags')
+ @patch('ceph_volume.devices.lvm.prepare.api.get_single_lv')
+ def test_setup_device_lv_passed(self, m_get_single_lv, m_set_tags):
+ fake_volume = api.Volume(lv_name='lv_foo', lv_path='/fake-path', vg_name='vg_foo', lv_tags='', lv_uuid='fake-uuid')
+ m_get_single_lv.return_value = fake_volume
+ result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='vg_foo/lv_foo', tags={'ceph.type': 'data'}, size=0, slots=None)
+
+ assert result == ('/fake-path', 'fake-uuid', {'ceph.type': 'data',
+ 'ceph.vdo': '0',
+ 'ceph.data_uuid': 'fake-uuid',
+ 'ceph.data_device': '/fake-path'})
+
+ @patch('ceph_volume.devices.lvm.prepare.api.create_lv')
+ @patch('ceph_volume.api.lvm.Volume.set_tags')
+ @patch('ceph_volume.util.disk.is_device')
+ def test_setup_device_device_passed(self, m_is_device, m_set_tags, m_create_lv):
+ fake_volume = api.Volume(lv_name='lv_foo', lv_path='/fake-path', vg_name='vg_foo', lv_tags='', lv_uuid='fake-uuid')
+ m_is_device.return_value = True
+ m_create_lv.return_value = fake_volume
+ result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='/dev/sdx', tags={'ceph.type': 'data'}, size=0, slots=None)
+
+ assert result == ('/fake-path', 'fake-uuid', {'ceph.type': 'data',
+ 'ceph.vdo': '0',
+ 'ceph.data_uuid': 'fake-uuid',
+ 'ceph.data_device': '/fake-path'})
+
+ @patch('ceph_volume.devices.lvm.prepare.Prepare.get_ptuuid')
+ @patch('ceph_volume.devices.lvm.prepare.api.get_single_lv')
+ def test_setup_device_partition_passed(self, m_get_single_lv, m_get_ptuuid):
+ m_get_single_lv.side_effect = ValueError()
+ m_get_ptuuid.return_value = 'fake-uuid'
+ result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='/dev/sdx', tags={'ceph.type': 'data'}, size=0, slots=None)
+
+ assert result == ('/dev/sdx', 'fake-uuid', {'ceph.type': 'data',
+ 'ceph.vdo': '0',
+ 'ceph.data_uuid': 'fake-uuid',
+ 'ceph.data_device': '/dev/sdx'})
+
+ def test_invalid_osd_id_passed(self):
+ with pytest.raises(SystemExit):
+ lvm.prepare.Prepare(argv=['--osd-id', 'foo']).main()
+
+
+class TestActivate(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.activate.Activate([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Activate OSDs by discovering them with' in stdout
+
+ def test_main_shows_full_help(self, capsys):
+ with pytest.raises(SystemExit):
+ lvm.activate.Activate(argv=['--help']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'optional arguments' in stdout
+ assert 'positional arguments' in stdout
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py
new file mode 100644
index 000000000..b5280f931
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py
@@ -0,0 +1,45 @@
+import pytest
+from ceph_volume import exceptions
+from ceph_volume.devices.lvm import trigger
+
+
+class TestParseOSDid(object):
+
+ def test_no_id_found_if_no_digit(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_id('asdlj-ljahsdfaslkjhdfa')
+
+ def test_no_id_found(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_id('ljahsdfaslkjhdfa')
+
+ def test_id_found(self):
+ result = trigger.parse_osd_id('1-ljahsdfaslkjhdfa')
+ assert result == '1'
+
+
+class TestParseOSDUUID(object):
+
+ def test_uuid_is_parsed(self):
+ result = trigger.parse_osd_uuid('1-asdf-ljkh-asdf-ljkh-asdf')
+ assert result == 'asdf-ljkh-asdf-ljkh-asdf'
+
+ def test_uuid_is_parsed_longer_sha1(self):
+ result = trigger.parse_osd_uuid('1-foo-bar-asdf-ljkh-asdf-ljkh-asdf')
+ assert result == 'foo-bar-asdf-ljkh-asdf-ljkh-asdf'
+
+ def test_uuid_is_not_found(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_uuid('ljahsdfaslkjhdfa')
+
+ def test_uuid_is_not_found_missing_id(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_uuid('ljahs-dfa-slkjhdfa-foo')
+
+ def test_robust_double_id_in_uuid(self):
+ # the osd id can also appear inside the SHA1; parsing should still
+ # isolate the uuid correctly
+ result = trigger.parse_osd_uuid("1-abc959fd-1ec9-4864-b141-3154f9b9f8ed")
+ assert result == 'abc959fd-1ec9-4864-b141-3154f9b9f8ed'
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py
new file mode 100644
index 000000000..64016111c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py
@@ -0,0 +1,241 @@
+import os
+import pytest
+from copy import deepcopy
+from mock.mock import patch, call
+from ceph_volume import process
+from ceph_volume.api import lvm as api
+from ceph_volume.devices.lvm import zap
+
+
+class TestZap(object):
+ def test_invalid_osd_id_passed(self):
+ with pytest.raises(SystemExit):
+ zap.Zap(argv=['--osd-id', 'foo']).main()
+
+class TestFindAssociatedDevices(object):
+
+ def test_no_lvs_found_that_match_id(self, monkeypatch, device_info):
+ tags = 'ceph.osd_id=9,ceph.journal_uuid=x,ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_tags=tags, lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
+
+ with pytest.raises(RuntimeError):
+ zap.find_associated_devices(osd_id=10)
+
+ def test_no_lvs_found_that_match_fsid(self, monkeypatch, device_info):
+ tags = 'ceph.osd_id=9,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,'+\
+ 'ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ vg_name='vg', lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
+
+ with pytest.raises(RuntimeError):
+ zap.find_associated_devices(osd_fsid='aaaa-lkjh')
+
+ def test_no_lvs_found_that_match_id_fsid(self, monkeypatch, device_info):
+ tags = 'ceph.osd_id=9,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,'+\
+ 'ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_tags=tags, lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
+
+ with pytest.raises(RuntimeError):
+ zap.find_associated_devices(osd_id='9', osd_fsid='aaaa-lkjh')
+
+ def test_no_ceph_lvs_found(self, monkeypatch):
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags='',
+ lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
+
+ with pytest.raises(RuntimeError):
+ zap.find_associated_devices(osd_id=100)
+
+ def test_lv_is_matched_id(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = zap.find_associated_devices(osd_id='0')
+ assert result[0].path == '/dev/VolGroup/lv'
+
+ def test_lv_is_matched_fsid(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,' +\
+ 'ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: deepcopy(volumes))
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = zap.find_associated_devices(osd_fsid='asdf-lkjh')
+ assert result[0].path == '/dev/VolGroup/lv'
+
+ def test_lv_is_matched_id_fsid(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,' +\
+ 'ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = zap.find_associated_devices(osd_id='0', osd_fsid='asdf-lkjh')
+ assert result[0].path == '/dev/VolGroup/lv'
+
+
+class TestEnsureAssociatedLVs(object):
+
+ def test_nothing_is_found(self):
+ volumes = []
+ result = zap.ensure_associated_lvs(volumes)
+ assert result == []
+
+ def test_data_is_found(self, fake_call):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=data'
+ osd = api.Volume(
+ lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/data', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert result == ['/dev/VolGroup/data']
+
+ def test_block_is_found(self, fake_call):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=block'
+ osd = api.Volume(
+ lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/block', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert result == ['/dev/VolGroup/block']
+
+ def test_success_message_for_fsid(self, factory, is_root, capsys):
+ cli_zap = zap.Zap([])
+ args = factory(devices=[], osd_id=None, osd_fsid='asdf-lkjh')
+ cli_zap.args = args
+ cli_zap.zap()
+ out, err = capsys.readouterr()
+ assert "Zapping successful for OSD: asdf-lkjh" in err
+
+ def test_success_message_for_id(self, factory, is_root, capsys):
+ cli_zap = zap.Zap([])
+ args = factory(devices=[], osd_id='1', osd_fsid=None)
+ cli_zap.args = args
+ cli_zap.zap()
+ out, err = capsys.readouterr()
+ assert "Zapping successful for OSD: 1" in err
+
+ def test_block_and_partition_are_found(self, monkeypatch):
+ monkeypatch.setattr(zap.disk, 'get_device_from_partuuid', lambda x: '/dev/sdb1')
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=block'
+ osd = api.Volume(
+ lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/block', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/sdb1' in result
+ assert '/dev/VolGroup/block' in result
+
+ def test_journal_is_found(self, fake_call):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=journal'
+ osd = api.Volume(
+ lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert result == ['/dev/VolGroup/lv']
+
+ def test_multiple_journals_are_found(self):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=journal'
+ volumes = []
+ for i in range(3):
+ osd = api.Volume(
+ lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/VolGroup/lv0' in result
+ assert '/dev/VolGroup/lv1' in result
+ assert '/dev/VolGroup/lv2' in result
+
+ def test_multiple_dbs_are_found(self):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=db'
+ volumes = []
+ for i in range(3):
+ osd = api.Volume(
+ lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/VolGroup/lv0' in result
+ assert '/dev/VolGroup/lv1' in result
+ assert '/dev/VolGroup/lv2' in result
+
+ def test_multiple_wals_are_found(self):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.wal_uuid=x,ceph.type=wal'
+ volumes = []
+ for i in range(3):
+ osd = api.Volume(
+ lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/VolGroup/lv0' in result
+ assert '/dev/VolGroup/lv1' in result
+ assert '/dev/VolGroup/lv2' in result
+
+ def test_multiple_backing_devs_are_found(self):
+ volumes = []
+ for _type in ['journal', 'db', 'wal']:
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.wal_uuid=x,ceph.type=%s' % _type
+ osd = api.Volume(
+ lv_name='volume%s' % _type, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % _type, lv_tags=tags)
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/VolGroup/lvjournal' in result
+ assert '/dev/VolGroup/lvwal' in result
+ assert '/dev/VolGroup/lvdb' in result
+
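+ # ensure_associated_lvs queries LVs by tag for each metadata type; expect
+ # one get_lvs call per type (journal, db, wal) filtered by the osd id.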
+ @patch('ceph_volume.devices.lvm.zap.api.get_lvs')
+ def test_ensure_associated_lvs(self, m_get_lvs):
+ zap.ensure_associated_lvs([], lv_tags={'ceph.osd_id': '1'})
+ calls = [
+ call(tags={'ceph.type': 'journal', 'ceph.osd_id': '1'}),
+ call(tags={'ceph.type': 'db', 'ceph.osd_id': '1'}),
+ call(tags={'ceph.type': 'wal', 'ceph.osd_id': '1'})
+ ]
+ m_get_lvs.assert_has_calls(calls, any_order=True)
+
+
+class TestWipeFs(object):
+
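+ # CEPH_VOLUME_WIPEFS_INTERVAL is zeroed so retries do not wait between
+ # attempts; individual tests override CEPH_VOLUME_WIPEFS_TRIES as needed.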
+ def setup(self):
+ os.environ['CEPH_VOLUME_WIPEFS_INTERVAL'] = '0'
+
+ def test_works_on_second_try(self, stub_call):
+ os.environ['CEPH_VOLUME_WIPEFS_TRIES'] = '2'
+ stub_call([('wiping /dev/sda', '', 1), ('', '', 0)])
+ result = zap.wipefs('/dev/sda')
+ assert result is None
+
+ def test_does_not_work_after_several_tries(self, stub_call):
+ os.environ['CEPH_VOLUME_WIPEFS_TRIES'] = '2'
+ stub_call([('wiping /dev/sda', '', 1), ('', '', 1)])
+ with pytest.raises(RuntimeError):
+ zap.wipefs('/dev/sda')
+
+ def test_does_not_work_default_tries(self, stub_call):
+ stub_call([('wiping /dev/sda', '', 1)]*8)
+ with pytest.raises(RuntimeError):
+ zap.wipefs('/dev/sda')
diff --git a/src/ceph-volume/ceph_volume/tests/devices/raw/__init__.py b/src/ceph-volume/ceph_volume/tests/devices/raw/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/raw/__init__.py
diff --git a/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py b/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py
new file mode 100644
index 000000000..5ad501bab
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py
@@ -0,0 +1,238 @@
+import pytest
+from mock.mock import patch
+from ceph_volume.devices import raw
+
+# The sample lsblk output below outlines the test scenario (--json output shown for readability).
+# - sda and all its children are used for the OS
+# - sdb is a bluestore OSD with phantom Atari partitions
+# - sdc is an empty disk
+# - sdd has 2 LVM device children
+# > lsblk --paths --json
+# {
+# "blockdevices": [
+# {"name": "/dev/sda", "maj:min": "8:0", "rm": "0", "size": "128G", "ro": "0", "type": "disk", "mountpoint": null,
+# "children": [
+# {"name": "/dev/sda1", "maj:min": "8:1", "rm": "0", "size": "487M", "ro": "0", "type": "part", "mountpoint": null},
+# {"name": "/dev/sda2", "maj:min": "8:2", "rm": "0", "size": "1.9G", "ro": "0", "type": "part", "mountpoint": null},
+# {"name": "/dev/sda3", "maj:min": "8:3", "rm": "0", "size": "125.6G", "ro": "0", "type": "part", "mountpoint": "/etc/hosts"}
+# ]
+# },
+# {"name": "/dev/sdb", "maj:min": "8:16", "rm": "0", "size": "1T", "ro": "0", "type": "disk", "mountpoint": null,
+# "children": [
+# {"name": "/dev/sdb2", "maj:min": "8:18", "rm": "0", "size": "48G", "ro": "0", "type": "part", "mountpoint": null},
+# {"name": "/dev/sdb3", "maj:min": "8:19", "rm": "0", "size": "6M", "ro": "0", "type": "part", "mountpoint": null}
+# ]
+# },
+# {"name": "/dev/sdc", "maj:min": "8:32", "rm": "0", "size": "1T", "ro": "0", "type": "disk", "mountpoint": null},
+# {"name": "/dev/sdd", "maj:min": "8:48", "rm": "0", "size": "1T", "ro": "0", "type": "disk", "mountpoint": null,
+# "children": [
+# {"name": "/dev/mapper/ceph--osd--block--1", "maj:min": "253:0", "rm": "0", "size": "512G", "ro": "0", "type": "lvm", "mountpoint": null},
+# {"name": "/dev/mapper/ceph--osd--block--2", "maj:min": "253:1", "rm": "0", "size": "512G", "ro": "0", "type": "lvm", "mountpoint": null}
+# ]
+# }
+# ]
+# }
+
+def _devices_side_effect():
+ return {
+ "/dev/sda": {},
+ "/dev/sda1": {},
+ "/dev/sda2": {},
+ "/dev/sda3": {},
+ "/dev/sdb": {},
+ "/dev/sdb2": {},
+ "/dev/sdb3": {},
+ "/dev/sdc": {},
+ "/dev/sdd": {},
+ "/dev/mapper/ceph--osd--block--1": {},
+ "/dev/mapper/ceph--osd--block--2": {},
+ }
+
+def _lsblk_all_devices(abspath=True):
+ return [
+ {"NAME": "/dev/sda", "KNAME": "/dev/sda", "PKNAME": ""},
+ {"NAME": "/dev/sda1", "KNAME": "/dev/sda1", "PKNAME": "/dev/sda"},
+ {"NAME": "/dev/sda2", "KNAME": "/dev/sda2", "PKNAME": "/dev/sda"},
+ {"NAME": "/dev/sda3", "KNAME": "/dev/sda3", "PKNAME": "/dev/sda"},
+ {"NAME": "/dev/sdb", "KNAME": "/dev/sdb", "PKNAME": ""},
+ {"NAME": "/dev/sdb2", "KNAME": "/dev/sdb2", "PKNAME": "/dev/sdb"},
+ {"NAME": "/dev/sdb3", "KNAME": "/dev/sdb3", "PKNAME": "/dev/sdb"},
+ {"NAME": "/dev/sdc", "KNAME": "/dev/sdc", "PKNAME": ""},
+ {"NAME": "/dev/sdd", "KNAME": "/dev/sdd", "PKNAME": ""},
+ {"NAME": "/dev/mapper/ceph--osd--block--1", "KNAME": "/dev/mapper/ceph--osd--block--1", "PKNAME": "/dev/sdd"},
+ {"NAME": "/dev/mapper/ceph--osd--block--2", "KNAME": "/dev/mapper/ceph--osd--block--2", "PKNAME": "/dev/sdd"},
+ ]
+
+# dummy lsblk output for a device, with an optional parent
+def _lsblk_output(dev, parent=None):
+ if parent is None:
+ parent = ""
+ ret = 'NAME="{}" KNAME="{}" PKNAME="{}"'.format(dev, dev, parent)
+ return [ret] # needs to be in a list form
+
+def _bluestore_tool_label_output_sdb():
+ return '''{
+ "/dev/sdb": {
+ "osd_uuid": "sdb-uuid",
+ "size": 1099511627776,
+ "btime": "2021-07-23T16:02:22.809186+0000",
+ "description": "main",
+ "bfm_blocks": "268435456",
+ "bfm_blocks_per_key": "128",
+ "bfm_bytes_per_block": "4096",
+ "bfm_size": "1099511627776",
+ "bluefs": "1",
+ "ceph_fsid": "sdb-fsid",
+ "kv_backend": "rocksdb",
+ "magic": "ceph osd volume v026",
+ "mkfs_done": "yes",
+ "osd_key": "AQAO6PpgK+y4CBAAixq/X7OVimbaezvwD/cDmg==",
+ "ready": "ready",
+ "require_osd_release": "16",
+ "whoami": "0"
+ }
+}'''
+
+def _bluestore_tool_label_output_sdb2():
+ return '''{
+ "/dev/sdb2": {
+ "osd_uuid": "sdb2-uuid",
+ "size": 1099511627776,
+ "btime": "2021-07-23T16:02:22.809186+0000",
+ "description": "main",
+ "bfm_blocks": "268435456",
+ "bfm_blocks_per_key": "128",
+ "bfm_bytes_per_block": "4096",
+ "bfm_size": "1099511627776",
+ "bluefs": "1",
+ "ceph_fsid": "sdb2-fsid",
+ "kv_backend": "rocksdb",
+ "magic": "ceph osd volume v026",
+ "mkfs_done": "yes",
+ "osd_key": "AQAO6PpgK+y4CBAAixq/X7OVimbaezvwD/cDmg==",
+ "ready": "ready",
+ "require_osd_release": "16",
+ "whoami": "2"
+ }
+}'''
+
+def _bluestore_tool_label_output_dm_okay():
+ return '''{
+ "/dev/mapper/ceph--osd--block--1": {
+ "osd_uuid": "lvm-1-uuid",
+ "size": 549751619584,
+ "btime": "2021-07-23T16:04:37.881060+0000",
+ "description": "main",
+ "bfm_blocks": "134216704",
+ "bfm_blocks_per_key": "128",
+ "bfm_bytes_per_block": "4096",
+ "bfm_size": "549751619584",
+ "bluefs": "1",
+ "ceph_fsid": "lvm-1-fsid",
+ "kv_backend": "rocksdb",
+ "magic": "ceph osd volume v026",
+ "mkfs_done": "yes",
+ "osd_key": "AQCU6Ppgz+UcIRAAh6IUjtPjiXBlEXfwO8ixzw==",
+ "ready": "ready",
+ "require_osd_release": "16",
+ "whoami": "2"
+ }
+}'''
+
+def _process_call_side_effect(command, **kw):
+ if "lsblk" in command:
+ if "/dev/" in command[-1]:
+ dev = command[-1]
+ if dev == "/dev/sda1" or dev == "/dev/sda2" or dev == "/dev/sda3":
+ return _lsblk_output(dev, parent="/dev/sda"), '', 0
+ if dev == "/dev/sdb2" or dev == "/dev/sdb3":
+ return _lsblk_output(dev, parent="/dev/sdb"), '', 0
+ if dev == "/dev/sda" or dev == "/dev/sdb" or dev == "/dev/sdc" or dev == "/dev/sdd":
+ return _lsblk_output(dev), '', 0
+ if "mapper" in dev:
+ return _lsblk_output(dev, parent="/dev/sdd"), '', 0
+ pytest.fail('dev {} needs behavior specified for it'.format(dev))
+ if "/dev/" not in command:
+ return _lsblk_all_devices(), '', 0
+ pytest.fail('command {} needs behavior specified for it'.format(command))
+
+ if "ceph-bluestore-tool" in command:
+ if "/dev/sdb" in command:
+ # sdb is a bluestore OSD
+ return _bluestore_tool_label_output_sdb(), '', 0
+ if "/dev/sdb2" in command:
+ # sdb2 is a phantom atari partition that appears to have some valid bluestore info
+ return _bluestore_tool_label_output_sdb2(), '', 0
+ if "/dev/mapper/ceph--osd--block--1" in command:
+ # dm device 1 is a valid bluestore OSD (the other is corrupted/invalid)
+ return _bluestore_tool_label_output_dm_okay(), '', 0
+ # sda and children, sdb's children, sdc, sdd, dm device 2 all do NOT have bluestore OSD data
+ return [], 'fake No such file or directory error', 1
+ pytest.fail('command {} needs behavior specified for it'.format(command))
+
+def _has_bluestore_label_side_effect(disk_path):
+ if "/dev/sda" in disk_path:
+ return False # disk and all children are for the OS
+ if disk_path == "/dev/sdb":
+ return True # sdb is a valid bluestore OSD
+ if disk_path == "/dev/sdb2":
+ return True # sdb2 appears to be a valid bluestore OSD even though it should not be
+ if disk_path == "/dev/sdc":
+ return False # empty disk
+ if disk_path == "/dev/sdd":
+ return False # has LVM subdevices
+ if disk_path == "/dev/mapper/ceph--osd--block--1":
+ return True # good OSD
+ if disk_path == "/dev/mapper/ceph--osd--block--2":
+ return False # corrupted
+ pytest.fail('device {} needs behavior specified for it'.format(disk_path))
+
+class TestList(object):
+
+ @patch('ceph_volume.util.device.disk.get_devices')
+ @patch('ceph_volume.util.disk.has_bluestore_label')
+ @patch('ceph_volume.process.call')
+ @patch('ceph_volume.util.disk.lsblk_all')
+ def test_raw_list(self, patched_disk_lsblk, patched_call, patched_bluestore_label, patched_get_devices):
+ raw.list.logger.setLevel("DEBUG")
+ patched_call.side_effect = _process_call_side_effect
+ patched_disk_lsblk.side_effect = _lsblk_all_devices
+ patched_bluestore_label.side_effect = _has_bluestore_label_side_effect
+ patched_get_devices.side_effect = _devices_side_effect
+
+ result = raw.list.List([]).generate()
+ assert len(result) == 3
+
+ sdb = result['sdb-uuid']
+ assert sdb['osd_uuid'] == 'sdb-uuid'
+ assert sdb['osd_id'] == 0
+ assert sdb['device'] == '/dev/sdb'
+ assert sdb['ceph_fsid'] == 'sdb-fsid'
+ assert sdb['type'] == 'bluestore'
+
+ lvm1 = result['lvm-1-uuid']
+ assert lvm1['osd_uuid'] == 'lvm-1-uuid'
+ assert lvm1['osd_id'] == 2
+ assert lvm1['device'] == '/dev/mapper/ceph--osd--block--1'
+ assert lvm1['ceph_fsid'] == 'lvm-1-fsid'
+ assert lvm1['type'] == 'bluestore'
+
+ @patch('ceph_volume.util.device.disk.get_devices')
+ @patch('ceph_volume.util.disk.has_bluestore_label')
+ @patch('ceph_volume.process.call')
+ @patch('ceph_volume.util.disk.lsblk_all')
+ def test_raw_list_with_OSError(self, patched_disk_lsblk, patched_call, patched_bluestore_label, patched_get_devices):
+ def _has_bluestore_label_side_effect_with_OSError(device_path):
+ if device_path == "/dev/sdd":
+ raise OSError('fake OSError')
+ return _has_bluestore_label_side_effect(device_path)
+
+ raw.list.logger.setLevel("DEBUG")
+ patched_disk_lsblk.side_effect = _lsblk_all_devices
+ patched_call.side_effect = _process_call_side_effect
+ patched_bluestore_label.side_effect = _has_bluestore_label_side_effect_with_OSError
+ patched_get_devices.side_effect = _devices_side_effect
+
+ result = raw.list.List([]).generate()
+ assert len(result) == 3
+ assert 'sdb-uuid' in result
diff --git a/src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py b/src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py
new file mode 100644
index 000000000..f814bbf13
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py
@@ -0,0 +1,97 @@
+import pytest
+from ceph_volume.devices import raw
+from mock.mock import patch
+
+
+class TestRaw(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ raw.main.Raw([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Manage a single-device OSD on a raw block device.' in stdout
+
+ def test_main_shows_activate_subcommands(self, capsys):
+ raw.main.Raw([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'activate ' in stdout
+ assert 'Discover and prepare' in stdout
+
+ def test_main_shows_prepare_subcommands(self, capsys):
+ raw.main.Raw([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'prepare ' in stdout
+ assert 'Format a raw device' in stdout
+
+
+class TestPrepare(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ raw.prepare.Prepare([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Prepare an OSD by assigning an ID and FSID' in stdout
+
+ def test_main_shows_full_help(self, capsys):
+ with pytest.raises(SystemExit):
+ raw.prepare.Prepare(argv=['--help']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'a raw device to use for the OSD' in stdout
+ assert 'Crush device class to assign this OSD to' in stdout
+ assert 'Use BlueStore backend' in stdout
+ assert 'Path to bluestore block.db block device' in stdout
+ assert 'Path to bluestore block.wal block device' in stdout
+ assert 'Enable device encryption via dm-crypt' in stdout
+
+ @patch('ceph_volume.util.arg_validators.ValidRawDevice.__call__')
+ def test_prepare_dmcrypt_no_secret_passed(self, m_valid_device, capsys):
+ m_valid_device.return_value = '/dev/foo'
+ with pytest.raises(SystemExit):
+ raw.prepare.Prepare(argv=['--bluestore', '--data', '/dev/foo', '--dmcrypt']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'CEPH_VOLUME_DMCRYPT_SECRET is not set, you must set' in stderr
+
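+ # The tests below check that prepare_dmcrypt opens a LUKS mapping named
+ # ceph-<fsid>-<kname>-<type>-dmcrypt for the block, db and wal device types.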
+ @patch('ceph_volume.util.encryption.luks_open')
+ @patch('ceph_volume.util.encryption.luks_format')
+ @patch('ceph_volume.util.disk.lsblk')
+ def test_prepare_dmcrypt_block(self, m_lsblk, m_luks_format, m_luks_open):
+ m_lsblk.return_value = {'KNAME': 'foo'}
+ m_luks_format.return_value = True
+ m_luks_open.return_value = True
+ result = raw.prepare.prepare_dmcrypt('foo', '/dev/foo', 'block', '123')
+ m_luks_open.assert_called_with('foo', '/dev/foo', 'ceph-123-foo-block-dmcrypt')
+ m_luks_format.assert_called_with('foo', '/dev/foo')
+ assert result == '/dev/mapper/ceph-123-foo-block-dmcrypt'
+
+ @patch('ceph_volume.util.encryption.luks_open')
+ @patch('ceph_volume.util.encryption.luks_format')
+ @patch('ceph_volume.util.disk.lsblk')
+ def test_prepare_dmcrypt_db(self, m_lsblk, m_luks_format, m_luks_open):
+ m_lsblk.return_value = {'KNAME': 'foo'}
+ m_luks_format.return_value = True
+ m_luks_open.return_value = True
+ result = raw.prepare.prepare_dmcrypt('foo', '/dev/foo', 'db', '123')
+ m_luks_open.assert_called_with('foo', '/dev/foo', 'ceph-123-foo-db-dmcrypt')
+ m_luks_format.assert_called_with('foo', '/dev/foo')
+ assert result == '/dev/mapper/ceph-123-foo-db-dmcrypt'
+
+ @patch('ceph_volume.util.encryption.luks_open')
+ @patch('ceph_volume.util.encryption.luks_format')
+ @patch('ceph_volume.util.disk.lsblk')
+ def test_prepare_dmcrypt_wal(self, m_lsblk, m_luks_format, m_luks_open):
+ m_lsblk.return_value = {'KNAME': 'foo'}
+ m_luks_format.return_value = True
+ m_luks_open.return_value = True
+ result = raw.prepare.prepare_dmcrypt('foo', '/dev/foo', 'wal', '123')
+ m_luks_open.assert_called_with('foo', '/dev/foo', 'ceph-123-foo-wal-dmcrypt')
+ m_luks_format.assert_called_with('foo', '/dev/foo')
+ assert result == '/dev/mapper/ceph-123-foo-wal-dmcrypt'
+
+ @patch('ceph_volume.devices.raw.prepare.rollback_osd')
+ @patch('ceph_volume.devices.raw.prepare.Prepare.prepare')
+ @patch('ceph_volume.util.arg_validators.ValidRawDevice.__call__')
+ def test_safe_prepare_exception_raised(self, m_valid_device, m_prepare, m_rollback_osd):
+ m_valid_device.return_value = '/dev/foo'
+        m_prepare.side_effect = Exception('foo')
+ m_rollback_osd.return_value = 'foobar'
+ with pytest.raises(Exception):
+ raw.prepare.Prepare(argv=['--bluestore', '--data', '/dev/foo']).main()
+ m_rollback_osd.assert_called()
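
The three `prepare_dmcrypt` tests above all assert the same contract: format the device with LUKS, open it under a mapper name built from the OSD fsid, the device's kernel name (lsblk `KNAME`) and the device type, then return the mapper path. A minimal sketch of that contract, assuming the `luks_format(key, device)` and `luks_open(key, device, mapping)` signatures the mocks check; `prepare_dmcrypt_sketch` is a hypothetical stand-in, not the actual `raw.prepare` code:

```python
from ceph_volume.util import disk, encryption

def prepare_dmcrypt_sketch(key, device, device_type, fsid):
    # mapper name pattern asserted by the tests:
    # ceph-<fsid>-<kernel name>-<device type>-dmcrypt
    kname = disk.lsblk(device)['KNAME']
    mapping = 'ceph-{}-{}-{}-dmcrypt'.format(fsid, kname, device_type)
    encryption.luks_format(key, device)         # format first
    encryption.luks_open(key, device, mapping)  # then open under the mapper name
    return '/dev/mapper/{}'.format(mapping)
```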
diff --git a/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py b/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py
new file mode 100644
index 000000000..5c7bd3117
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py
@@ -0,0 +1,200 @@
+import os
+import pytest
+from ceph_volume.devices.simple import activate
+
+
+class TestActivate(object):
+
+ def test_no_data_uuid(self, factory, is_root, monkeypatch, capture, fake_filesystem):
+ fake_filesystem.create_file('/tmp/json-config', contents='{}')
+ args = factory(osd_id='0', osd_fsid='1234', json_config='/tmp/json-config')
+ with pytest.raises(RuntimeError):
+ activate.Activate([]).activate(args)
+
+ def test_invalid_json_path(self):
+ os.environ['CEPH_VOLUME_SIMPLE_JSON_DIR'] = '/non/existing/path'
+ with pytest.raises(RuntimeError) as error:
+ activate.Activate(['1', 'asdf']).main()
+ assert 'Expected JSON config path not found' in str(error.value)
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ activate.Activate([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Activate OSDs by mounting devices previously configured' in stdout
+
+ def test_activate_all(self, is_root, monkeypatch):
+ '''
+ make sure Activate calls activate for each file returned by glob
+ '''
+ mocked_glob = []
+ def mock_glob(glob):
+ path = os.path.dirname(glob)
+ mocked_glob.extend(['{}/{}.json'.format(path, file_) for file_ in
+ ['1', '2', '3']])
+ return mocked_glob
+ activate_files = []
+ def mock_activate(self, args):
+ activate_files.append(args.json_config)
+ monkeypatch.setattr('glob.glob', mock_glob)
+ monkeypatch.setattr(activate.Activate, 'activate', mock_activate)
+ activate.Activate(['--all']).main()
+ assert activate_files == mocked_glob
+
+
+class TestEnableSystemdUnits(object):
+
+ def test_nothing_is_activated(self, is_root, capsys, fake_filesystem):
+ fake_filesystem.create_file('/tmp/json-config', contents='{}')
+ activation = activate.Activate(['--no-systemd', '--file', '/tmp/json-config', '0', '1234'], from_trigger=True)
+ activation.activate = lambda x: True
+ activation.main()
+ activation.enable_systemd_units('0', '1234')
+ stdout, stderr = capsys.readouterr()
+ assert 'Skipping enabling of `simple`' in stderr
+ assert 'Skipping masking of ceph-disk' in stderr
+ assert 'Skipping enabling and starting OSD simple' in stderr
+
+ def test_no_systemd_flag_is_true(self, is_root, fake_filesystem):
+ fake_filesystem.create_file('/tmp/json-config', contents='{}')
+ activation = activate.Activate(['--no-systemd', '--file', '/tmp/json-config', '0', '1234'], from_trigger=True)
+ activation.activate = lambda x: True
+ activation.main()
+ assert activation.skip_systemd is True
+
+ def test_no_systemd_flag_is_false(self, is_root, fake_filesystem):
+ fake_filesystem.create_file('/tmp/json-config', contents='{}')
+ activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=True)
+ activation.activate = lambda x: True
+ activation.main()
+ assert activation.skip_systemd is False
+
+ def test_masks_ceph_disk(self, is_root, monkeypatch, capture, fake_filesystem):
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', capture)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True)
+
+ fake_filesystem.create_file('/tmp/json-config', contents='{}')
+ activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=False)
+ activation.activate = lambda x: True
+ activation.main()
+ activation.enable_systemd_units('0', '1234')
+ assert len(capture.calls) == 1
+
+ def test_enables_simple_unit(self, is_root, monkeypatch, capture, fake_filesystem):
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', capture)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True)
+
+ fake_filesystem.create_file('/tmp/json-config', contents='{}')
+ activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=False)
+ activation.activate = lambda x: True
+ activation.main()
+ activation.enable_systemd_units('0', '1234')
+ assert len(capture.calls) == 1
+ assert capture.calls[0]['args'] == ('0', '1234', 'simple')
+
+ def test_enables_osd_unit(self, is_root, monkeypatch, capture, fake_filesystem):
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', capture)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True)
+
+ fake_filesystem.create_file('/tmp/json-config', contents='{}')
+ activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=False)
+ activation.activate = lambda x: True
+ activation.main()
+ activation.enable_systemd_units('0', '1234')
+ assert len(capture.calls) == 1
+ assert capture.calls[0]['args'] == ('0',)
+
+ def test_starts_osd_unit(self, is_root, monkeypatch, capture, fake_filesystem):
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', capture)
+
+ fake_filesystem.create_file('/tmp/json-config', contents='{}')
+ activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=False)
+ activation.activate = lambda x: True
+ activation.main()
+ activation.enable_systemd_units('0', '1234')
+ assert len(capture.calls) == 1
+ assert capture.calls[0]['args'] == ('0',)
+
+
+class TestValidateDevices(object):
+
+ def test_filestore_missing_journal(self):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError) as error:
+ activation.validate_devices({'type': 'filestore', 'data': {}})
+ assert 'Unable to activate filestore OSD due to missing devices' in str(error.value)
+
+ def test_filestore_missing_data(self):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError) as error:
+ activation.validate_devices({'type': 'filestore', 'journal': {}})
+ assert 'Unable to activate filestore OSD due to missing devices' in str(error.value)
+
+ def test_filestore_journal_device_found(self, capsys):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError):
+ activation.validate_devices({'type': 'filestore', 'journal': {}})
+ stdout, stderr = capsys.readouterr()
+ assert "devices found: ['journal']" in stderr
+
+ def test_filestore_data_device_found(self, capsys):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError):
+ activation.validate_devices({'type': 'filestore', 'data': {}})
+ stdout, stderr = capsys.readouterr()
+ assert "devices found: ['data']" in stderr
+
+ def test_filestore_with_all_devices(self):
+ activation = activate.Activate([])
+ result = activation.validate_devices({'type': 'filestore', 'journal': {}, 'data': {}})
+ assert result is True
+
+ def test_filestore_without_type(self):
+ activation = activate.Activate([])
+ result = activation.validate_devices({'journal': {}, 'data': {}})
+ assert result is True
+
+ def test_bluestore_with_all_devices(self):
+ activation = activate.Activate([])
+ result = activation.validate_devices({'type': 'bluestore', 'data': {}, 'block': {}})
+ assert result is True
+
+ def test_bluestore_without_type(self):
+ activation = activate.Activate([])
+ result = activation.validate_devices({'data': {}, 'block': {}})
+ assert result is True
+
+ def test_bluestore_is_default(self):
+ activation = activate.Activate([])
+ result = activation.validate_devices({'data': {}, 'block': {}})
+ assert result is True
+
+ def test_bluestore_data_device_found(self, capsys):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError):
+ activation.validate_devices({'data': {}})
+ stdout, stderr = capsys.readouterr()
+ assert "devices found: ['data']" in stderr
+
+ def test_bluestore_missing_data(self):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError) as error:
+ activation.validate_devices({'type': 'bluestore', 'block': {}})
+ assert 'Unable to activate bluestore OSD due to missing devices' in str(error.value)
+
+ def test_bluestore_block_device_found(self, capsys):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError):
+ activation.validate_devices({'block': {}})
+ stdout, stderr = capsys.readouterr()
+ assert "devices found: ['block']" in stderr
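
The `TestEnableSystemdUnits` cases earlier in this file each capture exactly one `systemctl` helper, and together they fix both the call set and the arguments. A sketch of that sequence, assuming the helpers keep the signatures the captures assert (`enable_systemd_units_sketch` is a hypothetical stand-in for the method on `Activate`):

```python
from ceph_volume.systemd import systemctl

def enable_systemd_units_sketch(osd_id, osd_fsid, skip_systemd=False):
    if skip_systemd:
        # --no-systemd or trigger-driven activation: leave units alone
        return
    systemctl.mask_ceph_disk()                           # assumed no-arg call
    systemctl.enable_volume(osd_id, osd_fsid, 'simple')  # args pinned by the test
    systemctl.enable_osd(osd_id)
    systemctl.start_osd(osd_id)
```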
diff --git a/src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py b/src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py
new file mode 100644
index 000000000..b5d120655
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py
@@ -0,0 +1,71 @@
+import os
+import pytest
+from ceph_volume.devices.simple import scan
+
+
+class TestGetContents(object):
+
+ def setup(self):
+ self.magic_file_name = '/tmp/magic-file'
+
+ def test_multiple_lines_are_left_as_is(self, fake_filesystem):
+ magic_file = fake_filesystem.create_file(self.magic_file_name, contents='first\nsecond\n')
+ scanner = scan.Scan([])
+ assert scanner.get_contents(magic_file.path) == 'first\nsecond\n'
+
+ def test_extra_whitespace_gets_removed(self, fake_filesystem):
+ magic_file = fake_filesystem.create_file(self.magic_file_name, contents='first ')
+ scanner = scan.Scan([])
+ assert scanner.get_contents(magic_file.path) == 'first'
+
+ def test_single_newline_values_are_trimmed(self, fake_filesystem):
+ magic_file = fake_filesystem.create_file(self.magic_file_name, contents='first\n')
+ scanner = scan.Scan([])
+ assert scanner.get_contents(magic_file.path) == 'first'
+
+
+class TestEtcPath(object):
+
+ def test_directory_is_valid(self, tmpdir):
+ path = str(tmpdir)
+ scanner = scan.Scan([])
+ scanner._etc_path = path
+ assert scanner.etc_path == path
+
+ def test_directory_does_not_exist_gets_created(self, tmpdir):
+ path = os.path.join(str(tmpdir), 'subdir')
+ scanner = scan.Scan([])
+ scanner._etc_path = path
+ assert scanner.etc_path == path
+ assert os.path.isdir(path)
+
+ def test_complains_when_file(self, fake_filesystem):
+ etc_dir = fake_filesystem.create_file('/etc/ceph/osd')
+ scanner = scan.Scan([])
+ scanner._etc_path = etc_dir.path
+ with pytest.raises(RuntimeError):
+ scanner.etc_path
+
+
+class TestParseKeyring(object):
+
+ def test_newlines_are_removed(self):
+ contents = [
+ '[client.osd-lockbox.8d7a8ab2-5db0-4f83-a785-2809aba403d5]',
+ '\tkey = AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==', '']
+ assert '\n' not in scan.parse_keyring('\n'.join(contents))
+
+ def test_key_has_spaces_removed(self):
+ contents = [
+ '[client.osd-lockbox.8d7a8ab2-5db0-4f83-a785-2809aba403d5]',
+ '\tkey = AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==', '']
+ result = scan.parse_keyring('\n'.join(contents))
+ assert result.startswith(' ') is False
+ assert result.endswith(' ') is False
+
+ def test_actual_key_is_extracted(self):
+ contents = [
+ '[client.osd-lockbox.8d7a8ab2-5db0-4f83-a785-2809aba403d5]',
+ '\tkey = AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==', '']
+ result = scan.parse_keyring('\n'.join(contents))
+ assert result == 'AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA=='
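
These three cases fully specify `parse_keyring`: the key value is extracted from the `key = ...` line and stripped of all surrounding whitespace and newlines. A hypothetical re-implementation of just that behavior (the helper name and the line-based scan are assumptions, not the actual ceph_volume code):

```python
def parse_keyring_sketch(contents):
    # return the value of the first "key = ..." line; splitting only on
    # the first '=' keeps the trailing base64 '==' padding intact
    for line in contents.splitlines():
        line = line.strip()
        if line.startswith('key') and '=' in line:
            return line.split('=', 1)[1].strip()
    return ''
```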
diff --git a/src/ceph-volume/ceph_volume/tests/devices/simple/test_trigger.py b/src/ceph-volume/ceph_volume/tests/devices/simple/test_trigger.py
new file mode 100644
index 000000000..d3220f2b0
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/simple/test_trigger.py
@@ -0,0 +1,45 @@
+import pytest
+from ceph_volume import exceptions
+from ceph_volume.devices.simple import trigger
+
+
+class TestParseOSDid(object):
+
+ def test_no_id_found_if_no_digit(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_id('asdlj-ljahsdfaslkjhdfa')
+
+ def test_no_id_found(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_id('ljahsdfaslkjhdfa')
+
+ def test_id_found(self):
+ result = trigger.parse_osd_id('1-ljahsdfaslkjhdfa')
+ assert result == '1'
+
+
+class TestParseOSDUUID(object):
+
+ def test_uuid_is_parsed(self):
+ result = trigger.parse_osd_uuid('1-asdf-ljkh-asdf-ljkh-asdf')
+ assert result == 'asdf-ljkh-asdf-ljkh-asdf'
+
+ def test_uuid_is_parsed_longer_sha1(self):
+ result = trigger.parse_osd_uuid('1-foo-bar-asdf-ljkh-asdf-ljkh-asdf')
+ assert result == 'foo-bar-asdf-ljkh-asdf-ljkh-asdf'
+
+ def test_uuid_is_not_found(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_uuid('ljahsdfaslkjhdfa')
+
+ def test_uuid_is_not_found_missing_id(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_uuid('ljahs-dfa-slkjhdfa-foo')
+
+ def test_robust_double_id_in_uuid(self):
+        # the OSD id can also appear inside the UUID itself; parsing
+        # must still split on the first dash only
+ result = trigger.parse_osd_uuid("1-abc959fd-1ec9-4864-b141-3154f9b9f8ed")
+ assert result == 'abc959fd-1ec9-4864-b141-3154f9b9f8ed'
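
Taken together these tests pin down the systemd instance suffix format `<osd id>-<osd uuid>`: the id is the digit block before the first dash, and the uuid is everything after it, even when the uuid itself contains dashes. A combined sketch of that contract (`parse_suffix_sketch` is a hypothetical helper; the real module exposes `parse_osd_id` and `parse_osd_uuid` separately):

```python
from ceph_volume import exceptions

def parse_suffix_sketch(string):
    # split once, on the first dash only, so dashes inside the uuid survive
    osd_id, _, osd_uuid = string.partition('-')
    if not osd_id.isdigit():
        raise exceptions.SuffixParsingError('OSD id', string)
    if not osd_uuid:
        raise exceptions.SuffixParsingError('OSD uuid', string)
    return osd_id, osd_uuid
```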
diff --git a/src/ceph-volume/ceph_volume/tests/devices/test_zap.py b/src/ceph-volume/ceph_volume/tests/devices/test_zap.py
new file mode 100644
index 000000000..745b58ae5
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/test_zap.py
@@ -0,0 +1,38 @@
+import pytest
+from ceph_volume.devices import lvm
+from mock.mock import patch, MagicMock
+
+
+class TestZap(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.zap.Zap([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Zaps the given logical volume(s), raw device(s) or partition(s)' in stdout
+
+ def test_main_shows_full_help(self, capsys):
+ with pytest.raises(SystemExit):
+ lvm.zap.Zap(argv=['--help']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'optional arguments' in stdout
+
+ @pytest.mark.parametrize('device_name', [
+ '/dev/mapper/foo',
+ '/dev/dm-0',
+ ])
+ @patch('ceph_volume.util.arg_validators.Device')
+ def test_can_not_zap_mapper_device(self, mocked_device, monkeypatch, device_info, capsys, is_root, device_name):
+ monkeypatch.setattr('os.path.exists', lambda x: True)
+ mocked_device.return_value = MagicMock(
+ is_mapper=True,
+ is_mpath=False,
+ used_by_ceph=True,
+ exists=True,
+ has_partitions=False,
+ has_gpt_headers=False,
+ has_fs=False
+ )
+ with pytest.raises(SystemExit):
+ lvm.zap.Zap(argv=[device_name]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Refusing to zap' in stderr
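
The parametrized test above asserts one guard: plain device-mapper paths are refused with a message on stderr and a non-zero exit. A minimal sketch of such a guard, assuming a `device` object with the mocked attributes; `ensure_zappable_sketch` is a hypothetical name, not the actual `Zap` code:

```python
import sys

def ensure_zappable_sketch(device):
    # refuse /dev/mapper/* and /dev/dm-* devices unless they are multipath
    if device.is_mapper and not device.is_mpath:
        sys.stderr.write('Refusing to zap the mapper device: {}\n'.format(device))
        raise SystemExit(1)
```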
diff --git a/src/ceph-volume/ceph_volume/tests/functional/.gitignore b/src/ceph-volume/ceph_volume/tests/functional/.gitignore
new file mode 100644
index 000000000..a2ee2e58b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/.gitignore
@@ -0,0 +1,5 @@
+*.vdi
+.vagrant/
+vagrant_ssh_config
+fetch/
+global_vagrant_variables.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/README.md b/src/ceph-volume/ceph_volume/tests/functional/README.md
new file mode 100644
index 000000000..b9e892ac1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/README.md
@@ -0,0 +1,24 @@
+# ceph-volume functional test suite
+
+This test suite is based on Vagrant and is normally run via Jenkins on GitHub
+PRs. With a functioning Vagrant installation these tests can also be run locally
+(tested with Vagrant's libvirt provider).
+
+## Vagrant with libvirt
+By default the tests make assumptions about the network segments to use (public
+and cluster network), as well as the libvirt storage pool and URI. In an
+otherwise unused Vagrant setup these defaults should be fine.
+If you prefer to explicitly configure the storage pool and libvirt
+URI, create a file
+`$ceph_repo/src/ceph-volume/ceph_volume/tests/functional/global_vagrant_variables.yml`
+with content as follows:
+``` yaml
+libvirt_uri: qemu:///system
+libvirt_storage_pool: 'vagrant-ceph-nvme'
+```
+Adjust the values as needed.
+
+After this, descend into a test directory (e.g.
+`$ceph_repo/src/ceph-volume/ceph_volume/tests/functional/lvm`) and run `tox -vre
+centos7-bluestore-create -- --provider=libvirt` to execute the tests in
+`$ceph_repo/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/`.
diff --git a/src/ceph-volume/ceph_volume/tests/functional/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/Vagrantfile
new file mode 100644
index 000000000..f5425165a
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/Vagrantfile
@@ -0,0 +1,423 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+require 'yaml'
+require 'time'
+VAGRANTFILE_API_VERSION = '2'
+
+DEBUG = false
+
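+# This Vagrantfile is symlinked into each test scenario directory; when it is
+# invoked through such a symlink, optional global settings (libvirt URI and
+# storage pool) are read from a global_vagrant_variables.yml placed next to
+# the real file.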
+global_settings = {}
+if File.symlink?(__FILE__)
+ global_config = File.expand_path(
+ File.join(
+ File.dirname(File.readlink(__FILE__)),
+ 'global_vagrant_variables.yml')
+ )
+ if File.exist?(global_config)
+ global_settings = YAML.load_file(global_config)
+ end
+end
+
+LIBVIRT_URI = global_settings.fetch('libvirt_uri', '')
+LIBVIRT_STORAGE_POOL = global_settings.fetch('libvirt_storage_pool', '')
+
+config_file=File.expand_path(File.join(File.dirname(__FILE__), 'vagrant_variables.yml'))
+settings=YAML.load_file(config_file)
+
+LABEL_PREFIX = settings['label_prefix'] ? settings['label_prefix'] + "-" : ""
+NMONS = settings['mon_vms']
+NOSDS = settings['osd_vms']
+NMDSS = settings['mds_vms']
+NRGWS = settings['rgw_vms']
+NNFSS = settings['nfs_vms']
+RESTAPI = settings['restapi']
+NRBD_MIRRORS = settings['rbd_mirror_vms']
+CLIENTS = settings['client_vms']
+NISCSI_GWS = settings['iscsi_gw_vms']
+PUBLIC_SUBNET = settings['public_subnet']
+CLUSTER_SUBNET = settings['cluster_subnet']
+BOX = settings['vagrant_box']
+CLIENT_BOX = settings['client_vagrant_box']
+BOX_URL = settings['vagrant_box_url']
+SYNC_DIR = settings.fetch('vagrant_sync_dir', '/vagrant')
+MEMORY = settings['memory']
+ETH = settings['eth']
+USER = settings['ssh_username']
+
+ASSIGN_STATIC_IP = settings.fetch('assign_static_ip', true)
+DISABLE_SYNCED_FOLDER = settings.fetch('vagrant_disable_synced_folder', false)
+DISK_UUID = Time.now.utc.to_i
+
+def create_vmdk(name, size)
+ dir = Pathname.new(__FILE__).expand_path.dirname
+ path = File.join(dir, '.vagrant', name + '.vmdk')
+ `vmware-vdiskmanager -c -s #{size} -t 0 -a scsi #{path} \
+ 2>&1 > /dev/null` unless File.exist?(path)
+end
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+ config.ssh.insert_key = false # workaround for https://github.com/mitchellh/vagrant/issues/5048
+ config.ssh.private_key_path = settings['ssh_private_key_path']
+ config.ssh.username = USER
+
+ config.vm.provider :libvirt do |lv|
+ # When using libvirt, avoid errors like:
+ # "CPU feature cmt not found"
+ lv.cpu_mode = 'host-passthrough'
+ # set libvirt uri if present
+ if not LIBVIRT_URI.empty?
+ lv.uri = LIBVIRT_URI
+ end
+ # set libvirt storage pool if present
+ if not LIBVIRT_STORAGE_POOL.empty?
+ lv.storage_pool_name = LIBVIRT_STORAGE_POOL
+ end
+ end
+
+ (0..CLIENTS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}client#{i}" do |client|
+ client.vm.box = CLIENT_BOX
+ client.vm.hostname = "#{LABEL_PREFIX}client#{i}"
+ if ASSIGN_STATIC_IP
+ client.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.4#{i}"
+ end
+ # Virtualbox
+ client.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ client.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ client.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ lv.nic_model_type = "e1000"
+ end
+
+ # Parallels
+ client.vm.provider "parallels" do |prl|
+ prl.name = "client#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ client.vm.provider :linode do |provider|
+ provider.label = client.vm.hostname
+ end
+ end
+ end
+
+ (0..NRGWS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}rgw#{i}" do |rgw|
+ rgw.vm.box = BOX
+ rgw.vm.box_url = BOX_URL
+ rgw.vm.hostname = "#{LABEL_PREFIX}rgw#{i}"
+ if ASSIGN_STATIC_IP
+ rgw.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.5#{i}"
+ end
+
+ # Virtualbox
+ rgw.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ rgw.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ rgw.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ lv.nic_model_type = "e1000"
+ end
+
+ # Parallels
+ rgw.vm.provider "parallels" do |prl|
+ prl.name = "rgw#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ rgw.vm.provider :linode do |provider|
+ provider.label = rgw.vm.hostname
+ end
+ end
+ end
+
+ (0..NNFSS - 1).each do |i|
+ config.vm.define "nfs#{i}" do |nfs|
+ nfs.vm.box = BOX
+ nfs.vm.box_url = BOX_URL
+ nfs.vm.hostname = "nfs#{i}"
+ if ASSIGN_STATIC_IP
+ nfs.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.6#{i}"
+ end
+
+ # Virtualbox
+ nfs.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ nfs.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ nfs.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ lv.nic_model_type = "e1000"
+ end
+
+ # Parallels
+ nfs.vm.provider "parallels" do |prl|
+ prl.name = "nfs#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ nfs.vm.provider :linode do |provider|
+ provider.label = nfs.vm.hostname
+ end
+ end
+ end
+
+ (0..NMDSS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}mds#{i}" do |mds|
+ mds.vm.box = BOX
+ mds.vm.box_url = BOX_URL
+ mds.vm.hostname = "#{LABEL_PREFIX}mds#{i}"
+ if ASSIGN_STATIC_IP
+ mds.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.7#{i}"
+ end
+ # Virtualbox
+ mds.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ mds.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ mds.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ lv.nic_model_type = "e1000"
+ end
+ # Parallels
+ mds.vm.provider "parallels" do |prl|
+ prl.name = "mds#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ mds.vm.provider :linode do |provider|
+ provider.label = mds.vm.hostname
+ end
+ end
+ end
+
+ (0..NRBD_MIRRORS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}rbd_mirror#{i}" do |rbd_mirror|
+ rbd_mirror.vm.box = BOX
+ rbd_mirror.vm.box_url = BOX_URL
+ rbd_mirror.vm.hostname = "#{LABEL_PREFIX}rbd-mirror#{i}"
+ if ASSIGN_STATIC_IP
+ rbd_mirror.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.8#{i}"
+ end
+ # Virtualbox
+ rbd_mirror.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ rbd_mirror.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ rbd_mirror.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ lv.nic_model_type = "e1000"
+ end
+ # Parallels
+ rbd_mirror.vm.provider "parallels" do |prl|
+ prl.name = "rbd-mirror#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ rbd_mirror.vm.provider :linode do |provider|
+ provider.label = rbd_mirror.vm.hostname
+ end
+ end
+ end
+
+ (0..NISCSI_GWS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}iscsi_gw#{i}" do |iscsi_gw|
+ iscsi_gw.vm.box = BOX
+ iscsi_gw.vm.box_url = BOX_URL
+ iscsi_gw.vm.hostname = "#{LABEL_PREFIX}iscsi-gw#{i}"
+ if ASSIGN_STATIC_IP
+ iscsi_gw.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.9#{i}"
+ end
+ # Virtualbox
+ iscsi_gw.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ iscsi_gw.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ iscsi_gw.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ lv.nic_model_type = "e1000"
+ end
+ # Parallels
+ iscsi_gw.vm.provider "parallels" do |prl|
+ prl.name = "iscsi-gw#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ iscsi_gw.vm.provider :linode do |provider|
+ provider.label = iscsi_gw.vm.hostname
+ end
+ end
+ end
+
+ (0..NMONS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}mon#{i}" do |mon|
+ mon.vm.box = BOX
+ mon.vm.box_url = BOX_URL
+ mon.vm.hostname = "#{LABEL_PREFIX}mon#{i}"
+ if ASSIGN_STATIC_IP
+ mon.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.1#{i}"
+ end
+ # Virtualbox
+ mon.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ mon.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ mon.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ lv.nic_model_type = "e1000"
+ end
+
+ # Parallels
+ mon.vm.provider "parallels" do |prl|
+ prl.name = "mon#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ mon.vm.provider :linode do |provider|
+ provider.label = mon.vm.hostname
+ end
+ end
+ end
+
+ (0..NOSDS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}osd#{i}" do |osd|
+ osd.vm.box = BOX
+ osd.vm.box_url = BOX_URL
+ osd.vm.hostname = "#{LABEL_PREFIX}osd#{i}"
+ if ASSIGN_STATIC_IP
+ osd.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.10#{i}"
+ osd.vm.network :private_network,
+ ip: "#{CLUSTER_SUBNET}.20#{i}"
+ end
+ # Virtualbox
+ osd.vm.provider :virtualbox do |vb|
+ # Create our own controller for consistency and to remove VM dependency
+ # but only do it once, otherwise it would fail when rebooting machines.
+ # We assume this has run if one disk was created before
+ unless File.exist?("disk-#{i}-0.vdi")
+ vb.customize ['storagectl', :id,
+ '--name', 'OSD Controller',
+ '--add', 'scsi']
+ end
+ (0..2).each do |d|
+ vb.customize ['createhd',
+ '--filename', "disk-#{i}-#{d}",
+ '--size', '12000'] unless File.exist?("disk-#{i}-#{d}.vdi")
+ vb.customize ['storageattach', :id,
+ '--storagectl', 'OSD Controller',
+ '--port', 3 + d,
+ '--device', 0,
+ '--type', 'hdd',
+ '--medium', "disk-#{i}-#{d}.vdi"]
+ end
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ osd.vm.provider :vmware_fusion do |v|
+ (0..1).each do |d|
+ v.vmx["scsi0:#{d + 1}.present"] = 'TRUE'
+ v.vmx["scsi0:#{d + 1}.fileName"] =
+ create_vmdk("disk-#{i}-#{d}", '11000MB')
+ end
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ driverletters = ('b'..'z').to_a
+ osd.vm.provider :libvirt do |lv|
+      # always create /dev/sd{b,c,d,e} so that CI can ensure that
+      # virtualbox and libvirt will have the same devices to use for OSDs
+ (0..3).each do |d|
+ lv.storage :file, :device => "sd#{driverletters[d]}", :size => '100G'
+ end
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ lv.nic_model_type = "e1000"
+ end
+
+ # Parallels
+ osd.vm.provider "parallels" do |prl|
+ prl.name = "osd#{i}"
+ prl.memory = "#{MEMORY}"
+ (0..1).each do |d|
+ prl.customize ["set", :id,
+ "--device-add",
+ "hdd",
+ "--iface",
+ "sata"]
+ end
+ end
+
+ osd.vm.provider :linode do |provider|
+ provider.label = osd.vm.hostname
+ end
+
+ end
+ end
+
+ # Faster bootup. Disables mounting the sync folder for libvirt and virtualbox
+ config.vm.synced_folder './', SYNC_DIR, disabled: DISABLE_SYNCED_FOLDER
+
+end
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/group_vars/all
new file mode 120000
index 000000000..c3808c1d7
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/group_vars/all
@@ -0,0 +1 @@
+../../../../../group_vars/bluestore \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/hosts
new file mode 100644
index 000000000..e1c1de6f8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/setup.yml
new file mode 120000
index 000000000..8cf11d4ef
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_mixed_type.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/test.yml
new file mode 120000
index 000000000..66d44c728
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test_explicit.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/test_zap.yml
new file mode 120000
index 000000000..cb969fa1d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml
new file mode 120000
index 000000000..d21531f6c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml
@@ -0,0 +1 @@
+../../../../vagrant_variables.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/group_vars/all
new file mode 120000
index 000000000..c3808c1d7
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/group_vars/all
@@ -0,0 +1 @@
+../../../../../group_vars/bluestore \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/hosts
new file mode 100644
index 000000000..e1c1de6f8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/setup.yml
new file mode 120000
index 000000000..8cf11d4ef
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_mixed_type.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/test.yml
new file mode 120000
index 000000000..aa867bcde
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/test_zap.yml
new file mode 120000
index 000000000..cb969fa1d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/vagrant_variables.yml
new file mode 120000
index 000000000..d21531f6c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/vagrant_variables.yml
@@ -0,0 +1 @@
+../../../../vagrant_variables.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/group_vars/all
new file mode 120000
index 000000000..c3808c1d7
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/group_vars/all
@@ -0,0 +1 @@
+../../../../../group_vars/bluestore \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/hosts
new file mode 100644
index 000000000..e1c1de6f8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/setup.yml
new file mode 120000
index 000000000..8cf11d4ef
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_mixed_type.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/test.yml
new file mode 120000
index 000000000..66d44c728
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test_explicit.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/test_zap.yml
new file mode 120000
index 000000000..cb969fa1d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/vagrant_variables.yml
new file mode 120000
index 000000000..d21531f6c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/vagrant_variables.yml
@@ -0,0 +1 @@
+../../../../vagrant_variables.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/group_vars/all
new file mode 120000
index 000000000..c3808c1d7
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/group_vars/all
@@ -0,0 +1 @@
+../../../../../group_vars/bluestore \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/hosts
new file mode 100644
index 000000000..e1c1de6f8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/setup.yml
new file mode 120000
index 000000000..8cf11d4ef
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_mixed_type.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/test.yml
new file mode 120000
index 000000000..aa867bcde
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/test_zap.yml
new file mode 120000
index 000000000..cb969fa1d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/vagrant_variables.yml
new file mode 120000
index 000000000..d21531f6c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/vagrant_variables.yml
@@ -0,0 +1 @@
+../../../../vagrant_variables.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/group_vars/all
new file mode 120000
index 000000000..1e6ea0080
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/group_vars/all
@@ -0,0 +1 @@
+../../../../../group_vars/bluestore_single \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/hosts
new file mode 100644
index 000000000..e1c1de6f8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/setup.yml
new file mode 120000
index 000000000..30874dfbb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/noop.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/test.yml
new file mode 120000
index 000000000..aa867bcde
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/test_zap.yml
new file mode 120000
index 000000000..cb969fa1d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/vagrant_variables.yml
new file mode 120000
index 000000000..d21531f6c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/vagrant_variables.yml
@@ -0,0 +1 @@
+../../../../vagrant_variables.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/group_vars/all
new file mode 120000
index 000000000..1e6ea0080
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/group_vars/all
@@ -0,0 +1 @@
+../../../../../group_vars/bluestore_single \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/hosts
new file mode 100644
index 000000000..e1c1de6f8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/setup.yml
new file mode 120000
index 000000000..30874dfbb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/noop.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/test.yml
new file mode 120000
index 000000000..aa867bcde
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/test_zap.yml
new file mode 120000
index 000000000..cb969fa1d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/vagrant_variables.yml
new file mode 120000
index 000000000..d21531f6c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/vagrant_variables.yml
@@ -0,0 +1 @@
+../../../../vagrant_variables.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/group_vars/all
new file mode 120000
index 000000000..689ca5399
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/group_vars/all
@@ -0,0 +1 @@
+../../../../../group_vars/filestore \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/hosts
new file mode 100644
index 000000000..e1c1de6f8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/setup.yml
new file mode 120000
index 000000000..8cf11d4ef
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_mixed_type.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/test.yml
new file mode 120000
index 000000000..66d44c728
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test_explicit.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/test_zap.yml
new file mode 120000
index 000000000..cb969fa1d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml
new file mode 120000
index 000000000..d21531f6c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml
@@ -0,0 +1 @@
+../../../../vagrant_variables.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/group_vars/all
new file mode 120000
index 000000000..689ca5399
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/group_vars/all
@@ -0,0 +1 @@
+../../../../../group_vars/filestore \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/hosts
new file mode 100644
index 000000000..e1c1de6f8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/setup.yml
new file mode 120000
index 000000000..8cf11d4ef
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_mixed_type.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/test.yml
new file mode 120000
index 000000000..aa867bcde
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/test_zap.yml
new file mode 120000
index 000000000..cb969fa1d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/vagrant_variables.yml
new file mode 120000
index 000000000..d21531f6c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-dmcrypt/vagrant_variables.yml
@@ -0,0 +1 @@
+../../../../vagrant_variables.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/group_vars/all
new file mode 120000
index 000000000..689ca5399
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/group_vars/all
@@ -0,0 +1 @@
+../../../../../group_vars/filestore \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/hosts
new file mode 100644
index 000000000..e1c1de6f8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/setup.yml
new file mode 120000
index 000000000..8cf11d4ef
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_mixed_type.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/test.yml
new file mode 120000
index 000000000..66d44c728
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test_explicit.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/test_zap.yml
new file mode 120000
index 000000000..cb969fa1d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/vagrant_variables.yml
new file mode 120000
index 000000000..d21531f6c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type-explicit/vagrant_variables.yml
@@ -0,0 +1 @@
+../../../../vagrant_variables.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/group_vars/all
new file mode 120000
index 000000000..689ca5399
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/group_vars/all
@@ -0,0 +1 @@
+../../../../../group_vars/filestore \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/hosts
new file mode 100644
index 000000000..e1c1de6f8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/setup.yml
new file mode 120000
index 000000000..8cf11d4ef
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_mixed_type.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/test.yml
new file mode 120000
index 000000000..aa867bcde
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/test_zap.yml
new file mode 120000
index 000000000..cb969fa1d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/vagrant_variables.yml
new file mode 120000
index 000000000..d21531f6c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/mixed-type/vagrant_variables.yml
@@ -0,0 +1 @@
+../../../../vagrant_variables.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/group_vars/all
new file mode 120000
index 000000000..ef102881f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/group_vars/all
@@ -0,0 +1 @@
+../../../../../group_vars/filestore_single \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/hosts
new file mode 100644
index 000000000..e1c1de6f8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/setup.yml
new file mode 120000
index 000000000..30874dfbb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/noop.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/test.yml
new file mode 120000
index 000000000..aa867bcde
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/test_zap.yml
new file mode 120000
index 000000000..cb969fa1d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/vagrant_variables.yml
new file mode 120000
index 000000000..d21531f6c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type-dmcrypt/vagrant_variables.yml
@@ -0,0 +1 @@
+../../../../vagrant_variables.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/group_vars/all
new file mode 120000
index 000000000..ef102881f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/group_vars/all
@@ -0,0 +1 @@
+../../../../../group_vars/filestore_single \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/hosts
new file mode 100644
index 000000000..e1c1de6f8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/setup.yml
new file mode 120000
index 000000000..30874dfbb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/noop.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/test.yml
new file mode 120000
index 000000000..aa867bcde
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/test_zap.yml
new file mode 120000
index 000000000..cb969fa1d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/vagrant_variables.yml
new file mode 120000
index 000000000..d21531f6c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/filestore/single-type/vagrant_variables.yml
@@ -0,0 +1 @@
+../../../../vagrant_variables.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/noop.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/noop.yml
new file mode 100644
index 000000000..5922ecf2e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/noop.yml
@@ -0,0 +1,12 @@
+---
+
+# Allows every functional test to always include a 'setup.yml' file, while
+# doing actual setup work only in the scenarios that need it
+
+- hosts: all
+ gather_facts: no
+
+ tasks:
+
+ - debug:
+ msg: "This is an empty setup playbook. The current scenario doesn't require any setup work."
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml
new file mode 100644
index 000000000..1fa9f66fc
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml
@@ -0,0 +1,221 @@
+---
+- hosts: osds
+ become: yes
+ tasks:
+
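+ # the tasks below emulate two NVMe devices: sparse files (held on /dev/vdd1
+ # and /dev/vde1) are attached to loop devices and exported through the
+ # kernel nvmet 'loop' transport, so the batch scenarios see a mix of
+ # rotational and NVMe devices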
+ - name: install lvm2
+ package:
+ name: lvm2
+ state: present
+ - name: tell lvm to ignore loop devices
+ lineinfile:
+ path: /etc/lvm/lvm.conf
+ line: "\tfilter = [ 'r|loop.*|' ]"
+ insertafter: 'devices {'
+ - name: tell lvm to allow changes despite duplicate PVIDs
+ lineinfile:
+ path: /etc/lvm/lvm.conf
+ line: ' allow_changes_with_duplicate_pvs = 1'
+ insertafter: '^devices {'
+ regexp: 'allow_changes_with_duplicate_pvs = 0'
+ - name: create mount points
+ command: "mkdir /opt/{{ item }}"
+ loop:
+ - vdd
+ - vde
+ ignore_errors: yes
+
+ - name: add part
+ shell: echo "type=83" | sfdisk /dev/{{ item }}
+ loop:
+ - vdd
+ - vde
+
+ - name: add fs
+ command: "mkfs.ext4 /dev/{{ item }}1"
+ loop:
+ - vdd
+ - vde
+
+ - name: mount additional drives
+ command: "mount /dev/{{ item }}1 /opt/{{ item }}"
+ loop:
+ - vdd
+ - vde
+
+ - name: create the nvme image systemd unit
+ copy:
+ content: |
+ [Unit]
+ Description=NVMe loop device
+ After=local-fs.target
+ Wants=local-fs.target
+
+ [Service]
+ Type=simple
+ ExecStart=/bin/bash /opt/ceph-nvme.sh
+ StandardOutput=journal
+ StandardError=journal
+
+ [Install]
+ WantedBy=multi-user.target
+ dest: "/etc/systemd/system/ceph-nvme.service"
+
+ - name: create the ceph-nvme startup script
+ copy:
+ content: |
+ set -x
+ set -e
+ mount /dev/vdd1 /opt/vdd
+ mount /dev/vde1 /opt/vde
+ modprobe nvmet
+ modprobe nvme_loop
+ modprobe nvme_fabrics
+ modprobe loop
+ losetup -v /dev/loop0 /opt/vdd/loop0_nvme0
+ losetup -v /dev/loop1 /opt/vde/loop1_nvme1
+ losetup -l
+ nvmetcli restore /opt/loop.json
+ nvme connect -t loop -n testnqn1 -q hostnqn
+ nvme connect -t loop -n testnqn2 -q hostnqn
+ nvme list
+ dest: "/opt/ceph-nvme.sh"
+
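+ # loop devices and nvmet targets do not survive a reboot; the unit above
+ # re-creates them at boot, which matters because the tox scenarios reboot the vms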
+ - name: ensure ceph-nvme is enabled
+ service:
+ name: ceph-nvme
+ state: stopped
+ enabled: yes
+
+ - name: install nvme dependencies
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - nvme-cli
+ - nvmetcli
+
+ - name: enable NVME kernel modules
+ modprobe:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - nvmet
+ - nvme_loop
+ - nvme_fabrics
+
+ - name: detach nvme files from loop devices
+ command: "losetup -d /dev/{{ item }}"
+ failed_when: false
+ loop:
+ - loop0
+ - loop1
+
+ - name: remove previous nvme files
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - /opt/vdd/loop0_nvme0
+ - /opt/vde/loop1_nvme1
+
+ - name: create 20GB sparse files for NVMe
+ command: "fallocate -l 20G {{ item }}"
+ loop:
+ - /opt/vdd/loop0_nvme0
+ - /opt/vde/loop1_nvme1
+
+ - name: setup loop devices with sparse files 0
+ command: "losetup /dev/loop0 /opt/vdd/loop0_nvme0"
+ failed_when: false
+
+ - name: setup loop devices with sparse files 1
+ command: "losetup /dev/loop1 /opt/vde/loop1_nvme1"
+ failed_when: false
+
+ - name: create the loop.json file for nvmetcli
+ copy:
+ content: |
+ {
+ "hosts": [
+ {
+ "nqn": "hostnqn"
+ }
+ ],
+ "ports": [
+ {
+ "addr": {
+ "adrfam": "",
+ "traddr": "",
+ "treq": "not specified",
+ "trsvcid": "",
+ "trtype": "loop"
+ },
+ "portid": 1,
+ "referrals": [],
+ "subsystems": [
+ "testnqn1",
+ "testnqn2"
+ ]
+ }
+ ],
+ "subsystems": [
+ {
+ "allowed_hosts": [
+ "hostnqn"
+ ],
+ "attr": {
+ "allow_any_host": "0"
+ },
+ "namespaces": [
+ {
+ "device": {
+ "nguid": "ef90689c-6c46-d44c-89c1-4067801309a8",
+ "path": "/dev/loop0"
+ },
+ "enable": 1,
+ "nsid": 1
+ }
+ ],
+ "nqn": "testnqn1"
+ },
+ {
+ "allowed_hosts": [
+ "hostnqn"
+ ],
+ "attr": {
+ "allow_any_host": "0"
+ },
+ "namespaces": [
+ {
+ "device": {
+ "nguid": "ef90689c-6c46-d44c-89c1-4067801309a7",
+ "path": "/dev/loop1"
+ },
+ "enable": 1,
+ "nsid": 2
+ }
+ ],
+ "nqn": "testnqn2"
+ }
+ ]
+ }
+ dest: "/opt/loop.json"
+
+ - name: setup the /dev/loop0 target with nvmetcli
+ command: nvmetcli restore /opt/loop.json
+
+ - name: connect the new target as an nvme device
+ command: "nvme connect -t loop -n testnqn{{ item }} -q hostnqn"
+ loop:
+ - 1
+ - 2
+
+ - name: debug output for nvme list
+ command: nvme list
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml
new file mode 100644
index 000000000..5d5bc59f2
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml
@@ -0,0 +1,66 @@
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd daemons
+ service:
+ name: "ceph-osd@{{ item }}"
+ state: stopped
+ with_items: "{{ osd_ids }}"
+
+- hosts: mons
+ become: yes
+ tasks:
+
+ - name: mark osds down
+ command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ with_items: "{{ osd_ids }}"
+ - name: purge osds
+ command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it"
+ with_items: "{{ osd_ids }}"
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: zap devices used for OSDs
+ command: "ceph-volume --cluster {{ cluster }} lvm zap {{ item }} --destroy"
+ with_items: "{{ devices }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: batch create devices again
+ command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
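+ # re-running the exact same batch command should either be a no-op (rc 0) or
+ # refuse to run with a 'strategy changed' error; anything else is a failure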
+ - name: ensure batch create is idempotent
+ command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
+ register: batch_cmd
+ failed_when: false
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: check batch idempotency
+ fail:
+ msg: "lvm batch failed idempotency check"
+ when:
+ - batch_cmd.rc != 0
+ - "'strategy changed' not in batch_cmd.stderr"
+
+ - name: run batch --report to see if devices get filtered
+ command: "ceph-volume --cluster {{ cluster }} lvm batch --report --format=json --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
+ register: report_cmd
+ failed_when: false
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: check batch --report idempotency
+ fail:
+ msg: "lvm batch --report failed idempotency check"
+ when:
+ - report_cmd.rc != 0
+ - "'strategy changed' not in report_cmd.stderr"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_explicit.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_explicit.yml
new file mode 100644
index 000000000..1ff0acc9d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_explicit.yml
@@ -0,0 +1,65 @@
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd daemons
+ service:
+ name: "ceph-osd@{{ item }}"
+ state: stopped
+ with_items: "{{ osd_ids }}"
+
+- hosts: mons
+ become: yes
+ tasks:
+
+ - name: mark osds down
+ command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ with_items: "{{ osd_ids }}"
+ - name: purge osds
+ command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it"
+ with_items: "{{ osd_ids }}"
+
+- hosts: osds
+ become: yes
+ vars:
+ external_devices: "{{ '--db-devices' if osd_objectstore == 'bluestore' else '--journal-devices' }}"
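+ # bluestore places DBs on --db-devices, filestore places journals on --journal-devices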
+ tasks:
+
+ - name: zap devices used for OSDs
+ command: "ceph-volume --cluster {{ cluster }} lvm zap {{ item }} --destroy"
+ with_items: "{{ devices }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: batch create devices again
+ command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: ensure batch create is idempotent when all data devices are filtered
+ command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
+ register: batch_cmd
+ failed_when: false
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: check batch idempotency
+ fail:
+ msg: "lvm batch failed idempotency check"
+ when:
+ - batch_cmd.rc != 0
+
+ - name: run batch --report to see if devices get filtered
+ command: "ceph-volume --cluster {{ cluster }} lvm batch --report --format=json --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
+ register: report_cmd
+ failed_when: false
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: check batch --report idempotency
+ fail:
+ msg: "lvm batch --report failed idempotency check"
+ when:
+ - report_cmd.rc != 0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_zap.yml
new file mode 100644
index 000000000..9d63df9e0
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_zap.yml
@@ -0,0 +1,36 @@
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd daemons
+ service:
+ name: "ceph-osd@{{ item }}"
+ state: stopped
+ with_items: "{{ osd_ids }}"
+
+
+- hosts: mons
+ become: yes
+ tasks:
+
+ - name: mark osds down
+ command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ with_items: "{{ osd_ids }}"
+ - name: purge osds
+ command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it"
+ with_items: "{{ osd_ids }}"
+
+
+- hosts: osds
+ become: yes
+ tasks:
+
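+ # 'lvm zap --osd-id' looks up every device belonging to the given OSD id,
+ # so no device paths need to be passed here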
+ - name: zap OSDs by ID, destroying their devices
+ command: "ceph-volume --cluster {{ cluster }} lvm zap --osd-id {{ item }} --destroy"
+ with_items: "{{ osd_ids }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
new file mode 100644
index 000000000..45dd7e733
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
@@ -0,0 +1,74 @@
+[tox]
+envlist = centos8-{bluestore,filestore}-{single_type,single_type_dmcrypt},centos8-{bluestore,filestore}-{mixed_type,mixed_type_dmcrypt,mixed_type_explicit,mixed_type_dmcrypt_explicit}
+skipsdist = True
+
+[testenv]
+deps = mock
+allowlist_externals =
+ vagrant
+ bash
+ git
+ cp
+ sleep
+passenv=*
+setenv=
+ ANSIBLE_CONFIG = {envdir}/tmp/ceph-ansible/ansible.cfg
+ ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
+ ANSIBLE_STDOUT_CALLBACK = debug
+ VAGRANT_CWD = {changedir}
+ CEPH_VOLUME_DEBUG = 1
+ DEBIAN_FRONTEND=noninteractive
+changedir=
+ centos8-filestore-single_type: {toxinidir}/centos8/filestore/single-type
+ centos8-filestore-single_type_dmcrypt: {toxinidir}/centos8/filestore/single-type-dmcrypt
+ centos8-filestore-mixed_type: {toxinidir}/centos8/filestore/mixed-type
+ centos8-filestore-mixed_type_dmcrypt: {toxinidir}/centos8/filestore/mixed-type-dmcrypt
+ centos8-filestore-mixed_type_explicit: {toxinidir}/centos8/filestore/mixed-type-explicit
+ centos8-filestore-mixed_type_dmcrypt_explicit: {toxinidir}/centos8/filestore/mixed-type-dmcrypt-explicit
+ centos8-bluestore-single_type: {toxinidir}/centos8/bluestore/single-type
+ centos8-bluestore-single_type_dmcrypt: {toxinidir}/centos8/bluestore/single-type-dmcrypt
+ centos8-bluestore-mixed_type: {toxinidir}/centos8/bluestore/mixed-type
+ centos8-bluestore-mixed_type_dmcrypt: {toxinidir}/centos8/bluestore/mixed-type-dmcrypt
+ centos8-bluestore-mixed_type_explicit: {toxinidir}/centos8/bluestore/mixed-type-explicit
+ centos8-bluestore-mixed_type_dmcrypt_explicit: {toxinidir}/centos8/bluestore/mixed-type-dmcrypt-explicit
+commands=
+ git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch {env:CEPH_ANSIBLE_CLONE:"https://github.com/ceph/ceph-ansible.git"} {envdir}/tmp/ceph-ansible
+ python -m pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
+
+ # bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:""} {posargs:--provider=virtualbox}
+ bash {toxinidir}/../scripts/vagrant_up.sh {posargs:--provider=virtualbox}
+ bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
+
+ cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible
+
+ # individual scenario setup
+ ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml
+
+ # use ceph-ansible to deploy a ceph cluster on the vms
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
+
+ # prepare nodes for testing with testinfra
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
+
+ # test cluster state using testinfra
+ py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+ # reboot all vms
+ bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
+
+ # after a reboot, osds may take about 20 seconds to come back up
+ sleep 30
+
+ # retest to ensure cluster came back up correctly after rebooting
+ py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+ # destroy an OSD, zap its device, and recreate it using its ID
+ ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
+
+ # retest to ensure cluster came back up correctly
+ py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+ # test zapping OSDs by ID
+ ansible-playbook -vv -i {changedir}/hosts {changedir}/test_zap.yml
+
+ vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
diff --git a/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore b/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore
new file mode 100644
index 000000000..ca0146b19
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore
@@ -0,0 +1,34 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+num_osds: 2
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+devices:
+ - /dev/vdb
+ - /dev/vdc
+ - /dev/nvme0n1
+ - /dev/nvme1n1
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+# 9 GiB in bytes (9 * 1024^3 = 9663676416)
+block_db_size: 9663676416
+
+# The following is only needed for testing purposes and is not part of
+# ceph-ansible supported variables
+
+osd_ids:
+ - 0
+ - 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_lvm b/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_lvm
new file mode 100644
index 000000000..c333af3e5
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_lvm
@@ -0,0 +1,32 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+pv_devices:
+ - /dev/vdb
+ - /dev/vdc
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ crush_device_class: test
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+ - data: /dev/vdd1
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_lvm_dmcrypt b/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_lvm_dmcrypt
new file mode 100644
index 000000000..d73637763
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_lvm_dmcrypt
@@ -0,0 +1,33 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+pv_devices:
+ - /dev/vdb
+ - /dev/vdc
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ crush_device_class: test
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+ - data: /dev/vdd1
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_single b/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_single
new file mode 100644
index 000000000..e43b14a75
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_single
@@ -0,0 +1,29 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+devices:
+ - /dev/vdb
+ - /dev/vdc
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+
+# The following is only needed for testing purposes and is not part of
+# ceph-ansible supported variables
+
+osd_ids:
+ - 0
+ - 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/group_vars/filestore b/src/ceph-volume/ceph_volume/tests/functional/group_vars/filestore
new file mode 100644
index 000000000..182925b73
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/group_vars/filestore
@@ -0,0 +1,32 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "filestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+num_osds: 2
+devices:
+ - /dev/vdb
+ - /dev/vdc
+ - /dev/nvme0n1
+ - /dev/nvme1n1
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+
+# The following is only needed for testing purposes and is not part of
+# ceph-ansible supported variables
+
+osd_ids:
+ - 0
+ - 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/group_vars/filestore_lvm b/src/ceph-volume/ceph_volume/tests/functional/group_vars/filestore_lvm
new file mode 100644
index 000000000..f5f26e7ce
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/group_vars/filestore_lvm
@@ -0,0 +1,35 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+pv_devices:
+ - /dev/vdb
+ - /dev/vdc
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda
+lvm_volumes:
+ - data: data-lv1
+ journal: /dev/vdc1
+ data_vg: test_group
+ crush_device_class: test
+ - data: data-lv2
+ journal: journal1
+ data_vg: test_group
+ journal_vg: journals
+ - data: /dev/vdd1
+ journal: /dev/vdd2
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/group_vars/filestore_lvm_dmcrypt b/src/ceph-volume/ceph_volume/tests/functional/group_vars/filestore_lvm_dmcrypt
new file mode 100644
index 000000000..e5c087271
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/group_vars/filestore_lvm_dmcrypt
@@ -0,0 +1,36 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+pv_devices:
+ - /dev/vdb
+ - /dev/vdc
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda
+lvm_volumes:
+ - data: data-lv1
+ journal: /dev/vdc1
+ data_vg: test_group
+ crush_device_class: test
+ - data: data-lv2
+ journal: journal1
+ data_vg: test_group
+ journal_vg: journals
+ - data: /dev/vdd1
+ journal: /dev/vdd2
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/group_vars/filestore_single b/src/ceph-volume/ceph_volume/tests/functional/group_vars/filestore_single
new file mode 100644
index 000000000..17463307d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/group_vars/filestore_single
@@ -0,0 +1,29 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "filestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+devices:
+ - /dev/vdb
+ - /dev/vdc
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+
+# The following is only needed for testing purposes and is not part of
+# ceph-ansible supported variables
+
+osd_ids:
+ - 0
+ - 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/group_vars/all
new file mode 120000
index 000000000..5a7af3be0
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/group_vars/all
@@ -0,0 +1 @@
+../../../../../group_vars/bluestore_lvm \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/hosts
new file mode 100644
index 000000000..e1c1de6f8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/setup.yml
new file mode 120000
index 000000000..1c1a3ce8d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_partitions.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/test.yml
new file mode 120000
index 000000000..165d9da29
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test_bluestore.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/vagrant_variables.yml
new file mode 120000
index 000000000..d21531f6c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/vagrant_variables.yml
@@ -0,0 +1 @@
+../../../../vagrant_variables.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/group_vars/all
new file mode 120000
index 000000000..6ef6a9844
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/group_vars/all
@@ -0,0 +1 @@
+../../../../../group_vars/bluestore_lvm_dmcrypt \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/hosts
new file mode 100644
index 000000000..e1c1de6f8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/setup.yml
new file mode 120000
index 000000000..1c1a3ce8d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_partitions.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/test.yml
new file mode 100644
index 000000000..0a47b5eb8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/test.yml
@@ -0,0 +1,123 @@
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd@2 daemon
+ service:
+ name: ceph-osd@2
+ state: stopped
+
+ - name: stop ceph-osd@0 daemon
+ service:
+ name: ceph-osd@0
+ state: stopped
+
+- hosts: mons
+ become: yes
+ tasks:
+ - name: mark osds down
+ command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ with_items:
+ - 0
+ - 2
+
+ - name: destroy osd.2
+ command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+ register: result
+ retries: 30
+ delay: 1
+ until: result is succeeded
+
+ - name: destroy osd.0
+ command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+ register: result
+ retries: 30
+ delay: 1
+ until: result is succeeded
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ # osd.2 device
+ - name: zap /dev/vdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # partitions have been completely removed, so re-create them again
+ - name: re-create partition /dev/vdd for lvm data usage
+ parted:
+ device: /dev/vdd
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ label: gpt
+ state: present
+
+ - name: redeploy osd.2 using /dev/vdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/vdd1 --osd-id 2"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # osd.0 lv
+ - name: zap test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: redeploy osd.0 using test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: stop ceph-osd@0 daemon
+ service:
+ name: ceph-osd@0
+ state: stopped
+
+
+- hosts: mons
+ become: yes
+ tasks:
+ - name: mark osds down
+ command: "ceph --cluster {{ cluster }} osd down osd.0"
+
+ - name: destroy osd.0
+ command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+ register: result
+ retries: 30
+ delay: 1
+ until: result is succeeded
+
+
+- hosts: osds
+ become: yes
+ tasks:
+
+
+ - name: zap test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: prepare osd.0 using test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: activate all to start the previously prepared osd.0
+ command: "ceph-volume lvm activate --all"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: node inventory
+ command: "ceph-volume inventory"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: list all OSDs
+ command: "ceph-volume lvm list"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/vagrant_variables.yml
new file mode 120000
index 000000000..d21531f6c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/vagrant_variables.yml
@@ -0,0 +1 @@
+../../../../vagrant_variables.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/group_vars/all
new file mode 120000
index 000000000..d6c71453a
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/group_vars/all
@@ -0,0 +1 @@
+../../../../../group_vars/filestore_lvm \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/hosts
new file mode 100644
index 000000000..e1c1de6f8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/setup.yml
new file mode 120000
index 000000000..1c1a3ce8d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_partitions.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/test.yml
new file mode 120000
index 000000000..1a8c37c13
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test_filestore.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/vagrant_variables.yml
new file mode 120000
index 000000000..d21531f6c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/vagrant_variables.yml
@@ -0,0 +1 @@
+../../../../vagrant_variables.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/group_vars/all
new file mode 120000
index 000000000..a17512755
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/group_vars/all
@@ -0,0 +1 @@
+../../../../../group_vars/filestore_lvm_dmcrypt \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/hosts
new file mode 100644
index 000000000..e1c1de6f8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/setup.yml
new file mode 120000
index 000000000..1c1a3ce8d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_partitions.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/test.yml
new file mode 100644
index 000000000..21eff00fa
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/test.yml
@@ -0,0 +1,120 @@
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd@2 daemon
+ service:
+ name: ceph-osd@2
+ state: stopped
+
+ - name: stop ceph-osd@0 daemon
+ service:
+ name: ceph-osd@0
+ state: stopped
+
+
+- hosts: mons
+ become: yes
+ tasks:
+ - name: mark osds down
+ command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ with_items:
+ - 0
+ - 2
+
+ - name: destroy osd.2
+ command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+ register: result
+ retries: 30
+ delay: 1
+ until: result is succeeded
+
+ - name: destroy osd.0
+ command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+ register: result
+ retries: 30
+ delay: 1
+ until: result is succeeded
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ # osd.2 device
+ - name: zap /dev/vdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: zap /dev/vdd2
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # partitions have been completely removed, so re-create them again
+ - name: re-create partition /dev/vdd for lvm data usage
+ parted:
+ device: /dev/vdd
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ label: gpt
+ state: present
+
+ - name: re-create partition /dev/vdd lvm journals
+ parted:
+ device: /dev/vdd
+ number: 2
+ part_start: 50%
+ part_end: 100%
+ unit: '%'
+ state: present
+ label: gpt
+
+ - name: redeploy osd.2 using /dev/vdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # osd.0 lv
+ - name: zap test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: zap /dev/vdc1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdc1 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: re-create partition /dev/vdc1
+ parted:
+ device: /dev/vdc
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ state: present
+ label: gpt
+
+ - name: prepare osd.0 again using test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: activate all to start the previously prepared osd.0
+ command: "ceph-volume lvm activate --filestore --all"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: node inventory
+ command: "ceph-volume inventory"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: list all OSDs
+ command: "ceph-volume lvm list"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/vagrant_variables.yml
new file mode 120000
index 000000000..d21531f6c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/vagrant_variables.yml
@@ -0,0 +1 @@
+../../../../vagrant_variables.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml
new file mode 100644
index 000000000..4b9e6638e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml
@@ -0,0 +1,27 @@
+---
+
+- hosts: osds
+ gather_facts: false
+ become: yes
+ tasks:
+
+ - name: partition /dev/vdd for lvm data usage
+ parted:
+ device: /dev/vdd
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ label: gpt
+ state: present
+
+ - name: partition /dev/vdd lvm journals
+ parted:
+ device: /dev/vdd
+ number: 2
+ part_start: 50%
+ part_end: 100%
+ unit: '%'
+ state: present
+ label: gpt
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml
new file mode 100644
index 000000000..97d77a7f4
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml
@@ -0,0 +1,161 @@
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd@2 daemon
+ service:
+ name: ceph-osd@2
+ state: stopped
+
+ - name: stop ceph-osd@0 daemon
+ service:
+ name: ceph-osd@0
+ state: stopped
+
+
+- hosts: mons
+ become: yes
+ tasks:
+ - name: mark osds down
+ command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ with_items:
+ - 0
+ - 2
+
+ - name: destroy osd.2
+ command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+ register: result
+ retries: 30
+ delay: 1
+ until: result is succeeded
+
+ - name: destroy osd.0
+ command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+ register: result
+ retries: 30
+ delay: 1
+ until: result is succeeded
+
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ # osd.2 device
+ - name: zap /dev/vdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # partitions have been completely removed, so re-create them again
+ - name: re-create partition /dev/vdd for lvm data usage
+ parted:
+ device: /dev/vdd
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ label: gpt
+ state: present
+
+ - name: redeploy osd.2 using /dev/vdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/vdd1 --osd-id 2"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # osd.0 lv (zap without --destroy, which would also remove the LV)
+ - name: zap test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: prepare osd.0 again using test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: find all OSD directories
+ find:
+ paths: /var/lib/ceph/osd
+ recurse: no
+ file_type: directory
+ register: osd_directories
+
+ - name: find all OSD symlinks
+ find:
+ paths: /var/lib/ceph/osd
+ recurse: yes
+ depth: 2
+ file_type: link
+ register: osd_symlinks
+
+ # set the OSD dir and the block/block.db links to root:root permissions, to
+ # ensure that the OSD can activate regardless of ownership
+ - file:
+ path: "{{ item.path }}"
+ owner: root
+ group: root
+ with_items:
+ - "{{ osd_directories.files }}"
+
+ - file:
+ path: "{{ item.path }}"
+ owner: root
+ group: root
+ with_items:
+ - "{{ osd_symlinks.files }}"
+
+ - name: activate all to start the previously prepared osd.0
+ command: "ceph-volume lvm activate --all"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: node inventory
+ command: "ceph-volume inventory"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: list all OSDs
+ command: "ceph-volume lvm list"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: create temporary directory
+ tempfile:
+ state: directory
+ suffix: sparse
+ register: tmpdir
+
+ - name: create a 1GB sparse file
+ command: fallocate -l 1G {{ tmpdir.path }}/sparse.file
+
+ - name: find an empty loop device
+ command: losetup -f
+ register: losetup_list
+
+ - name: setup loop device with sparse file
+ command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file
+
+ - name: create volume group
+ command: vgcreate test_zap {{ losetup_list.stdout }}
+ failed_when: false
+
+ - name: create logical volume 1
+ command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
+ failed_when: false
+
+ - name: create logical volume 2
+ command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
+ failed_when: false
+
+ # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
+ - name: zap test_zap/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: zap test_zap/data-lv2
+ command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml
new file mode 100644
index 000000000..a9b6aa267
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml
@@ -0,0 +1,191 @@
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd@2 daemon
+ service:
+ name: ceph-osd@2
+ state: stopped
+
+ - name: stop ceph-osd@0 daemon
+ service:
+ name: ceph-osd@0
+ state: stopped
+
+
+- hosts: mons
+ become: yes
+ tasks:
+ - name: mark osds down
+ command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ with_items:
+ - 0
+ - 2
+
+ - name: destroy osd.2
+ command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+ register: result
+ retries: 30
+ delay: 1
+ until: result is succeeded
+
+ - name: destroy osd.0
+ command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+ register: result
+ retries: 30
+ delay: 1
+ until: result is succeeded
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ # osd.2 device
+ - name: zap /dev/vdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # osd.2 journal
+ - name: zap /dev/vdd2
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # partitions have been completely removed, so re-create them again
+ - name: re-create partition /dev/vdd for lvm data usage
+ parted:
+ device: /dev/vdd
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ label: gpt
+ state: present
+
+ - name: re-create partition /dev/vdd lvm journals
+ parted:
+ device: /dev/vdd
+ number: 2
+ part_start: 50%
+ part_end: 100%
+ unit: '%'
+ state: present
+ label: gpt
+
+ - name: redeploy osd.2 using /dev/vdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # osd.0 data lv
+ # note: we don't use --destroy here, to verify that zapping works without that flag.
+ # --destroy is used in the bluestore tests
+ - name: zap test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # osd.0 journal device
+ - name: zap /dev/vdc1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy /dev/vdc1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: re-create partition /dev/vdc1
+ parted:
+ device: /dev/vdc
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ state: present
+ label: gpt
+
+ - name: prepare osd.0 again using test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: find all OSD paths
+ find:
+ paths: /var/lib/ceph/osd
+ recurse: no
+ file_type: directory
+ register: osd_paths
+
+ # set all OSD paths to root:root to ensure that the OSD can activate
+ # regardless of ownership
+ - name: mangle permissions to root
+ file:
+ path: "{{ item.path }}"
+ owner: root
+ group: root
+ recurse: yes
+ with_items:
+ - "{{ osd_paths.files }}"
+
+ - name: stop ceph-osd@2 daemon
+ service:
+ name: ceph-osd@2
+ state: stopped
+
+ - name: stop ceph-osd@1 daemon
+ service:
+ name: ceph-osd@1
+ state: stopped
+
+ - name: activate all to start the previously prepared osd.0
+ command: "ceph-volume lvm activate --filestore --all"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: node inventory
+ command: "ceph-volume inventory"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: list all OSDs
+ command: "ceph-volume lvm list"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
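+ # build a scratch vg (test_zap) on a loop device backed by a sparse file so
+ # zap behavior can be exercised without touching the real OSD disks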
+ - name: create temporary directory
+ tempfile:
+ state: directory
+ suffix: sparse
+ register: tmpdir
+
+ - name: create a 1GB sparse file
+ command: fallocate -l 1G {{ tmpdir.path }}/sparse.file
+
+ - name: find an empty loop device
+ command: losetup -f
+ register: losetup_list
+
+ - name: setup loop device with sparse file
+ command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file
+
+ - name: create volume group
+ command: vgcreate test_zap {{ losetup_list.stdout }}
+ failed_when: false
+
+ - name: create logical volume 1
+ command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
+ failed_when: false
+
+ - name: create logical volume 2
+ command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
+ failed_when: false
+
+ # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
+ - name: zap test_zap/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: zap test_zap/data-lv2
+ command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
new file mode 100644
index 000000000..0a01a79e6
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
@@ -0,0 +1,71 @@
+[tox]
+envlist = centos8-{filestore,bluestore}-{create,prepare_activate,dmcrypt}
+skipsdist = True
+
+[testenv]
+deps = mock
+allowlist_externals =
+ vagrant
+ bash
+ git
+ cp
+ sleep
+passenv=*
+setenv=
+ ANSIBLE_CONFIG = {envdir}/tmp/ceph-ansible/ansible.cfg
+ ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
+ ANSIBLE_STDOUT_CALLBACK = debug
+ VAGRANT_CWD = {changedir}
+ CEPH_VOLUME_DEBUG = 1
+ DEBIAN_FRONTEND=noninteractive
+changedir=
+ # plain/unencrypted
+ centos8-filestore-create: {toxinidir}/centos8/filestore/create
+ centos8-bluestore-create: {toxinidir}/centos8/bluestore/create
+ # dmcrypt
+ centos8-filestore-dmcrypt: {toxinidir}/centos8/filestore/dmcrypt
+ centos8-bluestore-dmcrypt: {toxinidir}/centos8/bluestore/dmcrypt
+ # TODO: these are placeholders for now, eventually we want to
+ # test the prepare/activate workflow of ceph-volume as well
+ centos8-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate
+ centos8-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
+commands=
+ git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch {env:CEPH_ANSIBLE_CLONE:"https://github.com/ceph/ceph-ansible.git"} {envdir}/tmp/ceph-ansible
+ pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
+
+ bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
+ bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
+
+ # create logical volumes to test with on the vms
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml
+
+ # ad-hoc/local test setup for lvm
+ ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml
+
+ cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible
+
+ # use ceph-ansible to deploy a ceph cluster on the vms
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
+
+ # prepare nodes for testing with testinfra
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
+
+ # test cluster state using testinfra
+ py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+ # attempt to reboot all vms
+ bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
+
+ # after a reboot, osds may take about 20 seconds to come back up
+ sleep 30
+
+ # retest to ensure cluster came back up correctly after rebooting
+ py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+ # destroy an OSD, zap its device and recreate it using its ID
+ ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
+
+ # retest to ensure cluster came back up correctly
+ py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+ vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
diff --git a/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml b/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml
new file mode 100644
index 000000000..e5185e3fc
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml
@@ -0,0 +1,155 @@
+---
+# Defines deployment design and assigns role to server groups
+
+- hosts:
+ - mons
+ - osds
+ - mgrs
+
+ gather_facts: false
+ any_errors_fatal: true
+ become: true
+
+ tags:
+ - always
+
+ vars:
+ delegate_facts_host: True
+ dashboard_enabled: False
+
+ environment:
+ DEBIAN_FRONTEND: noninteractive
+
+ pre_tasks:
+ # If we can't get python2 installed before any module is used we will fail,
+ # so just try what we can to get it installed
+ - name: check for python2
+ stat:
+ path: /usr/bin/python
+ ignore_errors: yes
+ register: systempython2
+
+ - name: install python2 for debian based systems
+ raw: sudo apt-get -y install python-simplejson
+ ignore_errors: yes
+ when:
+ - systempython2.stat is undefined or systempython2.stat.exists == false
+
+ # Ansible will try to auto-install python-apt; on some systems this might be
+ # python3-apt or python-apt, and it has caused whole runs to fail because
+ # it tries to do an interactive prompt
+ - name: install python-apt and aptitude in debian based systems
+ raw: sudo apt-get -y install "{{ item }}"
+ ignore_errors: yes
+ with_items:
+ - python3-apt
+ - python-apt
+ - aptitude
+
+ - name: install python2 for fedora
+ raw: sudo dnf -y install python creates=/usr/bin/python
+ ignore_errors: yes
+ when:
+ - systempython2.stat is undefined or systempython2.stat.exists == false
+
+ - name: install python2 for opensuse
+ raw: sudo zypper -n install python-base creates=/usr/bin/python2.7
+ ignore_errors: yes
+ when:
+ - systempython2.stat is undefined or systempython2.stat.exists == false
+
+ - name: gather facts
+ setup:
+ when:
+ - not delegate_facts_host | bool
+
+ - name: gather and delegate facts
+ setup:
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ with_items: "{{ groups['all'] }}"
+ run_once: true
+ when:
+ - delegate_facts_host | bool
+
+ - name: install required packages for fedora >= 23
+ raw: sudo dnf -y install python2-dnf libselinux-python ntp
+ when:
+ - ansible_facts['distribution'] == 'Fedora'
+ - ansible_facts['distribution_major_version']|int >= 23
+
+ - name: check if it is atomic host
+ stat:
+ path: /run/ostree-booted
+ register: stat_ostree
+
+ - name: set_fact is_atomic
+ set_fact:
+ is_atomic: '{{ stat_ostree.stat.exists }}'
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ - import_role:
+ name: ceph-validate
+
+- hosts:
+ - mons
+ - osds
+ - mgrs
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+ vars:
+ dashboard_enabled: False
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-common
+
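+ # overlay the ceph_volume package from the local checkout onto the installed
+ # copy so the tests exercise the code under review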
+ - name: rsync ceph-volume to test nodes on centos
+ synchronize:
+ src: "{{ toxinidir }}/../../../../ceph_volume"
+ dest: "/usr/lib/python3.6/site-packages"
+ use_ssh_args: true
+ when:
+ - ansible_facts['os_family'] == "RedHat"
+ - inventory_hostname in groups.get(osd_group_name, [])
+
+ - name: rsync ceph-volume to test nodes on ubuntu
+ synchronize:
+ src: "{{ toxinidir }}/../../../../ceph_volume"
+ dest: "/usr/lib/python2.7/dist-packages"
+ use_ssh_args: true
+ when:
+ - ansible_facts['os_family'] == "Debian"
+ - inventory_hostname in groups.get(osd_group_name, [])
+
+ - name: run ceph-config role
+ import_role:
+ name: ceph-config
+
+ - name: run ceph-mon role
+ import_role:
+ name: ceph-mon
+ when:
+ - inventory_hostname in groups.get(mon_group_name, [])
+
+ - name: run ceph-mgr role
+ import_role:
+ name: ceph-mgr
+ when:
+ - inventory_hostname in groups.get(mgr_group_name, [])
+
+ - name: run ceph-osd role
+ import_role:
+ name: ceph-osd
+ when:
+ - inventory_hostname in groups.get(osd_group_name, [])
diff --git a/src/ceph-volume/ceph_volume/tests/functional/scripts/generate_ssh_config.sh b/src/ceph-volume/ceph_volume/tests/functional/scripts/generate_ssh_config.sh
new file mode 100644
index 000000000..43e64a654
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/scripts/generate_ssh_config.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Generate a custom ssh config from Vagrant so that it can then be used by
+# ansible.cfg
+
+path=$1
+
+if [ $# -eq 0 ]
+ then
+ echo "A path to the scenario is required as an argument and it wasn't provided"
+ exit 1
+fi
+
+cd "$path"
+vagrant ssh-config > vagrant_ssh_config
diff --git a/src/ceph-volume/ceph_volume/tests/functional/scripts/output.py b/src/ceph-volume/ceph_volume/tests/functional/scripts/output.py
new file mode 100644
index 000000000..160719444
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/scripts/output.py
@@ -0,0 +1,5 @@
+import os
+from ceph_volume import terminal
+
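+# Companion to test_unicode.sh: the caller exports a non-ASCII character in
+# INVALID, and writing it through the terminal helper checks that ceph-volume's
+# reporting code handles unicode without raising.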
+char = os.environ.get('INVALID')
+terminal.stdout(char)
diff --git a/src/ceph-volume/ceph_volume/tests/functional/scripts/test_unicode.sh b/src/ceph-volume/ceph_volume/tests/functional/scripts/test_unicode.sh
new file mode 100644
index 000000000..fe600b2ba
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/scripts/test_unicode.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Not entirely sure why these executables don't seem to be available in the
+# $PATH when running from tox. Calling out to `which` seems to fix it, at the
+# expense of making the script a bit obtuse
+
+mktemp=$(which mktemp)
+cat=$(which cat)
+grep=$(which grep)
+PYTHON_EXECUTABLE=$(which python3)
+STDERR_FILE=$($mktemp)
+INVALID="→"
+
+echo "stderr file created: $STDERR_FILE"
+
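+# run the target script ($1) with the non-ASCII character exported, capturing
+# stderr so the checks below can assert the character survived intact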
+INVALID="$INVALID" $PYTHON_EXECUTABLE $1 2> ${STDERR_FILE}
+
+retVal=$?
+
+if [ $retVal -ne 0 ]; then
+ echo "Failed test: Unexpected failure from running Python script"
+ echo "Below is output of stderr captured:"
+ $cat "${STDERR_FILE}"
+ exit $retVal
+fi
+
+$grep --quiet "$INVALID" ${STDERR_FILE}
+
+retVal=$?
+if [ $retVal -ne 0 ]; then
+ echo "Failed test: expected to find \"${INVALID}\" character in tmpfile: \"${STDERR_FILE}\""
+ echo "Below is output of stderr captured:"
+ $cat "${STDERR_FILE}"
+fi
+exit $retVal
diff --git a/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_reload.sh b/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_reload.sh
new file mode 100644
index 000000000..3211b066d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_reload.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# vagrant-libvirt commonly times out when "reloading" vms. Instead of calling
+# `vagrant reload`, halt everything and then start everything again, which lets
+# this script retry `vagrant up` in case of failure
+#
+
+vagrant halt
+# This should not really be needed, but it guards against a possible race
+# condition between halt and up
+sleep 5
+
+
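+# retry `vagrant up` a few times; libvirt can fail to boot every VM on the
+# first attempt after a halt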
+retries=0
+until [ $retries -ge 5 ]
+do
+ echo "Attempting to start VMs. Attempts: $retries"
+ timeout 10m vagrant up "$@" && break
+ retries=$((retries+1))
+ sleep 5
+done
diff --git a/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_up.sh b/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_up.sh
new file mode 100644
index 000000000..8f4cd3bca
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_up.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -e
+
+retries=0
+until [ $retries -ge 5 ]
+do
+ echo "Attempting to start VMs. Attempts: $retries"
+ timeout 10m vagrant up "$@" && break
+ retries=$((retries+1))
+ sleep 5
+done
+
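+# give the freshly booted VMs a moment to settle before the tests connect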
+sleep 10
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/group_vars/all
new file mode 100644
index 000000000..c265e783b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/group_vars/all
@@ -0,0 +1,19 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd0.yml
new file mode 100644
index 000000000..2e1c7ee9e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+ - '/dev/sdb'
+dedicated_devices:
+ - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd1.yml
new file mode 100644
index 000000000..7e90071c9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/hosts
new file mode 100644
index 000000000..e0c08b946
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/test.yml
new file mode 100644
index 000000000..24e2c0353
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/test.yml
@@ -0,0 +1,31 @@
+---
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: list all OSD directories
+ find:
+ paths: /var/lib/ceph/osd
+ file_type: directory
+ register: osd_paths
+
+ - name: scan all OSD directories
+ command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_paths.files }}"
+
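+ # `simple scan` persists each OSD's metadata as JSON under /etc/ceph/osd;
+ # the activate step below consumes those files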
+ - name: list all OSD JSON files
+ find:
+ paths: /etc/ceph/osd
+ file_type: file
+ register: osd_configs
+
+ - name: activate all scanned OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_configs.files }}"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/vagrant_variables.yml
new file mode 100644
index 000000000..63700c3c9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/group_vars/all
new file mode 100644
index 000000000..885c2c82f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/group_vars/all
@@ -0,0 +1,22 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+ osd:
+ osd_dmcrypt_type: luks
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd0.yml
new file mode 100644
index 000000000..2e1c7ee9e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+ - '/dev/sdb'
+dedicated_devices:
+ - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd1.yml
new file mode 100644
index 000000000..7e90071c9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/hosts
new file mode 100644
index 000000000..e0c08b946
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/test.yml
new file mode 100644
index 000000000..55ae7cc8e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/test.yml
@@ -0,0 +1,15 @@
+---
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: scan all running OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple scan"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: activate all scanned OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple activate --all"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/vagrant_variables.yml
new file mode 100644
index 000000000..63700c3c9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/group_vars/all
new file mode 100644
index 000000000..30bcf5be7
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/group_vars/all
@@ -0,0 +1,22 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+ osd:
+ osd_dmcrypt_type: plain
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd0.yml
new file mode 100644
index 000000000..2e1c7ee9e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+ - '/dev/sdb'
+dedicated_devices:
+ - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd1.yml
new file mode 100644
index 000000000..7e90071c9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/hosts
new file mode 100644
index 000000000..e0c08b946
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/test.yml
new file mode 100644
index 000000000..24e2c0353
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/test.yml
@@ -0,0 +1,31 @@
+---
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: list all OSD directories
+ find:
+ paths: /var/lib/ceph/osd
+ file_type: directory
+ register: osd_paths
+
+ - name: scan all OSD directories
+ command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_paths.files }}"
+
+ - name: list all OSD JSON files
+ find:
+ paths: /etc/ceph/osd
+ file_type: file
+ register: osd_configs
+
+ - name: activate all scanned OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_configs.files }}"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/vagrant_variables.yml
new file mode 100644
index 000000000..63700c3c9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/group_vars/all
new file mode 100644
index 000000000..7ab573b07
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/group_vars/all
@@ -0,0 +1,19 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd0.yml
new file mode 100644
index 000000000..2e1c7ee9e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+ - '/dev/sdb'
+dedicated_devices:
+ - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd1.yml
new file mode 100644
index 000000000..7e90071c9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/hosts
new file mode 100644
index 000000000..e0c08b946
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/test.yml
new file mode 100644
index 000000000..0745f2571
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/test.yml
@@ -0,0 +1,29 @@
+---
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: list all OSD directories
+ find:
+ paths: /var/lib/ceph/osd
+ file_type: directory
+ register: osd_paths
+
+ - name: scan all OSD directories
+ command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_paths.files }}"
+
+ - name: list all OSD JSON files
+ find:
+ paths: /etc/ceph/osd
+ file_type: file
+ register: osd_configs
+
+ - name: activate all scanned OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple activate --all"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/vagrant_variables.yml
new file mode 100644
index 000000000..63700c3c9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/group_vars/all
new file mode 100644
index 000000000..a27cfbad0
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/group_vars/all
@@ -0,0 +1,22 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+ osd:
+ osd_dmcrypt_type: luks
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd0.yml
new file mode 100644
index 000000000..2e1c7ee9e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+ - '/dev/sdb'
+dedicated_devices:
+ - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd1.yml
new file mode 100644
index 000000000..7e90071c9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/hosts
new file mode 100644
index 000000000..e0c08b946
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/test.yml
new file mode 100644
index 000000000..24e2c0353
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/test.yml
@@ -0,0 +1,31 @@
+---
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: list all OSD directories
+ find:
+ paths: /var/lib/ceph/osd
+ file_type: directory
+ register: osd_paths
+
+ - name: scan all OSD directories
+ command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_paths.files }}"
+
+ - name: list all OSD JSON files
+ find:
+ paths: /etc/ceph/osd
+ file_type: file
+ register: osd_configs
+
+ - name: activate all scanned OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_configs.files }}"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/vagrant_variables.yml
new file mode 100644
index 000000000..63700c3c9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/Vagrantfile
new file mode 120000
index 000000000..16076e424
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/group_vars/all
new file mode 100644
index 000000000..edac61b20
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/group_vars/all
@@ -0,0 +1,22 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+ osd:
+ osd_dmcrypt_type: plain
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd0.yml
new file mode 100644
index 000000000..2e1c7ee9e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+ - '/dev/sdb'
+dedicated_devices:
+ - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd1.yml
new file mode 100644
index 000000000..7e90071c9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/hosts
new file mode 100644
index 000000000..e0c08b946
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/test.yml
new file mode 100644
index 000000000..24e2c0353
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/test.yml
@@ -0,0 +1,31 @@
+---
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: list all OSD directories
+ find:
+ paths: /var/lib/ceph/osd
+ file_type: directory
+ register: osd_paths
+
+ - name: scan all OSD directories
+ command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_paths.files }}"
+
+ - name: list all OSD JSON files
+ find:
+ paths: /etc/ceph/osd
+ file_type: file
+ register: osd_configs
+
+ - name: activate all scanned OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_configs.files }}"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/vagrant_variables.yml
new file mode 100644
index 000000000..63700c3c9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
new file mode 100644
index 000000000..2d8f89805
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
@@ -0,0 +1,58 @@
+[tox]
+envlist = centos7-{filestore,bluestore}-{activate,dmcrypt_plain,dmcrypt_luks}
+skipsdist = True
+
+[testenv]
+deps = mock
+allowlist_externals =
+ vagrant
+ bash
+ git
+ sleep
+ cp
+passenv=*
+setenv=
+ ANSIBLE_CONFIG = {envdir}/tmp/ceph-ansible/ansible.cfg
+ ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
+ ANSIBLE_STDOUT_CALLBACK = debug
+ VAGRANT_CWD = {changedir}
+ CEPH_VOLUME_DEBUG = 1
+ DEBIAN_FRONTEND=noninteractive
+changedir=
+ centos7-filestore-activate: {toxinidir}/centos7/filestore/activate
+ centos7-bluestore-activate: {toxinidir}/centos7/bluestore/activate
+ centos7-bluestore-dmcrypt_plain: {toxinidir}/centos7/bluestore/dmcrypt-plain
+ centos7-bluestore-dmcrypt_luks: {toxinidir}/centos7/bluestore/dmcrypt-luks
+ centos7-filestore-dmcrypt_plain: {toxinidir}/centos7/filestore/dmcrypt-plain
+ centos7-filestore-dmcrypt_luks: {toxinidir}/centos7/filestore/dmcrypt-luks
+commands=
+ git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
+ pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
+
+ bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
+ bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
+
+ cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible
+
+ # use ceph-ansible to deploy a ceph cluster on the vms
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
+
+ # prepare nodes for testing with testinfra
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
+
+ # test cluster state using testinfra
+ py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+ # make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk
+ ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
+
+ # reboot all vms
+ bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
+
+ # wait 2 minutes for services to be ready
+ sleep 120
+
+ # retest to ensure cluster came back up correctly after rebooting
+ py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+ vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
diff --git a/src/ceph-volume/ceph_volume/tests/functional/tests/__init__.py b/src/ceph-volume/ceph_volume/tests/functional/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/tests/__init__.py
diff --git a/src/ceph-volume/ceph_volume/tests/functional/tests/conftest.py b/src/ceph-volume/ceph_volume/tests/functional/tests/conftest.py
new file mode 100644
index 000000000..17cc996ed
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/tests/conftest.py
@@ -0,0 +1,103 @@
+import pytest
+import os
+
+
+@pytest.fixture()
+def node(host, request):
+ """ This fixture represents a single node in the ceph cluster. Using the
+ host.ansible fixture provided by testinfra it can access all the ansible
+ variables provided to it by the specific test scenario being run.
+
+ You must include this fixture on any test that operates on a specific
+ type of node, because it contains the logic to manage which tests a node
+ should run.
+ """
+ ansible_vars = host.ansible.get_variables()
+ # tox/jenkins/users will pass in this environment variable. We need to do it
+ # this way because testinfra does not collect and provide ansible config
+ # passed in via --extra-vars
+ ceph_dev_branch = os.environ.get("CEPH_DEV_BRANCH", "master")
+ group_names = ansible_vars["group_names"]
+ num_osd_ports = 4
+ if 'mimic' in ceph_dev_branch or 'luminous' in ceph_dev_branch:
+ num_osd_ports = 2
+
+ # capture the initial/default state
+ test_is_applicable = False
+ for marker in request.node.iter_markers():
+ if marker.name in group_names or marker.name == 'all':
+ test_is_applicable = True
+ break
+ # Check if any markers on the test method exist in the nodes group_names.
+ # If they do not, this test is not valid for the node being tested.
+ if not test_is_applicable:
+ reason = "%s: Not a valid test for node type: %s" % (
+ request.function, group_names)
+ pytest.skip(reason)
+
+ osd_ids = []
+ osds = []
+ cluster_address = ""
+ # I can assume eth1 because I know all the vagrant
+ # boxes we test with use that interface
+ address = host.interface("eth1").addresses[0]
+ subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
+ num_mons = len(ansible_vars["groups"]["mons"])
+ num_osds = len(ansible_vars.get("devices", []))
+ if not num_osds:
+ num_osds = len(ansible_vars.get("lvm_volumes", []))
+ osds_per_device = ansible_vars.get("osds_per_device", 1)
+ num_osds = num_osds * osds_per_device
+
+ # If the number of devices doesn't map to the number of OSDs, allow tests to
+ # define that custom number, defaulting it to the ``num_osds`` computed above
+ num_osds = ansible_vars.get('num_osds', num_osds)
+ cluster_name = ansible_vars.get("cluster", "ceph")
+ conf_path = "/etc/ceph/{}.conf".format(cluster_name)
+ if "osds" in group_names:
+ # I can assume eth2 because I know all the vagrant
+ # boxes we test with use that interface. OSDs are the only
+ # nodes that have this interface.
+ cluster_address = host.interface("eth2").addresses[0]
+ cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
+ if cmd.rc == 0:
+ osd_ids = cmd.stdout.rstrip("\n").split("\n")
+ osds = osd_ids
+
+ data = dict(
+ address=address,
+ subnet=subnet,
+ vars=ansible_vars,
+ osd_ids=osd_ids,
+ num_mons=num_mons,
+ num_osds=num_osds,
+ num_osd_ports=num_osd_ports,
+ cluster_name=cluster_name,
+ conf_path=conf_path,
+ cluster_address=cluster_address,
+ osds=osds,
+ )
+ return data
+
+
+def pytest_collection_modifyitems(session, config, items):
+ for item in items:
+ test_path = item.location[0]
+ if "mon" in test_path:
+ item.add_marker(pytest.mark.mons)
+ elif "osd" in test_path:
+ item.add_marker(pytest.mark.osds)
+ elif "mds" in test_path:
+ item.add_marker(pytest.mark.mdss)
+ elif "mgr" in test_path:
+ item.add_marker(pytest.mark.mgrs)
+ elif "rbd-mirror" in test_path:
+ item.add_marker(pytest.mark.rbdmirrors)
+ elif "rgw" in test_path:
+ item.add_marker(pytest.mark.rgws)
+ elif "nfs" in test_path:
+ item.add_marker(pytest.mark.nfss)
+ elif "iscsi" in test_path:
+ item.add_marker(pytest.mark.iscsigws)
+ else:
+ item.add_marker(pytest.mark.all)
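The fixture and the collection hook work as a pair: pytest_collection_modifyitems marks every test from its file path, and node() then skips tests whose markers do not match the host's ansible group_names. A minimal sketch of a test riding on this machinery (the test name and body are hypothetical; a module living under an 'osd' path would get pytest.mark.osds applied automatically):

    import pytest

    @pytest.mark.osds  # explicit here; normally derived from the test file path
    def test_osd_node_has_cluster_conf(node, host):
        # conf_path was assembled in the node fixture from the cluster name
        assert host.file(node['conf_path']).exists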
diff --git a/src/ceph-volume/ceph_volume/tests/functional/tests/osd/__init__.py b/src/ceph-volume/ceph_volume/tests/functional/tests/osd/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/tests/osd/__init__.py
diff --git a/src/ceph-volume/ceph_volume/tests/functional/tests/osd/test_osds.py b/src/ceph-volume/ceph_volume/tests/functional/tests/osd/test_osds.py
new file mode 100644
index 000000000..6d12babdb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/tests/osd/test_osds.py
@@ -0,0 +1,60 @@
+import json
+
+
+class TestOSDs(object):
+
+ def test_ceph_osd_package_is_installed(self, node, host):
+ assert host.package("ceph-osd").is_installed
+
+ def test_osds_listen_on_public_network(self, node, host):
+ # TODO: figure out a way to parameterize this test
+ nb_port = (node["num_osds"] * node["num_osd_ports"])
+ assert host.check_output(
+ "netstat -lntp | grep ceph-osd | grep %s | wc -l" % (node["address"])) == str(nb_port) # noqa E501
+
+ def test_osds_listen_on_cluster_network(self, node, host):
+ # TODO: figure out a way to parameterize this test
+ nb_port = (node["num_osds"] * node["num_osd_ports"])
+ assert host.check_output("netstat -lntp | grep ceph-osd | grep %s | wc -l" % # noqa E501
+ (node["cluster_address"])) == str(nb_port)
+
+ def test_osd_services_are_running(self, node, host):
+ # TODO: figure out a way to parameterize node['osds'] for this test
+ for osd in node["osds"]:
+ assert host.service("ceph-osd@%s" % osd).is_running
+
+ def test_osd_are_mounted(self, node, host):
+ # TODO: figure out a way to parameterize node['osd_ids'] for this test
+ for osd_id in node["osd_ids"]:
+ osd_path = "/var/lib/ceph/osd/{cluster}-{osd_id}".format(
+ cluster=node["cluster_name"],
+ osd_id=osd_id,
+ )
+ assert host.mount_point(osd_path).exists
+
+ def test_ceph_volume_is_installed(self, node, host):
+ assert host.exists('ceph-volume')
+
+ def test_ceph_volume_systemd_is_installed(self, node, host):
+ assert host.exists('ceph-volume-systemd')
+
+ def _get_osd_id_from_host(self, node, osd_tree):
+ children = []
+ for n in osd_tree['nodes']:
+ if n['name'] == node['vars']['inventory_hostname'] and n['type'] == 'host': # noqa E501
+ children = n['children']
+ return children
+
+ def _get_nb_up_osds_from_ids(self, node, osd_tree):
+ nb_up = 0
+ ids = self._get_osd_id_from_host(node, osd_tree)
+ for n in osd_tree['nodes']:
+ if n['id'] in ids and n['status'] == 'up':
+ nb_up += 1
+ return nb_up
+
+ def test_all_osds_are_up_and_in(self, node, host):
+ cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format( # noqa E501
+ cluster=node["cluster_name"])
+ output = json.loads(host.check_output(cmd))
+ assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output)
diff --git a/src/ceph-volume/ceph_volume/tests/functional/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/vagrant_variables.yml
new file mode 100644
index 000000000..e87700ac1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/vagrant_variables.yml
@@ -0,0 +1,57 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/stream8
+# vagrant_box_url: https://cloud.centos.org/centos/8/x86_64/images/CentOS-8-Vagrant-8.1.1911-20200113.3.x86_64.vagrant-libvirt.box
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/systemd/test_main.py b/src/ceph-volume/ceph_volume/tests/systemd/test_main.py
new file mode 100644
index 000000000..0af52e8d1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/systemd/test_main.py
@@ -0,0 +1,51 @@
+import pytest
+from ceph_volume import exceptions, conf
+from ceph_volume.systemd.main import parse_subcommand, main, process
+
+
+class TestParseSubcommand(object):
+
+ def test_no_subcommand_found(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ parse_subcommand('')
+
+ def test_sub_command_is_found(self):
+ result = parse_subcommand('lvm-1-sha-1-something-0')
+ assert result == 'lvm'
+
+
+class Capture(object):
+
+ def __init__(self, *a, **kw):
+ self.a = a
+ self.kw = kw
+ self.calls = []
+
+ def __call__(self, *a, **kw):
+ self.calls.append(a)
+ self.calls.append(kw)
+
+
+class TestMain(object):
+
+ def setup(self):
+ conf.log_path = '/tmp/'
+
+ def test_no_arguments_parsing_error(self):
+ with pytest.raises(RuntimeError):
+ main(args=[])
+
+ def test_parsing_suffix_error(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ main(args=['asdf'])
+
+ def test_correct_command(self, monkeypatch):
+ run = Capture()
+ monkeypatch.setattr(process, 'run', run)
+ main(args=['ceph-volume-systemd', 'lvm-8715BEB4-15C5-49DE-BA6F-401086EC7B41-0'])
+ command = run.calls[0][0]
+ assert command == [
+ 'ceph-volume',
+ 'lvm', 'trigger',
+ '8715BEB4-15C5-49DE-BA6F-401086EC7B41-0'
+ ]
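The suffix grammar these tests pin down is simple: everything before the first dash names the ceph-volume subcommand, and the remainder (id, uuid, trailing index) is handed to `trigger` unchanged. A re-implementation consistent with the assertions above, as a sketch rather than the actual ceph_volume.systemd.main code:

    def parse_subcommand_sketch(suffix):
        # 'lvm-1-sha-1-something-0' -> 'lvm'; an empty prefix is an error
        sub = suffix.split('-', 1)[0]
        if not sub:
            # the real code raises exceptions.SuffixParsingError here
            raise ValueError('could not parse subcommand from: %s' % suffix)
        return sub

    assert parse_subcommand_sketch('lvm-1-sha-1-something-0') == 'lvm'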
diff --git a/src/ceph-volume/ceph_volume/tests/systemd/test_systemctl.py b/src/ceph-volume/ceph_volume/tests/systemd/test_systemctl.py
new file mode 100644
index 000000000..8eec4a3d4
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/systemd/test_systemctl.py
@@ -0,0 +1,21 @@
+import pytest
+from ceph_volume.systemd import systemctl
+
+class TestSystemctl(object):
+
+ @pytest.mark.parametrize("stdout,expected", [
+ (['Id=ceph-osd@1.service', '', 'Id=ceph-osd@2.service'], ['1','2']),
+ (['Id=ceph-osd1.service',], []),
+ (['Id=ceph-osd@1'], ['1']),
+ ([], []),
+ ])
+ def test_get_running_osd_ids(self, stub_call, stdout, expected):
+ stub_call((stdout, [], 0))
+ osd_ids = systemctl.get_running_osd_ids()
+ assert osd_ids == expected
+
+ def test_returns_empty_list_on_nonzero_return_code(self, stub_call):
+ stdout = ['Id=ceph-osd@1.service', '', 'Id=ceph-osd@2.service']
+ stub_call((stdout, [], 1))
+ osd_ids = systemctl.get_running_osd_ids()
+ assert osd_ids == []
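The parametrized cases imply the parsing contract: only `Id=ceph-osd@<id>` entries count, an optional `.service` suffix is stripped, and a non-zero exit code yields an empty list. A sketch satisfying exactly those cases (a hypothetical helper, not the real systemctl module):

    import re

    def parse_running_osd_ids(stdout_lines, returncode):
        if returncode != 0:
            return []
        ids = []
        for line in stdout_lines:
            # match 'Id=ceph-osd@1.service' and 'Id=ceph-osd@1', skip the rest
            m = re.match(r'Id=ceph-osd@(.+?)(\.service)?$', line)
            if m:
                ids.append(m.group(1))
        return ids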
diff --git a/src/ceph-volume/ceph_volume/tests/test_configuration.py b/src/ceph-volume/ceph_volume/tests/test_configuration.py
new file mode 100644
index 000000000..9af6cd9be
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/test_configuration.py
@@ -0,0 +1,117 @@
+import os
+try:
+ from cStringIO import StringIO
+except ImportError: # pragma: no cover
+ from io import StringIO # pragma: no cover
+from textwrap import dedent
+import pytest
+from ceph_volume import configuration, exceptions
+
+tabbed_conf = """
+[global]
+ default = 0
+ other_h = 1 # comment
+ other_c = 1 ; comment
+ colon = ;
+ hash = #
+"""
+
+
+class TestConf(object):
+
+ def setup(self):
+ self.conf_file = StringIO(dedent("""
+ [foo]
+ default = 0
+ """))
+
+ def test_get_non_existing_list(self):
+ cfg = configuration.Conf()
+ cfg.is_valid = lambda: True
+ cfg.read_conf(self.conf_file)
+ assert cfg.get_list('global', 'key') == []
+
+ def test_get_non_existing_list_get_default(self):
+ cfg = configuration.Conf()
+ cfg.is_valid = lambda: True
+ cfg.read_conf(self.conf_file)
+ assert cfg.get_list('global', 'key', ['a']) == ['a']
+
+ def test_get_rid_of_comments(self):
+ cfg = configuration.Conf()
+ cfg.is_valid = lambda: True
+ conf_file = StringIO(dedent("""
+ [foo]
+ default = 0 # this is a comment
+ """))
+
+ cfg.read_conf(conf_file)
+ assert cfg.get_list('foo', 'default') == ['0']
+
+ def test_gets_split_on_commas(self):
+ cfg = configuration.Conf()
+ cfg.is_valid = lambda: True
+ conf_file = StringIO(dedent("""
+ [foo]
+ default = 0,1,2,3 # this is a comment
+ """))
+
+ cfg.read_conf(conf_file)
+ assert cfg.get_list('foo', 'default') == ['0', '1', '2', '3']
+
+ def test_spaces_and_tabs_are_ignored(self):
+ cfg = configuration.Conf()
+ cfg.is_valid = lambda: True
+ conf_file = StringIO(dedent("""
+ [foo]
+ default = 0, 1, 2 ,3 # this is a comment
+ """))
+
+ cfg.read_conf(conf_file)
+ assert cfg.get_list('foo', 'default') == ['0', '1', '2', '3']
+
+
+class TestLoad(object):
+
+ def test_load_from_path(self, tmpdir):
+ conf_path = os.path.join(str(tmpdir), 'ceph.conf')
+ with open(conf_path, 'w') as conf:
+ conf.write(tabbed_conf)
+ result = configuration.load(conf_path)
+ assert result.get('global', 'default') == '0'
+
+ def test_load_with_colon_comments(self, tmpdir):
+ conf_path = os.path.join(str(tmpdir), 'ceph.conf')
+ with open(conf_path, 'w') as conf:
+ conf.write(tabbed_conf)
+ result = configuration.load(conf_path)
+ assert result.get('global', 'other_c') == '1'
+
+ def test_load_with_hash_comments(self, tmpdir):
+ conf_path = os.path.join(str(tmpdir), 'ceph.conf')
+ with open(conf_path, 'w') as conf:
+ conf.write(tabbed_conf)
+ result = configuration.load(conf_path)
+ assert result.get('global', 'other_h') == '1'
+
+ def test_path_does_not_exist(self):
+ with pytest.raises(exceptions.ConfigurationError):
+ conf = configuration.load('/path/does/not/exist/ceph.con')
+ conf.is_valid()
+
+ def test_unable_to_read_configuration(self, tmpdir, capsys):
+ ceph_conf = os.path.join(str(tmpdir), 'ceph.conf')
+ with open(ceph_conf, 'w') as config:
+ config.write(']broken] config\n[[')
+ with pytest.raises(RuntimeError):
+ configuration.load(ceph_conf)
+ stdout, stderr = capsys.readouterr()
+ assert 'File contains no section headers' in stderr
+
+ @pytest.mark.parametrize('commented', ['colon', 'hash'])
+ def test_comment_as_a_value(self, tmpdir, commented):
+ conf_path = os.path.join(str(tmpdir), 'ceph.conf')
+ with open(conf_path, 'w') as conf:
+ conf.write(tabbed_conf)
+ result = configuration.load(conf_path)
+ assert result.get('global', commented) == ''
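Taken together, the TestConf cases define get_list's normalization: inline '#' and ';' comments are dropped, the value splits on commas, surrounding whitespace is trimmed, and a default comes back when nothing survives. A behavior-equivalent sketch (an assumption; the real Conf class lives in ceph_volume/configuration.py):

    def normalize_list_value(raw, default=None):
        # '0, 1, 2 ,3 # comment' -> ['0', '1', '2', '3']
        raw = raw.split('#')[0].split(';')[0]
        items = [part.strip() for part in raw.split(',') if part.strip()]
        return items if items else (default or [])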
diff --git a/src/ceph-volume/ceph_volume/tests/test_decorators.py b/src/ceph-volume/ceph_volume/tests/test_decorators.py
new file mode 100644
index 000000000..5bdf6b3d2
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/test_decorators.py
@@ -0,0 +1,78 @@
+import os
+import pytest
+from ceph_volume import exceptions, decorators, terminal
+
+
+class TestNeedsRoot(object):
+
+ def test_is_root(self, monkeypatch):
+ def func():
+ return True
+ monkeypatch.setattr(decorators.os, 'getuid', lambda: 0)
+ assert decorators.needs_root(func)() is True
+
+ def test_is_not_root_env_var_skip_needs_root(self, monkeypatch):
+ def func():
+ return True
+ monkeypatch.setattr(decorators.os, 'getuid', lambda: 123)
+ monkeypatch.setattr(decorators.os, 'environ', {'CEPH_VOLUME_SKIP_NEEDS_ROOT': '1'})
+ assert decorators.needs_root(func)() is True
+
+ def test_is_not_root(self, monkeypatch):
+ def func():
+ return True # pragma: no cover
+ monkeypatch.setattr(decorators.os, 'getuid', lambda: 20)
+ with pytest.raises(exceptions.SuperUserError) as error:
+ decorators.needs_root(func)()
+
+ msg = 'This command needs to be executed with sudo or as root'
+ assert str(error.value) == msg
+
+
+class TestExceptionMessage(object):
+
+ def test_has_str_method(self):
+ result = decorators.make_exception_message(RuntimeError('an error'))
+ expected = "%s %s\n" % (terminal.red_arrow, 'RuntimeError: an error')
+ assert result == expected
+
+ def test_has_no_str_method(self):
+ class Error(Exception):
+ pass
+ result = decorators.make_exception_message(Error())
+ expected = "%s %s\n" % (terminal.red_arrow, 'Error')
+ assert result == expected
+
+
+class TestCatches(object):
+
+ def teardown(self):
+ try:
+ del os.environ['CEPH_VOLUME_DEBUG']
+ except KeyError:
+ pass
+
+ def test_ceph_volume_debug_enabled(self):
+ os.environ['CEPH_VOLUME_DEBUG'] = '1'
+ @decorators.catches() # noqa
+ def func():
+ raise RuntimeError()
+ with pytest.raises(RuntimeError):
+ func()
+
+ def test_ceph_volume_debug_disabled_no_exit(self, capsys):
+ @decorators.catches(exit=False)
+ def func():
+ raise RuntimeError()
+ func()
+ stdout, stderr = capsys.readouterr()
+ assert 'RuntimeError\n' in stderr
+
+ def test_ceph_volume_debug_exits(self, capsys):
+ @decorators.catches()
+ def func():
+ raise RuntimeError()
+ with pytest.raises(SystemExit):
+ func()
+ stdout, stderr = capsys.readouterr()
+ assert 'RuntimeError\n' in stderr
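The three TestNeedsRoot cases fix the decorator's contract: run the wrapped function when the uid is 0 or when CEPH_VOLUME_SKIP_NEEDS_ROOT is set, otherwise raise SuperUserError with the message asserted above. A compact sketch of that contract (an assumption, not the actual decorators module; the real exception lives in ceph_volume.exceptions):

    import os

    class SuperUserError(Exception):
        pass

    def needs_root_sketch(func):
        def wrapper(*a, **kw):
            if os.environ.get('CEPH_VOLUME_SKIP_NEEDS_ROOT') or os.getuid() == 0:
                return func(*a, **kw)
            raise SuperUserError(
                'This command needs to be executed with sudo or as root')
        return wrapper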
diff --git a/src/ceph-volume/ceph_volume/tests/test_inventory.py b/src/ceph-volume/ceph_volume/tests/test_inventory.py
new file mode 100644
index 000000000..6ad2aef0d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/test_inventory.py
@@ -0,0 +1,263 @@
+# -*- coding: utf-8 -*-
+
+import pytest
+from ceph_volume.util.device import Devices
+from ceph_volume.util.lsmdisk import LSMDisk
+from mock.mock import patch
+import ceph_volume.util.lsmdisk as lsmdisk
+
+
+@pytest.fixture
+@patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+def device_report_keys(device_info):
+ device_info(devices={
+ # example output of disk.get_devices()
+ '/dev/sdb': {'human_readable_size': '1.82 TB',
+ 'locked': 0,
+ 'model': 'PERC H700',
+ 'nr_requests': '128',
+ 'partitions': {},
+ 'path': '/dev/sdb',
+ 'removable': '0',
+ 'rev': '2.10',
+ 'ro': '0',
+ 'rotational': '1',
+ 'sas_address': '',
+ 'sas_device_handle': '',
+ 'scheduler_mode': 'cfq',
+ 'sectors': 0,
+ 'sectorsize': '512',
+ 'size': 1999844147200.0,
+ 'support_discard': '',
+ 'vendor': 'DELL',
+ 'device_id': 'Vendor-Model-Serial',
+ 'device_nodes': 'sdb'}
+ }
+ )
+ report = Devices().json_report()[0]
+ return list(report.keys())
+
+@pytest.fixture
+@patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+def device_sys_api_keys(device_info):
+ device_info(devices={
+ # example output of disk.get_devices()
+ '/dev/sdb': {'human_readable_size': '1.82 TB',
+ 'locked': 0,
+ 'model': 'PERC H700',
+ 'nr_requests': '128',
+ 'partitions': {},
+ 'path': '/dev/sdb',
+ 'removable': '0',
+ 'rev': '2.10',
+ 'ro': '0',
+ 'rotational': '1',
+ 'sas_address': '',
+ 'sas_device_handle': '',
+ 'scheduler_mode': 'cfq',
+ 'sectors': 0,
+ 'sectorsize': '512',
+ 'size': 1999844147200.0,
+ 'support_discard': '',
+ 'vendor': 'DELL',
+ 'device_nodes': 'sdb'}
+ }
+ )
+ report = Devices().json_report()[0]
+ return list(report['sys_api'].keys())
+
+@pytest.fixture
+@patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+def device_data(device_info):
+ device_info(
+ devices={
+ # example output of disk.get_devices()
+ '/dev/sdb': {
+ 'human_readable_size': '1.82 TB',
+ 'locked': 0,
+ 'model': 'PERC H700',
+ 'nr_requests': '128',
+ 'partitions': {},
+ 'path': '/dev/sdb',
+ 'removable': '0',
+ 'rev': '2.10',
+ 'ro': '0',
+ 'rotational': '1',
+ 'sas_address': '',
+ 'sas_device_handle': '',
+ 'scheduler_mode': 'cfq',
+ 'sectors': 0,
+ 'sectorsize': '512',
+ 'size': 1999844147200.0,
+ 'support_discard': '',
+ 'vendor': 'DELL',
+ 'device_nodes': 'sdb'
+ }
+ }
+ )
+
+ dev = Devices().devices[0]
+ dev.lsm_data = {
+ "serialNum": 'S2X9NX0H935283',
+ "transport": 'SAS',
+ "mediaType": 'HDD',
+ "rpm": 10000,
+ "linkSpeed": 6000,
+ "health": 'Good',
+ "ledSupport": {
+ "IDENTsupport": 'Supported',
+ "IDENTstatus": 'Off',
+ "FAILsupport": 'Supported',
+ "FAILstatus": 'Off',
+ },
+ "errors": [],
+ }
+ return dev.json_report()
+
+
+class TestInventory(object):
+
+ expected_keys = [
+ 'ceph_device',
+ 'path',
+ 'rejected_reasons',
+ 'sys_api',
+ 'available',
+ 'lvs',
+ 'device_id',
+ 'lsm_data',
+ ]
+
+ expected_sys_api_keys = [
+ 'human_readable_size',
+ 'locked',
+ 'model',
+ 'nr_requests',
+ 'partitions',
+ 'path',
+ 'removable',
+ 'rev',
+ 'ro',
+ 'rotational',
+ 'sas_address',
+ 'sas_device_handle',
+ 'scheduler_mode',
+ 'sectors',
+ 'sectorsize',
+ 'size',
+ 'support_discard',
+ 'vendor',
+ 'device_nodes'
+ ]
+
+ expected_lsm_keys = [
+ 'serialNum',
+ 'transport',
+ 'mediaType',
+ 'rpm',
+ 'linkSpeed',
+ 'health',
+ 'ledSupport',
+ 'errors',
+ ]
+
+ def test_json_inventory_keys_unexpected(self, fake_call, device_report_keys):
+ for k in device_report_keys:
+ assert k in self.expected_keys, "unexpected key {} in report".format(k)
+
+ def test_json_inventory_keys_missing(self, fake_call, device_report_keys):
+ for k in self.expected_keys:
+ assert k in device_report_keys, "expected key {} in report".format(k)
+
+ def test_sys_api_keys_unexpected(self, fake_call, device_sys_api_keys):
+ for k in device_sys_api_keys:
+ assert k in self.expected_sys_api_keys, "unexpected key {} in sys_api field".format(k)
+
+ def test_sys_api_keys_missing(self, fake_call, device_sys_api_keys):
+ for k in self.expected_sys_api_keys:
+ assert k in device_sys_api_keys, "expected key {} in sys_api field".format(k)
+
+ def test_lsm_data_type_unexpected(self, fake_call, device_data):
+ assert isinstance(device_data['lsm_data'], dict), "lsm_data field must be of type dict"
+
+ def test_lsm_data_keys_unexpected(self, fake_call, device_data):
+ for k in device_data['lsm_data'].keys():
+ assert k in self.expected_lsm_keys, "unexpected key {} in lsm_data field".format(k)
+
+ def test_lsm_data_keys_missing(self, fake_call, device_data):
+ lsm_keys = device_data['lsm_data'].keys()
+ assert lsm_keys
+ for k in self.expected_lsm_keys:
+ assert k in lsm_keys, "expected key {} in lsm_data field".format(k)
+
+
+@pytest.fixture
+def lsm_info(monkeypatch):
+ def mock_query_lsm(_, func, path):
+ query_map = {
+ 'serial_num_get': "S2X9NX0H935283",
+ 'link_type_get': 6,
+ 'rpm_get': 0,
+ 'link_speed_get': 6000,
+ 'health_status_get': 2,
+ 'led_status_get': 36,
+ }
+ return query_map.get(func, 'Unknown')
+
+ # mocked states and settings taken from the libstoragemgmt code base
+ # c_binding/include/libstoragemgmt/libstoragemgmt_types.h at
+ # https://github.com/libstorage/libstoragemgmt/
+ mock_health_map = {
+ -1: "Unknown",
+ 0: "Fail",
+ 1: "Warn",
+ 2: "Good",
+ }
+ mock_transport_map = {
+ -1: "Unavailable",
+ 0: "Fibre Channel",
+ 2: "IBM SSA",
+ 3: "Serial Bus",
+ 4: "SCSI RDMA",
+ 5: "iSCSI",
+ 6: "SAS",
+ 7: "ADT (Tape)",
+ 8: "ATA/SATA",
+ 9: "USB",
+ 10: "SCSI over PCI-E",
+ 11: "PCI-E",
+ }
+ class MockLEDStates():
+ LED_STATUS_UNKNOWN = 1
+ LED_STATUS_IDENT_ON = 2
+ LED_STATUS_IDENT_OFF = 4
+ LED_STATUS_IDENT_UNKNOWN = 8
+ LED_STATUS_FAULT_ON = 16
+ LED_STATUS_FAULT_OFF = 32
+ LED_STATUS_FAULT_UNKNOWN = 64
+
+ monkeypatch.setattr(LSMDisk, '_query_lsm', mock_query_lsm)
+ monkeypatch.setattr(lsmdisk, 'health_map', mock_health_map)
+ monkeypatch.setattr(lsmdisk, 'transport_map', mock_transport_map)
+ monkeypatch.setattr(lsmdisk, 'lsm_Disk', MockLEDStates)
+
+ return LSMDisk('/dev/sda')
+
+
+class TestLSM(object):
+ def test_lsmdisk_health(self, lsm_info):
+ assert lsm_info.health == "Good"
+ def test_lsmdisk_transport(self, lsm_info):
+ assert lsm_info.transport == 'SAS'
+ def test_lsmdisk_mediatype(self, lsm_info):
+ assert lsm_info.media_type == 'Flash'
+ def test_lsmdisk_led_ident_support(self, lsm_info):
+ assert lsm_info.led_ident_support == 'Supported'
+ def test_lsmdisk_led_ident(self, lsm_info):
+ assert lsm_info.led_ident_state == 'Off'
+ def test_lsmdisk_led_fault_support(self, lsm_info):
+ assert lsm_info.led_fault_support == 'Supported'
+ def test_lsmdisk_led_fault(self, lsm_info):
+ assert lsm_info.led_fault_state == 'Off'
+ def test_lsmdisk_report(self, lsm_info):
+ assert isinstance(lsm_info.json_report(), dict)
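One non-obvious constant in the lsm_info fixture: the mocked led_status_get returns 36, which under MockLEDStates decomposes into LED_STATUS_IDENT_OFF (4) | LED_STATUS_FAULT_OFF (32). That bitmask is why the TestLSM assertions expect both LED states to read 'Off':

    IDENT_OFF, FAULT_OFF = 4, 32
    assert IDENT_OFF | FAULT_OFF == 36  # the mocked status is exactly these flags
    assert 36 & IDENT_OFF               # ident LED reports off
    assert 36 & FAULT_OFF               # fault LED reports off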
diff --git a/src/ceph-volume/ceph_volume/tests/test_main.py b/src/ceph-volume/ceph_volume/tests/test_main.py
new file mode 100644
index 000000000..d03d405d5
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/test_main.py
@@ -0,0 +1,69 @@
+import os
+import pytest
+from ceph_volume import main
+
+
+class TestVolume(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ with pytest.raises(SystemExit):
+ main.Volume(argv=[])
+ stdout, stderr = capsys.readouterr()
+ assert 'Log Path' in stdout
+
+ def test_warn_about_using_help_for_full_options(self, capsys):
+ with pytest.raises(SystemExit):
+ main.Volume(argv=[])
+ stdout, stderr = capsys.readouterr()
+ assert 'See "ceph-volume --help" for full list' in stdout
+
+ def test_environ_vars_show_up(self, capsys):
+ os.environ['CEPH_CONF'] = '/opt/ceph.conf'
+ with pytest.raises(SystemExit):
+ main.Volume(argv=[])
+ stdout, stderr = capsys.readouterr()
+ assert 'CEPH_CONF' in stdout
+ assert '/opt/ceph.conf' in stdout
+
+ def test_flags_are_parsed_with_help(self, capsys):
+ with pytest.raises(SystemExit):
+ main.Volume(argv=['ceph-volume', '--help'])
+ stdout, stderr = capsys.readouterr()
+ assert '--cluster' in stdout
+ assert '--log-path' in stdout
+
+ def test_log_ignoring_missing_ceph_conf(self, caplog):
+ with pytest.raises(SystemExit) as error:
+ main.Volume(argv=['ceph-volume', '--cluster', 'barnacle', 'lvm', '--help'])
+ # make sure we aren't causing an actual error
+ assert error.value.code == 0
+ log = caplog.records[-1]
+ assert log.message == 'ignoring inability to load ceph.conf'
+ assert log.levelname == 'WARNING'
+
+ def test_logs_current_command(self, caplog):
+ with pytest.raises(SystemExit) as error:
+ main.Volume(argv=['ceph-volume', '--cluster', 'barnacle', 'lvm', '--help'])
+ # make sure we aren't causing an actual error
+ assert error.value.code == 0
+ log = caplog.records[-2]
+ assert log.message == 'Running command: ceph-volume --cluster barnacle lvm --help'
+ assert log.levelname == 'INFO'
+
+ def test_logs_set_level_warning(self, caplog):
+ with pytest.raises(SystemExit) as error:
+ main.Volume(argv=['ceph-volume', '--log-level', 'warning', '--cluster', 'barnacle', 'lvm', '--help'])
+ # make sure we aren't causing an actual error
+ assert error.value.code == 0
+ assert caplog.records
+ # only log levels of 'WARNING'
+ for log in caplog.records:
+ assert log.levelname == 'WARNING'
+
+ def test_logs_incorrect_log_level(self, capsys):
+ with pytest.raises(SystemExit) as error:
+ main.Volume(argv=['ceph-volume', '--log-level', 'foo', '--cluster', 'barnacle', 'lvm', '--help'])
+ # make sure this is an error
+ assert error.value.code != 0
+ stdout, stderr = capsys.readouterr()
+ assert "invalid choice" in stderr
diff --git a/src/ceph-volume/ceph_volume/tests/test_process.py b/src/ceph-volume/ceph_volume/tests/test_process.py
new file mode 100644
index 000000000..46e5c40e6
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/test_process.py
@@ -0,0 +1,92 @@
+import pytest
+import logging
+from ceph_volume.tests.conftest import Factory
+from ceph_volume import process
+
+
+@pytest.fixture
+def mock_call(monkeypatch):
+ """
+ Monkeypatches subprocess.Popen as used by process.call, so that a caller can shape the response
+ """
+ def apply(stdout=None, stderr=None, returncode=0):
+ stdout_stream = Factory(read=lambda: stdout)
+ stderr_stream = Factory(read=lambda: stderr)
+ return_value = Factory(
+ stdout=stdout_stream,
+ stderr=stderr_stream,
+ wait=lambda: returncode,
+ communicate=lambda x: (stdout, stderr, returncode)
+ )
+
+ monkeypatch.setattr(
+ 'ceph_volume.process.subprocess.Popen',
+ lambda *a, **kw: return_value)
+
+ return apply
+
+
+class TestCall(object):
+
+ def test_stderr_terminal_and_logfile(self, mock_call, caplog, capsys):
+ caplog.set_level(logging.INFO)
+ mock_call(stdout='stdout\n', stderr='some stderr message\n')
+ process.call(['ls'], terminal_verbose=True)
+ out, err = capsys.readouterr()
+ log_lines = [line[-1] for line in caplog.record_tuples]
+ assert 'Running command: ' in log_lines[0]
+ assert 'ls' in log_lines[0]
+ assert 'stderr some stderr message' in log_lines[-1]
+ assert 'some stderr message' in err
+
+ def test_stderr_terminal_and_logfile_off(self, mock_call, caplog, capsys):
+ caplog.set_level(logging.INFO)
+ mock_call(stdout='stdout\n', stderr='some stderr message\n')
+ process.call(['ls'], terminal_verbose=False)
+ out, err = capsys.readouterr()
+ log_lines = [line[-1] for line in caplog.record_tuples]
+ assert 'Running command: ' in log_lines[0]
+ assert 'ls' in log_lines[0]
+ assert 'stderr some stderr message' in log_lines[-1]
+ assert out == ''
+
+ def test_verbose_on_failure(self, mock_call, caplog, capsys):
+ caplog.set_level(logging.INFO)
+ mock_call(stdout='stdout\n', stderr='stderr\n', returncode=1)
+ process.call(['ls'], terminal_verbose=False, logfile_verbose=False)
+ out, err = capsys.readouterr()
+ log_lines = '\n'.join([line[-1] for line in caplog.record_tuples])
+ assert 'Running command: ' in log_lines
+ assert 'ls' in log_lines
+ assert 'stderr' in log_lines
+ assert 'stdout: stdout' in err
+ assert out == ''
+
+ def test_silent_verbose_on_failure(self, mock_call, caplog, capsys):
+ caplog.set_level(logging.INFO)
+ mock_call(stdout='stdout\n', stderr='stderr\n', returncode=1)
+ process.call(['ls'], verbose_on_failure=False)
+ out, err = capsys.readouterr()
+ log_lines = '\n'.join([line[-1] for line in caplog.record_tuples])
+ assert 'Running command: ' in log_lines
+ assert 'ls' in log_lines
+ assert 'stderr' in log_lines
+ assert out == ''
+
+
+class TestFunctionalCall(object):
+
+ def test_stdin(self):
+ process.call(['xargs', 'ls'], stdin="echo '/'")
+
+ def test_unicode_encoding(self):
+ process.call(['echo', u'\xd0'])
+
+ def test_unicode_encoding_stdin(self):
+ process.call(['echo'], stdin=u'\xd0'.encode('utf-8'))
+
+
+class TestFunctionalRun(object):
+
+ def test_log_descriptors(self):
+ process.run(['ls', '-l'])
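The mock_call fixture leans on Factory from ceph_volume.tests.conftest to fake a Popen object. From the way it is used here, with keyword arguments surfacing as attributes like .stdout, .wait, and .communicate, a minimal stand-in would look like this sketch (an assumption about the real helper):

    class FactorySketch(object):
        # any keyword argument becomes an attribute on the instance
        def __init__(self, **kw):
            for name, value in kw.items():
                setattr(self, name, value)

    popen_like = FactorySketch(wait=lambda: 0)
    assert popen_like.wait() == 0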
diff --git a/src/ceph-volume/ceph_volume/tests/test_terminal.py b/src/ceph-volume/ceph_volume/tests/test_terminal.py
new file mode 100644
index 000000000..fdf219070
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/test_terminal.py
@@ -0,0 +1,143 @@
+# -*- mode:python; tab-width:4; indent-tabs-mode:nil; coding:utf-8 -*-
+
+import codecs
+import io
+try:
+ from io import StringIO
+except ImportError:
+ from StringIO import StringIO
+import pytest
+import sys
+from ceph_volume import terminal
+from ceph_volume.log import setup_console
+
+
+class SubCommand(object):
+
+ help = "this is the subcommand help"
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ def main(self):
+ pass
+
+
+class BadSubCommand(object):
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ def main(self):
+ raise SystemExit(100)
+
+
+class TestSubhelp(object):
+
+ def test_no_sub_command_help(self):
+ assert terminal.subhelp({}) == ''
+
+ def test_single_level_help(self):
+ result = terminal.subhelp({'sub': SubCommand})
+
+ assert 'this is the subcommand help' in result
+
+ def test_has_title_header(self):
+ result = terminal.subhelp({'sub': SubCommand})
+ assert 'Available subcommands:' in result
+
+ def test_command_with_no_help(self):
+ class SubCommandNoHelp(object):
+ pass
+ result = terminal.subhelp({'sub': SubCommandNoHelp})
+ assert result == ''
+
+
+class TestDispatch(object):
+
+ def test_no_subcommand_found(self):
+ result = terminal.dispatch({'sub': SubCommand}, argv=[])
+ assert result is None
+
+ def test_no_main_found(self):
+ class NoMain(object):
+
+ def __init__(self, argv):
+ pass
+ result = terminal.dispatch({'sub': NoMain}, argv=['sub'])
+ assert result is None
+
+ def test_subcommand_found_and_dispatched(self):
+ with pytest.raises(SystemExit) as error:
+ terminal.dispatch({'sub': SubCommand}, argv=['sub'])
+ assert str(error.value) == '0'
+
+ def test_subcommand_found_and_dispatched_with_errors(self):
+ with pytest.raises(SystemExit) as error:
+ terminal.dispatch({'sub': BadSubCommand}, argv=['sub'])
+ assert str(error.value) == '100'
+
+
+@pytest.fixture
+def stream():
+ def make_stream(buffer, encoding):
+ # mock a stdout with given encoding
+ if sys.version_info >= (3, 0):
+ stderr = sys.stderr
+ stream = io.TextIOWrapper(buffer,
+ encoding=encoding,
+ errors=stderr.errors,
+ newline=stderr.newlines,
+ line_buffering=stderr.line_buffering)
+ else:
+ stream = codecs.getwriter(encoding)(buffer)
+ # StreamWriter does not have encoding attached to it, it will ask
+ # the inner buffer for "encoding" attribute in this case
+ stream.encoding = encoding
+ return stream
+ return make_stream
+
+
+class TestWriteUnicode(object):
+
+ def setup(self):
+ self.octopus_and_squid_en = u'octopus and squid'
+ self.octopus_and_squid_zh = u'章鱼和鱿鱼'
+ self.message = self.octopus_and_squid_en + self.octopus_and_squid_zh
+ setup_console()
+
+ def test_stdout_writer(self, capsys):
+ # should work with whatever stdout is
+ terminal.stdout(self.message)
+ _, err = capsys.readouterr()
+ assert self.octopus_and_squid_en in err
+ assert self.octopus_and_squid_zh in err
+
+ @pytest.mark.parametrize('encoding', ['ascii', 'utf8'])
+ def test_writer_log(self, stream, encoding, monkeypatch, caplog):
+ writer = StringIO()
+ terminal._Write(_writer=writer).raw(self.message)
+ writer.flush()
+ writer.seek(0)
+ output = writer.readlines()[0]
+ assert self.octopus_and_squid_en in output
+
+ @pytest.mark.parametrize('encoding', ['utf8'])
+ def test_writer(self, encoding, stream, monkeypatch, capsys, caplog):
+ buffer = io.BytesIO()
+ writer = stream(buffer, encoding)
+ terminal._Write(_writer=writer).raw(self.message)
+ writer.flush()
+ writer.seek(0)
+ val = buffer.getvalue()
+ assert self.octopus_and_squid_en.encode(encoding) in val
+
+ def test_writer_uses_log_on_unicodeerror(self, stream, monkeypatch, capture):
+
+ if sys.version_info > (3,):
+ pytest.skip("Something breaks inside of pytest's capsys")
+ monkeypatch.setattr(terminal.terminal_logger, 'info', capture)
+ buffer = io.BytesIO()
+ writer = stream(buffer, 'ascii')
+ terminal._Write(_writer=writer).raw(self.message)
+ assert self.octopus_and_squid_en in capture.calls[0]['args'][0]
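test_writer_uses_log_on_unicodeerror pins the interesting behavior: when the writer's encoding (ascii here) cannot represent the message, terminal._Write falls back to the terminal logger instead of crashing. A sketch of that fallback shape (an assumption; the real class lives in ceph_volume/terminal.py):

    def write_with_fallback(writer, message, logger):
        try:
            writer.write(message)
            writer.flush()
        except (UnicodeEncodeError, UnicodeDecodeError):
            # an ascii-only stream cannot encode the Chinese half of the
            # message, so route it to the log file instead
            logger.info(message)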
diff --git a/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py b/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py
new file mode 100644
index 000000000..59ca12619
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py
@@ -0,0 +1,338 @@
+import argparse
+import pytest
+import os
+from ceph_volume import exceptions, process
+from ceph_volume.util import arg_validators
+from mock.mock import patch, MagicMock
+
+
+class TestOSDPath(object):
+
+ def setup(self):
+ self.validator = arg_validators.OSDPath()
+
+ def test_is_not_root(self, monkeypatch):
+ monkeypatch.setattr(os, 'getuid', lambda: 100)
+ with pytest.raises(exceptions.SuperUserError):
+ self.validator('')
+
+ def test_path_is_not_a_directory(self, is_root, monkeypatch, fake_filesystem):
+ fake_file = fake_filesystem.create_file('/tmp/foo')
+ monkeypatch.setattr(arg_validators.disk, 'is_partition', lambda x: False)
+ validator = arg_validators.OSDPath()
+ with pytest.raises(argparse.ArgumentError):
+ validator(fake_file.path)
+
+ def test_files_are_missing(self, is_root, tmpdir, monkeypatch):
+ tmppath = str(tmpdir)
+ monkeypatch.setattr(arg_validators.disk, 'is_partition', lambda x: False)
+ validator = arg_validators.OSDPath()
+ with pytest.raises(argparse.ArgumentError) as error:
+ validator(tmppath)
+ assert 'Required file (ceph_fsid) was not found in OSD' in str(error.value)
+
+
+class TestExcludeGroupOptions(object):
+
+ def setup(self):
+ self.parser = argparse.ArgumentParser()
+
+ def test_flags_in_one_group(self):
+ argv = ['<prog>', '--filestore', '--bar']
+ filestore_group = self.parser.add_argument_group('filestore')
+ bluestore_group = self.parser.add_argument_group('bluestore')
+ filestore_group.add_argument('--filestore')
+ bluestore_group.add_argument('--bluestore')
+ result = arg_validators.exclude_group_options(
+ self.parser,
+ ['filestore', 'bluestore'],
+ argv=argv
+ )
+ assert result is None
+
+ def test_flags_in_no_group(self):
+ argv = ['<prog>', '--foo', '--bar']
+ filestore_group = self.parser.add_argument_group('filestore')
+ bluestore_group = self.parser.add_argument_group('bluestore')
+ filestore_group.add_argument('--filestore')
+ bluestore_group.add_argument('--bluestore')
+ result = arg_validators.exclude_group_options(
+ self.parser,
+ ['filestore', 'bluestore'],
+ argv=argv
+ )
+ assert result is None
+
+ def test_flags_conflict(self, capsys):
+ argv = ['<prog>', '--filestore', '--bluestore']
+ filestore_group = self.parser.add_argument_group('filestore')
+ bluestore_group = self.parser.add_argument_group('bluestore')
+ filestore_group.add_argument('--filestore')
+ bluestore_group.add_argument('--bluestore')
+
+ arg_validators.exclude_group_options(
+ self.parser, ['filestore', 'bluestore'], argv=argv
+ )
+ stdout, stderr = capsys.readouterr()
+ assert 'Cannot use --filestore (filestore) with --bluestore (bluestore)' in stderr
+
+
+class TestValidDevice(object):
+
+ def setup(self, fake_filesystem):
+ self.validator = arg_validators.ValidDevice()
+
+ @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
+ def test_path_is_valid(self, m_has_bs_label,
+ fake_call, patch_bluestore_label,
+ device_info, monkeypatch):
+ monkeypatch.setattr('ceph_volume.util.device.Device.exists', lambda: True)
+ lsblk = {"TYPE": "disk", "NAME": "sda"}
+ device_info(lsblk=lsblk)
+ result = self.validator('/dev/sda')
+ assert result.path == '/dev/sda'
+
+ @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
+ def test_path_is_invalid(self, m_has_bs_label,
+ fake_call, patch_bluestore_label,
+ device_info):
+ lsblk = {"TYPE": "disk", "NAME": "sda"}
+ device_info(lsblk=lsblk)
+ with pytest.raises(argparse.ArgumentError):
+ self.validator('/device/does/not/exist')
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
+ @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
+ def test_dev_has_partitions(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
+ mocked_device.return_value = MagicMock(
+ exists=True,
+ has_partitions=True,
+ )
+ with pytest.raises(RuntimeError):
+ self.validator('/dev/foo')
+
+class TestValidZapDevice(object):
+ def setup(self):
+ self.validator = arg_validators.ValidZapDevice()
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
+ @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
+ def test_device_has_partition(self, m_get_single_lv, m_has_bs_label, mocked_device):
+ mocked_device.return_value = MagicMock(
+ used_by_ceph=False,
+ exists=True,
+ has_partitions=True,
+ has_gpt_headers=False,
+ has_fs=False
+ )
+ self.validator.zap = False
+ with pytest.raises(RuntimeError):
+ assert self.validator('/dev/foo')
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
+ @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
+ def test_device_has_no_partition(self, m_get_single_lv, m_has_bs_label, mocked_device):
+ mocked_device.return_value = MagicMock(
+ used_by_ceph=False,
+ exists=True,
+ has_partitions=False,
+ has_gpt_headers=False,
+ has_fs=False
+ )
+ self.validator.zap = False
+ assert self.validator('/dev/foo')
+
+class TestValidDataDevice(object):
+ def setup(self):
+ self.validator = arg_validators.ValidDataDevice()
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
+ @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
+ def test_device_used_by_ceph(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
+ mocked_device.return_value = MagicMock(
+ used_by_ceph=True,
+ exists=True,
+ has_partitions=False,
+ has_gpt_headers=False
+ )
+ with pytest.raises(SystemExit):
+ self.validator.zap = False
+ self.validator('/dev/foo')
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
+ @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
+ def test_device_has_fs(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
+ mocked_device.return_value = MagicMock(
+ used_by_ceph=False,
+ exists=True,
+ has_partitions=False,
+ has_gpt_headers=False,
+ has_fs=True
+ )
+ with pytest.raises(RuntimeError):
+ self.validator.zap = False
+ self.validator('/dev/foo')
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=True)
+ @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
+ def test_device_has_bs_signature(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
+ mocked_device.return_value = MagicMock(
+ used_by_ceph=False,
+ exists=True,
+ has_partitions=False,
+ has_gpt_headers=False,
+ has_fs=False
+ )
+ with pytest.raises(RuntimeError):
+ self.validator.zap = False
+ self.validator('/dev/foo')
+
+class TestValidRawDevice(object):
+ def setup(self):
+ self.validator = arg_validators.ValidRawDevice()
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
+ @patch('ceph_volume.util.arg_validators.disk.blkid')
+ @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
+ def test_dmcrypt_device_already_prepared(self, m_get_single_lv, m_blkid, m_has_bs_label, mocked_device, fake_call, monkeypatch):
+ def mock_call(cmd, **kw):
+ return ('', '', 1)
+ monkeypatch.setattr(process, 'call', mock_call)
+ m_blkid.return_value = {'UUID': '8fd92779-ad78-437c-a06f-275f7170fa74', 'TYPE': 'crypto_LUKS'}
+ mocked_device.return_value = MagicMock(
+ used_by_ceph=False,
+ exists=True,
+ has_partitions=False,
+ has_gpt_headers=False,
+ has_fs=False
+ )
+ with pytest.raises(SystemExit):
+ self.validator.zap = False
+ self.validator('/dev/foo')
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
+ @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
+ def test_device_already_prepared(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
+ mocked_device.return_value = MagicMock(
+ used_by_ceph=False,
+ exists=True,
+ has_partitions=False,
+ has_gpt_headers=False,
+ has_fs=False
+ )
+ with pytest.raises(SystemExit):
+ self.validator.zap = False
+ self.validator('/dev/foo')
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
+ @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
+ def test_device_not_prepared(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call, monkeypatch):
+ def mock_call(cmd, **kw):
+ return ('', '', 1)
+ monkeypatch.setattr(process, 'call', mock_call)
+ mocked_device.return_value = MagicMock(
+ used_by_ceph=False,
+ exists=True,
+ has_partitions=False,
+ has_gpt_headers=False,
+ has_fs=False
+ )
+ self.validator.zap = False
+ assert self.validator('/dev/foo')
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
+ @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
+ def test_device_has_partition(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call, monkeypatch):
+ def mock_call(cmd, **kw):
+ return ('', '', 1)
+ monkeypatch.setattr(process, 'call', mock_call)
+ mocked_device.return_value = MagicMock(
+ used_by_ceph=False,
+ exists=True,
+ has_partitions=True,
+ has_gpt_headers=False,
+ has_fs=False
+ )
+ self.validator.zap = False
+ with pytest.raises(RuntimeError):
+ assert self.validator('/dev/foo')
+
+class TestValidBatchDevice(object):
+ def setup(self):
+ self.validator = arg_validators.ValidBatchDevice()
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
+ @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
+ def test_device_is_partition(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
+ mocked_device.return_value = MagicMock(
+ used_by_ceph=False,
+ exists=True,
+ has_partitions=False,
+ has_gpt_headers=False,
+ has_fs=False,
+ is_partition=True
+ )
+ with pytest.raises(argparse.ArgumentError):
+ self.validator.zap = False
+ self.validator('/dev/foo')
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
+ @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
+ def test_device_is_not_partition(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
+ mocked_device.return_value = MagicMock(
+ used_by_ceph=False,
+ exists=True,
+ has_partitions=False,
+ has_gpt_headers=False,
+ has_fs=False,
+ is_partition=False
+ )
+ self.validator.zap = False
+ assert self.validator('/dev/foo')
+
+class TestValidBatchDataDevice(object):
+ def setup(self):
+ self.validator = arg_validators.ValidBatchDataDevice()
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
+ @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
+ def test_device_is_partition(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
+ mocked_device.return_value = MagicMock(
+ used_by_ceph=False,
+ exists=True,
+ has_partitions=False,
+ has_gpt_headers=False,
+ has_fs=False,
+ is_partition=True
+ )
+ with pytest.raises(argparse.ArgumentError):
+ self.validator.zap = False
+ assert self.validator('/dev/foo')
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
+ @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
+ def test_device_is_not_partition(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
+ mocked_device.return_value = MagicMock(
+ used_by_ceph=False,
+ exists=True,
+ has_partitions=False,
+ has_gpt_headers=False,
+ has_fs=False,
+ is_partition=False
+ )
+ self.validator.zap = False
+ assert self.validator('/dev/foo')
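Every validator in this module is a callable that raises argparse.ArgumentError on bad input, which is the shape argparse accepts for a `type=` converter; that is presumably how ceph-volume wires them into its parsers. A hypothetical wiring sketch:

    import argparse
    from ceph_volume.util import arg_validators

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data',
        type=arg_validators.ValidDataDevice(),  # called with the raw string
        help='path to the OSD data device',
    )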
diff --git a/src/ceph-volume/ceph_volume/tests/util/test_device.py b/src/ceph-volume/ceph_volume/tests/util/test_device.py
new file mode 100644
index 000000000..8eef3ff00
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/util/test_device.py
@@ -0,0 +1,704 @@
+import os
+import pytest
+from copy import deepcopy
+from ceph_volume.util import device
+from ceph_volume.api import lvm as api
+from mock.mock import patch, mock_open
+
+
+class TestDevice(object):
+
+ def test_sys_api(self, monkeypatch, device_info):
+ volume = api.Volume(lv_name='lv', lv_uuid='y', vg_name='vg',
+ lv_tags={}, lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(volume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs:
+ deepcopy(volumes))
+
+ data = {"/dev/sda": {"foo": "bar"}}
+ lsblk = {"TYPE": "disk", "NAME": "sda"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.sys_api
+ assert "foo" in disk.sys_api
+
+ def test_lvm_size(self, monkeypatch, device_info):
+ volume = api.Volume(lv_name='lv', lv_uuid='y', vg_name='vg',
+ lv_tags={}, lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(volume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs:
+ deepcopy(volumes))
+
+ # 5GB in size
+ data = {"/dev/sda": {"size": "5368709120"}}
+ lsblk = {"TYPE": "disk", "NAME": "sda"}
+ device_info(devices=data,lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.lvm_size.gb == 4
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_lvm_size_rounds_down(self, fake_call, device_info):
+ # 5.5GB in size
+ data = {"/dev/sda": {"size": "5905580032"}}
+ lsblk = {"TYPE": "disk", "NAME": "sda"}
+ device_info(devices=data,lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.lvm_size.gb == 4
+
+ def test_is_lv(self, fake_call, device_info):
+ data = {"lv_path": "vg/lv", "vg_name": "vg", "name": "lv"}
+ lsblk = {"TYPE": "lvm", "NAME": "vg-lv"}
+ device_info(lv=data,lsblk=lsblk)
+ disk = device.Device("vg/lv")
+ assert disk.is_lv
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_vgs_is_empty(self, fake_call, device_info, monkeypatch):
+ BarPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000",
+ pv_tags={})
+ pvolumes = []
+ pvolumes.append(BarPVolume)
+ lsblk = {"TYPE": "disk", "NAME": "sda"}
+ device_info(lsblk=lsblk)
+ monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: {})
+
+ disk = device.Device("/dev/nvme0n1")
+ assert disk.vgs == []
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_vgs_is_not_empty(self, fake_call, device_info, monkeypatch):
+ vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=6,
+ vg_extent_size=1073741824)
+ monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg])
+ lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
+ device_info(lsblk=lsblk)
+ disk = device.Device("/dev/nvme0n1")
+ assert len(disk.vgs) == 1
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_device_is_device(self, fake_call, device_info):
+ data = {"/dev/sda": {"foo": "bar"}}
+ lsblk = {"TYPE": "device", "NAME": "sda"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.is_device is True
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_loop_device_is_not_device(self, fake_call, device_info):
+ data = {"/dev/loop0": {"foo": "bar"}}
+ lsblk = {"TYPE": "loop"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/loop0")
+ assert disk.is_device is False
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_loop_device_is_device(self, fake_call, device_info):
+ data = {"/dev/loop0": {"foo": "bar"}}
+ lsblk = {"TYPE": "loop"}
+ os.environ["CEPH_VOLUME_ALLOW_LOOP_DEVICES"] = "1"
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/loop0")
+ assert disk.is_device is True
+ del os.environ["CEPH_VOLUME_ALLOW_LOOP_DEVICES"]
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_device_is_rotational(self, fake_call, device_info):
+ data = {"/dev/sda": {"rotational": "1"}}
+ lsblk = {"TYPE": "device", "NAME": "sda"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.rotational
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_device_is_not_rotational(self, fake_call, device_info):
+ data = {"/dev/sda": {"rotational": "0"}}
+ lsblk = {"TYPE": "device", "NAME": "sda"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert not disk.rotational
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_device_is_rotational_lsblk(self, fake_call, device_info):
+ data = {"/dev/sda": {"foo": "bar"}}
+ lsblk = {"TYPE": "device", "ROTA": "1", "NAME": "sda"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.rotational
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_device_is_not_rotational_lsblk(self, fake_call, device_info):
+ data = {"/dev/sda": {"rotational": "0"}}
+ lsblk = {"TYPE": "device", "ROTA": "0", "NAME": "sda"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert not disk.rotational
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_device_is_rotational_defaults_true(self, fake_call, device_info):
+ # rotational will default true if no info from sys_api or lsblk is found
+ data = {"/dev/sda": {"foo": "bar"}}
+ lsblk = {"TYPE": "device", "foo": "bar", "NAME": "sda"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.rotational
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_disk_is_device(self, fake_call, device_info):
+ data = {"/dev/sda": {"foo": "bar"}}
+ lsblk = {"TYPE": "disk", "NAME": "sda"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.is_device is True
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_is_partition(self, fake_call, device_info):
+ data = {"/dev/sda1": {"foo": "bar"}}
+ lsblk = {"TYPE": "part", "NAME": "sda1", "PKNAME": "sda"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda1")
+ assert disk.is_partition
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_mpath_device_is_device(self, fake_call, device_info):
+ data = {"/dev/foo": {"foo": "bar"}}
+ lsblk = {"TYPE": "mpath", "NAME": "foo"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/foo")
+ assert disk.is_device is True
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_is_not_lvm_member(self, fake_call, device_info):
+ data = {"/dev/sda1": {"foo": "bar"}}
+ lsblk = {"TYPE": "part", "NAME": "sda1", "PKNAME": "sda"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda1")
+ assert not disk.is_lvm_member
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_is_lvm_member(self, fake_call, device_info):
+ data = {"/dev/sda1": {"foo": "bar"}}
+ lsblk = {"TYPE": "part", "NAME": "sda1", "PKNAME": "sda"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda1")
+ assert not disk.is_lvm_member
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_is_mapper_device(self, fake_call, device_info):
+ lsblk = {"TYPE": "lvm", "NAME": "foo"}
+ device_info(lsblk=lsblk)
+ disk = device.Device("/dev/mapper/foo")
+ assert disk.is_mapper
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_dm_is_mapper_device(self, fake_call, device_info):
+ lsblk = {"TYPE": "lvm", "NAME": "dm-4"}
+ device_info(lsblk=lsblk)
+ disk = device.Device("/dev/dm-4")
+ assert disk.is_mapper
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_is_not_mapper_device(self, fake_call, device_info):
+ lsblk = {"TYPE": "disk", "NAME": "sda"}
+ device_info(lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert not disk.is_mapper
+
+ @pytest.mark.usefixtures("lsblk_ceph_disk_member",
+ "disable_kernel_queries")
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_is_ceph_disk_lsblk(self, fake_call, monkeypatch, patch_bluestore_label):
+ disk = device.Device("/dev/sda")
+ assert disk.is_ceph_disk_member
+
+ @pytest.mark.usefixtures("blkid_ceph_disk_member",
+ "lsblk_ceph_disk_member",
+ "disable_kernel_queries")
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_is_ceph_disk_blkid(self, fake_call, monkeypatch, patch_bluestore_label):
+ disk = device.Device("/dev/sda")
+ assert disk.is_ceph_disk_member
+
+ @pytest.mark.usefixtures("lsblk_ceph_disk_member",
+ "disable_kernel_queries")
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_is_ceph_disk_member_not_available_lsblk(self, fake_call, monkeypatch, patch_bluestore_label):
+ disk = device.Device("/dev/sda")
+ assert disk.is_ceph_disk_member
+ assert not disk.available
+ assert "Used by ceph-disk" in disk.rejected_reasons
+
+ @pytest.mark.usefixtures("blkid_ceph_disk_member",
+ "lsblk_ceph_disk_member",
+ "disable_kernel_queries")
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_is_ceph_disk_member_not_available_blkid(self, fake_call, monkeypatch, patch_bluestore_label):
+ disk = device.Device("/dev/sda")
+ assert disk.is_ceph_disk_member
+ assert not disk.available
+ assert "Used by ceph-disk" in disk.rejected_reasons
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_reject_removable_device(self, fake_call, device_info):
+ data = {"/dev/sdb": {"removable": 1}}
+ lsblk = {"TYPE": "disk", "NAME": "sdb"}
+        device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sdb")
+ assert not disk.available
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_reject_device_with_gpt_headers(self, fake_call, device_info):
+ data = {"/dev/sdb": {"removable": 0, "size": 5368709120}}
+ lsblk = {"TYPE": "disk", "NAME": "sdb"}
+        blkid = {"PTTYPE": "gpt"}
+ device_info(
+ devices=data,
+ blkid=blkid,
+ lsblk=lsblk,
+ )
+ disk = device.Device("/dev/sdb")
+ assert not disk.available
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_accept_non_removable_device(self, fake_call, device_info):
+ data = {"/dev/sdb": {"removable": 0, "size": 5368709120}}
+ lsblk = {"TYPE": "disk", "NAME": "sdb"}
+        device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sdb")
+ assert disk.available
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_reject_not_acceptable_device(self, fake_call, device_info):
+ data = {"/dev/dm-0": {"foo": "bar"}}
+ lsblk = {"TYPE": "mpath", "NAME": "dm-0"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/dm-0")
+ assert not disk.available
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ @patch('ceph_volume.util.device.os.path.realpath')
+ @patch('ceph_volume.util.device.os.path.islink')
+ def test_accept_symlink_to_device(self,
+ m_os_path_islink,
+ m_os_path_realpath,
+ device_info,
+ fake_call):
+ m_os_path_islink.return_value = True
+ m_os_path_realpath.return_value = '/dev/sdb'
+ data = {"/dev/sdb": {"ro": 0, "size": 5368709120}}
+ lsblk = {"TYPE": "disk"}
+        device_info(devices=data, lsblk=lsblk)
+        disk = device.Device("/dev/test_symlink")
+ assert disk.available
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ @patch('ceph_volume.util.device.os.readlink')
+ @patch('ceph_volume.util.device.os.path.islink')
+ def test_reject_symlink_to_device_mapper(self,
+ m_os_path_islink,
+ m_os_readlink,
+ device_info,
+ fake_call):
+ m_os_path_islink.return_value = True
+ m_os_readlink.return_value = '/dev/dm-0'
+ data = {"/dev/mapper/mpatha": {"ro": 0, "size": 5368709120}}
+ lsblk = {"TYPE": "disk"}
+        device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/mapper/mpatha")
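+        # note: despite the test name, a symlink that resolves to a
+        # device-mapper node is still reported as available here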
+ assert disk.available
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_reject_readonly_device(self, fake_call, device_info):
+ data = {"/dev/cdrom": {"ro": 1}}
+ lsblk = {"TYPE": "disk", "NAME": "cdrom"}
+        device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/cdrom")
+ assert not disk.available
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_reject_smaller_than_5gb(self, fake_call, device_info):
+ data = {"/dev/sda": {"size": 5368709119}}
+ lsblk = {"TYPE": "disk", "NAME": "sda"}
+        device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert not disk.available, 'too small device is available'
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_accept_non_readonly_device(self, fake_call, device_info):
+ data = {"/dev/sda": {"ro": 0, "size": 5368709120}}
+ lsblk = {"TYPE": "disk", "NAME": "sda"}
+        device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.available
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_reject_bluestore_device(self, fake_call, monkeypatch, patch_bluestore_label, device_info):
+ patch_bluestore_label.return_value = True
+ lsblk = {"TYPE": "disk", "NAME": "sda"}
+ device_info(lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert not disk.available
+ assert "Has BlueStore device label" in disk.rejected_reasons
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_reject_device_with_oserror(self, fake_call, monkeypatch, patch_bluestore_label, device_info):
+ patch_bluestore_label.side_effect = OSError('test failure')
+ lsblk = {"TYPE": "disk", "NAME": "sda"}
+ device_info(lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert not disk.available
+ assert "Failed to determine if device is BlueStore" in disk.rejected_reasons
+
+ @pytest.mark.usefixtures("lsblk_ceph_disk_member",
+ "device_info_not_ceph_disk_member",
+ "disable_kernel_queries")
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_is_not_ceph_disk_member_lsblk(self, fake_call, patch_bluestore_label):
+ disk = device.Device("/dev/sda")
+ assert disk.is_ceph_disk_member is False
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_existing_vg_available(self, fake_call, monkeypatch, device_info):
+ vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=1536,
+ vg_extent_size=4194304)
+ monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg])
+ lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
+ data = {"/dev/nvme0n1": {"size": "6442450944"}}
+ lv = {"tags": {"ceph.osd_id": "1"}}
+ device_info(devices=data, lsblk=lsblk, lv=lv)
+ disk = device.Device("/dev/nvme0n1")
+ assert disk.available_lvm
+ assert not disk.available
+ assert not disk.available_raw
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_existing_vg_too_small(self, fake_call, monkeypatch, device_info):
+ vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=4,
+ vg_extent_size=1073741824)
+ monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg])
+ lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
+ data = {"/dev/nvme0n1": {"size": "6442450944"}}
+ lv = {"tags": {"ceph.osd_id": "1"}}
+ device_info(devices=data, lsblk=lsblk, lv=lv)
+ disk = device.Device("/dev/nvme0n1")
+ assert not disk.available_lvm
+ assert not disk.available
+ assert not disk.available_raw
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_multiple_existing_vgs(self, fake_call, monkeypatch, device_info):
+ vg1 = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=1000,
+ vg_extent_size=4194304)
+ vg2 = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=536,
+ vg_extent_size=4194304)
+ monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg1, vg2])
+ lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
+ data = {"/dev/nvme0n1": {"size": "6442450944"}}
+ lv = {"tags": {"ceph.osd_id": "1"}}
+ device_info(devices=data, lsblk=lsblk, lv=lv)
+ disk = device.Device("/dev/nvme0n1")
+ assert disk.available_lvm
+ assert not disk.available
+ assert not disk.available_raw
+
+ @pytest.mark.parametrize("ceph_type", ["data", "block"])
+ def test_used_by_ceph(self, fake_call, device_info,
+ monkeypatch, ceph_type):
+ data = {"/dev/sda": {"foo": "bar"}}
+ lsblk = {"TYPE": "part", "NAME": "sda", "PKNAME": "sda"}
+ FooPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000",
+ lv_uuid="0000", pv_tags={}, vg_name="vg")
+ pvolumes = []
+ pvolumes.append(FooPVolume)
+ lv_data = {"lv_name": "lv", "lv_path": "vg/lv", "vg_name": "vg",
+ "lv_uuid": "0000", "lv_tags":
+ "ceph.osd_id=0,ceph.type="+ceph_type}
+ volumes = []
+ lv = api.Volume(**lv_data)
+ volumes.append(lv)
+ monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: pvolumes)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs:
+ deepcopy(volumes))
+
+ device_info(devices=data, lsblk=lsblk, lv=lv_data)
+ vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=6,
+ vg_extent_size=1073741824)
+ monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
+ disk = device.Device("/dev/sda")
+ assert disk.used_by_ceph
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_not_used_by_ceph(self, fake_call, device_info, monkeypatch):
+ FooPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000", lv_uuid="0000", pv_tags={}, vg_name="vg")
+ pvolumes = []
+ pvolumes.append(FooPVolume)
+ data = {"/dev/sda": {"foo": "bar"}}
+ lsblk = {"TYPE": "part", "NAME": "sda", "PKNAME": "sda"}
+ lv_data = {"lv_path": "vg/lv", "vg_name": "vg", "lv_uuid": "0000", "tags": {"ceph.osd_id": 0, "ceph.type": "journal"}}
+ monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: pvolumes)
+
+ device_info(devices=data, lsblk=lsblk, lv=lv_data)
+ disk = device.Device("/dev/sda")
+ assert not disk.used_by_ceph
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_get_device_id(self, fake_call, device_info):
+        udev = {k: k for k in ['ID_VENDOR', 'ID_MODEL', 'ID_SCSI_SERIAL']}
+        lsblk = {"TYPE": "disk", "NAME": "sda"}
+        device_info(udevadm=udev, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk._get_device_id() == 'ID_VENDOR_ID_MODEL_ID_SCSI_SERIAL'
+
+ def test_has_bluestore_label(self):
+ # patch device.Device __init__ function to do nothing since we want to only test the
+ # low-level behavior of has_bluestore_label
+ with patch.object(device.Device, "__init__", lambda self, path, with_lsm=False: None):
+ disk = device.Device("/dev/sda")
+ disk.path = "/dev/sda"
+ with patch('builtins.open', mock_open(read_data=b'bluestore block device\n')):
+ assert disk.has_bluestore_label
+ with patch('builtins.open', mock_open(read_data=b'not a bluestore block device\n')):
+ assert not disk.has_bluestore_label
+
+
+class TestDeviceEncryption(object):
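+    # encryption detection is exercised through each source the Device
+    # object consults: lsblk FSTYPE, blkid TYPE, and for mapper/LV paths
+    # the lv_api metadata or encryption_status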
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_partition_is_not_encrypted_lsblk(self, fake_call, device_info):
+ lsblk = {'TYPE': 'part', 'FSTYPE': 'xfs', 'NAME': 'sda', 'PKNAME': 'sda'}
+ device_info(lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.is_encrypted is False
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_partition_is_encrypted_lsblk(self, fake_call, device_info):
+ lsblk = {'TYPE': 'part', 'FSTYPE': 'crypto_LUKS', 'NAME': 'sda', 'PKNAME': 'sda'}
+ device_info(lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.is_encrypted is True
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_partition_is_not_encrypted_blkid(self, fake_call, device_info):
+ lsblk = {'TYPE': 'part', 'NAME': 'sda', 'PKNAME': 'sda'}
+ blkid = {'TYPE': 'ceph data'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/sda")
+ assert disk.is_encrypted is False
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_partition_is_encrypted_blkid(self, fake_call, device_info):
+        lsblk = {'TYPE': 'part', 'NAME': 'sda', 'PKNAME': 'sda'}
+ blkid = {'TYPE': 'crypto_LUKS'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/sda")
+ assert disk.is_encrypted is True
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_mapper_is_encrypted_luks1(self, fake_call, device_info, monkeypatch):
+ status = {'type': 'LUKS1'}
+ monkeypatch.setattr(device, 'encryption_status', lambda x: status)
+ lsblk = {'FSTYPE': 'xfs', 'NAME': 'uuid','TYPE': 'lvm'}
+ blkid = {'TYPE': 'mapper'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/mapper/uuid")
+ assert disk.is_encrypted is True
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_mapper_is_encrypted_luks2(self, fake_call, device_info, monkeypatch):
+ status = {'type': 'LUKS2'}
+ monkeypatch.setattr(device, 'encryption_status', lambda x: status)
+ lsblk = {'FSTYPE': 'xfs', 'NAME': 'uuid', 'TYPE': 'lvm'}
+ blkid = {'TYPE': 'mapper'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/mapper/uuid")
+ assert disk.is_encrypted is True
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_mapper_is_encrypted_plain(self, fake_call, device_info, monkeypatch):
+ status = {'type': 'PLAIN'}
+ monkeypatch.setattr(device, 'encryption_status', lambda x: status)
+ lsblk = {'FSTYPE': 'xfs', 'NAME': 'uuid', 'TYPE': 'lvm'}
+ blkid = {'TYPE': 'mapper'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/mapper/uuid")
+ assert disk.is_encrypted is True
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_mapper_is_not_encrypted_plain(self, fake_call, device_info, monkeypatch):
+ monkeypatch.setattr(device, 'encryption_status', lambda x: {})
+ lsblk = {'FSTYPE': 'xfs', 'NAME': 'uuid', 'TYPE': 'lvm'}
+ blkid = {'TYPE': 'mapper'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/mapper/uuid")
+ assert disk.is_encrypted is False
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_lv_is_encrypted_blkid(self, fake_call, device_info):
+ lsblk = {'TYPE': 'lvm', 'NAME': 'sda'}
+ blkid = {'TYPE': 'crypto_LUKS'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/sda")
+ disk.lv_api = {}
+ assert disk.is_encrypted is True
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_lv_is_not_encrypted_blkid(self, fake_call, factory, device_info):
+ lsblk = {'TYPE': 'lvm', 'NAME': 'sda'}
+ blkid = {'TYPE': 'xfs'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/sda")
+ disk.lv_api = factory(encrypted=None)
+ assert disk.is_encrypted is False
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_lv_is_encrypted_lsblk(self, fake_call, device_info):
+ lsblk = {'FSTYPE': 'crypto_LUKS', 'NAME': 'sda', 'TYPE': 'lvm'}
+ blkid = {'TYPE': 'mapper'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/sda")
+ disk.lv_api = {}
+ assert disk.is_encrypted is True
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_lv_is_not_encrypted_lsblk(self, fake_call, factory, device_info):
+ lsblk = {'FSTYPE': 'xfs', 'NAME': 'sda', 'TYPE': 'lvm'}
+ blkid = {'TYPE': 'mapper'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/sda")
+ disk.lv_api = factory(encrypted=None)
+ assert disk.is_encrypted is False
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_lv_is_encrypted_lvm_api(self, fake_call, factory, device_info):
+ lsblk = {'FSTYPE': 'xfs', 'NAME': 'sda', 'TYPE': 'lvm'}
+ blkid = {'TYPE': 'mapper'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/sda")
+ disk.lv_api = factory(encrypted=True)
+ assert disk.is_encrypted is True
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_lv_is_not_encrypted_lvm_api(self, fake_call, factory, device_info):
+ lsblk = {'FSTYPE': 'xfs', 'NAME': 'sda', 'TYPE': 'lvm'}
+ blkid = {'TYPE': 'mapper'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/sda")
+ disk.lv_api = factory(encrypted=False)
+ assert disk.is_encrypted is False
+
+
+class TestDeviceOrdering(object):
+
+    def setup_method(self, method):
+ self.data = {
+ "/dev/sda": {"removable": 0},
+ "/dev/sdb": {"removable": 1}, # invalid
+ "/dev/sdc": {"removable": 0},
+ "/dev/sdd": {"removable": 1}, # invalid
+ }
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_valid_before_invalid(self, fake_call, device_info):
+ lsblk_sda = {"NAME": "sda", "TYPE": "disk"}
+ lsblk_sdb = {"NAME": "sdb", "TYPE": "disk"}
+        device_info(devices=self.data, lsblk=lsblk_sda)
+        sda = device.Device("/dev/sda")
+        device_info(devices=self.data, lsblk=lsblk_sdb)
+ sdb = device.Device("/dev/sdb")
+
+ assert sda < sdb
+ assert sdb > sda
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_valid_alphabetical_ordering(self, fake_call, device_info):
+ lsblk_sda = {"NAME": "sda", "TYPE": "disk"}
+ lsblk_sdc = {"NAME": "sdc", "TYPE": "disk"}
+        device_info(devices=self.data, lsblk=lsblk_sda)
+        sda = device.Device("/dev/sda")
+        device_info(devices=self.data, lsblk=lsblk_sdc)
+ sdc = device.Device("/dev/sdc")
+
+ assert sda < sdc
+ assert sdc > sda
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_invalid_alphabetical_ordering(self, fake_call, device_info):
+ lsblk_sdb = {"NAME": "sdb", "TYPE": "disk"}
+ lsblk_sdd = {"NAME": "sdd", "TYPE": "disk"}
+        device_info(devices=self.data, lsblk=lsblk_sdb)
+        sdb = device.Device("/dev/sdb")
+        device_info(devices=self.data, lsblk=lsblk_sdd)
+ sdd = device.Device("/dev/sdd")
+
+ assert sdb < sdd
+ assert sdd > sdb
+
+
+class TestCephDiskDevice(object):
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_partlabel_lsblk(self, fake_call, device_info):
+ lsblk = {"TYPE": "disk", "NAME": "sda", "PARTLABEL": ""}
+ device_info(lsblk=lsblk)
+ disk = device.CephDiskDevice(device.Device("/dev/sda"))
+
+ assert disk.partlabel == ''
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_partlabel_blkid(self, fake_call, device_info):
+ lsblk = {"TYPE": "disk", "NAME": "sda", "PARTLABEL": "ceph data"}
+ blkid = {"TYPE": "disk", "PARTLABEL": "ceph data"}
+ device_info(blkid=blkid, lsblk=lsblk)
+ disk = device.CephDiskDevice(device.Device("/dev/sda"))
+
+ assert disk.partlabel == 'ceph data'
+
+ @pytest.mark.usefixtures("lsblk_ceph_disk_member",
+ "blkid_ceph_disk_member",
+ "disable_kernel_queries")
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_is_member_blkid(self, fake_call, monkeypatch):
+ disk = device.CephDiskDevice(device.Device("/dev/sda"))
+
+ assert disk.is_member is True
+
+ @pytest.mark.usefixtures("lsblk_ceph_disk_member",
+ "disable_kernel_queries")
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_is_member_lsblk(self, fake_call, patch_bluestore_label, device_info):
+ lsblk = {"TYPE": "disk", "NAME": "sda", "PARTLABEL": "ceph"}
+ device_info(lsblk=lsblk)
+ disk = device.CephDiskDevice(device.Device("/dev/sda"))
+
+ assert disk.is_member is True
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_unknown_type(self, fake_call, device_info):
+ lsblk = {"TYPE": "disk", "NAME": "sda", "PARTLABEL": "gluster"}
+ device_info(lsblk=lsblk)
+ disk = device.CephDiskDevice(device.Device("/dev/sda"))
+
+ assert disk.type == 'unknown'
+
+ ceph_types = ['data', 'wal', 'db', 'lockbox', 'journal', 'block']
+
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ @pytest.mark.usefixtures("lsblk_ceph_disk_member",
+ "blkid_ceph_disk_member",
+ "disable_kernel_queries")
+ def test_type_blkid(self, monkeypatch, fake_call, device_info, ceph_partlabel):
+ disk = device.CephDiskDevice(device.Device("/dev/sda"))
+
+ assert disk.type in self.ceph_types
+
+ @pytest.mark.usefixtures("blkid_ceph_disk_member",
+ "lsblk_ceph_disk_member",
+ "disable_kernel_queries")
+ @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
+ def test_type_lsblk(self, fake_call, device_info, ceph_partlabel):
+ disk = device.CephDiskDevice(device.Device("/dev/sda"))
+
+ assert disk.type in self.ceph_types
diff --git a/src/ceph-volume/ceph_volume/tests/util/test_disk.py b/src/ceph-volume/ceph_volume/tests/util/test_disk.py
new file mode 100644
index 000000000..fcd644a86
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/util/test_disk.py
@@ -0,0 +1,524 @@
+import os
+import pytest
+from ceph_volume.util import disk
+from mock.mock import patch
+
+
+class TestLsblkParser(object):
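+    # _lsblk_parser consumes `lsblk -P`-style output: whitespace-separated
+    # KEY="value" pairs whose values may themselves contain whitespace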
+
+ def test_parses_whitespace_values(self):
+ output = 'NAME="sdaa5" PARTLABEL="ceph data" RM="0" SIZE="10M" RO="0" TYPE="part"'
+ result = disk._lsblk_parser(output)
+ assert result['PARTLABEL'] == 'ceph data'
+
+ def test_ignores_bogus_pairs(self):
+ output = 'NAME="sdaa5" PARTLABEL RM="0" SIZE="10M" RO="0" TYPE="part" MOUNTPOINT=""'
+ result = disk._lsblk_parser(output)
+ assert result['SIZE'] == '10M'
+
+
+class TestBlkidParser(object):
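+    # _blkid_parser maps blkid's PART_ENTRY_* keys onto the lsblk-style
+    # names the rest of the code expects, e.g. PART_ENTRY_NAME -> PARTLABEL
+    # and PART_ENTRY_UUID -> PARTUUID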
+
+ def test_parses_whitespace_values(self):
+ output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"''' # noqa
+ result = disk._blkid_parser(output)
+ assert result['PARTLABEL'] == 'ceph data'
+
+ def test_ignores_unmapped(self):
+ output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"''' # noqa
+ result = disk._blkid_parser(output)
+ assert len(result.keys()) == 4
+
+ def test_translates_to_partuuid(self):
+ output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"''' # noqa
+ result = disk._blkid_parser(output)
+ assert result['PARTUUID'] == 'b89c03bc-bf58-4338-a8f8-a2f484852b4f'
+
+
+class TestBlkid(object):
+
+ def test_parses_translated(self, stub_call):
+ output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"''' # noqa
+ stub_call((output.split(), [], 0))
+ result = disk.blkid('/dev/sdb1')
+ assert result['PARTUUID'] == 'b89c03bc-bf58-4338-a8f8-a2f484852b4f'
+ assert result['PARTLABEL'] == 'ceph data'
+ assert result['UUID'] == '62416664-cbaf-40bd-9689-10bd337379c3'
+ assert result['TYPE'] == 'xfs'
+
+
+class TestUdevadmProperty(object):
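+    # udevadm_property parses udevadm property output, one KEY=value pair
+    # per line; malformed lines raise ValueError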
+
+ def test_good_output(self, stub_call):
+ output = """ID_MODEL=SK_hynix_SC311_SATA_512GB
+ID_PART_TABLE_TYPE=gpt
+ID_SERIAL_SHORT=MS83N71801150416A""".split()
+ stub_call((output, [], 0))
+        result = disk.udevadm_property('/dev/sda')
+ assert result['ID_MODEL'] == 'SK_hynix_SC311_SATA_512GB'
+ assert result['ID_PART_TABLE_TYPE'] == 'gpt'
+ assert result['ID_SERIAL_SHORT'] == 'MS83N71801150416A'
+
+ def test_property_filter(self, stub_call):
+ output = """ID_MODEL=SK_hynix_SC311_SATA_512GB
+ID_PART_TABLE_TYPE=gpt
+ID_SERIAL_SHORT=MS83N71801150416A""".split()
+ stub_call((output, [], 0))
+        result = disk.udevadm_property('/dev/sda', ['ID_MODEL',
+                                                    'ID_SERIAL_SHORT'])
+ assert result['ID_MODEL'] == 'SK_hynix_SC311_SATA_512GB'
+ assert 'ID_PART_TABLE_TYPE' not in result
+
+ def test_fail_on_broken_output(self, stub_call):
+ output = ["ID_MODEL:SK_hynix_SC311_SATA_512GB"]
+ stub_call((output, [], 0))
+ with pytest.raises(ValueError):
+            disk.udevadm_property('/dev/sda')
+
+
+class TestDeviceFamily(object):
+
+ def test_groups_multiple_devices(self, stub_call):
+ out = [
+            'NAME="sdaa5" PARTLABEL="ceph lockbox"',
+            'NAME="sdaa" RO="0"',
+            'NAME="sdaa1" PARTLABEL="ceph data"',
+            'NAME="sdaa2" PARTLABEL="ceph journal"',
+ ]
+ stub_call((out, '', 0))
+ result = disk.device_family('sdaa5')
+ assert len(result) == 4
+
+ def test_parses_output_correctly(self, stub_call):
+ names = ['sdaa', 'sdaa5', 'sdaa1', 'sdaa2']
+ out = [
+            'NAME="sdaa5" PARTLABEL="ceph lockbox"',
+            'NAME="sdaa" RO="0"',
+            'NAME="sdaa1" PARTLABEL="ceph data"',
+            'NAME="sdaa2" PARTLABEL="ceph journal"',
+ ]
+ stub_call((out, '', 0))
+ result = disk.device_family('sdaa5')
+ for parsed in result:
+ assert parsed['NAME'] in names
+
+
+class TestHumanReadableSize(object):
+
+ def test_bytes(self):
+ result = disk.human_readable_size(800)
+ assert result == '800.00 B'
+
+ def test_kilobytes(self):
+ result = disk.human_readable_size(800*1024)
+ assert result == '800.00 KB'
+
+ def test_megabytes(self):
+ result = disk.human_readable_size(800*1024*1024)
+ assert result == '800.00 MB'
+
+ def test_gigabytes(self):
+ result = disk.human_readable_size(8.19*1024*1024*1024)
+ assert result == '8.19 GB'
+
+ def test_terabytes(self):
+ result = disk.human_readable_size(81.2*1024*1024*1024*1024)
+ assert result == '81.20 TB'
+
+ def test_petabytes(self):
+ result = disk.human_readable_size(9.23*1024*1024*1024*1024*1024)
+ assert result == '9.23 PB'
+
+
+class TestSizeFromHumanReadable(object):
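+    # accepted spellings: an optional space before the unit, upper or lower
+    # case, and float values; bare numbers are taken as bytes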
+
+ def test_bytes(self):
+ result = disk.size_from_human_readable('2')
+ assert result == disk.Size(b=2)
+
+ def test_kilobytes(self):
+ result = disk.size_from_human_readable('2 K')
+ assert result == disk.Size(kb=2)
+
+ def test_megabytes(self):
+ result = disk.size_from_human_readable('2 M')
+ assert result == disk.Size(mb=2)
+
+ def test_gigabytes(self):
+ result = disk.size_from_human_readable('2 G')
+ assert result == disk.Size(gb=2)
+
+ def test_terabytes(self):
+ result = disk.size_from_human_readable('2 T')
+ assert result == disk.Size(tb=2)
+
+ def test_petabytes(self):
+ result = disk.size_from_human_readable('2 P')
+ assert result == disk.Size(pb=2)
+
+ def test_case(self):
+ result = disk.size_from_human_readable('2 t')
+ assert result == disk.Size(tb=2)
+
+ def test_space(self):
+ result = disk.size_from_human_readable('2T')
+ assert result == disk.Size(tb=2)
+
+ def test_float(self):
+ result = disk.size_from_human_readable('2.0')
+ assert result == disk.Size(b=2)
+ result = disk.size_from_human_readable('2.0T')
+ assert result == disk.Size(tb=2)
+ result = disk.size_from_human_readable('1.8T')
+ assert result == disk.Size(tb=1.8)
+
+
+class TestSizeParse(object):
+
+ def test_bytes(self):
+ result = disk.Size.parse('2')
+ assert result == disk.Size(b=2)
+
+ def test_kilobytes(self):
+ result = disk.Size.parse('2K')
+ assert result == disk.Size(kb=2)
+
+ def test_megabytes(self):
+ result = disk.Size.parse('2M')
+ assert result == disk.Size(mb=2)
+
+ def test_gigabytes(self):
+ result = disk.Size.parse('2G')
+ assert result == disk.Size(gb=2)
+
+ def test_terabytes(self):
+ result = disk.Size.parse('2T')
+ assert result == disk.Size(tb=2)
+
+ def test_petabytes(self):
+ result = disk.Size.parse('2P')
+ assert result == disk.Size(pb=2)
+
+ def test_tb(self):
+ result = disk.Size.parse('2Tb')
+ assert result == disk.Size(tb=2)
+
+ def test_case(self):
+ result = disk.Size.parse('2t')
+ assert result == disk.Size(tb=2)
+
+ def test_space(self):
+ result = disk.Size.parse('2T')
+ assert result == disk.Size(tb=2)
+
+ def test_float(self):
+ result = disk.Size.parse('2.0')
+ assert result == disk.Size(b=2)
+ result = disk.Size.parse('2.0T')
+ assert result == disk.Size(tb=2)
+ result = disk.Size.parse('1.8T')
+ assert result == disk.Size(tb=1.8)
+
+
+class TestGetDevices(object):
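+    # fake_filesystem is assumed to be a pyfakefs-backed fixture from
+    # conftest; files created under /sys/block stand in for real sysfs data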
+
+ def test_no_devices_are_found(self, tmpdir, patched_get_block_devs_sysfs):
+ patched_get_block_devs_sysfs.return_value = []
+ result = disk.get_devices(_sys_block_path=str(tmpdir))
+ assert result == {}
+
+ @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False)
+ def test_sda_block_is_found(self, patched_get_block_devs_sysfs, fake_filesystem):
+ sda_path = '/dev/sda'
+ patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
+ result = disk.get_devices()
+ assert len(result.keys()) == 1
+ assert result[sda_path]['human_readable_size'] == '0.00 B'
+ assert result[sda_path]['model'] == ''
+ assert result[sda_path]['partitions'] == {}
+
+ @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False)
+ def test_sda_size(self, patched_get_block_devs_sysfs, fake_filesystem):
+ sda_path = '/dev/sda'
+ patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
+ fake_filesystem.create_file('/sys/block/sda/size', contents = '1024')
+ result = disk.get_devices()
+ assert list(result.keys()) == [sda_path]
+ assert result[sda_path]['human_readable_size'] == '512.00 KB'
+
+ @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False)
+ def test_sda_sectorsize_fallsback(self, patched_get_block_devs_sysfs, fake_filesystem):
+ # if no sectorsize, it will use queue/hw_sector_size
+ sda_path = '/dev/sda'
+ patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
+ fake_filesystem.create_file('/sys/block/sda/queue/hw_sector_size', contents = '1024')
+ result = disk.get_devices()
+ assert list(result.keys()) == [sda_path]
+ assert result[sda_path]['sectorsize'] == '1024'
+
+ @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False)
+ def test_sda_sectorsize_from_logical_block(self, patched_get_block_devs_sysfs, fake_filesystem):
+ sda_path = '/dev/sda'
+ patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
+ fake_filesystem.create_file('/sys/block/sda/queue/logical_block_size', contents = '99')
+ result = disk.get_devices()
+ assert result[sda_path]['sectorsize'] == '99'
+
+ @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False)
+ def test_sda_sectorsize_does_not_fallback(self, patched_get_block_devs_sysfs, fake_filesystem):
+ sda_path = '/dev/sda'
+ patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
+ fake_filesystem.create_file('/sys/block/sda/queue/logical_block_size', contents = '99')
+ fake_filesystem.create_file('/sys/block/sda/queue/hw_sector_size', contents = '1024')
+ result = disk.get_devices()
+ assert result[sda_path]['sectorsize'] == '99'
+
+ @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False)
+ def test_is_rotational(self, patched_get_block_devs_sysfs, fake_filesystem):
+ sda_path = '/dev/sda'
+ patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
+ fake_filesystem.create_file('/sys/block/sda/queue/rotational', contents = '1')
+ result = disk.get_devices()
+ assert result[sda_path]['rotational'] == '1'
+
+ @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False)
+ def test_is_ceph_rbd(self, patched_get_block_devs_sysfs, fake_filesystem):
+ rbd_path = '/dev/rbd0'
+ patched_get_block_devs_sysfs.return_value = [[rbd_path, rbd_path, 'disk']]
+ result = disk.get_devices()
+ assert rbd_path not in result
+
+
+class TestSizeCalculations(object):
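+    # a Size built from any single unit exposes every other unit, both as
+    # short aliases (s.kb) and long ones (s.kilobytes)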
+
+ @pytest.mark.parametrize('aliases', [
+ ('b', 'bytes'),
+ ('kb', 'kilobytes'),
+ ('mb', 'megabytes'),
+ ('gb', 'gigabytes'),
+ ('tb', 'terabytes'),
+ ])
+ def test_aliases(self, aliases):
+ short_alias, long_alias = aliases
+ s = disk.Size(b=1)
+ short_alias = getattr(s, short_alias)
+ long_alias = getattr(s, long_alias)
+ assert short_alias == long_alias
+
+ @pytest.mark.parametrize('values', [
+ ('b', 857619069665.28),
+ ('kb', 837518622.72),
+ ('mb', 817889.28),
+ ('gb', 798.72),
+ ('tb', 0.78),
+ ])
+ def test_terabytes(self, values):
+        # whichever unit the input uses, all the other units must report
+        # the same correlated values, every time
+ unit, value = values
+ s = disk.Size(**{unit: value})
+ assert s.b == 857619069665.28
+ assert s.kb == 837518622.72
+ assert s.mb == 817889.28
+ assert s.gb == 798.72
+ assert s.tb == 0.78
+
+
+class TestSizeOperators(object):
+
+ @pytest.mark.parametrize('larger', [1025, 1024.1, 1024.001])
+ def test_gigabytes_is_smaller(self, larger):
+ assert disk.Size(gb=1) < disk.Size(mb=larger)
+
+ @pytest.mark.parametrize('smaller', [1023, 1023.9, 1023.001])
+ def test_gigabytes_is_larger(self, smaller):
+ assert disk.Size(gb=1) > disk.Size(mb=smaller)
+
+ @pytest.mark.parametrize('larger', [1025, 1024.1, 1024.001, 1024])
+ def test_gigabytes_is_smaller_or_equal(self, larger):
+ assert disk.Size(gb=1) <= disk.Size(mb=larger)
+
+ @pytest.mark.parametrize('smaller', [1023, 1023.9, 1023.001, 1024])
+ def test_gigabytes_is_larger_or_equal(self, smaller):
+ assert disk.Size(gb=1) >= disk.Size(mb=smaller)
+
+ @pytest.mark.parametrize('values', [
+ ('b', 857619069665.28),
+ ('kb', 837518622.72),
+ ('mb', 817889.28),
+ ('gb', 798.72),
+ ('tb', 0.78),
+ ])
+ def test_equality(self, values):
+ unit, value = values
+ s = disk.Size(**{unit: value})
+        # check via both tb and b: b is always derived internally, so it
+        # doubles as a useful cross-check when testing tb
+ assert disk.Size(tb=0.78) == s
+ assert disk.Size(b=857619069665.28) == s
+
+ @pytest.mark.parametrize('values', [
+ ('b', 857619069665.28),
+ ('kb', 837518622.72),
+ ('mb', 817889.28),
+ ('gb', 798.72),
+ ('tb', 0.78),
+ ])
+ def test_inequality(self, values):
+ unit, value = values
+ s = disk.Size(**{unit: value})
+        # check via both tb and b: b is always derived internally, so it
+        # doubles as a useful cross-check when testing tb
+ assert disk.Size(tb=1) != s
+ assert disk.Size(b=100) != s
+
+
+class TestSizeOperations(object):
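+    # semantics under test: Size +/- Size yields a new Size and leaves the
+    # operands untouched; Size * int and Size / int scale; Size * Size and
+    # +/- with plain numbers raise TypeError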
+
+ def test_assignment_addition_with_size_objects(self):
+ result = disk.Size(mb=256) + disk.Size(gb=1)
+ assert result.gb == 1.25
+ assert result.gb.as_int() == 1
+ assert result.gb.as_float() == 1.25
+
+ def test_self_addition_with_size_objects(self):
+ base = disk.Size(mb=256)
+ base += disk.Size(gb=1)
+ assert base.gb == 1.25
+
+ def test_self_addition_does_not_alter_state(self):
+ base = disk.Size(mb=256)
+ base + disk.Size(gb=1)
+ assert base.mb == 256
+
+ def test_addition_with_non_size_objects(self):
+ with pytest.raises(TypeError):
+ disk.Size(mb=100) + 4
+
+ def test_assignment_subtraction_with_size_objects(self):
+ base = disk.Size(gb=1)
+ base -= disk.Size(mb=256)
+ assert base.mb == 768
+
+ def test_self_subtraction_does_not_alter_state(self):
+ base = disk.Size(gb=1)
+ base - disk.Size(mb=256)
+ assert base.gb == 1
+
+ def test_subtraction_with_size_objects(self):
+ result = disk.Size(gb=1) - disk.Size(mb=256)
+ assert result.mb == 768
+
+ def test_subtraction_with_non_size_objects(self):
+ with pytest.raises(TypeError):
+ disk.Size(mb=100) - 4
+
+ def test_multiplication_with_size_objects(self):
+ with pytest.raises(TypeError):
+ disk.Size(mb=100) * disk.Size(mb=1)
+
+ def test_multiplication_with_non_size_objects(self):
+ base = disk.Size(gb=1)
+ result = base * 2
+ assert result.gb == 2
+ assert result.gb.as_int() == 2
+
+ def test_division_with_size_objects(self):
+ result = disk.Size(gb=1) / disk.Size(mb=1)
+ assert int(result) == 1024
+
+ def test_division_with_non_size_objects(self):
+ base = disk.Size(gb=1)
+ result = base / 2
+ assert result.mb == 512
+ assert result.mb.as_int() == 512
+
+ def test_division_with_non_size_objects_without_state(self):
+ base = disk.Size(gb=1)
+ base / 2
+ assert base.gb == 1
+ assert base.gb.as_int() == 1
+
+
+class TestSizeAttributes(object):
+
+ def test_attribute_does_not_exist(self):
+ with pytest.raises(AttributeError):
+ disk.Size(mb=1).exabytes
+
+
+class TestSizeFormatting(object):
+
+ def test_default_formatting_tb_to_b(self):
+ size = disk.Size(tb=0.0000000001)
+ result = "%s" % size
+ assert result == "109.95 B"
+
+ def test_default_formatting_tb_to_kb(self):
+ size = disk.Size(tb=0.00000001)
+ result = "%s" % size
+ assert result == "10.74 KB"
+
+ def test_default_formatting_tb_to_mb(self):
+ size = disk.Size(tb=0.000001)
+ result = "%s" % size
+ assert result == "1.05 MB"
+
+ def test_default_formatting_tb_to_gb(self):
+ size = disk.Size(tb=0.001)
+ result = "%s" % size
+ assert result == "1.02 GB"
+
+ def test_default_formatting_tb_to_tb(self):
+ size = disk.Size(tb=10)
+ result = "%s" % size
+ assert result == "10.00 TB"
+
+
+class TestSizeSpecificFormatting(object):
+
+ def test_formatting_b(self):
+ size = disk.Size(b=2048)
+ result = "%s" % size.b
+ assert "%s" % size.b == "%s" % size.bytes
+ assert result == "2048.00 B"
+
+ def test_formatting_kb(self):
+ size = disk.Size(kb=5700)
+ result = "%s" % size.kb
+ assert "%s" % size.kb == "%s" % size.kilobytes
+ assert result == "5700.00 KB"
+
+ def test_formatting_mb(self):
+ size = disk.Size(mb=4000)
+ result = "%s" % size.mb
+ assert "%s" % size.mb == "%s" % size.megabytes
+ assert result == "4000.00 MB"
+
+ def test_formatting_gb(self):
+ size = disk.Size(gb=77777)
+ result = "%s" % size.gb
+ assert "%s" % size.gb == "%s" % size.gigabytes
+ assert result == "77777.00 GB"
+
+ def test_formatting_tb(self):
+ size = disk.Size(tb=1027)
+ result = "%s" % size.tb
+ assert "%s" % size.tb == "%s" % size.terabytes
+ assert result == "1027.00 TB"
+
+
+class TestAllowLoopDevsWarning(object):
+    def test_loop_dev_warning(self, fake_call, caplog, monkeypatch):
+        # set the env var through monkeypatch so it cannot leak into other
+        # tests
+        monkeypatch.delenv('CEPH_VOLUME_ALLOW_LOOP_DEVICES', raising=False)
+        assert disk.allow_loop_devices() is False
+        assert not caplog.records
+        monkeypatch.setenv('CEPH_VOLUME_ALLOW_LOOP_DEVICES', 'y')
+        assert disk.allow_loop_devices() is True
+ log = caplog.records[0]
+ assert log.levelname == "WARNING"
+ assert "will never be supported in production" in log.message
+
+
+class TestHasBlueStoreLabel(object):
+ def test_device_path_is_a_path(self, fake_filesystem):
+ device_path = '/var/lib/ceph/osd/ceph-0'
+ fake_filesystem.create_dir(device_path)
+        assert not disk.has_bluestore_label(device_path)
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/util/test_encryption.py b/src/ceph-volume/ceph_volume/tests/util/test_encryption.py
new file mode 100644
index 000000000..cd2ea8f18
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/util/test_encryption.py
@@ -0,0 +1,138 @@
+from ceph_volume.util import encryption
+from mock.mock import patch
+import base64
+
+class TestGetKeySize(object):
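+    # osd_dmcrypt_key_size is expected to be validated: a value outside the
+    # supported sizes (1024 in the _invalid case below) falls back to the
+    # 512 default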
+ def test_get_size_from_conf_default(self, conf_ceph_stub):
+ conf_ceph_stub('''
+ [global]
+ fsid=asdf
+ ''')
+ assert encryption.get_key_size_from_conf() == '512'
+
+ def test_get_size_from_conf_custom(self, conf_ceph_stub):
+ conf_ceph_stub('''
+ [global]
+ fsid=asdf
+ [osd]
+ osd_dmcrypt_key_size=256
+ ''')
+ assert encryption.get_key_size_from_conf() == '256'
+
+ def test_get_size_from_conf_custom_invalid(self, conf_ceph_stub):
+ conf_ceph_stub('''
+ [global]
+ fsid=asdf
+ [osd]
+ osd_dmcrypt_key_size=1024
+ ''')
+ assert encryption.get_key_size_from_conf() == '512'
+
+class TestStatus(object):
+
+ def test_skips_unuseful_lines(self, stub_call):
+ out = ['some line here', ' device: /dev/sdc1']
+ stub_call((out, '', 0))
+ assert encryption.status('/dev/sdc1') == {'device': '/dev/sdc1'}
+
+ def test_removes_extra_quotes(self, stub_call):
+ out = ['some line here', ' device: "/dev/sdc1"']
+ stub_call((out, '', 0))
+ assert encryption.status('/dev/sdc1') == {'device': '/dev/sdc1'}
+
+ def test_ignores_bogus_lines(self, stub_call):
+ out = ['some line here', ' ']
+ stub_call((out, '', 0))
+ assert encryption.status('/dev/sdc1') == {}
+
+
+class TestDmcryptClose(object):
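+    # dmcrypt_close is expected to be a no-op when the mapper path does not
+    # exist, and to invoke `cryptsetup remove` on the path otherwise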
+
+ def test_mapper_exists(self, fake_run, fake_filesystem):
+ file_name = fake_filesystem.create_file('mapper-device')
+ encryption.dmcrypt_close(file_name.path)
+ arguments = fake_run.calls[0]['args'][0]
+ assert arguments[0] == 'cryptsetup'
+ assert arguments[1] == 'remove'
+ assert arguments[2].startswith('/')
+
+ def test_mapper_does_not_exist(self, fake_run):
+ file_name = '/path/does/not/exist'
+ encryption.dmcrypt_close(file_name)
+ assert fake_run.calls == []
+
+
+class TestDmcryptKey(object):
+
+ def test_dmcrypt(self):
+ result = encryption.create_dmcrypt_key()
+ assert len(base64.b64decode(result)) == 128
+
+class TestLuksFormat(object):
+ @patch('ceph_volume.util.encryption.process.call')
+ def test_luks_format_command_with_default_size(self, m_call, conf_ceph_stub):
+ conf_ceph_stub('[global]\nfsid=abcd')
+ expected = [
+ 'cryptsetup',
+ '--batch-mode',
+ '--key-size',
+ '512',
+ '--key-file',
+ '-',
+ 'luksFormat',
+ '/dev/foo'
+ ]
+ encryption.luks_format('abcd', '/dev/foo')
+ assert m_call.call_args[0][0] == expected
+
+ @patch('ceph_volume.util.encryption.process.call')
+ def test_luks_format_command_with_custom_size(self, m_call, conf_ceph_stub):
+ conf_ceph_stub('[global]\nfsid=abcd\n[osd]\nosd_dmcrypt_key_size=256')
+ expected = [
+ 'cryptsetup',
+ '--batch-mode',
+ '--key-size',
+ '256',
+ '--key-file',
+ '-',
+ 'luksFormat',
+ '/dev/foo'
+ ]
+ encryption.luks_format('abcd', '/dev/foo')
+ assert m_call.call_args[0][0] == expected
+
+
+class TestLuksOpen(object):
+ @patch('ceph_volume.util.encryption.process.call')
+ def test_luks_open_command_with_default_size(self, m_call, conf_ceph_stub):
+ conf_ceph_stub('[global]\nfsid=abcd')
+ expected = [
+ 'cryptsetup',
+ '--key-size',
+ '512',
+ '--key-file',
+ '-',
+ '--allow-discards',
+ 'luksOpen',
+ '/dev/foo',
+ '/dev/bar'
+ ]
+ encryption.luks_open('abcd', '/dev/foo', '/dev/bar')
+ assert m_call.call_args[0][0] == expected
+
+ @patch('ceph_volume.util.encryption.process.call')
+ def test_luks_open_command_with_custom_size(self, m_call, conf_ceph_stub):
+ conf_ceph_stub('[global]\nfsid=abcd\n[osd]\nosd_dmcrypt_key_size=256')
+ expected = [
+ 'cryptsetup',
+ '--key-size',
+ '256',
+ '--key-file',
+ '-',
+ '--allow-discards',
+ 'luksOpen',
+ '/dev/foo',
+ '/dev/bar'
+ ]
+ encryption.luks_open('abcd', '/dev/foo', '/dev/bar')
+ assert m_call.call_args[0][0] == expected
diff --git a/src/ceph-volume/ceph_volume/tests/util/test_prepare.py b/src/ceph-volume/ceph_volume/tests/util/test_prepare.py
new file mode 100644
index 000000000..080823307
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/util/test_prepare.py
@@ -0,0 +1,413 @@
+import pytest
+from textwrap import dedent
+import json
+from ceph_volume.util import prepare
+from ceph_volume.util.prepare import system
+from ceph_volume import conf
+from ceph_volume.tests.conftest import Factory
+
+
+class TestOSDIDAvailable(object):
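+    # osd_id_available inspects a JSON `nodes` list (presumably from
+    # `ceph osd tree`): missing or destroyed ids are reusable, "up" ones
+    # are not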
+
+ def test_false_if_id_is_none(self):
+ assert not prepare.osd_id_available(None)
+
+ def test_returncode_is_not_zero(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: ('', '', 1))
+ with pytest.raises(RuntimeError):
+ prepare.osd_id_available(1)
+
+ def test_id_does_exist_but_not_available(self, monkeypatch):
+ stdout = dict(nodes=[
+ dict(id=0, status="up"),
+ ])
+ stdout = ['', json.dumps(stdout)]
+ monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: (stdout, '', 0))
+ result = prepare.osd_id_available(0)
+ assert not result
+
+ def test_id_does_not_exist(self, monkeypatch):
+ stdout = dict(nodes=[
+ dict(id=0),
+ ])
+ stdout = ['', json.dumps(stdout)]
+ monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: (stdout, '', 0))
+ result = prepare.osd_id_available(1)
+ assert result
+
+ def test_returns_true_when_id_is_destroyed(self, monkeypatch):
+ stdout = dict(nodes=[
+ dict(id=0, status="destroyed"),
+ ])
+ stdout = ['', json.dumps(stdout)]
+ monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: (stdout, '', 0))
+ result = prepare.osd_id_available(0)
+ assert result
+
+
+class TestFormatDevice(object):
+
+ def test_include_force(self, fake_run, monkeypatch):
+ monkeypatch.setattr(conf, 'ceph', Factory(get_list=lambda *a, **kw: []))
+ prepare.format_device('/dev/sxx')
+ flags = fake_run.calls[0]['args'][0]
+ assert '-f' in flags
+
+ def test_device_is_always_appended(self, fake_run, conf_ceph):
+ conf_ceph(get_list=lambda *a, **kw: [])
+ prepare.format_device('/dev/sxx')
+ flags = fake_run.calls[0]['args'][0]
+ assert flags[-1] == '/dev/sxx'
+
+ def test_extra_flags_are_added(self, fake_run, conf_ceph):
+ conf_ceph(get_list=lambda *a, **kw: ['--why-yes'])
+ prepare.format_device('/dev/sxx')
+ flags = fake_run.calls[0]['args'][0]
+ assert '--why-yes' in flags
+
+ def test_default_options(self, conf_ceph_stub, fake_run):
+ conf_ceph_stub(dedent("""[global]
+ fsid = 1234lkjh1234"""))
+ conf.cluster = 'ceph'
+ prepare.format_device('/dev/sda1')
+ expected = [
+ 'mkfs', '-t', 'xfs',
+ '-f', '-i', 'size=2048', # default flags
+ '/dev/sda1']
+ assert expected == fake_run.calls[0]['args'][0]
+
+ def test_multiple_options_are_used(self, conf_ceph_stub, fake_run):
+ conf_ceph_stub(dedent("""[global]
+ fsid = 1234lkjh1234
+ [osd]
+ osd mkfs options xfs = -f -i size=1024"""))
+ conf.cluster = 'ceph'
+ prepare.format_device('/dev/sda1')
+ expected = [
+ 'mkfs', '-t', 'xfs',
+ '-f', '-i', 'size=1024',
+ '/dev/sda1']
+ assert expected == fake_run.calls[0]['args'][0]
+
+ def test_multiple_options_will_get_the_force_flag(self, conf_ceph_stub, fake_run):
+ conf_ceph_stub(dedent("""[global]
+ fsid = 1234lkjh1234
+ [osd]
+ osd mkfs options xfs = -i size=1024"""))
+ conf.cluster = 'ceph'
+ prepare.format_device('/dev/sda1')
+ expected = [
+ 'mkfs', '-t', 'xfs',
+ '-f', '-i', 'size=1024',
+ '/dev/sda1']
+ assert expected == fake_run.calls[0]['args'][0]
+
+ def test_underscore_options_are_used(self, conf_ceph_stub, fake_run):
+ conf_ceph_stub(dedent("""[global]
+ fsid = 1234lkjh1234
+ [osd]
+ osd_mkfs_options_xfs = -i size=128"""))
+ conf.cluster = 'ceph'
+ prepare.format_device('/dev/sda1')
+ expected = [
+ 'mkfs', '-t', 'xfs',
+ '-f', '-i', 'size=128',
+ '/dev/sda1']
+ assert expected == fake_run.calls[0]['args'][0]
+
+
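+# every flag expected in the filestore `ceph-osd --mkfs` invocation; the
+# parametrized test below pinpoints exactly which flag went missing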
+mkfs_filestore_flags = [
+ 'ceph-osd',
+ '--cluster',
+ '--osd-objectstore', 'filestore',
+ '--mkfs',
+ '-i',
+ '--monmap',
+ '--keyfile', '-', # goes through stdin
+ '--osd-data',
+ '--osd-journal',
+ '--osd-uuid',
+ '--setuser', 'ceph',
+ '--setgroup', 'ceph'
+]
+
+
+class TestOsdMkfsFilestore(object):
+
+ @pytest.mark.parametrize('flag', mkfs_filestore_flags)
+ def test_keyring_is_used(self, fake_call, monkeypatch, flag):
+ monkeypatch.setattr(prepare, '__release__', 'mimic')
+ monkeypatch.setattr(system, 'chown', lambda path: True)
+ prepare.osd_mkfs_filestore(1, 'asdf', keyring='secret')
+ assert flag in fake_call.calls[0]['args'][0]
+
+ def test_keyring_is_used_luminous(self, fake_call, monkeypatch):
+ monkeypatch.setattr(prepare, '__release__', 'luminous')
+ monkeypatch.setattr(system, 'chown', lambda path: True)
+ prepare.osd_mkfs_filestore(1, 'asdf', keyring='secret')
+ assert '--keyfile' not in fake_call.calls[0]['args'][0]
+
+
+class TestOsdMkfsBluestore(object):
+
+ def test_keyring_is_added(self, fake_call, monkeypatch):
+ monkeypatch.setattr(system, 'chown', lambda path: True)
+ prepare.osd_mkfs_bluestore(1, 'asdf', keyring='secret')
+ assert '--keyfile' in fake_call.calls[0]['args'][0]
+
+ def test_keyring_is_not_added(self, fake_call, monkeypatch):
+ monkeypatch.setattr(system, 'chown', lambda path: True)
+ prepare.osd_mkfs_bluestore(1, 'asdf')
+ assert '--keyfile' not in fake_call.calls[0]['args'][0]
+
+    def test_keyring_is_not_added_luminous(self, fake_call, monkeypatch):
+        monkeypatch.setattr(system, 'chown', lambda path: True)
+        # the release must be patched before the mkfs call to take effect
+        monkeypatch.setattr(prepare, '__release__', 'luminous')
+        prepare.osd_mkfs_bluestore(1, 'asdf')
+        assert '--keyfile' not in fake_call.calls[0]['args'][0]
+
+ def test_wal_is_added(self, fake_call, monkeypatch):
+ monkeypatch.setattr(system, 'chown', lambda path: True)
+ prepare.osd_mkfs_bluestore(1, 'asdf', wal='/dev/smm1')
+ assert '--bluestore-block-wal-path' in fake_call.calls[0]['args'][0]
+ assert '/dev/smm1' in fake_call.calls[0]['args'][0]
+
+ def test_db_is_added(self, fake_call, monkeypatch):
+ monkeypatch.setattr(system, 'chown', lambda path: True)
+ prepare.osd_mkfs_bluestore(1, 'asdf', db='/dev/smm2')
+ assert '--bluestore-block-db-path' in fake_call.calls[0]['args'][0]
+ assert '/dev/smm2' in fake_call.calls[0]['args'][0]
+
+
+class TestMountOSD(object):
+
+ def test_default_options(self, conf_ceph_stub, fake_run):
+ conf_ceph_stub(dedent("""[global]
+ fsid = 1234lkjh1234"""))
+ conf.cluster = 'ceph'
+ prepare.mount_osd('/dev/sda1', 1)
+ expected = [
+ 'mount', '-t', 'xfs', '-o',
+ 'rw,noatime,inode64', # default flags
+ '/dev/sda1', '/var/lib/ceph/osd/ceph-1']
+ assert expected == fake_run.calls[0]['args'][0]
+
+ def test_mount_options_are_used(self, conf_ceph_stub, fake_run):
+ conf_ceph_stub(dedent("""[global]
+ fsid = 1234lkjh1234
+ [osd]
+ osd mount options xfs = rw"""))
+ conf.cluster = 'ceph'
+ prepare.mount_osd('/dev/sda1', 1)
+ expected = [
+ 'mount', '-t', 'xfs', '-o',
+ 'rw',
+ '/dev/sda1', '/var/lib/ceph/osd/ceph-1']
+ assert expected == fake_run.calls[0]['args'][0]
+
+ def test_multiple_whitespace_options_are_used(self, conf_ceph_stub, fake_run):
+ conf_ceph_stub(dedent("""[global]
+ fsid = 1234lkjh1234
+ [osd]
+ osd mount options xfs = rw auto exec"""))
+ conf.cluster = 'ceph'
+ prepare.mount_osd('/dev/sda1', 1)
+ expected = [
+ 'mount', '-t', 'xfs', '-o',
+ 'rw,auto,exec',
+ '/dev/sda1', '/var/lib/ceph/osd/ceph-1']
+ assert expected == fake_run.calls[0]['args'][0]
+
+ def test_multiple_comma_whitespace_options_are_used(self, conf_ceph_stub, fake_run):
+ conf_ceph_stub(dedent("""[global]
+ fsid = 1234lkjh1234
+ [osd]
+ osd mount options xfs = rw, auto, exec"""))
+ conf.cluster = 'ceph'
+ prepare.mount_osd('/dev/sda1', 1)
+ expected = [
+ 'mount', '-t', 'xfs', '-o',
+ 'rw,auto,exec',
+ '/dev/sda1', '/var/lib/ceph/osd/ceph-1']
+ assert expected == fake_run.calls[0]['args'][0]
+
+ def test_underscore_mount_options_are_used(self, conf_ceph_stub, fake_run):
+ conf_ceph_stub(dedent("""[global]
+ fsid = 1234lkjh1234
+ [osd]
+ osd mount options xfs = rw"""))
+ conf.cluster = 'ceph'
+ prepare.mount_osd('/dev/sda1', 1)
+ expected = [
+ 'mount', '-t', 'xfs', '-o',
+ 'rw',
+ '/dev/sda1', '/var/lib/ceph/osd/ceph-1']
+ assert expected == fake_run.calls[0]['args'][0]
+
+
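+# list-style mount flag values as they may come out of ceph.conf parsing,
+# in deliberately messy spellings that must all normalize identically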
+ceph_conf_mount_values = [
+    ['rw,', 'auto,', 'exec'],
+ ['rw', 'auto', 'exec'],
+ [' rw ', ' auto ', ' exec '],
+ ['rw,', 'auto,', 'exec,'],
+ [',rw ', ',auto ', ',exec,'],
+ [',rw,', ',auto,', ',exec,'],
+]
+
+string_mount_values = [
+ 'rw, auto exec ',
+ 'rw auto exec',
+ ',rw, auto, exec,',
+ ' rw auto exec ',
+ ' rw,auto,exec ',
+ 'rw,auto,exec',
+ ',rw,auto,exec,',
+ 'rw,auto,exec ',
+ 'rw, auto, exec ',
+]
+
+
+class TestNormalizeFlags(object):
+ # a bit overkill since most of this is already tested in prepare.mount_osd
+ # tests
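+    # e.g. _normalize_mount_flags(['rw,', ' auto ', 'exec']) and
+    # _normalize_mount_flags('rw, auto exec') are both expected to yield
+    # the flags rw, auto and exec joined by commas; the order is not
+    # guaranteed, hence the sorted() in the assertions below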
+
+ @pytest.mark.parametrize("flags", ceph_conf_mount_values)
+ def test_normalize_lists(self, flags):
+ result = sorted(prepare._normalize_mount_flags(flags).split(','))
+ assert ','.join(result) == 'auto,exec,rw'
+
+ @pytest.mark.parametrize("flags", string_mount_values)
+ def test_normalize_strings(self, flags):
+ result = sorted(prepare._normalize_mount_flags(flags).split(','))
+ assert ','.join(result) == 'auto,exec,rw'
+
+ @pytest.mark.parametrize("flags", ceph_conf_mount_values)
+ def test_normalize_extra_flags(self, flags):
+ result = prepare._normalize_mount_flags(flags, extras=['discard'])
+ assert sorted(result.split(',')) == ['auto', 'discard', 'exec', 'rw']
+
+ @pytest.mark.parametrize("flags", ceph_conf_mount_values)
+ def test_normalize_duplicate_extra_flags(self, flags):
+ result = prepare._normalize_mount_flags(flags, extras=['rw', 'discard'])
+ assert sorted(result.split(',')) == ['auto', 'discard', 'exec', 'rw']
+
+ @pytest.mark.parametrize("flags", string_mount_values)
+ def test_normalize_strings_flags(self, flags):
+ result = sorted(prepare._normalize_mount_flags(flags, extras=['discard']).split(','))
+ assert ','.join(result) == 'auto,discard,exec,rw'
+
+ @pytest.mark.parametrize("flags", string_mount_values)
+ def test_normalize_strings_duplicate_flags(self, flags):
+ result = sorted(prepare._normalize_mount_flags(flags, extras=['discard','rw']).split(','))
+ assert ','.join(result) == 'auto,discard,exec,rw'
+
+
+class TestMkfsFilestore(object):
+
+ def test_non_zero_exit_status(self, stub_call, monkeypatch):
+ conf.cluster = 'ceph'
+ monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True)
+ stub_call(([], [], 1))
+ with pytest.raises(RuntimeError) as error:
+ prepare.osd_mkfs_filestore('1', 'asdf-1234', 'keyring')
+ assert "Command failed with exit code 1" in str(error.value)
+
+ def test_non_zero_exit_formats_command_correctly(self, stub_call, monkeypatch):
+ conf.cluster = 'ceph'
+ monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True)
+ stub_call(([], [], 1))
+ with pytest.raises(RuntimeError) as error:
+ prepare.osd_mkfs_filestore('1', 'asdf-1234', 'keyring')
+ expected = ' '.join([
+ 'ceph-osd',
+ '--cluster',
+ 'ceph',
+ '--osd-objectstore', 'filestore', '--mkfs',
+ '-i', '1', '--monmap', '/var/lib/ceph/osd/ceph-1/activate.monmap',
+ '--keyfile', '-', '--osd-data', '/var/lib/ceph/osd/ceph-1/',
+ '--osd-journal', '/var/lib/ceph/osd/ceph-1/journal',
+ '--osd-uuid', 'asdf-1234',
+ '--setuser', 'ceph', '--setgroup', 'ceph'])
+ assert expected in str(error.value)
+
+
+class TestMkfsBluestore(object):
+
+ def test_non_zero_exit_status(self, stub_call, monkeypatch):
+ conf.cluster = 'ceph'
+ monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True)
+ stub_call(([], [], 1))
+ with pytest.raises(RuntimeError) as error:
+ prepare.osd_mkfs_bluestore('1', 'asdf-1234', keyring='keyring')
+ assert "Command failed with exit code 1" in str(error.value)
+
+ def test_non_zero_exit_formats_command_correctly(self, stub_call, monkeypatch):
+ conf.cluster = 'ceph'
+ monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True)
+ stub_call(([], [], 1))
+ with pytest.raises(RuntimeError) as error:
+ prepare.osd_mkfs_bluestore('1', 'asdf-1234', keyring='keyring')
+ expected = ' '.join([
+ 'ceph-osd',
+ '--cluster',
+ 'ceph',
+ '--osd-objectstore', 'bluestore', '--mkfs',
+ '-i', '1', '--monmap', '/var/lib/ceph/osd/ceph-1/activate.monmap',
+ '--keyfile', '-', '--osd-data', '/var/lib/ceph/osd/ceph-1/',
+ '--osd-uuid', 'asdf-1234',
+ '--setuser', 'ceph', '--setgroup', 'ceph'])
+ assert expected in str(error.value)
+
+
+class TestGetJournalSize(object):
+
+ def test_undefined_size_fallbacks_formatted(self, conf_ceph_stub):
+ conf_ceph_stub(dedent("""
+ [global]
+ fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
+ """))
+ result = prepare.get_journal_size()
+ assert result == '5G'
+
+ def test_undefined_size_fallbacks_unformatted(self, conf_ceph_stub):
+ conf_ceph_stub(dedent("""
+ [global]
+ fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
+ """))
+ result = prepare.get_journal_size(lv_format=False)
+ assert result.gb.as_int() == 5
+
+ def test_defined_size_unformatted(self, conf_ceph_stub):
+ conf_ceph_stub(dedent("""
+ [global]
+ fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
+
+ [osd]
+ osd journal size = 10240
+ """))
+ result = prepare.get_journal_size(lv_format=False)
+ assert result.gb.as_int() == 10
+
+ def test_defined_size_formatted(self, conf_ceph_stub):
+ conf_ceph_stub(dedent("""
+ [global]
+ fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
+
+ [osd]
+ osd journal size = 10240
+ """))
+ result = prepare.get_journal_size()
+ assert result == '10G'
+
+ def test_refuse_tiny_journals(self, conf_ceph_stub):
+ conf_ceph_stub(dedent("""
+ [global]
+ fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
+
+ [osd]
+ osd journal size = 1024
+ """))
+ with pytest.raises(RuntimeError) as error:
+ prepare.get_journal_size()
+ assert 'journal sizes must be larger' in str(error.value)
+ assert 'detected: 1024.00 MB' in str(error.value)
diff --git a/src/ceph-volume/ceph_volume/tests/util/test_system.py b/src/ceph-volume/ceph_volume/tests/util/test_system.py
new file mode 100644
index 000000000..5746f7023
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/util/test_system.py
@@ -0,0 +1,309 @@
+import os
+import pwd
+import getpass
+import pytest
+from textwrap import dedent
+from ceph_volume.util import system
+from mock.mock import patch
+from ceph_volume.tests.conftest import Factory
+
+
+@pytest.fixture
+def mock_find_executable_on_host(monkeypatch):
+ """
+    Monkeypatches the subprocess.Popen call that backs
+    util.system.find_executable_on_host, so a caller can control the
+    response
+ """
+ def apply(stdout=None, stderr=None, returncode=0):
+ stdout_stream = Factory(read=lambda: stdout)
+ stderr_stream = Factory(read=lambda: stderr)
+ return_value = Factory(
+ stdout=stdout_stream,
+ stderr=stderr_stream,
+ wait=lambda: returncode,
+ communicate=lambda x: (stdout, stderr, returncode)
+ )
+
+ monkeypatch.setattr(
+ 'ceph_volume.util.system.subprocess.Popen',
+ lambda *a, **kw: return_value)
+
+ return apply
+
+class TestMkdirP(object):
+
+ def test_existing_dir_does_not_raise_w_chown(self, monkeypatch, tmpdir):
+ user = pwd.getpwnam(getpass.getuser())
+ uid, gid = user[2], user[3]
+ monkeypatch.setattr(system, 'get_ceph_user_ids', lambda: (uid, gid,))
+ path = str(tmpdir)
+ system.mkdir_p(path)
+ assert os.path.isdir(path)
+
+ def test_new_dir_w_chown(self, monkeypatch, tmpdir):
+ user = pwd.getpwnam(getpass.getuser())
+ uid, gid = user[2], user[3]
+ monkeypatch.setattr(system, 'get_ceph_user_ids', lambda: (uid, gid,))
+ path = os.path.join(str(tmpdir), 'new')
+ system.mkdir_p(path)
+ assert os.path.isdir(path)
+
+ def test_existing_dir_does_not_raise_no_chown(self, tmpdir):
+ path = str(tmpdir)
+ system.mkdir_p(path, chown=False)
+ assert os.path.isdir(path)
+
+ def test_new_dir_no_chown(self, tmpdir):
+ path = os.path.join(str(tmpdir), 'new')
+ system.mkdir_p(path, chown=False)
+ assert os.path.isdir(path)
+
+
+@pytest.fixture
+def fake_proc(tmpdir, monkeypatch):
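+    # a trimmed copy of a real /proc/mounts; note the deliberate /far
+    # (not /var) paths, presumably so nothing here can ever match real
+    # mounts on the test host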
+ PROCDIR = str(tmpdir)
+ proc_path = os.path.join(PROCDIR, 'mounts')
+ with open(proc_path, 'w') as f:
+ f.write(dedent("""nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
+ rootfs / rootfs rw 0 0
+ sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0
+ proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
+ devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=238292k,nr_inodes=59573,mode=755 0 0
+ securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
+ tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0
+ devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
+ tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0
+ tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0
+ cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
+ cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
+ configfs /sys/kernel/config configfs rw,relatime 0 0
+ /dev/mapper/VolGroup00-LogVol00 / xfs rw,seclabel,relatime,attr2,inode64,noquota 0 0
+ selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0
+ debugfs /sys/kernel/debug debugfs rw,relatime 0 0
+ hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
+ mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
+ sunrpc /far/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0
+ /dev/sde4 /two/field/path
+ nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
+ /dev/sde2 /boot xfs rw,seclabel,relatime,attr2,inode64,noquota 0 0
+ tmpfs /far/lib/ceph/osd/ceph-5 tmpfs rw,seclabel,relatime 0 0
+ tmpfs /far/lib/ceph/osd/ceph-7 tmpfs rw,seclabel,relatime 0 0
+ /dev/sda1 /far/lib/ceph/osd/ceph-0 xfs rw,seclabel,noatime,attr2,inode64,noquota 0 0
+ tmpfs /run/user/1000 tmpfs rw,seclabel,nosuid,nodev,relatime,size=50040k,mode=700,uid=1000,gid=1000 0 0
+ /dev/sdc2 /boot xfs rw,seclabel,relatime,attr2,inode64,noquota 0 0
+ tmpfs /run/user/1000 tmpfs rw,seclabel,mode=700,uid=1000,gid=1000 0 0"""))
+ monkeypatch.setattr(system, 'PROCDIR', PROCDIR)
+ monkeypatch.setattr(os.path, 'exists', lambda x: True)
+
+
+class TestPathIsMounted(object):
+
+ def test_is_mounted(self, fake_proc):
+ assert system.path_is_mounted('/boot') is True
+
+ def test_is_not_mounted(self, fake_proc):
+ assert system.path_is_mounted('/far/fib/feph') is False
+
+ def test_is_not_mounted_at_destination(self, fake_proc):
+ assert system.path_is_mounted('/boot', destination='/dev/sda1') is False
+
+ def test_is_mounted_at_destination(self, fake_proc):
+ assert system.path_is_mounted('/boot', destination='/dev/sdc2') is True
+
+
+class TestDeviceIsMounted(object):
+
+ def test_is_mounted(self, fake_proc):
+ assert system.device_is_mounted('/dev/sda1') is True
+
+ def test_path_is_not_device(self, fake_proc):
+ assert system.device_is_mounted('/far/lib/ceph/osd/ceph-7') is False
+
+ def test_is_not_mounted_at_destination(self, fake_proc):
+ assert system.device_is_mounted('/dev/sda1', destination='/far/lib/ceph/osd/test-1') is False
+
+    def test_is_mounted_at_destination(self, fake_proc):
+        assert system.device_is_mounted('/dev/sda1', destination='/far/lib/ceph/osd/ceph-0') is True
+
+ def test_is_realpath_dev_mounted_at_destination(self, fake_proc, monkeypatch):
+ monkeypatch.setattr(system.os.path, 'realpath', lambda x: '/dev/sda1' if 'foo' in x else x)
+        result = system.device_is_mounted('/dev/mapper/foo', destination='/far/lib/ceph/osd/ceph-0')
+ assert result is True
+
+ def test_is_realpath_path_mounted_at_destination(self, fake_proc, monkeypatch):
+ monkeypatch.setattr(
+ system.os.path, 'realpath',
+ lambda x: '/far/lib/ceph/osd/ceph-0' if 'symlink' in x else x)
+ result = system.device_is_mounted('/dev/sda1', destination='/symlink/lib/ceph/osd/ceph-0')
+ assert result is True
+
+
+class TestGetMounts(object):
+
+ def test_not_mounted(self, tmpdir, monkeypatch):
+ PROCDIR = str(tmpdir)
+ proc_path = os.path.join(PROCDIR, 'mounts')
+ with open(proc_path, 'w') as f:
+ f.write('')
+ monkeypatch.setattr(system, 'PROCDIR', PROCDIR)
+ m = system.Mounts()
+ assert m.get_mounts() == {}
+
+ def test_is_mounted_(self, fake_proc):
+ m = system.Mounts()
+ assert m.get_mounts()['/dev/sdc2'] == ['/boot']
+
+ def test_ignores_two_fields(self, fake_proc):
+ m = system.Mounts()
+ assert m.get_mounts().get('/dev/sde4') is None
+
+ def test_tmpfs_is_reported(self, fake_proc):
+ m = system.Mounts()
+ assert m.get_mounts()['tmpfs'][0] == '/dev/shm'
+
+ def test_non_skip_devs_arent_reported(self, fake_proc):
+ m = system.Mounts()
+ assert m.get_mounts().get('cgroup') is None
+
+ def test_multiple_mounts_are_appended(self, fake_proc):
+ m = system.Mounts()
+ assert len(m.get_mounts()['tmpfs']) == 7
+
+ def test_nonexistent_devices_are_skipped(self, tmpdir, monkeypatch):
+ PROCDIR = str(tmpdir)
+ proc_path = os.path.join(PROCDIR, 'mounts')
+ with open(proc_path, 'w') as f:
+ f.write(dedent("""nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
+ /dev/sda1 /far/lib/ceph/osd/ceph-0 xfs rw,attr2,inode64,noquota 0 0
+ /dev/sda2 /far/lib/ceph/osd/ceph-1 xfs rw,attr2,inode64,noquota 0 0"""))
+ monkeypatch.setattr(system, 'PROCDIR', PROCDIR)
+ monkeypatch.setattr(os.path, 'exists', lambda x: False if x == '/dev/sda1' else True)
+ m = system.Mounts()
+ assert m.get_mounts().get('/dev/sda1') is None
+
+
+class TestIsBinary(object):
+
+ def test_is_binary(self, fake_filesystem):
+ binary_path = fake_filesystem.create_file('/tmp/fake-file', contents='asd\n\nlkjh\x00')
+ assert system.is_binary(binary_path.path)
+
+ def test_is_not_binary(self, fake_filesystem):
+ binary_path = fake_filesystem.create_file('/tmp/fake-file', contents='asd\n\nlkjh0')
+ assert system.is_binary(binary_path.path) is False
+
+
+class TestGetFileContents(object):
+
+ def test_path_does_not_exist(self, tmpdir):
+ filepath = os.path.join(str(tmpdir), 'doesnotexist')
+ assert system.get_file_contents(filepath, 'default') == 'default'
+
+ def test_path_has_contents(self, fake_filesystem):
+ interesting_file = fake_filesystem.create_file('/tmp/fake-file', contents="1")
+ result = system.get_file_contents(interesting_file.path)
+ assert result == "1"
+
+ def test_path_has_multiline_contents(self, fake_filesystem):
+ interesting_file = fake_filesystem.create_file('/tmp/fake-file', contents="0\n1")
+ result = system.get_file_contents(interesting_file.path)
+ assert result == "0\n1"
+
+ def test_exception_returns_default(self):
+ with patch('builtins.open') as mocked_open:
+ mocked_open.side_effect = Exception()
+ result = system.get_file_contents('/tmp/fake-file')
+ assert result == ''
+
+
+class TestWhich(object):
+
+ def test_executable_exists_but_is_not_file(self, monkeypatch):
+ monkeypatch.setattr(system.os.path, 'isfile', lambda x: False)
+ monkeypatch.setattr(system.os.path, 'exists', lambda x: True)
+ assert system.which('exedir') == 'exedir'
+
+ def test_executable_does_not_exist(self, monkeypatch):
+ monkeypatch.setattr(system.os.path, 'isfile', lambda x: False)
+ monkeypatch.setattr(system.os.path, 'exists', lambda x: False)
+ assert system.which('exedir') == 'exedir'
+
+ def test_executable_exists_as_file(self, monkeypatch):
+ monkeypatch.setattr(system.os, 'getenv', lambda x, y: '')
+ monkeypatch.setattr(system.os.path, 'isfile', lambda x: x != 'ceph')
+ monkeypatch.setattr(system.os.path, 'exists', lambda x: x != 'ceph')
+ assert system.which('ceph') == '/usr/local/bin/ceph'
+
+ def test_warnings_when_executable_isnt_matched(self, monkeypatch, capsys):
+ monkeypatch.setattr(system.os.path, 'isfile', lambda x: True)
+ monkeypatch.setattr(system.os.path, 'exists', lambda x: False)
+ system.which('exedir')
+ cap = capsys.readouterr()
+ assert 'Executable exedir not in PATH' in cap.err
+
+ def test_run_on_host_found(self, mock_find_executable_on_host):
+ mock_find_executable_on_host(stdout="/sbin/lvs\n", stderr="some stderr message\n")
+ assert system.which('lvs', run_on_host=True) == '/sbin/lvs'
+
+ def test_run_on_host_not_found(self, mock_find_executable_on_host):
+ mock_find_executable_on_host(stdout="", stderr="some stderr message\n")
+ assert system.which('lvs', run_on_host=True) == 'lvs'
+
+@pytest.fixture
+def stub_which(monkeypatch):
+ def apply(value='/bin/restorecon'):
+ monkeypatch.setattr(system, 'which', lambda x: value)
+ return apply
+
+
+# python2 has no FileNotFoundError
+try:
+ FileNotFoundError
+except NameError:
+ FileNotFoundError = OSError
+
+
+class TestSetContext(object):
+
+ def setup(self):
+ try:
+ os.environ.pop('CEPH_VOLUME_SKIP_RESTORECON')
+ except KeyError:
+ pass
+
+ @pytest.mark.parametrize('value', ['1', 'True', 'true', 'TRUE', 'yes'])
+ def test_set_context_skips(self, stub_call, fake_run, value):
+ stub_call(('', '', 0))
+ os.environ['CEPH_VOLUME_SKIP_RESTORECON'] = value
+ system.set_context('/tmp/foo')
+ assert fake_run.calls == []
+
+ @pytest.mark.parametrize('value', ['0', 'False', 'false', 'FALSE', 'no'])
+ def test_set_context_doesnt_skip_with_env(self, stub_call, stub_which, fake_run, value):
+ stub_call(('', '', 0))
+ stub_which()
+ os.environ['CEPH_VOLUME_SKIP_RESTORECON'] = value
+ system.set_context('/tmp/foo')
+ assert len(fake_run.calls)
+
+ def test_set_context_skips_on_executable(self, stub_call, stub_which, fake_run):
+ stub_call(('', '', 0))
+ stub_which('restorecon')
+ system.set_context('/tmp/foo')
+ assert fake_run.calls == []
+
+ def test_set_context_no_skip_on_executable(self, stub_call, stub_which, fake_run):
+ stub_call(('', '', 0))
+ stub_which('/bin/restorecon')
+ system.set_context('/tmp/foo')
+ assert len(fake_run.calls)
+
+ @patch('ceph_volume.process.call')
+ def test_selinuxenabled_doesnt_exist(self, mocked_call, fake_run):
+ mocked_call.side_effect = FileNotFoundError()
+ system.set_context('/tmp/foo')
+ assert fake_run.calls == []
+
+ def test_selinuxenabled_is_not_enabled(self, stub_call, fake_run):
+ stub_call(('', '', 1))
+ system.set_context('/tmp/foo')
+ assert fake_run.calls == []
diff --git a/src/ceph-volume/ceph_volume/tests/util/test_util.py b/src/ceph-volume/ceph_volume/tests/util/test_util.py
new file mode 100644
index 000000000..1a094d33f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/util/test_util.py
@@ -0,0 +1,116 @@
+import pytest
+from ceph_volume import util
+
+
+class TestAsBytes(object):
+
+ def test_bytes_just_gets_returned(self):
+ bytes_string = "contents".encode('utf-8')
+ assert util.as_bytes(bytes_string) == bytes_string
+
+ def test_string_gets_converted_to_bytes(self):
+ result = util.as_bytes('contents')
+ assert isinstance(result, bytes)
+
+
+class TestStrToInt(object):
+
+ def test_passing_a_float_str_comma(self):
+ result = util.str_to_int("1,99")
+ assert result == 1
+
+ def test_passing_a_float_does_not_round_comma(self):
+ result = util.str_to_int("1,99", round_down=False)
+ assert result == 2
+
+ @pytest.mark.parametrize("value", ['2', 2])
+ def test_passing_an_int(self, value):
+ result = util.str_to_int(value)
+ assert result == 2
+
+ @pytest.mark.parametrize("value", ['1.99', 1.99])
+ def test_passing_a_float(self, value):
+ result = util.str_to_int(value)
+ assert result == 1
+
+ @pytest.mark.parametrize("value", ['1.99', 1.99])
+ def test_passing_a_float_does_not_round(self, value):
+ result = util.str_to_int(value, round_down=False)
+ assert result == 2
+
+ def test_text_is_not_an_integer_like(self):
+ with pytest.raises(RuntimeError) as error:
+ util.str_to_int("1.4GB")
+ assert str(error.value) == "Unable to convert to integer: '1.4GB'"
+
+ def test_input_is_not_string(self):
+ with pytest.raises(RuntimeError) as error:
+ util.str_to_int(None)
+ assert str(error.value) == "Unable to convert to integer: 'None'"
+
+
+def true_responses(upper_casing=False):
+ if upper_casing:
+ return ['Y', 'YES', '']
+ return ['y', 'yes', '']
+
+
+def false_responses(upper_casing=False):
+ if upper_casing:
+ return ['N', 'NO']
+ return ['n', 'no']
+
+
+def invalid_responses():
+ return [9, 0.1, 'h', [], {}, None]
+
+
+class TestStrToBool(object):
+
+ @pytest.mark.parametrize('response', true_responses())
+ def test_trueish(self, response):
+ assert util.str_to_bool(response) is True
+
+ @pytest.mark.parametrize('response', false_responses())
+ def test_falseish(self, response):
+ assert util.str_to_bool(response) is False
+
+ @pytest.mark.parametrize('response', true_responses(True))
+ def test_trueish_upper(self, response):
+ assert util.str_to_bool(response) is True
+
+ @pytest.mark.parametrize('response', false_responses(True))
+ def test_falseish_upper(self, response):
+ assert util.str_to_bool(response) is False
+
+ @pytest.mark.parametrize('response', invalid_responses())
+ def test_invalid(self, response):
+ with pytest.raises(ValueError):
+ util.str_to_bool(response)
+
+
+class TestPromptBool(object):
+
+ @pytest.mark.parametrize('response', true_responses())
+ def test_trueish(self, response):
+ fake_input = lambda x: response
+ qx = 'what the what?'
+ assert util.prompt_bool(qx, input_=fake_input) is True
+
+ @pytest.mark.parametrize('response', false_responses())
+ def test_falseish(self, response):
+ fake_input = lambda x: response
+ qx = 'what the what?'
+ assert util.prompt_bool(qx, input_=fake_input) is False
+
+ def test_try_again_true(self):
+ responses = ['g', 'h', 'y']
+ fake_input = lambda x: responses.pop(0)
+ qx = 'what the what?'
+ assert util.prompt_bool(qx, input_=fake_input) is True
+
+ def test_try_again_false(self):
+ responses = ['g', 'h', 'n']
+ fake_input = lambda x: responses.pop(0)
+ qx = 'what the what?'
+ assert util.prompt_bool(qx, input_=fake_input) is False
diff --git a/src/ceph-volume/ceph_volume/util/__init__.py b/src/ceph-volume/ceph_volume/util/__init__.py
new file mode 100644
index 000000000..1b5afe970
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/util/__init__.py
@@ -0,0 +1,108 @@
+import logging
+from math import floor
+from ceph_volume import terminal
+
+try:
+ input = raw_input # pylint: disable=redefined-builtin
+except NameError:
+ pass
+
+logger = logging.getLogger(__name__)
+
+
+def as_string(string):
+ """
+ Ensure that whatever type of string is incoming, it is returned as an
+ actual string, versus 'bytes' which Python 3 likes to use.
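+
+    Illustrative examples::
+
+        >>> as_string(b'foo')
+        'foo'
+        >>> as_string('foo')
+        'foo'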
+ """
+ if isinstance(string, bytes):
+ # we really ignore here if we can't properly decode with utf-8
+ return string.decode('utf-8', 'ignore')
+ return string
+
+
+def as_bytes(string):
+ """
+ Ensure that whatever type of string is incoming, it is returned as bytes,
+ encoding to utf-8 otherwise
+ """
+ if isinstance(string, bytes):
+ return string
+ return string.encode('utf-8', errors='ignore')
+
+
+def str_to_int(string, round_down=True):
+ """
+ Parses a string number into an integer, optionally converting to a float
+ and rounding down.
+
+ Some LVM values may come with a comma instead of a dot to define decimals.
+ This function normalizes a comma into a dot
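+
+    Illustrative examples::
+
+        >>> str_to_int("1,99")
+        1
+        >>> str_to_int("1,99", round_down=False)
+        2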
+ """
+ error_msg = "Unable to convert to integer: '%s'" % str(string)
+ try:
+ integer = float(string.replace(',', '.'))
+ except AttributeError:
+        # this might be an integer already, so try to use it, otherwise raise
+ # the original exception
+ if isinstance(string, (int, float)):
+ integer = string
+ else:
+ logger.exception(error_msg)
+ raise RuntimeError(error_msg)
+ except (TypeError, ValueError):
+ logger.exception(error_msg)
+ raise RuntimeError(error_msg)
+
+ if round_down:
+ integer = floor(integer)
+ else:
+ integer = round(integer)
+ return int(integer)
+
+
+def str_to_bool(val):
+ """
+ Convert a string representation of truth to True or False
+
+ True values are 'y', 'yes', or ''; case-insensitive
+ False values are 'n', or 'no'; case-insensitive
+ Raises ValueError if 'val' is anything else.
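+
+    Illustrative examples::
+
+        >>> str_to_bool('YES')
+        True
+        >>> str_to_bool('')
+        True
+        >>> str_to_bool('n')
+        False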
+ """
+ true_vals = ['yes', 'y', '']
+ false_vals = ['no', 'n']
+ try:
+ val = val.lower()
+ except AttributeError:
+ val = str(val).lower()
+ if val in true_vals:
+ return True
+ elif val in false_vals:
+ return False
+ else:
+ raise ValueError("Invalid input value: %s" % val)
+
+
+def prompt_bool(question, input_=None):
+ """
+ Interface to prompt a boolean (or boolean-like) response from a user.
+ Usually a confirmation.
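+
+    Illustrative example (the ``input_`` hook mainly eases testing)::
+
+        >>> prompt_bool('continue?', input_=lambda q: 'y')
+        True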
+ """
+ input_prompt = input_ or input
+ prompt_format = '--> {question} '.format(question=question)
+ response = input_prompt(prompt_format)
+ try:
+ return str_to_bool(response)
+ except ValueError:
+ terminal.error('Valid true responses are: y, yes, <Enter>')
+ terminal.error('Valid false responses are: n, no')
+ terminal.error('That response was invalid, please try again')
+ return prompt_bool(question, input_=input_prompt)
+
+def merge_dict(x, y):
+ """
+    Return a new dict with ``y`` merged into ``x``
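+
+    Values from ``y`` take precedence on key conflicts::
+
+        >>> merge_dict({'a': 1, 'b': 2}, {'b': 3})
+        {'a': 1, 'b': 3}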
+ """
+ z = x.copy()
+ z.update(y)
+    return z
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/util/arg_validators.py b/src/ceph-volume/ceph_volume/util/arg_validators.py
new file mode 100644
index 000000000..270c8a648
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/util/arg_validators.py
@@ -0,0 +1,222 @@
+import argparse
+import os
+from ceph_volume import terminal, decorators, process
+from ceph_volume.util.device import Device
+from ceph_volume.util import disk
+
+
+def valid_osd_id(val):
+    """Normalize an OSD id (e.g. '01' -> '1'); raises ValueError if it is not an integer."""
+    return str(int(val))
+
+class ValidDevice(object):
+
+ def __init__(self, as_string=False, gpt_ok=False):
+ self.as_string = as_string
+ self.gpt_ok = gpt_ok
+
+ def __call__(self, dev_path):
+ self.get_device(dev_path)
+ self._validated_device = self._is_valid_device()
+ return self._format_device(self._validated_device)
+
+ def get_device(self, dev_path):
+ self._device = Device(dev_path)
+ self.dev_path = dev_path
+
+ def _format_device(self, device):
+ if self.as_string:
+ if device.is_lv:
+ # all codepaths expect an lv path to be returned in this format
+ return "{}/{}".format(device.vg_name, device.lv_name)
+ return device.path
+ return device
+
+ def _is_valid_device(self):
+ error = None
+ if not self._device.exists:
+ error = "Unable to proceed with non-existing device: %s" % self.dev_path
+ # FIXME this is not a nice API, this validator was meant to catch any
+ # non-existing devices upfront, not check for gpt headers. Now this
+ # needs to optionally skip checking gpt headers which is beyond
+ # verifying if the device exists. The better solution would be to
+ # configure this with a list of checks that can be excluded/included on
+ # __init__
+ elif self._device.has_gpt_headers and not self.gpt_ok:
+ error = "GPT headers found, they must be removed on: %s" % self.dev_path
+ if self._device.has_partitions:
+ raise RuntimeError("Device {} has partitions.".format(self.dev_path))
+ if error:
+ raise argparse.ArgumentError(None, error)
+ return self._device
+
+
+class ValidZapDevice(ValidDevice):
+ def __call__(self, dev_path):
+ super().get_device(dev_path)
+ return self._format_device(self._is_valid_device())
+
+ def _is_valid_device(self, raise_sys_exit=True):
+ super()._is_valid_device()
+ return self._device
+
+
+class ValidDataDevice(ValidDevice):
+ def __call__(self, dev_path):
+ super().get_device(dev_path)
+ return self._format_device(self._is_valid_device())
+
+ def _is_valid_device(self, raise_sys_exit=True):
+ super()._is_valid_device()
+ if self._device.used_by_ceph:
+ terminal.info('Device {} is already prepared'.format(self.dev_path))
+ if raise_sys_exit:
+ raise SystemExit(0)
+ if self._device.has_fs and not self._device.used_by_ceph:
+ raise RuntimeError("Device {} has a filesystem.".format(self.dev_path))
+ if self.dev_path[0] == '/' and disk.has_bluestore_label(self.dev_path):
+ raise RuntimeError("Device {} has bluestore signature.".format(self.dev_path))
+ return self._device
+
+class ValidRawDevice(ValidDevice):
+ def __call__(self, dev_path):
+ super().get_device(dev_path)
+ return self._format_device(self._is_valid_device())
+
+ def _is_valid_device(self, raise_sys_exit=True):
+ out, err, rc = process.call([
+ 'ceph-bluestore-tool', 'show-label',
+ '--dev', self.dev_path], verbose_on_failure=False)
+ if not rc:
+ terminal.info("Raw device {} is already prepared.".format(self.dev_path))
+ raise SystemExit(0)
+ if disk.blkid(self.dev_path).get('TYPE') == 'crypto_LUKS':
+ terminal.info("Raw device {} might already be in use for a dmcrypt OSD, skipping.".format(self.dev_path))
+ raise SystemExit(0)
+ super()._is_valid_device()
+ return self._device
+
+class ValidBatchDevice(ValidDevice):
+ def __call__(self, dev_path):
+ super().get_device(dev_path)
+ return self._format_device(self._is_valid_device())
+
+ def _is_valid_device(self, raise_sys_exit=False):
+ super()._is_valid_device()
+ if self._device.is_partition:
+ raise argparse.ArgumentError(
+ None,
+ '{} is a partition, please pass '
+ 'LVs or raw block devices'.format(self.dev_path))
+ return self._device
+
+
+class ValidBatchDataDevice(ValidBatchDevice, ValidDataDevice):
+ def __call__(self, dev_path):
+ super().get_device(dev_path)
+ return self._format_device(self._is_valid_device())
+
+ def _is_valid_device(self):
+ # if device is already used by ceph,
+ # leave the validation to Batch.get_deployment_layout()
+ # This way the idempotency isn't broken (especially when using --osds-per-device)
+ for lv in self._device.lvs:
+ if lv.tags.get('ceph.type') in ['db', 'wal', 'journal']:
+ return self._device
+ if self._device.used_by_ceph:
+ return self._device
+ super()._is_valid_device(raise_sys_exit=False)
+ return self._device
+
+
+class OSDPath(object):
+ """
+ Validate path exists and it looks like an OSD directory.
+ """
+
+ @decorators.needs_root
+ def __call__(self, string):
+ if not os.path.exists(string):
+ error = "Path does not exist: %s" % string
+ raise argparse.ArgumentError(None, error)
+
+ arg_is_partition = disk.is_partition(string)
+ if arg_is_partition:
+ return os.path.abspath(string)
+ absolute_path = os.path.abspath(string)
+ if not os.path.isdir(absolute_path):
+ error = "Argument is not a directory or device which is required to scan"
+ raise argparse.ArgumentError(None, error)
+ key_files = ['ceph_fsid', 'fsid', 'keyring', 'ready', 'type', 'whoami']
+ dir_files = os.listdir(absolute_path)
+ for key_file in key_files:
+ if key_file not in dir_files:
+ terminal.error('All following files must exist in path: %s' % ' '.join(key_files))
+ error = "Required file (%s) was not found in OSD dir path: %s" % (
+ key_file,
+ absolute_path
+ )
+ raise argparse.ArgumentError(None, error)
+
+ return os.path.abspath(string)
+
+
+def exclude_group_options(parser, groups, argv=None):
+ """
+ ``argparse`` has the ability to check for mutually exclusive options, but
+ it only allows a basic XOR behavior: only one flag can be used from
+ a defined group of options. This doesn't help when two groups of options
+ need to be separated. For example, with filestore and bluestore, neither
+ set can be used in conjunction with the other set.
+
+ This helper validator will consume the parser to inspect the group flags,
+ and it will group them together from ``groups``. This allows proper error
+ reporting, matching each incompatible flag with its group name.
+
+ :param parser: The argparse object, once it has configured all flags. It is
+ required to contain the group names being used to validate.
+ :param groups: A list of group names (at least two), with the same used for
+ ``add_argument_group``
+ :param argv: Consume the args (sys.argv) directly from this argument
+
+    .. note:: **Unfortunately** this will not be able to validate correctly when
+    using default flags. In the case of filestore vs. bluestore, ceph-volume
+    defaults to --bluestore, but we can't check that programmatically; we can
+    only parse the flags seen via argv.
+ """
+ # Reduce the parser groups to only the groups we need to intersect
+ parser_groups = [g for g in parser._action_groups if g.title in groups]
+ # A mapping of the group name to flags/options
+ group_flags = {}
+ flags_to_verify = []
+ for group in parser_groups:
+ # option groups may have more than one item in ``option_strings``, this
+ # will loop over ``_group_actions`` which contains the
+ # ``option_strings``, like ``['--filestore']``
+ group_flags[group.title] = [
+ option for group_action in group._group_actions
+ for option in group_action.option_strings
+ ]
+
+ # Gather all the flags present in the groups so that we only check on those.
+ for flags in group_flags.values():
+ flags_to_verify.extend(flags)
+
+ seen = []
+ last_flag = None
+ last_group = None
+ for flag in argv:
+ if flag not in flags_to_verify:
+ continue
+ for group_name, flags in group_flags.items():
+ if flag in flags:
+ seen.append(group_name)
+            # The groups are mutually exclusive, so seeing flags from every
+            # group means they conflict and a warning must be emitted
+ if len(set(seen)) == len(groups):
+ terminal.warning('Incompatible flags were found, some values may get ignored')
+ msg = 'Cannot use %s (%s) with %s (%s)' % (
+ last_flag, last_group, flag, group_name
+ )
+ terminal.warning(msg)
+ last_group = group_name
+ last_flag = flag
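+
+
+# A minimal usage sketch (hypothetical flags, for illustration only):
+#
+#   parser = argparse.ArgumentParser()
+#   filestore = parser.add_argument_group('filestore')
+#   filestore.add_argument('--filestore', action='store_true')
+#   bluestore = parser.add_argument_group('bluestore')
+#   bluestore.add_argument('--bluestore', action='store_true')
+#   exclude_group_options(parser, ['filestore', 'bluestore'], argv=sys.argv[1:])
+#
+# Passing both --filestore and --bluestore would then emit the
+# incompatibility warnings described above.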
diff --git a/src/ceph-volume/ceph_volume/util/constants.py b/src/ceph-volume/ceph_volume/util/constants.py
new file mode 100644
index 000000000..3ec819ec3
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/util/constants.py
@@ -0,0 +1,46 @@
+
+# mount flags
+mount = dict(
+    xfs=['rw', 'noatime', 'inode64']
+)
+
+
+# format flags
+mkfs = dict(
+ xfs=[
+ # force overwriting previous fs
+ '-f',
+ # set the inode size to 2kb
+ '-i', 'size=2048',
+ ],
+)
+
+# The fantastical world of ceph-disk labels, they should give you the
+# collywobbles
+ceph_disk_guids = {
+ # luks
+ '45b0969e-9b03-4f30-b4c6-35865ceff106': {'type': 'journal', 'encrypted': True, 'encryption_type': 'luks'},
+ 'cafecafe-9b03-4f30-b4c6-35865ceff106': {'type': 'block', 'encrypted': True, 'encryption_type': 'luks'},
+ '166418da-c469-4022-adf4-b30afd37f176': {'type': 'block.db', 'encrypted': True, 'encryption_type': 'luks'},
+ '86a32090-3647-40b9-bbbd-38d8c573aa86': {'type': 'block.wal', 'encrypted': True, 'encryption_type': 'luks'},
+ '4fbd7e29-9d25-41b8-afd0-35865ceff05d': {'type': 'data', 'encrypted': True, 'encryption_type': 'luks'},
+ # plain
+ '45b0969e-9b03-4f30-b4c6-5ec00ceff106': {'type': 'journal', 'encrypted': True, 'encryption_type': 'plain'},
+ 'cafecafe-9b03-4f30-b4c6-5ec00ceff106': {'type': 'block', 'encrypted': True, 'encryption_type': 'plain'},
+ '93b0052d-02d9-4d8a-a43b-33a3ee4dfbc3': {'type': 'block.db', 'encrypted': True, 'encryption_type': 'plain'},
+ '306e8683-4fe2-4330-b7c0-00a917c16966': {'type': 'block.wal', 'encrypted': True, 'encryption_type': 'plain'},
+ '4fbd7e29-9d25-41b8-afd0-5ec00ceff05d': {'type': 'data', 'encrypted': True, 'encryption_type': 'plain'},
+ # regular guids that differ from plain
+ 'fb3aabf9-d25f-47cc-bf5e-721d1816496b': {'type': 'lockbox', 'encrypted': False, 'encryption_type': None},
+ '30cd0809-c2b2-499c-8879-2d6b78529876': {'type': 'block.db', 'encrypted': False, 'encryption_type': None},
+ '5ce17fce-4087-4169-b7ff-056cc58473f9': {'type': 'block.wal', 'encrypted': False, 'encryption_type': None},
+ '4fbd7e29-9d25-41b8-afd0-062c0ceff05d': {'type': 'data', 'encrypted': False, 'encryption_type': None},
+ 'cafecafe-9b03-4f30-b4c6-b4b80ceff106': {'type': 'block', 'encrypted': False, 'encryption_type': None},
+ # multipath
+ '01b41e1b-002a-453c-9f17-88793989ff8f': {'type': 'block.wal', 'encrypted': False, 'encryption_type': None},
+ 'ec6d6385-e346-45dc-be91-da2a7c8b3261': {'type': 'block.wal', 'encrypted': False, 'encryption_type': None},
+ '45b0969e-8ae0-4982-bf9d-5a8d867af560': {'type': 'journal', 'encrypted': False, 'encryption_type': None},
+ '4fbd7e29-8ae0-4982-bf9d-5a8d867af560': {'type': 'data', 'encrypted': False, 'encryption_type': None},
+ '7f4a666a-16f3-47a2-8445-152ef4d03f6c': {'type': 'lockbox', 'encrypted': False, 'encryption_type': None},
+ 'cafecafe-8ae0-4982-bf9d-5a8d867af560': {'type': 'block', 'encrypted': False, 'encryption_type': None},
+}
diff --git a/src/ceph-volume/ceph_volume/util/device.py b/src/ceph-volume/ceph_volume/util/device.py
new file mode 100644
index 000000000..51605c659
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/util/device.py
@@ -0,0 +1,699 @@
+# -*- coding: utf-8 -*-
+
+import logging
+import os
+from functools import total_ordering
+from ceph_volume import sys_info
+from ceph_volume.api import lvm
+from ceph_volume.util import disk, system
+from ceph_volume.util.lsmdisk import LSMDisk
+from ceph_volume.util.constants import ceph_disk_guids
+from ceph_volume.util.disk import allow_loop_devices
+
+
+logger = logging.getLogger(__name__)
+
+
+report_template = """
+{dev:<25} {size:<12} {device_nodes:<15} {rot!s:<7} {available!s:<9} {model}"""
+
+
+def encryption_status(abspath):
+ """
+ Helper function to run ``encryption.status()``. It is done here to avoid
+ a circular import issue (encryption module imports from this module) and to
+ ease testing by allowing monkeypatching of this function.
+ """
+ from ceph_volume.util import encryption
+ return encryption.status(abspath)
+
+
+class Devices(object):
+ """
+ A container for Device instances with reporting
+ """
+
+ def __init__(self, filter_for_batch=False, with_lsm=False):
+ lvs = lvm.get_lvs()
+ lsblk_all = disk.lsblk_all()
+ all_devices_vgs = lvm.get_all_devices_vgs()
+ if not sys_info.devices:
+ sys_info.devices = disk.get_devices()
+ self.devices = [Device(k,
+ with_lsm,
+ lvs=lvs,
+ lsblk_all=lsblk_all,
+ all_devices_vgs=all_devices_vgs) for k in
+ sys_info.devices.keys()]
+ if filter_for_batch:
+ self.devices = [d for d in self.devices if d.available_lvm_batch]
+
+ def pretty_report(self):
+ output = [
+ report_template.format(
+ dev='Device Path',
+ size='Size',
+ rot='rotates',
+ model='Model name',
+ available='available',
+ device_nodes='Device nodes',
+
+ )]
+ for device in sorted(self.devices):
+ output.append(device.report())
+ return ''.join(output)
+
+ def json_report(self):
+ output = []
+ for device in sorted(self.devices):
+ output.append(device.json_report())
+ return output
+
+@total_ordering
+class Device(object):
+
+ pretty_template = """
+ {attr:<25} {value}"""
+
+ report_fields = [
+ 'ceph_device',
+ 'rejected_reasons',
+ 'available',
+ 'path',
+ 'sys_api',
+ 'device_id',
+ 'lsm_data',
+ ]
+ pretty_report_sys_fields = [
+ 'human_readable_size',
+ 'model',
+ 'removable',
+ 'ro',
+ 'rotational',
+ 'sas_address',
+ 'scheduler_mode',
+ 'vendor',
+ ]
+
+ # define some class variables; mostly to enable the use of autospec in
+ # unittests
+ lvs = []
+
+ def __init__(self, path, with_lsm=False, lvs=None, lsblk_all=None, all_devices_vgs=None):
+ self.path = path
+ # LVs can have a vg/lv path, while disks will have /dev/sda
+ self.symlink = None
+ # check if we are a symlink
+ if os.path.islink(self.path):
+ self.symlink = self.path
+ real_path = os.path.realpath(self.path)
+            # only dereference the symlink if it does not point at a device mapper node
+ if "dm-" not in real_path:
+ self.path = real_path
+ if not sys_info.devices:
+ if self.path:
+ sys_info.devices = disk.get_devices(device=self.path)
+ else:
+ sys_info.devices = disk.get_devices()
+ if sys_info.devices.get(self.path, {}):
+ self.device_nodes = sys_info.devices[self.path]['device_nodes']
+ self.sys_api = sys_info.devices.get(self.path, {})
+ self.partitions = self._get_partitions()
+ self.lv_api = None
+ self.lvs = [] if not lvs else lvs
+ self.lsblk_all = lsblk_all
+ self.all_devices_vgs = all_devices_vgs
+ self.vgs = []
+ self.vg_name = None
+ self.lv_name = None
+ self.disk_api = {}
+ self.blkid_api = None
+ self._exists = None
+ self._is_lvm_member = None
+ self.ceph_device = False
+ self._parse()
+ self.lsm_data = self.fetch_lsm(with_lsm)
+
+ self.available_lvm, self.rejected_reasons_lvm = self._check_lvm_reject_reasons()
+ self.available_raw, self.rejected_reasons_raw = self._check_raw_reject_reasons()
+ self.available = self.available_lvm and self.available_raw
+ self.rejected_reasons = list(set(self.rejected_reasons_lvm +
+ self.rejected_reasons_raw))
+
+ self.device_id = self._get_device_id()
+
+ def fetch_lsm(self, with_lsm):
+ '''
+ Attempt to fetch libstoragemgmt (LSM) metadata, and return to the caller
+ as a dict. An empty dict is passed back to the caller if the target path
+ is not a block device, or lsm is unavailable on the host. Otherwise the
+ json returned will provide LSM attributes, and any associated errors that
+ lsm encountered when probing the device.
+ '''
+ if not with_lsm or not self.exists or not self.is_device:
+ return {}
+
+ lsm_disk = LSMDisk(self.path)
+
+ return lsm_disk.json_report()
+
+ def __lt__(self, other):
+ '''
+ Implementing this method and __eq__ allows the @total_ordering
+ decorator to turn the Device class into a totally ordered type.
+        This can be slower than implementing all comparison operations.
+ This sorting should put available devices before unavailable devices
+ and sort on the path otherwise (str sorting).
+ '''
+ if self.available == other.available:
+ return self.path < other.path
+ return self.available and not other.available
+
+ def __eq__(self, other):
+ return self.path == other.path
+
+ def __hash__(self):
+ return hash(self.path)
+
+ def load_blkid_api(self):
+ if self.blkid_api is None:
+ self.blkid_api = disk.blkid(self.path)
+
+ def _parse(self):
+ lv = None
+ if not self.sys_api:
+ # if no device was found check if we are a partition
+ partname = self.path.split('/')[-1]
+ for device, info in sys_info.devices.items():
+ part = info['partitions'].get(partname, {})
+ if part:
+ self.sys_api = part
+ break
+
+ if self.lvs:
+ for _lv in self.lvs:
+ # if the path is not absolute, we have 'vg/lv', let's use LV name
+ # to get the LV.
+ if self.path[0] == '/':
+ if _lv.lv_path == self.path:
+ lv = _lv
+ break
+ else:
+ vgname, lvname = self.path.split('/')
+ if _lv.lv_name == lvname and _lv.vg_name == vgname:
+ lv = _lv
+ break
+ else:
+ if self.path[0] == '/':
+ lv = lvm.get_single_lv(filters={'lv_path': self.path})
+ else:
+ vgname, lvname = self.path.split('/')
+ lv = lvm.get_single_lv(filters={'lv_name': lvname,
+ 'vg_name': vgname})
+
+ if lv:
+ self.lv_api = lv
+ self.lvs = [lv]
+ self.path = lv.lv_path
+ self.vg_name = lv.vg_name
+ self.lv_name = lv.name
+ self.ceph_device = lvm.is_ceph_device(lv)
+ else:
+ self.lvs = []
+ if self.lsblk_all:
+ for dev in self.lsblk_all:
+ if dev['NAME'] == os.path.basename(self.path):
+ break
+ else:
+ dev = disk.lsblk(self.path)
+ self.disk_api = dev
+ device_type = dev.get('TYPE', '')
+            # always check if this is an lvm member
+ valid_types = ['part', 'disk']
+ if allow_loop_devices():
+ valid_types.append('loop')
+ if device_type in valid_types:
+ self._set_lvm_membership()
+
+ self.ceph_disk = CephDiskDevice(self)
+
+ def __repr__(self):
+ prefix = 'Unknown'
+ if self.is_lv:
+ prefix = 'LV'
+ elif self.is_partition:
+ prefix = 'Partition'
+ elif self.is_device:
+ prefix = 'Raw Device'
+ return '<%s: %s>' % (prefix, self.path)
+
+ def pretty_report(self):
+ def format_value(v):
+ if isinstance(v, list):
+ return ', '.join(v)
+ else:
+ return v
+ def format_key(k):
+ return k.strip('_').replace('_', ' ')
+ output = ['\n====== Device report {} ======\n'.format(self.path)]
+ output.extend(
+ [self.pretty_template.format(
+ attr=format_key(k),
+ value=format_value(v)) for k, v in vars(self).items() if k in
+ self.report_fields and k != 'disk_api' and k != 'sys_api'] )
+ output.extend(
+ [self.pretty_template.format(
+ attr=format_key(k),
+ value=format_value(v)) for k, v in self.sys_api.items() if k in
+ self.pretty_report_sys_fields])
+ for lv in self.lvs:
+ output.append("""
+ --- Logical Volume ---""")
+ output.extend(
+ [self.pretty_template.format(
+ attr=format_key(k),
+ value=format_value(v)) for k, v in lv.report().items()])
+ return ''.join(output)
+
+ def report(self):
+ return report_template.format(
+ dev=self.path,
+ size=self.size_human,
+ rot=self.rotational,
+ available=self.available,
+ model=self.model,
+ device_nodes=self.device_nodes
+ )
+
+ def json_report(self):
+ output = {k.strip('_'): v for k, v in vars(self).items() if k in
+ self.report_fields}
+ output['lvs'] = [lv.report() for lv in self.lvs]
+ return output
+
+ def _get_device_id(self):
+ """
+ Please keep this implementation in sync with get_device_id() in
+ src/common/blkdev.cc
+ """
+ props = ['ID_VENDOR', 'ID_MODEL', 'ID_MODEL_ENC', 'ID_SERIAL_SHORT', 'ID_SERIAL',
+ 'ID_SCSI_SERIAL']
+ p = disk.udevadm_property(self.path, props)
+ if p.get('ID_MODEL','').startswith('LVM PV '):
+ p['ID_MODEL'] = p.get('ID_MODEL_ENC', '').replace('\\x20', ' ').strip()
+ if 'ID_VENDOR' in p and 'ID_MODEL' in p and 'ID_SCSI_SERIAL' in p:
+ dev_id = '_'.join([p['ID_VENDOR'], p['ID_MODEL'],
+ p['ID_SCSI_SERIAL']])
+ elif 'ID_MODEL' in p and 'ID_SERIAL_SHORT' in p:
+ dev_id = '_'.join([p['ID_MODEL'], p['ID_SERIAL_SHORT']])
+ elif 'ID_SERIAL' in p:
+ dev_id = p['ID_SERIAL']
+ if dev_id.startswith('MTFD'):
+ # Micron NVMes hide the vendor
+ dev_id = 'Micron_' + dev_id
+ else:
+ # the else branch should fallback to using sysfs and ioctl to
+ # retrieve device_id on FreeBSD. Still figuring out if/how the
+ # python ioctl implementation does that on FreeBSD
+ dev_id = ''
+        dev_id = dev_id.replace(' ', '_')
+ return dev_id
+
+ def _set_lvm_membership(self):
+ if self._is_lvm_member is None:
+ # this is contentious, if a PV is recognized by LVM but has no
+ # VGs, should we consider it as part of LVM? We choose not to
+ # here, because most likely, we need to use VGs from this PV.
+ self._is_lvm_member = False
+ device_to_check = [self.path]
+ device_to_check.extend(self.partitions)
+
+ # a pv can only be in one vg, so this should be safe
+ # FIXME: While the above assumption holds, sda1 and sda2
+ # can each host a PV and VG. I think the vg_name property is
+ # actually unused (not 100% sure) and can simply be removed
+ vgs = None
+ if not self.all_devices_vgs:
+ self.all_devices_vgs = lvm.get_all_devices_vgs()
+ for path in device_to_check:
+ for dev_vg in self.all_devices_vgs:
+ if dev_vg.pv_name == path:
+ vgs = [dev_vg]
+ if vgs:
+ self.vgs.extend(vgs)
+ self.vg_name = vgs[0]
+ self._is_lvm_member = True
+ self.lvs.extend(lvm.get_device_lvs(path))
+ if self.lvs:
+                self.ceph_device = any(lv.tags.get('ceph.osd_id') for lv in self.lvs)
+
+ def _get_partitions(self):
+ """
+ For block devices LVM can reside on the raw block device or on a
+ partition. Return a list of paths to be checked for a pv.
+ """
+ partitions = []
+ path_dir = os.path.dirname(self.path)
+ for partition in self.sys_api.get('partitions', {}).keys():
+ partitions.append(os.path.join(path_dir, partition))
+ return partitions
+
+ @property
+ def exists(self):
+ return os.path.exists(self.path)
+
+ @property
+ def has_fs(self):
+ self.load_blkid_api()
+ return 'TYPE' in self.blkid_api
+
+ @property
+ def has_gpt_headers(self):
+ self.load_blkid_api()
+ return self.blkid_api.get("PTTYPE") == "gpt"
+
+ @property
+ def rotational(self):
+ rotational = self.sys_api.get('rotational')
+ if rotational is None:
+ # fall back to lsblk if not found in sys_api
+ # default to '1' if no value is found with lsblk either
+ rotational = self.disk_api.get('ROTA', '1')
+ return rotational == '1'
+
+ @property
+ def model(self):
+ return self.sys_api['model']
+
+ @property
+ def size_human(self):
+ return self.sys_api['human_readable_size']
+
+ @property
+ def size(self):
+ return self.sys_api['size']
+
+ @property
+ def parent_device(self):
+ if 'PKNAME' in self.disk_api:
+ return '/dev/%s' % self.disk_api['PKNAME']
+ return None
+
+ @property
+ def lvm_size(self):
+ """
+ If this device was made into a PV it would lose 1GB in total size
+ due to the 1GB physical extent size we set when creating volume groups
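+
+        For example, a device reporting 400GB would yield a usable size of
+        399GB (illustrative).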
+ """
+ size = disk.Size(b=self.size)
+ lvm_size = disk.Size(gb=size.gb.as_int()) - disk.Size(gb=1)
+ return lvm_size
+
+ @property
+ def is_lvm_member(self):
+ if self._is_lvm_member is None:
+ self._set_lvm_membership()
+ return self._is_lvm_member
+
+ @property
+ def is_ceph_disk_member(self):
+ def is_member(device):
+ return 'ceph' in device.get('PARTLABEL', '') or \
+ device.get('PARTTYPE', '') in ceph_disk_guids.keys()
+ # If we come from Devices(), self.lsblk_all is set already.
+ # Otherwise, we have to grab the data.
+ details = self.lsblk_all or disk.lsblk_all()
+ _is_member = False
+ if self.sys_api.get("partitions"):
+ for part in self.sys_api.get("partitions").keys():
+ for dev in details:
+ if part.startswith(dev['NAME']):
+ if is_member(dev):
+ _is_member = True
+ return _is_member
+ else:
+ return is_member(self.disk_api)
+        raise RuntimeError(f"Couldn't check if device {self.path} is a ceph-disk member.")
+
+ @property
+ def has_bluestore_label(self):
+ return disk.has_bluestore_label(self.path)
+
+ @property
+ def is_mapper(self):
+ return self.path.startswith(('/dev/mapper', '/dev/dm-'))
+
+ @property
+ def device_type(self):
+ self.load_blkid_api()
+ if 'type' in self.sys_api:
+ return self.sys_api['type']
+ elif self.disk_api:
+ return self.disk_api['TYPE']
+ elif self.blkid_api:
+ return self.blkid_api['TYPE']
+
+ @property
+ def is_mpath(self):
+ return self.device_type == 'mpath'
+
+ @property
+ def is_lv(self):
+ return self.lv_api is not None
+
+ @property
+ def is_partition(self):
+ self.load_blkid_api()
+ if self.disk_api:
+ return self.disk_api['TYPE'] == 'part'
+ elif self.blkid_api:
+ return self.blkid_api['TYPE'] == 'part'
+ return False
+
+ @property
+ def is_device(self):
+ self.load_blkid_api()
+ api = None
+ if self.disk_api:
+ api = self.disk_api
+ elif self.blkid_api:
+ api = self.blkid_api
+ if api:
+ valid_types = ['disk', 'device', 'mpath']
+ if allow_loop_devices():
+ valid_types.append('loop')
+ return self.device_type in valid_types
+ return False
+
+ @property
+ def is_acceptable_device(self):
+ return self.is_device or self.is_partition
+
+ @property
+ def is_encrypted(self):
+ """
+ Only correct for LVs, device mappers, and partitions. Will report a ``None``
+ for raw devices.
+ """
+ self.load_blkid_api()
+ crypt_reports = [self.blkid_api.get('TYPE', ''), self.disk_api.get('FSTYPE', '')]
+ if self.is_lv:
+ # if disk APIs are reporting this is encrypted use that:
+ if 'crypto_LUKS' in crypt_reports:
+ return True
+ # if ceph-volume created this, then a tag would let us know
+ elif self.lv_api.encrypted:
+ return True
+ return False
+ elif self.is_partition:
+ return 'crypto_LUKS' in crypt_reports
+ elif self.is_mapper:
+ active_mapper = encryption_status(self.path)
+ if active_mapper:
+ # normalize a bit to ensure same values regardless of source
+ encryption_type = active_mapper['type'].lower().strip('12') # turn LUKS1 or LUKS2 into luks
+                return encryption_type in ['plain', 'luks']
+ else:
+ return False
+ else:
+ return None
+
+ @property
+ def used_by_ceph(self):
+ # only filter out data devices as journals could potentially be reused
+ osd_ids = [lv.tags.get("ceph.osd_id") is not None for lv in self.lvs
+ if lv.tags.get("ceph.type") in ["data", "block"]]
+ return any(osd_ids)
+
+ @property
+ def vg_free_percent(self):
+ if self.vgs:
+ return [vg.free_percent for vg in self.vgs]
+ else:
+ return [1]
+
+ @property
+ def vg_size(self):
+ if self.vgs:
+ return [vg.size for vg in self.vgs]
+ else:
+ # TODO fix this...we can probably get rid of vg_free
+ return self.vg_free
+
+ @property
+ def vg_free(self):
+ '''
+ Returns the free space in all VGs on this device. If no VGs are
+ present, returns the disk size.
+ '''
+ if self.vgs:
+ return [vg.free for vg in self.vgs]
+ else:
+ # We could also query 'lvmconfig
+ # --typeconfig full' and use allocations -> physical_extent_size
+ # value to project the space for a vg
+ # assuming 4M extents here
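+            # e.g. a 10GiB disk (10737418240 B) divides evenly into 2560
+            # extents, so one extent is deducted below:
+            # 10737418240 - 4194304 = 10733223936 B reported free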
+ extent_size = 4194304
+ vg_free = int(self.size / extent_size) * extent_size
+ if self.size % extent_size == 0:
+                # If the extent size divides size exactly, deduct one extent for
+ # LVM metadata
+ vg_free -= extent_size
+ return [vg_free]
+
+ @property
+ def has_partitions(self):
+ '''
+ Boolean to determine if a given device has partitions.
+ '''
+ if self.sys_api.get('partitions'):
+ return True
+ return False
+
+ def _check_generic_reject_reasons(self):
+ reasons = [
+ ('removable', 1, 'removable'),
+ ('ro', 1, 'read-only'),
+ ('locked', 1, 'locked'),
+ ]
+ rejected = [reason for (k, v, reason) in reasons if
+ self.sys_api.get(k, '') == v]
+ if self.is_acceptable_device:
+ # reject disks smaller than 5GB
+ if int(self.sys_api.get('size', 0)) < 5368709120:
+ rejected.append('Insufficient space (<5GB)')
+ else:
+ rejected.append("Device type is not acceptable. It should be raw device or partition")
+ if self.is_ceph_disk_member:
+ rejected.append("Used by ceph-disk")
+
+ try:
+ if self.has_bluestore_label:
+ rejected.append('Has BlueStore device label')
+ except OSError as e:
+ # likely failed to open the device. assuming it is BlueStore is the safest option
+ # so that a possibly-already-existing OSD doesn't get overwritten
+ logger.error('failed to determine if device {} is BlueStore. device should not be used to avoid false negatives. err: {}'.format(self.path, e))
+ rejected.append('Failed to determine if device is BlueStore')
+
+ if self.is_partition:
+ try:
+ if disk.has_bluestore_label(self.parent_device):
+ rejected.append('Parent has BlueStore device label')
+ except OSError as e:
+ # likely failed to open the device. assuming the parent is BlueStore is the safest
+ # option so that a possibly-already-existing OSD doesn't get overwritten
+ logger.error('failed to determine if partition {} (parent: {}) has a BlueStore parent. partition should not be used to avoid false negatives. err: {}'.format(self.path, self.parent_device, e))
+ rejected.append('Failed to determine if parent device is BlueStore')
+
+ if self.has_gpt_headers:
+ rejected.append('Has GPT headers')
+ if self.has_partitions:
+ rejected.append('Has partitions')
+ return rejected
+
+ def _check_lvm_reject_reasons(self):
+ rejected = []
+ if self.vgs:
+ available_vgs = [vg for vg in self.vgs if int(vg.vg_free_count) > 10]
+ if not available_vgs:
+ rejected.append('Insufficient space (<10 extents) on vgs')
+ else:
+ # only check generic if no vgs are present. Vgs might hold lvs and
+ # that might cause 'locked' to trigger
+ rejected.extend(self._check_generic_reject_reasons())
+
+ return len(rejected) == 0, rejected
+
+ def _check_raw_reject_reasons(self):
+ rejected = self._check_generic_reject_reasons()
+ if len(self.vgs) > 0:
+ rejected.append('LVM detected')
+
+ return len(rejected) == 0, rejected
+
+ @property
+ def available_lvm_batch(self):
+ if self.sys_api.get("partitions"):
+ return False
+ if system.device_is_mounted(self.path):
+ return False
+ return self.is_device or self.is_lv
+
+
+class CephDiskDevice(object):
+ """
+ Detect devices that have been created by ceph-disk, report their type
+ (journal, data, etc..). Requires a ``Device`` object as input.
+ """
+
+ def __init__(self, device):
+ self.device = device
+ self._is_ceph_disk_member = None
+
+ @property
+ def partlabel(self):
+ """
+ In containers, the 'PARTLABEL' attribute might not be detected
+ correctly via ``lsblk``, so we poke at the value with ``lsblk`` first,
+        falling back to ``blkid`` (which works correctly in containers).
+ """
+ lsblk_partlabel = self.device.disk_api.get('PARTLABEL')
+ if lsblk_partlabel:
+ return lsblk_partlabel
+ return self.device.blkid_api.get('PARTLABEL', '')
+
+ @property
+ def parttype(self):
+ """
+        Seems like older versions do not detect PARTTYPE correctly (assuming the
+        info in util/disk.py#lsblk is still valid).
+        Simply resolve to using blkid, since lsblk will throw an error if asked
+        for an unknown column.
+ """
+ return self.device.blkid_api.get('PARTTYPE', '')
+
+ @property
+ def is_member(self):
+ if self._is_ceph_disk_member is None:
+ if 'ceph' in self.partlabel:
+ self._is_ceph_disk_member = True
+ return True
+ elif self.parttype in ceph_disk_guids.keys():
+ return True
+ return False
+ return self._is_ceph_disk_member
+
+ @property
+ def type(self):
+ types = [
+ 'data', 'wal', 'db', 'lockbox', 'journal',
+ # ceph-disk uses 'ceph block' when placing data in bluestore, but
+ # keeps the regular OSD files in 'ceph data' :( :( :( :(
+ 'block',
+ ]
+ for t in types:
+ if t in self.partlabel:
+ return t
+ label = ceph_disk_guids.get(self.parttype, {})
+ return label.get('type', 'unknown').split('.')[-1]
diff --git a/src/ceph-volume/ceph_volume/util/disk.py b/src/ceph-volume/ceph_volume/util/disk.py
new file mode 100644
index 000000000..4a310e30f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/util/disk.py
@@ -0,0 +1,927 @@
+import logging
+import os
+import re
+import stat
+import time
+from ceph_volume import process
+from ceph_volume.api import lvm
+from ceph_volume.util.system import get_file_contents
+
+
+logger = logging.getLogger(__name__)
+
+
+# The blkid CLI tool has some oddities which prevent having one common call
+# to extract the information instead of having separate utilities. The `udev`
+# type of output is needed in older versions of blkid (v 2.23) that will not
+# work correctly with just the ``-p`` flag to bypass the cache for example.
+# Xenial doesn't have this problem as it uses a newer blkid version.
+
+
+def get_partuuid(device):
+ """
+ If a device is a partition, it will probably have a PARTUUID on it that
+ will persist and can be queried against `blkid` later to detect the actual
+ device
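+
+    Illustrative output (the UUID is hypothetical)::
+
+        >>> get_partuuid('/dev/sdb1')
+        '62416664-cbaf-40bd-9689-10bd337379c3'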
+ """
+ out, err, rc = process.call(
+ ['blkid', '-c', '/dev/null', '-s', 'PARTUUID', '-o', 'value', device]
+ )
+ return ' '.join(out).strip()
+
+
+def _blkid_parser(output):
+ """
+ Parses the output from a system ``blkid`` call, requires output to be
+ produced using the ``-p`` flag which bypasses the cache, mangling the
+ names. These names are corrected to what it would look like without the
+ ``-p`` flag.
+
+ Normal output::
+
+ /dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" [...]
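+
+    which this parser reduces to (illustrative)::
+
+        {'UUID': '62416664-cbaf-40bd-9689-10bd337379c3', 'TYPE': 'xfs'}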
+ """
+ # first spaced separated item is garbage, gets tossed:
+ output = ' '.join(output.split()[1:])
+ # split again, respecting possible whitespace in quoted values
+ pairs = output.split('" ')
+ raw = {}
+ processed = {}
+ mapping = {
+ 'UUID': 'UUID',
+ 'TYPE': 'TYPE',
+ 'PART_ENTRY_NAME': 'PARTLABEL',
+ 'PART_ENTRY_UUID': 'PARTUUID',
+ 'PART_ENTRY_TYPE': 'PARTTYPE',
+ 'PTTYPE': 'PTTYPE',
+ }
+
+ for pair in pairs:
+ try:
+ column, value = pair.split('=')
+ except ValueError:
+ continue
+        raw[column] = value.strip().strip('"')
+
+ for key, value in raw.items():
+ new_key = mapping.get(key)
+ if not new_key:
+ continue
+ processed[new_key] = value
+
+ return processed
+
+
+def blkid(device):
+ """
+ The blkid interface to its CLI, creating an output similar to what is
+ expected from ``lsblk``. In most cases, ``lsblk()`` should be the preferred
+ method for extracting information about a device. There are some corner
+ cases where it might provide information that is otherwise unavailable.
+
+ The system call uses the ``-p`` flag which bypasses the cache, the caveat
+ being that the keys produced are named completely different to expected
+ names.
+
+ For example, instead of ``PARTLABEL`` it provides a ``PART_ENTRY_NAME``.
+ A bit of translation between these known keys is done, which is why
+ ``lsblk`` should always be preferred: the output provided here is not as
+ rich, given that a translation of keys is required for a uniform interface
+ with the ``-p`` flag.
+
+ Label name to expected output chart:
+
+ cache bypass name expected name
+
+ UUID UUID
+ TYPE TYPE
+ PART_ENTRY_NAME PARTLABEL
+ PART_ENTRY_UUID PARTUUID
+ """
+ out, err, rc = process.call(
+ ['blkid', '-c', '/dev/null', '-p', device]
+ )
+ return _blkid_parser(' '.join(out))
+
+
+def get_part_entry_type(device):
+ """
+ Parses the ``ID_PART_ENTRY_TYPE`` from the "low level" (bypasses the cache)
+ output that uses the ``udev`` type of output. This output is intended to be
+ used for udev rules, but it is useful in this case as it is the only
+ consistent way to retrieve the GUID used by ceph-disk to identify devices.
+ """
+ out, err, rc = process.call(['blkid', '-c', '/dev/null', '-p', '-o', 'udev', device])
+ for line in out:
+ if 'ID_PART_ENTRY_TYPE=' in line:
+ return line.split('=')[-1].strip()
+ return ''
+
+
+def get_device_from_partuuid(partuuid):
+ """
+ If a device has a partuuid, query blkid so that it can tell us what that
+ device is
+ """
+ out, err, rc = process.call(
+ ['blkid', '-c', '/dev/null', '-t', 'PARTUUID="%s"' % partuuid, '-o', 'device']
+ )
+ return ' '.join(out).strip()
+
+
+def remove_partition(device):
+ """
+ Removes a partition using parted
+
+ :param device: A ``Device()`` object
+ """
+    # Sometimes a race condition leaves 'ID_PART_ENTRY_NUMBER' missing from
+    # the output of `udevadm info --query=property`.
+    # Probably not ideal and not the best fix, but retrying a few times
+    # before actually failing allows us to get around that issue.
+ for i in range(10):
+ udev_info = udevadm_property(device.path)
+ partition_number = udev_info.get('ID_PART_ENTRY_NUMBER')
+ if partition_number:
+ break
+ time.sleep(0.2)
+ if not partition_number:
+ raise RuntimeError('Unable to detect the partition number for device: %s' % device.path)
+
+ process.run(
+ ['parted', device.parent_device, '--script', '--', 'rm', partition_number]
+ )
+
+
+def _stat_is_device(stat_obj):
+ """
+    Helper function that interprets the ``st_mode`` field of an ``os.stat``
+    result, so that other functions can call ``os.stat`` once and interpret
+    that result several times
+ """
+ return stat.S_ISBLK(stat_obj)
+
+
+def _lsblk_parser(line):
+ """
+ Parses lines in lsblk output. Requires output to be in pair mode (``-P`` flag). Lines
+ need to be whole strings, the line gets split when processed.
+
+ :param line: A string, with the full line from lsblk output
+ """
+ # parse the COLUMN="value" output to construct the dictionary
+ pairs = line.split('" ')
+ parsed = {}
+ for pair in pairs:
+ try:
+ column, value = pair.split('=')
+ except ValueError:
+ continue
+        parsed[column] = value.strip().strip('"')
+ return parsed
+
+
+def device_family(device):
+ """
+ Returns a list of associated devices. It assumes that ``device`` is
+ a parent device. It is up to the caller to ensure that the device being
+ used is a parent, not a partition.
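+
+    Illustrative output for a parent device with one partition (paths and
+    labels hypothetical)::
+
+        [{'NAME': '/dev/sdb', 'PARTLABEL': '', 'TYPE': 'disk'},
+         {'NAME': '/dev/sdb1', 'PARTLABEL': 'ceph data', 'TYPE': 'part'}]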
+ """
+ labels = ['NAME', 'PARTLABEL', 'TYPE']
+ command = ['lsblk', '-P', '-p', '-o', ','.join(labels), device]
+ out, err, rc = process.call(command)
+ devices = []
+ for line in out:
+ devices.append(_lsblk_parser(line))
+
+ return devices
+
+
+def udevadm_property(device, properties=[]):
+ """
+ Query udevadm for information about device properties.
+ Optionally pass a list of properties to return. A requested property might
+ not be returned if not present.
+
+ Expected output format::
+        # udevadm info --query=property --name=/dev/sda
+ DEVNAME=/dev/sda
+ DEVTYPE=disk
+ ID_ATA=1
+ ID_BUS=ata
+ ID_MODEL=SK_hynix_SC311_SATA_512GB
+ ID_PART_TABLE_TYPE=gpt
+ ID_PART_TABLE_UUID=c8f91d57-b26c-4de1-8884-0c9541da288c
+ ID_PATH=pci-0000:00:17.0-ata-3
+ ID_PATH_TAG=pci-0000_00_17_0-ata-3
+ ID_REVISION=70000P10
+ ID_SERIAL=SK_hynix_SC311_SATA_512GB_MS83N71801150416A
+ TAGS=:systemd:
+ USEC_INITIALIZED=16117769
+ ...
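+
+        Illustrative call, reusing the properties above::
+
+            >>> udevadm_property('/dev/sda', ['ID_MODEL', 'ID_SERIAL'])
+            {'ID_MODEL': 'SK_hynix_SC311_SATA_512GB', 'ID_SERIAL': 'SK_hynix_SC311_SATA_512GB_MS83N71801150416A'}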
+ """
+ out = _udevadm_info(device)
+ ret = {}
+ for line in out:
+ p, v = line.split('=', 1)
+ if not properties or p in properties:
+ ret[p] = v
+ return ret
+
+
+def _udevadm_info(device):
+ """
+ Call udevadm and return the output
+ """
+ cmd = ['udevadm', 'info', '--query=property', device]
+ out, _err, _rc = process.call(cmd)
+ return out
+
+
+def lsblk(device, columns=None, abspath=False):
+ return lsblk_all(device=device,
+ columns=columns,
+ abspath=abspath)
+
+def lsblk_all(device='', columns=None, abspath=False):
+ """
+ Create a dictionary of identifying values for a device using ``lsblk``.
+ Each supported column is a key, in its *raw* format (all uppercase
+ usually). ``lsblk`` has support for certain "columns" (in blkid these
+ would be labels), and these columns vary between distributions and
+ ``lsblk`` versions. The newer versions support a richer set of columns,
+ while older ones were a bit limited.
+
+ These are a subset of lsblk columns which are known to work on both CentOS 7 and Xenial:
+
+ NAME device name
+ KNAME internal kernel device name
+ PKNAME internal kernel parent device name
+ MAJ:MIN major:minor device number
+ FSTYPE filesystem type
+ MOUNTPOINT where the device is mounted
+ LABEL filesystem LABEL
+ UUID filesystem UUID
+ RO read-only device
+ RM removable device
+ MODEL device identifier
+ SIZE size of the device
+ STATE state of the device
+ OWNER user name
+ GROUP group name
+ MODE device node permissions
+ ALIGNMENT alignment offset
+ MIN-IO minimum I/O size
+ OPT-IO optimal I/O size
+ PHY-SEC physical sector size
+ LOG-SEC logical sector size
+ ROTA rotational device
+ SCHED I/O scheduler name
+ RQ-SIZE request queue size
+ TYPE device type
+ PKNAME internal parent kernel device name
+ DISC-ALN discard alignment offset
+ DISC-GRAN discard granularity
+ DISC-MAX discard max bytes
+ DISC-ZERO discard zeroes data
+
+ There is a bug in ``lsblk`` where using all the available (supported)
+ columns will result in no output (!), in order to workaround this the
+ following columns have been removed from the default reporting columns:
+
+ * RQ-SIZE (request queue size)
+ * MIN-IO minimum I/O size
+ * OPT-IO optimal I/O size
+
+ These should be available however when using `columns`. For example::
+
+ >>> lsblk('/dev/sda1', columns=['OPT-IO'])
+ {'OPT-IO': '0'}
+
+ Normal CLI output, as filtered by the flags in this function will look like ::
+
+ $ lsblk -P -o NAME,KNAME,PKNAME,MAJ:MIN,FSTYPE,MOUNTPOINT
+ NAME="sda1" KNAME="sda1" MAJ:MIN="8:1" FSTYPE="ext4" MOUNTPOINT="/"
+
+ :param columns: A list of columns to report as keys in its original form.
+ :param abspath: Set the flag for absolute paths on the report
+ """
+ default_columns = [
+ 'NAME', 'KNAME', 'PKNAME', 'MAJ:MIN', 'FSTYPE', 'MOUNTPOINT', 'LABEL',
+ 'UUID', 'RO', 'RM', 'MODEL', 'SIZE', 'STATE', 'OWNER', 'GROUP', 'MODE',
+ 'ALIGNMENT', 'PHY-SEC', 'LOG-SEC', 'ROTA', 'SCHED', 'TYPE', 'DISC-ALN',
+ 'DISC-GRAN', 'DISC-MAX', 'DISC-ZERO', 'PARTLABEL'
+ ]
+ columns = columns or default_columns
+ # -P -> Produce pairs of COLUMN="value"
+ # -p -> Return full paths to devices, not just the names, when ``abspath`` is set
+ # -o -> Use the columns specified or default ones provided by this function
+ base_command = ['lsblk', '-P']
+ if abspath:
+ base_command.append('-p')
+ base_command.append('-o')
+ base_command.append(','.join(columns))
+
+ out, err, rc = process.call(base_command)
+
+ if rc != 0:
+ raise RuntimeError(f"Error: {err}")
+
+ result = []
+
+ for line in out:
+ result.append(_lsblk_parser(line))
+
+ if not device:
+ return result
+
+ for dev in result:
+ if dev['NAME'] == os.path.basename(device):
+ return dev
+
+ return {}
+
+def is_device(dev):
+ """
+ Boolean to determine if a given device is a block device (**not**
+ a partition!)
+
+ For example: /dev/sda would return True, while /dev/sdc1 would return False
+ """
+ if not os.path.exists(dev):
+ return False
+ if not dev.startswith('/dev/'):
+ return False
+ if dev[len('/dev/'):].startswith('loop'):
+ if not allow_loop_devices():
+ return False
+
+ # fallback to stat
+ return _stat_is_device(os.lstat(dev).st_mode)
+
+
+def is_partition(dev):
+ """
+ Boolean to determine if a given device is a partition, like /dev/sda1
+ """
+ if not os.path.exists(dev):
+ return False
+ # use lsblk first, fall back to using stat
+ TYPE = lsblk(dev).get('TYPE')
+ if TYPE:
+ return TYPE == 'part'
+
+ # fallback to stat
+ stat_obj = os.stat(dev)
+ if _stat_is_device(stat_obj.st_mode):
+ return False
+
+ major = os.major(stat_obj.st_rdev)
+ minor = os.minor(stat_obj.st_rdev)
+ if os.path.exists('/sys/dev/block/%d:%d/partition' % (major, minor)):
+ return True
+ return False
+
+
+def is_ceph_rbd(dev):
+ """
+ Boolean to determine if a given device is a ceph RBD device, like /dev/rbd0
+ """
+ return dev.startswith('/dev/rbd')
+
+
+class BaseFloatUnit(float):
+ """
+ Base class to support float representations of size values. Suffix is
+ computed on child classes by inspecting the class name
+ """
+
+ def __repr__(self):
+ return "<%s(%s)>" % (self.__class__.__name__, self.__float__())
+
+ def __str__(self):
+ return "{size:.2f} {suffix}".format(
+ size=self.__float__(),
+ suffix=self.__class__.__name__.split('Float')[-1]
+ )
+
+ def as_int(self):
+ return int(self.real)
+
+ def as_float(self):
+ return self.real
+
+
+class FloatB(BaseFloatUnit):
+ pass
+
+
+class FloatMB(BaseFloatUnit):
+ pass
+
+
+class FloatGB(BaseFloatUnit):
+ pass
+
+
+class FloatKB(BaseFloatUnit):
+ pass
+
+
+class FloatTB(BaseFloatUnit):
+ pass
+
+
+class FloatPB(BaseFloatUnit):
+ pass
+
+
+class Size(object):
+ """
+ Helper to provide an interface for different sizes given a single initial
+ input. Allows for comparison between different size objects, which avoids
+ the need to convert sizes before comparison (e.g. comparing megabytes
+ against gigabytes).
+
+ Common comparison operators are supported::
+
+ >>> hd1 = Size(gb=400)
+ >>> hd2 = Size(gb=500)
+ >>> hd1 > hd2
+ False
+ >>> hd1 < hd2
+ True
+ >>> hd1 == hd2
+ False
+ >>> hd1 == Size(gb=400)
+ True
+
+ The Size object can also be multiplied or divided; the result is a new
+ object, leaving the operands unchanged::
+
+ >>> hd1
+ <Size(400.00 GB)>
+ >>> hd1 * 2
+ <Size(800.00 GB)>
+ >>> hd1
+ <Size(400.00 GB)>
+
+ Additions and subtractions are only supported between Size objects::
+
+ >>> Size(gb=224) - Size(gb=100)
+ <Size(124.00 GB)>
+ >>> Size(gb=1) + Size(mb=300)
+ <Size(1.29 GB)>
+
+ Can also display a human-readable representation, with automatic detection
+ on best suited unit, or alternatively, specific unit representation::
+
+ >>> s = Size(mb=2211)
+ >>> s
+ <Size(2.16 GB)>
+ >>> s.mb
+ <FloatMB(2211.0)>
+ >>> print("Total size: %s" % s.mb)
+ Total size: 2211.00 MB
+ >>> print("Total size: %s" % s)
+ Total size: 2.16 GB
+ """
+
+ @classmethod
+ def parse(cls, size):
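+ """
+ Build a ``Size`` from a human-readable string. Accepts two-letter
+ units ('10gb'), single-letter units ('10g'), or a bare number of
+ bytes::
+
+ >>> Size.parse('10GB')
+ <Size(10.00 GB)>
+ """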
+ if (len(size) > 2 and
+ size[-2].lower() in ['k', 'm', 'g', 't', 'p'] and
+ size[-1].lower() == 'b'):
+ return cls(**{size[-2:].lower(): float(size[0:-2])})
+ elif size[-1].lower() in ['b', 'k', 'm', 'g', 't', 'p']:
+ return cls(**{size[-1].lower(): float(size[0:-1])})
+ else:
+ return cls(b=float(size))
+
+
+ def __init__(self, multiplier=1024, **kw):
+ self._multiplier = multiplier
+ # create a mapping of units-to-multiplier, skip bytes as that is
+ # calculated initially always and does not need to convert
+ aliases = [
+ [('k', 'kb', 'kilobytes'), self._multiplier],
+ [('m', 'mb', 'megabytes'), self._multiplier ** 2],
+ [('g', 'gb', 'gigabytes'), self._multiplier ** 3],
+ [('t', 'tb', 'terabytes'), self._multiplier ** 4],
+ [('p', 'pb', 'petabytes'), self._multiplier ** 5]
+ ]
+ # and mappings for units-to-formatters, including bytes and aliases for
+ # each
+ format_aliases = [
+ [('b', 'bytes'), FloatB],
+ [('kb', 'kilobytes'), FloatKB],
+ [('mb', 'megabytes'), FloatMB],
+ [('gb', 'gigabytes'), FloatGB],
+ [('tb', 'terabytes'), FloatTB],
+ [('pb', 'petabytes'), FloatPB],
+ ]
+ self._formatters = {}
+ for key, value in format_aliases:
+ for alias in key:
+ self._formatters[alias] = value
+ self._factors = {}
+ for key, value in aliases:
+ for alias in key:
+ self._factors[alias] = value
+
+ for k, v in kw.items():
+ self._convert(v, k)
+ # only pursue the first occurrence
+ break
+
+ def _convert(self, size, unit):
+ """
+ Convert any size down to bytes so that other methods can rely on bytes
+ being available always, regardless of what they pass in, avoiding the
+ need for a mapping of every permutation.
+ """
+ if unit in ['b', 'bytes']:
+ self._b = size
+ return
+ factor = self._factors[unit]
+ self._b = float(size * factor)
+
+ def _get_best_format(self):
+ """
+ Go through all the supported units, and use the first one whose value
+ does not exceed 1024. This allows representing the size in the most
+ readable format available
+ """
+ for unit in ['b', 'kb', 'mb', 'gb', 'tb', 'pb']:
+ if getattr(self, unit) > 1024:
+ continue
+ return getattr(self, unit)
+
+ def __repr__(self):
+ return "<Size(%s)>" % self._get_best_format()
+
+ def __str__(self):
+ return "%s" % self._get_best_format()
+
+ def __format__(self, spec):
+ return str(self._get_best_format()).__format__(spec)
+
+ def __int__(self):
+ return int(self._b)
+
+ def __float__(self):
+ return self._b
+
+ def __lt__(self, other):
+ if isinstance(other, Size):
+ return self._b < other._b
+ else:
+ return self.b < other
+
+ def __le__(self, other):
+ if isinstance(other, Size):
+ return self._b <= other._b
+ else:
+ return self.b <= other
+
+ def __eq__(self, other):
+ if isinstance(other, Size):
+ return self._b == other._b
+ else:
+ return self.b == other
+
+ def __ne__(self, other):
+ if isinstance(other, Size):
+ return self._b != other._b
+ else:
+ return self.b != other
+
+ def __ge__(self, other):
+ if isinstance(other, Size):
+ return self._b >= other._b
+ else:
+ return self.b >= other
+
+ def __gt__(self, other):
+ if isinstance(other, Size):
+ return self._b > other._b
+ else:
+ return self.b > other
+
+ def __add__(self, other):
+ if isinstance(other, Size):
+ _b = self._b + other._b
+ return Size(b=_b)
+ raise TypeError('Cannot add "Size" object with int')
+
+ def __sub__(self, other):
+ if isinstance(other, Size):
+ _b = self._b - other._b
+ return Size(b=_b)
+ raise TypeError('Cannot subtract "Size" object from int')
+
+ def __mul__(self, other):
+ if isinstance(other, Size):
+ raise TypeError('Cannot multiply with "Size" object')
+ _b = self._b * other
+ return Size(b=_b)
+
+ def __truediv__(self, other):
+ if isinstance(other, Size):
+ return self._b / other._b
+ _b = self._b / other
+ return Size(b=_b)
+
+ def __div__(self, other):
+ if isinstance(other, Size):
+ return self._b / other._b
+ _b = self._b / other
+ return Size(b=_b)
+
+ def __bool__(self):
+ return self.b != 0
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+ def __getattr__(self, unit):
+ """
+ Calculate units on the fly, relies on the fact that ``bytes`` has been
+ converted at instantiation. Units that don't exist will trigger an
+ ``AttributeError``
+ """
+ try:
+ formatter = self._formatters[unit]
+ except KeyError:
+ raise AttributeError('Size object has no attribute "%s"' % unit)
+ if unit in ['b', 'bytes']:
+ return formatter(self._b)
+ try:
+ factor = self._factors[unit]
+ except KeyError:
+ raise AttributeError('Size object has no attribute "%s"' % unit)
+ return formatter(float(self._b) / factor)
+
+
+def human_readable_size(size):
+ """
+ Take a size in bytes, and transform it into a human readable size with up
+ to two decimals of precision.
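+
+ For example::
+
+ >>> human_readable_size(1536)
+ '1.50 KB'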
+ """
+ suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
+ for suffix in suffixes:
+ if size >= 1024:
+ size = size / 1024
+ else:
+ break
+ return "{size:.2f} {suffix}".format(
+ size=size,
+ suffix=suffix)
+
+
+def size_from_human_readable(s):
+ """
+ Take a human-readable string and convert it into a ``Size``. If no unit
+ is passed, bytes are assumed.
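+
+ Only single-letter unit suffixes are recognized, for example::
+
+ >>> size_from_human_readable('2G')
+ <Size(2.00 GB)>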
+ """
+ s = s.replace(' ', '')
+ if s[-1].isdigit():
+ return Size(b=float(s))
+ n = float(s[:-1])
+ if s[-1].lower() == 'p':
+ return Size(pb=n)
+ if s[-1].lower() == 't':
+ return Size(tb=n)
+ if s[-1].lower() == 'g':
+ return Size(gb=n)
+ if s[-1].lower() == 'm':
+ return Size(mb=n)
+ if s[-1].lower() == 'k':
+ return Size(kb=n)
+ return None
+
+
+def get_partitions_facts(sys_block_path):
+ partition_metadata = {}
+ for folder in os.listdir(sys_block_path):
+ folder_path = os.path.join(sys_block_path, folder)
+ if os.path.exists(os.path.join(folder_path, 'partition')):
+ contents = get_file_contents(os.path.join(folder_path, 'partition'))
+ if contents:
+ part = {}
+ partname = folder
+ part_sys_block_path = os.path.join(sys_block_path, partname)
+
+ part['start'] = get_file_contents(part_sys_block_path + "/start", 0)
+ part['sectors'] = get_file_contents(part_sys_block_path + "/size", 0)
+
+ part['sectorsize'] = get_file_contents(
+ part_sys_block_path + "/queue/logical_block_size")
+ if not part['sectorsize']:
+ part['sectorsize'] = get_file_contents(
+ part_sys_block_path + "/queue/hw_sector_size", 512)
+ part['size'] = float(part['sectors']) * 512
+ part['human_readable_size'] = human_readable_size(float(part['sectors']) * 512)
+ part['holders'] = []
+ for holder in os.listdir(part_sys_block_path + '/holders'):
+ part['holders'].append(holder)
+
+ partition_metadata[partname] = part
+ return partition_metadata
+
+
+def is_mapper_device(device_name):
+ return device_name.startswith(('/dev/mapper', '/dev/dm-'))
+
+
+def is_locked_raw_device(disk_path):
+ """
+ A device can be locked by third-party software, such as a database.
+ To detect that case, the device is opened in read/write and exclusive
+ mode. Returns 1 when the device is locked (or cannot be opened), and
+ 0 otherwise.
+ """
+ open_flags = (os.O_RDWR | os.O_EXCL)
+ open_mode = 0
+ fd = None
+
+ try:
+ fd = os.open(disk_path, open_flags, open_mode)
+ except OSError:
+ return 1
+
+ try:
+ os.close(fd)
+ except OSError:
+ return 1
+
+ return 0
+
+
+class AllowLoopDevices(object):
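+ """
+ Callable helper that checks the CEPH_VOLUME_ALLOW_LOOP_DEVICES
+ environment variable to decide whether loop devices may be treated as
+ disks, emitting a one-time warning when they are allowed.
+ """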
+ allow = False
+ warned = False
+
+ @classmethod
+ def __call__(cls):
+ val = os.environ.get("CEPH_VOLUME_ALLOW_LOOP_DEVICES", "false").lower()
+ if val not in ("false", 'no', '0'):
+ cls.allow = True
+ if not cls.warned:
+ logger.warning(
+ "CEPH_VOLUME_ALLOW_LOOP_DEVICES is set in your "
+ "environment, so we will allow the use of unattached loop"
+ " devices as disks. This feature is intended for "
+ "development purposes only and will never be supported in"
+ " production. Issues filed based on this behavior will "
+ "likely be ignored."
+ )
+ cls.warned = True
+ return cls.allow
+
+
+allow_loop_devices = AllowLoopDevices()
+
+
+def get_block_devs_sysfs(_sys_block_path='/sys/block', _sys_dev_block_path='/sys/dev/block', device=''):
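+ """
+ Enumerate block devices from sysfs, skipping removable media, multipath
+ members, and (unless explicitly allowed) loop devices. Returns a sorted
+ list of ``[kname, name, type]`` entries, where ``type`` is 'part' for
+ partitions, 'loop' for loop devices, or a device mapper type such as
+ 'lvm' or 'mpath' when applicable, defaulting to 'disk'.
+ """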
+ def holder_inner_loop():
+ for holder in holders:
+ # /sys/block/sdy/holders/dm-8/dm/uuid
+ holder_dm_type = get_file_contents(os.path.join(_sys_block_path, dev, f'holders/{holder}/dm/uuid')).split('-')[0].lower()
+ if holder_dm_type == 'mpath':
+ return True
+
+ # First, get devices that are _not_ partitions
+ result = list()
+ if not device:
+ dev_names = os.listdir(_sys_block_path)
+ else:
+ dev_names = [device]
+ for dev in dev_names:
+ name = kname = os.path.join("/dev", dev)
+ if not os.path.exists(name):
+ continue
+ type_ = 'disk'
+ holders = os.listdir(os.path.join(_sys_block_path, dev, 'holders'))
+ if get_file_contents(os.path.join(_sys_block_path, dev, 'removable')) == "1":
+ continue
+ if holder_inner_loop():
+ continue
+ dm_dir_path = os.path.join(_sys_block_path, dev, 'dm')
+ if os.path.isdir(dm_dir_path):
+ dm_type = get_file_contents(os.path.join(dm_dir_path, 'uuid'))
+ type_ = dm_type.split('-')[0].lower()
+ basename = get_file_contents(os.path.join(dm_dir_path, 'name'))
+ name = os.path.join("/dev/mapper", basename)
+ if dev.startswith('loop'):
+ if not allow_loop_devices():
+ continue
+ # Skip loop devices that are not attached
+ if not os.path.exists(os.path.join(_sys_block_path, dev, 'loop')):
+ continue
+ type_ = 'loop'
+ result.append([kname, name, type_])
+ # Next, look for devices that _are_ partitions
+ for item in os.listdir(_sys_dev_block_path):
+ is_part = get_file_contents(os.path.join(_sys_dev_block_path, item, 'partition')) == "1"
+ dev = os.path.basename(os.readlink(os.path.join(_sys_dev_block_path, item)))
+ if not is_part:
+ continue
+ name = kname = os.path.join("/dev", dev)
+ result.append([name, kname, "part"])
+ return sorted(result, key=lambda x: x[0])
+
+
+def get_devices(_sys_block_path='/sys/block', device=''):
+ """
+ Captures all available block devices as reported by sysfs.
+ Additional interesting metadata like sectors, size, vendor,
+ solid/rotational, etc. is collected from /sys/block/<device>
+
+ Returns a dictionary, where keys are the full paths to devices.
+
+ .. note:: Removable media and logical volumes are never included; loop
+ devices are included only when CEPH_VOLUME_ALLOW_LOOP_DEVICES is set.
+ """
+
+ device_facts = {}
+
+ block_devs = get_block_devs_sysfs(_sys_block_path)
+
+ block_types = ['disk', 'mpath']
+ if allow_loop_devices():
+ block_types.append('loop')
+
+ for block in block_devs:
+ devname = os.path.basename(block[0])
+ diskname = block[1]
+ if block[2] not in block_types:
+ continue
+ sysdir = os.path.join(_sys_block_path, devname)
+ metadata = {}
+
+ # If the device is ceph rbd it gets excluded
+ if is_ceph_rbd(diskname):
+ continue
+
+ # If the mapper device is a logical volume it gets excluded
+ if is_mapper_device(diskname):
+ if lvm.get_device_lvs(diskname):
+ continue
+
+ # all facts that have no defaults
+ # (<name>, <path relative to _sys_block_path>)
+ facts = [('removable', 'removable'),
+ ('ro', 'ro'),
+ ('vendor', 'device/vendor'),
+ ('model', 'device/model'),
+ ('rev', 'device/rev'),
+ ('sas_address', 'device/sas_address'),
+ ('sas_device_handle', 'device/sas_device_handle'),
+ ('support_discard', 'queue/discard_granularity'),
+ ('rotational', 'queue/rotational'),
+ ('nr_requests', 'queue/nr_requests'),
+ ]
+ for key, file_ in facts:
+ metadata[key] = get_file_contents(os.path.join(sysdir, file_))
+
+ device_slaves = os.listdir(os.path.join(sysdir, 'slaves'))
+ if device_slaves:
+ metadata['device_nodes'] = ','.join(device_slaves)
+ else:
+ metadata['device_nodes'] = devname
+
+ metadata['scheduler_mode'] = ""
+ scheduler = get_file_contents(sysdir + "/queue/scheduler")
+ if scheduler is not None:
+ m = re.match(r".*?(\[(.*)\])", scheduler)
+ if m:
+ metadata['scheduler_mode'] = m.group(2)
+
+ metadata['partitions'] = get_partitions_facts(sysdir)
+
+ size = get_file_contents(os.path.join(sysdir, 'size'), 0)
+
+ metadata['sectors'] = get_file_contents(os.path.join(sysdir, 'sectors'), 0)
+ fallback_sectorsize = get_file_contents(sysdir + "/queue/hw_sector_size", 512)
+ metadata['sectorsize'] = get_file_contents(sysdir +
+ "/queue/logical_block_size",
+ fallback_sectorsize)
+ metadata['size'] = float(size) * 512
+ metadata['human_readable_size'] = human_readable_size(metadata['size'])
+ metadata['path'] = diskname
+ metadata['locked'] = is_locked_raw_device(metadata['path'])
+ metadata['type'] = block[2]
+
+ device_facts[diskname] = metadata
+ return device_facts
+
+def has_bluestore_label(device_path):
+ isBluestore = False
+ bluestoreDiskSignature = 'bluestore block device' # 22 bytes long
+
+ # throws OSError on failure
+ logger.info("opening device {} to check for BlueStore label".format(device_path))
+ try:
+ with open(device_path, "rb") as fd:
+ # read first 22 bytes looking for bluestore disk signature
+ signature = fd.read(22)
+ if signature.decode('ascii', 'replace') == bluestoreDiskSignature:
+ isBluestore = True
+ except IsADirectoryError:
+ logger.info(f'{device_path} is a directory, skipping.')
+
+ return isBluestore
diff --git a/src/ceph-volume/ceph_volume/util/encryption.py b/src/ceph-volume/ceph_volume/util/encryption.py
new file mode 100644
index 000000000..cefd6094b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/util/encryption.py
@@ -0,0 +1,278 @@
+import base64
+import os
+import logging
+from ceph_volume import process, conf, terminal
+from ceph_volume.util import constants, system
+from ceph_volume.util.device import Device
+from .prepare import write_keyring
+from .disk import lsblk, device_family, get_part_entry_type
+
+logger = logging.getLogger(__name__)
+mlogger = terminal.MultiLogger(__name__)
+
+def get_key_size_from_conf():
+ """
+ Return the osd dmcrypt key size from config file.
+ Default is 512.
+ """
+ default_key_size = '512'
+ key_size = conf.ceph.get_safe(
+ 'osd',
+ 'osd_dmcrypt_key_size',
+ default='512', check_valid=False)
+
+ if key_size not in ['256', '512']:
+ logger.warning(("Invalid value set for osd_dmcrypt_key_size ({}). "
+ "Falling back to {}bits".format(key_size, default_key_size)))
+ return default_key_size
+
+ return key_size
+
+def create_dmcrypt_key():
+ """
+ Create the secret dm-crypt key (KEK) used to encrypt/decrypt the Volume Key.
+ """
+ random_string = os.urandom(128)
+ key = base64.b64encode(random_string).decode('utf-8')
+ return key
+
+
+def luks_format(key, device):
+ """
+ Format (encrypt) a device as a new LUKS volume using cryptsetup
+
+ :param key: dmcrypt secret key, will be used for encrypting
+ :param device: Absolute path to device
+ """
+ command = [
+ 'cryptsetup',
+ '--batch-mode', # do not prompt
+ '--key-size',
+ get_key_size_from_conf(),
+ '--key-file', # misnomer, should be key
+ '-', # because we indicate stdin for the key here
+ 'luksFormat',
+ device,
+ ]
+ process.call(command, stdin=key, terminal_verbose=True, show_command=True)
+
+
+def plain_open(key, device, mapping):
+ """
+ Decrypt (open) an encrypted device, previously prepared with cryptsetup in plain mode
+
+ .. note:: ceph-disk will require an additional b64decode call for this to work
+
+ :param key: dmcrypt secret key
+ :param device: absolute path to device
+ :param mapping: mapping name used to correlate device. Usually a UUID
+ """
+ command = [
+ 'cryptsetup',
+ '--key-file',
+ '-',
+ '--allow-discards', # allow discards (aka TRIM) requests for device
+ 'open',
+ device,
+ mapping,
+ '--type', 'plain',
+ '--key-size', '256',
+ ]
+
+ process.call(command, stdin=key, terminal_verbose=True, show_command=True)
+
+
+def luks_open(key, device, mapping):
+ """
+ Decrypt (open) an encrypted device, previously prepared with cryptsetup
+
+ .. note:: ceph-disk will require an additional b64decode call for this to work
+
+ :param key: dmcrypt secret key
+ :param device: absolute path to device
+ :param mapping: mapping name used to correlate device. Usually a UUID
+ """
+ command = [
+ 'cryptsetup',
+ '--key-size',
+ get_key_size_from_conf(),
+ '--key-file',
+ '-',
+ '--allow-discards', # allow discards (aka TRIM) requests for device
+ 'luksOpen',
+ device,
+ mapping,
+ ]
+ process.call(command, stdin=key, terminal_verbose=True, show_command=True)
+
+
+def dmcrypt_close(mapping):
+ """
+ Close (tear down) a device mapper mapping previously opened with cryptsetup
+
+ :param mapping: Absolute path of the device mapper mapping to remove
+ """
+ if not os.path.exists(mapping):
+ logger.debug('device mapper path does not exist %s' % mapping)
+ logger.debug('will skip cryptsetup removal')
+ return
+ # don't be strict about the remove call, but still warn on the terminal if it fails
+ process.run(['cryptsetup', 'remove', mapping], stop_on_error=False)
+
+
+def get_dmcrypt_key(osd_id, osd_fsid, lockbox_keyring=None):
+ """
+ Retrieve the dmcrypt (secret) key stored initially on the monitor. The key
+ is sent initially with JSON, and the Monitor then mangles the name to
+ ``dm-crypt/osd/<fsid>/luks``
+
+ The ``lockbox.keyring`` file is required for this operation, and it is
+ assumed it will exist on the path for the same OSD that is being activated.
+ To support scanning, it is optionally configurable to a custom location
+ (e.g. inside a lockbox partition mounted in a temporary location)
+ """
+ if lockbox_keyring is None:
+ lockbox_keyring = '/var/lib/ceph/osd/%s-%s/lockbox.keyring' % (conf.cluster, osd_id)
+ name = 'client.osd-lockbox.%s' % osd_fsid
+ config_key = 'dm-crypt/osd/%s/luks' % osd_fsid
+
+ mlogger.info(f'Running ceph config-key get {config_key}')
+ stdout, stderr, returncode = process.call(
+ [
+ 'ceph',
+ '--cluster', conf.cluster,
+ '--name', name,
+ '--keyring', lockbox_keyring,
+ 'config-key',
+ 'get',
+ config_key
+ ],
+ show_command=True,
+ logfile_verbose=False
+ )
+ if returncode != 0:
+ raise RuntimeError('Unable to retrieve dmcrypt secret')
+ return ' '.join(stdout).strip()
+
+
+def write_lockbox_keyring(osd_id, osd_fsid, secret):
+ """
+ Helper to write the lockbox keyring. This is needed because the bluestore OSD will
+ not persist the keyring, and it can't be stored in the data device for filestore because
+ at the time this is needed, the device is encrypted.
+
+ For bluestore: A tmpfs filesystem is mounted, so the path can get written
+ to, but the files are ephemeral, which requires this file to be created
+ every time it is activated.
+ For filestore: The path for the OSD would exist at this point even if no
+ OSD data device is mounted, so the keyring is written to fetch the key, and
+ then the data device is mounted on that directory, making the keyring
+ "disappear".
+ """
+ if os.path.exists('/var/lib/ceph/osd/%s-%s/lockbox.keyring' % (conf.cluster, osd_id)):
+ return
+
+ name = 'client.osd-lockbox.%s' % osd_fsid
+ write_keyring(
+ osd_id,
+ secret,
+ keyring_name='lockbox.keyring',
+ name=name
+ )
+
+
+def status(device):
+ """
+ Capture the metadata information of a possibly encrypted device, returning
+ a dictionary with all the values found (if any).
+
+ An encrypted device will contain information about a device. Example
+ successful output looks like::
+
+ $ cryptsetup status /dev/mapper/ed6b5a26-eafe-4cd4-87e3-422ff61e26c4
+ /dev/mapper/ed6b5a26-eafe-4cd4-87e3-422ff61e26c4 is active and is in use.
+ type: LUKS1
+ cipher: aes-xts-plain64
+ keysize: 256 bits
+ device: /dev/sdc2
+ offset: 4096 sectors
+ size: 20740063 sectors
+ mode: read/write
+
+ As long as the mapper device is in 'open' state, the ``status`` call will work.
+
+ :param device: Absolute path or UUID of the device mapper
+ """
+ command = [
+ 'cryptsetup',
+ 'status',
+ device,
+ ]
+ out, err, code = process.call(command, show_command=True, verbose_on_failure=False)
+
+ metadata = {}
+ if code != 0:
+ logger.warning('failed to detect device mapper information')
+ return metadata
+ for line in out:
+ # get rid of lines that might not be useful to construct the report:
+ if not line.startswith(' '):
+ continue
+ try:
+ column, value = line.split(': ')
+ except ValueError:
+ continue
+ metadata[column.strip()] = value.strip().strip('"')
+ return metadata
+
+
+def legacy_encrypted(device):
+ """
+ Detect if a device was encrypted with ceph-disk or not. In the case of
+ encrypted devices, include the type of encryption (LUKS, or PLAIN), and
+ infer what the lockbox partition is.
+
+ This function assumes that ``device`` will be a partition.
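+
+ An example of the metadata returned for a ceph-disk LUKS device (values
+ are illustrative)::
+
+ {'encrypted': True, 'type': 'luks', 'lockbox': '/dev/sdc3', 'device': '/dev/sdc2'}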
+ """
+ if os.path.isdir(device):
+ mounts = system.Mounts(paths=True).get_mounts()
+ # a directory isn't going to help with parsing, so resolve the device
+ # mounted at that path before rebinding the ``device`` variable
+ mounted_device = mounts.get(device, [None])[0]
+ if not mounted_device:
+ raise RuntimeError('unable to determine the device mounted at %s' % device)
+ device = mounted_device
+ metadata = {'encrypted': False, 'type': None, 'lockbox': '', 'device': device}
+ # check if the device is online/decrypted first
+ active_mapper = status(device)
+ if active_mapper:
+ # normalize a bit to ensure same values regardless of source
+ metadata['type'] = active_mapper['type'].lower().strip('12') # turn LUKS1 or LUKS2 into luks
+ metadata['encrypted'] = True if metadata['type'] in ['plain', 'luks'] else False
+ # The true device is now available to this function, so it gets
+ # re-assigned here for the lockbox checks to succeed (it is not
+ # possible to guess partitions from a device mapper device otherwise
+ device = active_mapper.get('device', device)
+ metadata['device'] = device
+ else:
+ uuid = get_part_entry_type(device)
+ guid_match = constants.ceph_disk_guids.get(uuid, {})
+ encrypted_guid = guid_match.get('encrypted', False)
+ if encrypted_guid:
+ metadata['encrypted'] = True
+ metadata['type'] = guid_match['encryption_type']
+
+ # Let's find the lockbox location now. To do this, we need to find out the
+ # parent device name for the device so that we can query all of its
+ # associated devices and *then* look for one that has the 'lockbox' label
+ # on it. Thanks for being awesome, ceph-disk
+ disk_meta = lsblk(device, abspath=True)
+ if not disk_meta:
+ return metadata
+ parent_device = disk_meta['PKNAME']
+ # With the parent device set, we can now list the associated devices and look for the lockbox
+ devices = [Device(i['NAME']) for i in device_family(parent_device)]
+ for d in devices:
+ if d.ceph_disk.type == 'lockbox':
+ metadata['lockbox'] = d.path
+ break
+ return metadata
diff --git a/src/ceph-volume/ceph_volume/util/lsmdisk.py b/src/ceph-volume/ceph_volume/util/lsmdisk.py
new file mode 100644
index 000000000..607fd4f0a
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/util/lsmdisk.py
@@ -0,0 +1,196 @@
+"""
+This module handles the interaction with libstoragemgmt for local disk
+devices. Interaction with LSM may fail for a number of reasons, but the
+intent here is to make this a soft fail, since LSM-related data is not
+a critical component of ceph-volume.
+"""
+import logging
+
+try:
+ from lsm import LocalDisk, LsmError
+ from lsm import Disk as lsm_Disk
+except ImportError:
+ lsm_available = False
+ transport_map = {}
+ health_map = {}
+ lsm_Disk = None
+else:
+ lsm_available = True
+ transport_map = {
+ lsm_Disk.LINK_TYPE_UNKNOWN: "Unavailable",
+ lsm_Disk.LINK_TYPE_FC: "Fibre Channel",
+ lsm_Disk.LINK_TYPE_SSA: "IBM SSA",
+ lsm_Disk.LINK_TYPE_SBP: "Serial Bus",
+ lsm_Disk.LINK_TYPE_SRP: "SCSI RDMA",
+ lsm_Disk.LINK_TYPE_ISCSI: "iSCSI",
+ lsm_Disk.LINK_TYPE_SAS: "SAS",
+ lsm_Disk.LINK_TYPE_ADT: "ADT (Tape)",
+ lsm_Disk.LINK_TYPE_ATA: "ATA/SATA",
+ lsm_Disk.LINK_TYPE_USB: "USB",
+ lsm_Disk.LINK_TYPE_SOP: "SCSI over PCI-E",
+ lsm_Disk.LINK_TYPE_PCIE: "PCI-E",
+ }
+ health_map = {
+ lsm_Disk.HEALTH_STATUS_UNKNOWN: "Unknown",
+ lsm_Disk.HEALTH_STATUS_FAIL: "Fail",
+ lsm_Disk.HEALTH_STATUS_WARN: "Warn",
+ lsm_Disk.HEALTH_STATUS_GOOD: "Good",
+ }
+
+logger = logging.getLogger(__name__)
+
+
+class LSMDisk:
+ def __init__(self, dev_path):
+ self.dev_path = dev_path
+ self.error_list = set()
+
+ if lsm_available:
+ self.lsm_available = True
+ self.disk = LocalDisk()
+ else:
+ self.lsm_available = False
+ self.error_list.add("libstoragemgmt (lsm module) is unavailable")
+ logger.info("LSM information is unavailable: libstoragemgmt is not installed")
+ self.disk = None
+
+ self.led_bits = None
+
+ @property
+ def errors(self):
+ """show any errors that the LSM interaction has encountered (str)"""
+ return ", ".join(self.error_list)
+
+ def _query_lsm(self, func, path):
+ """Common method used to call the LSM functions, returning the function's result or None"""
+
+ # if disk is None, lsm is unavailable so all calls should return None
+ if self.disk is None:
+ return None
+
+ method = getattr(self.disk, func)
+ try:
+ output = method(path)
+ except LsmError as err:
+ logger.error("LSM Error: {}".format(err._msg))
+ self.error_list.add(err._msg)
+ return None
+ else:
+ return output
+
+ @property
+ def led_status(self):
+ """Fetch LED status, store in the LSMDisk object and return current status (int)"""
+ if self.led_bits is None:
+ self.led_bits = self._query_lsm('led_status_get', self.dev_path) or 1
+ return self.led_bits
+
+ @property
+ def led_ident_state(self):
+ """Query a disks IDENT LED state to discover when it is On, Off or Unknown (str)"""
+ if self.led_status == 1:
+ return "Unsupported"
+ if self.led_status & lsm_Disk.LED_STATUS_IDENT_ON == lsm_Disk.LED_STATUS_IDENT_ON:
+ return "On"
+ elif self.led_status & lsm_Disk.LED_STATUS_IDENT_OFF == lsm_Disk.LED_STATUS_IDENT_OFF:
+ return "Off"
+ elif self.led_status & lsm_Disk.LED_STATUS_IDENT_UNKNOWN == lsm_Disk.LED_STATUS_IDENT_UNKNOWN:
+ return "Unknown"
+
+ return "Unsupported"
+
+ @property
+ def led_fault_state(self):
+ """Query a disks FAULT LED state to discover when it is On, Off or Unknown (str)"""
+ if self.led_status == 1:
+ return "Unsupported"
+ if self.led_status & lsm_Disk.LED_STATUS_FAULT_ON == lsm_Disk.LED_STATUS_FAULT_ON:
+ return "On"
+ elif self.led_status & lsm_Disk.LED_STATUS_FAULT_OFF == lsm_Disk.LED_STATUS_FAULT_OFF:
+ return "Off"
+ elif self.led_status & lsm_Disk.LED_STATUS_FAULT_UNKNOWN == lsm_Disk.LED_STATUS_FAULT_UNKNOWN:
+ return "Unknown"
+
+ return "Unsupported"
+
+ @property
+ def led_ident_support(self):
+ """Query the LED state to determine IDENT support: Unknown, Supported, Unsupported (str)"""
+ if self.led_status == 1:
+ return "Unknown"
+
+ ident_states = (
+ lsm_Disk.LED_STATUS_IDENT_ON +
+ lsm_Disk.LED_STATUS_IDENT_OFF +
+ lsm_Disk.LED_STATUS_IDENT_UNKNOWN
+ )
+
+ if (self.led_status & ident_states) == 0:
+ return "Unsupported"
+
+ return "Supported"
+
+ @property
+ def led_fault_support(self):
+ """Query the LED state to determine FAULT support: Unknown, Supported, Unsupported (str)"""
+ if self.led_status == 1:
+ return "Unknown"
+
+ fail_states = (
+ lsm_Disk.LED_STATUS_FAULT_ON +
+ lsm_Disk.LED_STATUS_FAULT_OFF +
+ lsm_Disk.LED_STATUS_FAULT_UNKNOWN
+ )
+
+ if self.led_status & fail_states == 0:
+ return "Unsupported"
+
+ return "Supported"
+
+ @property
+ def health(self):
+ """Determine the health of the disk from LSM : Unknown, Fail, Warn or Good (str)"""
+ _health_int = self._query_lsm('health_status_get', self.dev_path)
+ return health_map.get(_health_int, "Unknown")
+
+ @property
+ def transport(self):
+ """Translate a disks link type to a human readable format (str)"""
+ _link_type = self._query_lsm('link_type_get', self.dev_path)
+ return transport_map.get(_link_type, "Unknown")
+
+ @property
+ def media_type(self):
+ """Use the rpm value to determine the type of disk media: Flash or HDD (str)"""
+ _rpm = self._query_lsm('rpm_get', self.dev_path)
+ if _rpm is not None:
+ if _rpm == 0:
+ return "Flash"
+ elif _rpm > 1:
+ return "HDD"
+
+ return "Unknown"
+
+ def json_report(self):
+ """Return the LSM related metadata for the current local disk (dict)"""
+ if self.lsm_available:
+ return {
+ "serialNum": self._query_lsm('serial_num_get', self.dev_path) or "Unknown",
+ "transport": self.transport,
+ "mediaType": self.media_type,
+ "rpm": self._query_lsm('rpm_get', self.dev_path) or "Unknown",
+ "linkSpeed": self._query_lsm('link_speed_get', self.dev_path) or "Unknown",
+ "health": self.health,
+ "ledSupport": {
+ "IDENTsupport": self.led_ident_support,
+ "IDENTstatus": self.led_ident_state,
+ "FAILsupport": self.led_fault_support,
+ "FAILstatus": self.led_fault_state,
+ },
+ "errors": list(self.error_list)
+ }
+ else:
+ return {}
diff --git a/src/ceph-volume/ceph_volume/util/prepare.py b/src/ceph-volume/ceph_volume/util/prepare.py
new file mode 100644
index 000000000..323ba1e93
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/util/prepare.py
@@ -0,0 +1,535 @@
+"""
+These utilities for prepare provide all the pieces needed to prepare a device,
+as well as a compound ("single call") helper to run them in order. Some plugins
+may want to change parts of the process, while others might want to consume
+the single-call helper
+"""
+import errno
+import os
+import logging
+import json
+import time
+from ceph_volume import process, conf, __release__, terminal
+from ceph_volume.util import system, constants, str_to_int, disk
+
+logger = logging.getLogger(__name__)
+mlogger = terminal.MultiLogger(__name__)
+
+
+def create_key():
+ stdout, stderr, returncode = process.call(
+ ['ceph-authtool', '--gen-print-key'],
+ show_command=True,
+ logfile_verbose=False)
+ if returncode != 0:
+ raise RuntimeError('Unable to generate a new auth key')
+ return ' '.join(stdout).strip()
+
+
+def write_keyring(osd_id, secret, keyring_name='keyring', name=None):
+ """
+ Create a keyring file with the ``ceph-authtool`` utility. Constructs the
+ path over well-known conventions for the OSD, and allows any other custom
+ ``name`` to be set.
+
+ :param osd_id: The ID for the OSD to be used
+ :param secret: The key to be added as (as a string)
+ :param name: Defaults to 'osd.{ID}' but can be used to add other client
+ names, specifically for 'lockbox' type of keys
+ :param keyring_name: Alternative keyring name, for supporting other
+ types of keys like for lockbox
+ """
+ osd_keyring = '/var/lib/ceph/osd/%s-%s/%s' % (conf.cluster, osd_id, keyring_name)
+ name = name or 'osd.%s' % str(osd_id)
+ mlogger.info(f'Creating keyring file for {name}')
+ process.call(
+ [
+ 'ceph-authtool', osd_keyring,
+ '--create-keyring',
+ '--name', name,
+ '--add-key', secret
+ ],
+ logfile_verbose=False)
+ system.chown(osd_keyring)
+
+
+def get_journal_size(lv_format=True):
+ """
+ Helper to retrieve the size (defined in megabytes in ceph.conf) to create
+ the journal logical volume. It "translates" the string into a float value,
+ converts that into gigabytes, and finally (optionally) formats it
+ back as a string so that it can be used for creating the LV.
+
+ :param lv_format: Return a string to be used for ``lv_create``. A 5 GB size
+ would result in '5G', otherwise it will return a ``Size`` object.
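+
+ For example, with the default ``osd_journal_size`` of 5120::
+
+ >>> get_journal_size()
+ '5G'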
+ """
+ conf_journal_size = conf.ceph.get_safe('osd', 'osd_journal_size', '5120')
+ logger.debug('osd_journal_size set to %s' % conf_journal_size)
+ journal_size = disk.Size(mb=str_to_int(conf_journal_size))
+
+ if journal_size < disk.Size(gb=2):
+ mlogger.error('Refusing to continue with configured size for journal')
+ raise RuntimeError('journal sizes must be larger than 2GB, detected: %s' % journal_size)
+ if lv_format:
+ return '%sG' % journal_size.gb.as_int()
+ return journal_size
+
+
+def get_block_db_size(lv_format=True):
+ """
+ Helper to retrieve the size (defined in bytes in ceph.conf) to create
+ the block.db logical volume. It "translates" the string into a float value,
+ converts that into gigabytes, and finally (optionally) formats it
+ back as a string so that it can be used for creating the LV.
+
+ :param lv_format: Return a string to be used for ``lv_create``. A 5 GB size
+ would result in '5G', otherwise it will return a ``Size`` object.
+
+ .. note:: Configuration values are in bytes, unlike journals which
+ are defined in megabytes
+ """
+ conf_db_size = None
+ try:
+ conf_db_size = conf.ceph.get_safe('osd', 'bluestore_block_db_size', None)
+ except RuntimeError:
+ logger.exception("failed to load ceph configuration, will use defaults")
+
+ if not conf_db_size:
+ logger.debug(
+ 'block.db has no size configuration, will fallback to using as much as possible'
+ )
+ # TODO better to return disk.Size(b=0) here
+ return None
+ logger.debug('bluestore_block_db_size set to %s' % conf_db_size)
+ db_size = disk.Size(b=str_to_int(conf_db_size))
+
+ if db_size < disk.Size(gb=2):
+ mlogger.error('Refusing to continue with configured size for block.db')
+ raise RuntimeError('block.db sizes must be larger than 2GB, detected: %s' % db_size)
+ if lv_format:
+ return '%sG' % db_size.gb.as_int()
+ return db_size
+
+def get_block_wal_size(lv_format=True):
+ """
+ Helper to retrieve the size (defined in bytes in ceph.conf) to create
+ the block.wal logical volume. It "translates" the string into a float value,
+ converts that into gigabytes, and finally (optionally) formats it
+ back as a string so that it can be used for creating the LV.
+
+ :param lv_format: Return a string to be used for ``lv_create``. A 5 GB size
+ would result in '5G', otherwise it will return a ``Size`` object.
+
+ .. note:: Configuration values are in bytes, unlike journals which
+ are defined in megabytes
+ """
+ conf_wal_size = None
+ try:
+ conf_wal_size = conf.ceph.get_safe('osd', 'bluestore_block_wal_size', None)
+ except RuntimeError:
+ logger.exception("failed to load ceph configuration, will use defaults")
+
+ if not conf_wal_size:
+ logger.debug(
+ 'block.wal has no size configuration, will fallback to using as much as possible'
+ )
+ return None
+ logger.debug('bluestore_block_wal_size set to %s' % conf_wal_size)
+ wal_size = disk.Size(b=str_to_int(conf_wal_size))
+
+ if wal_size < disk.Size(gb=2):
+ mlogger.error('Refusing to continue with configured size for block.wal')
+ raise RuntimeError('block.wal sizes must be larger than 2GB, detected: %s' % wal_size)
+ if lv_format:
+ return '%sG' % wal_size.gb.as_int()
+ return wal_size
+
+
+def create_id(fsid, json_secrets, osd_id=None):
+ """
+ :param fsid: The osd fsid to create, always required
+ :param json_secrets: a json-ready object with whatever secrets are wanted
+ to be passed to the monitor
+ :param osd_id: Reuse an existing ID from an OSD that's been destroyed. If the
+ id does not exist in the cluster, a new ID will be created
+ """
+ bootstrap_keyring = '/var/lib/ceph/bootstrap-osd/%s.keyring' % conf.cluster
+ cmd = [
+ 'ceph',
+ '--cluster', conf.cluster,
+ '--name', 'client.bootstrap-osd',
+ '--keyring', bootstrap_keyring,
+ '-i', '-',
+ 'osd', 'new', fsid
+ ]
+ if osd_id is not None:
+ if osd_id_available(osd_id):
+ cmd.append(osd_id)
+ else:
+ raise RuntimeError("The osd ID {} is already in use or does not exist.".format(osd_id))
+ stdout, stderr, returncode = process.call(
+ cmd,
+ stdin=json_secrets,
+ show_command=True
+ )
+ if returncode != 0:
+ raise RuntimeError('Unable to create a new OSD id')
+ return ' '.join(stdout).strip()
+
+
+def osd_id_available(osd_id):
+ """
+ Checks to see if an osd ID exists and if it's available for
+ reuse. Returns True if it is, False if it isn't.
+
+ :param osd_id: The osd ID to check
+ """
+ if osd_id is None:
+ return False
+
+ bootstrap_keyring = '/var/lib/ceph/bootstrap-osd/%s.keyring' % conf.cluster
+ stdout, stderr, returncode = process.call(
+ [
+ 'ceph',
+ '--cluster', conf.cluster,
+ '--name', 'client.bootstrap-osd',
+ '--keyring', bootstrap_keyring,
+ 'osd',
+ 'tree',
+ '-f', 'json',
+ ],
+ show_command=True
+ )
+ if returncode != 0:
+ raise RuntimeError('Unable to check if OSD id exists: %s' % osd_id)
+
+ output = json.loads(''.join(stdout).strip())
+ osds = output['nodes']
+ osd = [osd for osd in osds if str(osd['id']) == str(osd_id)]
+ if not osd or (osd and osd[0].get('status') == "destroyed"):
+ return True
+ return False
+
+
+def mount_tmpfs(path):
+ process.run([
+ 'mount',
+ '-t',
+ 'tmpfs', 'tmpfs',
+ path
+ ])
+
+ # Restore SELinux context
+ system.set_context(path)
+
+
+def create_osd_path(osd_id, tmpfs=False):
+ path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
+ system.mkdir_p('/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id))
+ if tmpfs:
+ mount_tmpfs(path)
+
+
+def format_device(device):
+ # only supports xfs
+ command = ['mkfs', '-t', 'xfs']
+
+ # get the mkfs options if any for xfs,
+ # fallback to the default options defined in constants.mkfs
+ flags = conf.ceph.get_list(
+ 'osd',
+ 'osd_mkfs_options_xfs',
+ default=constants.mkfs.get('xfs'),
+ split=' ',
+ )
+
+ # always force
+ if '-f' not in flags:
+ flags.insert(0, '-f')
+
+ command.extend(flags)
+ command.append(device)
+ process.run(command)
+
+
+def _normalize_mount_flags(flags, extras=None):
+ """
+ Mount flag options have to be a single string, separated by a comma. If the
+ flags are separated by spaces, or with commas and spaces in ceph.conf, the
+ mount options will be passed incorrectly.
+
+ This will help when parsing ceph.conf values return something like::
+
+ ["rw,", "exec,"]
+
+ Or::
+
+ [" rw ,", "exec"]
+
+ :param flags: A list of flags, or a single string of mount flags
+ :param extras: Extra set of mount flags, useful when custom devices like VDO need
+ ad-hoc mount configurations
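+
+ For the first example above, the normalized result would be the single
+ string ``rw,exec``.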
+ """
+ # Instead of using set(), we append to this new list here, because set()
+ # will create an arbitrary order on the items that is made worst when
+ # testing with tools like tox that includes a randomizer seed. By
+ # controlling the order, it is easier to correctly assert the expectation
+ unique_flags = []
+ if isinstance(flags, list):
+ if extras:
+ flags.extend(extras)
+
+ # ensure that spaces and commas are removed so that they can join
+ # correctly, remove duplicates
+ for f in flags:
+ if f and f not in unique_flags:
+ unique_flags.append(f.strip().strip(','))
+ return ','.join(unique_flags)
+
+ # split them, clean them, and join them back again
+ flags = flags.strip().split(' ')
+ if extras:
+ flags.extend(extras)
+
+ # remove possible duplicates
+ for f in flags:
+ if f and f not in unique_flags:
+ unique_flags.append(f.strip().strip(','))
+ flags = ','.join(unique_flags)
+ # Before returning, split them again, since strings can be mashed up
+ # together, preventing removal of duplicate entries
+ return ','.join(set(flags.split(',')))
+
+
+def mount_osd(device, osd_id, **kw):
+ extras = []
+ is_vdo = kw.get('is_vdo', '0')
+ if is_vdo == '1':
+ extras = ['discard']
+ destination = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
+ command = ['mount', '-t', 'xfs', '-o']
+ flags = conf.ceph.get_list(
+ 'osd',
+ 'osd_mount_options_xfs',
+ default=constants.mount.get('xfs'),
+ split=' ',
+ )
+ command.append(
+ _normalize_mount_flags(flags, extras=extras)
+ )
+ command.append(device)
+ command.append(destination)
+ process.run(command)
+
+ # Restore SELinux context
+ system.set_context(destination)
+
+
+def _link_device(device, device_type, osd_id):
+ """
+ Allow linking any device type in an OSD directory. ``device`` must be the
+ source, with an absolute path, and ``device_type`` will be the destination
+ name, like 'journal' or 'block'
+ """
+ device_path = '/var/lib/ceph/osd/%s-%s/%s' % (
+ conf.cluster,
+ osd_id,
+ device_type
+ )
+ command = ['ln', '-s', device, device_path]
+ system.chown(device)
+
+ process.run(command)
+
+def _validate_bluestore_device(device, expected_device_type, osd_uuid):
+ """
+ Validate whether the given device is truly what it is supposed to be
+ """
+
+ out, err, ret = process.call(['ceph-bluestore-tool', 'show-label', '--dev', device])
+ if err:
+ terminal.error('ceph-bluestore-tool failed to run. %s'% err)
+ raise SystemExit(1)
+ if ret:
+ terminal.error('no label on %s'% device)
+ raise SystemExit(1)
+ oj = json.loads(''.join(out))
+ if device not in oj:
+ terminal.error('%s not in the output of ceph-bluestore-tool, buggy?'% device)
+ raise SystemExit(1)
+ current_device_type = oj[device]['description']
+ if current_device_type != expected_device_type:
+ terminal.error('%s is not a %s device but %s' % (device, expected_device_type, current_device_type))
+ raise SystemExit(1)
+ current_osd_uuid = oj[device]['osd_uuid']
+ if current_osd_uuid != osd_uuid:
+ terminal.error('device %s is used by another osd %s as %s, should be %s'% (device, current_osd_uuid, current_device_type, osd_uuid))
+ raise SystemExit(1)
+
+def link_journal(journal_device, osd_id):
+ _link_device(journal_device, 'journal', osd_id)
+
+
+def link_block(block_device, osd_id):
+ _link_device(block_device, 'block', osd_id)
+
+
+def link_wal(wal_device, osd_id, osd_uuid=None):
+ _validate_bluestore_device(wal_device, 'bluefs wal', osd_uuid)
+ _link_device(wal_device, 'block.wal', osd_id)
+
+
+def link_db(db_device, osd_id, osd_uuid=None):
+ _validate_bluestore_device(db_device, 'bluefs db', osd_uuid)
+ _link_device(db_device, 'block.db', osd_id)
+
+
+def get_monmap(osd_id):
+ """
+ Before creating the OSD files, a monmap needs to be retrieved so that it
+ can be used to tell the monitor(s) about the new OSD. A call will look like::
+
+ ceph --cluster ceph --name client.bootstrap-osd \
+ --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring \
+ mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
+ """
+ path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
+ bootstrap_keyring = '/var/lib/ceph/bootstrap-osd/%s.keyring' % conf.cluster
+ monmap_destination = os.path.join(path, 'activate.monmap')
+
+ process.run([
+ 'ceph',
+ '--cluster', conf.cluster,
+ '--name', 'client.bootstrap-osd',
+ '--keyring', bootstrap_keyring,
+ 'mon', 'getmap', '-o', monmap_destination
+ ])
+
+
+def get_osdspec_affinity():
+ return os.environ.get('CEPH_VOLUME_OSDSPEC_AFFINITY', '')
+
+
+def osd_mkfs_bluestore(osd_id, fsid, keyring=None, wal=False, db=False):
+ """
+ Create the files for the OSD to function. A normal call will look like:
+
+ ceph-osd --cluster ceph --mkfs --mkkey -i 0 \
+ --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
+ --osd-data /var/lib/ceph/osd/ceph-0 \
+ --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
+ --keyring /var/lib/ceph/osd/ceph-0/keyring \
+ --setuser ceph --setgroup ceph
+
+ In some cases it is required to use the keyring, when it is passed in as
+ a keyword argument it is used as part of the ceph-osd command
+ """
+ path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
+ monmap = os.path.join(path, 'activate.monmap')
+
+ system.chown(path)
+
+ base_command = [
+ 'ceph-osd',
+ '--cluster', conf.cluster,
+ '--osd-objectstore', 'bluestore',
+ '--mkfs',
+ '-i', osd_id,
+ '--monmap', monmap,
+ ]
+
+ supplementary_command = [
+ '--osd-data', path,
+ '--osd-uuid', fsid,
+ '--setuser', 'ceph',
+ '--setgroup', 'ceph'
+ ]
+
+ if keyring is not None:
+ base_command.extend(['--keyfile', '-'])
+
+ if wal:
+ base_command.extend(
+ ['--bluestore-block-wal-path', wal]
+ )
+ system.chown(wal)
+
+ if db:
+ base_command.extend(
+ ['--bluestore-block-db-path', db]
+ )
+ system.chown(db)
+
+ if get_osdspec_affinity():
+ base_command.extend(['--osdspec-affinity', get_osdspec_affinity()])
+
+ command = base_command + supplementary_command
+
+ """
+ When running in containers the --mkfs on raw device sometimes fails
+ to acquire a lock through flock() on the device because systemd-udevd holds one temporarily.
+ See KernelDevice.cc and _lock() to understand how ceph-osd acquires the lock.
+ Because this is really transient, we retry up to 5 times and wait for 1 sec in-between
+ """
+ for retry in range(5):
+ _, _, returncode = process.call(command, stdin=keyring, terminal_verbose=True, show_command=True)
+ if returncode == 0:
+ break
+ else:
+ if returncode == errno.EWOULDBLOCK:
+ time.sleep(1)
+ logger.info('disk is held by another process, trying to mkfs again... (%d/5 attempts)' % (retry + 1))
+ continue
+ else:
+ raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command)))
+
+
+def osd_mkfs_filestore(osd_id, fsid, keyring):
+ """
+ Create the files for the OSD to function. A normal call will look like:
+
+ ceph-osd --cluster ceph --mkfs --mkkey -i 0 \
+ --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
+ --osd-data /var/lib/ceph/osd/ceph-0 \
+ --osd-journal /var/lib/ceph/osd/ceph-0/journal \
+ --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
+ --keyring /var/lib/ceph/osd/ceph-0/keyring \
+ --setuser ceph --setgroup ceph
+
+ """
+ path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
+ monmap = os.path.join(path, 'activate.monmap')
+ journal = os.path.join(path, 'journal')
+
+ system.chown(journal)
+ system.chown(path)
+
+ command = [
+ 'ceph-osd',
+ '--cluster', conf.cluster,
+ '--osd-objectstore', 'filestore',
+ '--mkfs',
+ '-i', osd_id,
+ '--monmap', monmap,
+ ]
+
+ if get_osdspec_affinity():
+ command.extend(['--osdspec-affinity', get_osdspec_affinity()])
+
+ if __release__ != 'luminous':
+ # goes through stdin
+ command.extend(['--keyfile', '-'])
+
+ command.extend([
+ '--osd-data', path,
+ '--osd-journal', journal,
+ '--osd-uuid', fsid,
+ '--setuser', 'ceph',
+ '--setgroup', 'ceph'
+ ])
+
+ _, _, returncode = process.call(
+ command, stdin=keyring, terminal_verbose=True, show_command=True
+ )
+ if returncode != 0:
+ raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command)))
diff --git a/src/ceph-volume/ceph_volume/util/system.py b/src/ceph-volume/ceph_volume/util/system.py
new file mode 100644
index 000000000..590a0599b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/util/system.py
@@ -0,0 +1,419 @@
+import errno
+import logging
+import os
+import pwd
+import platform
+import tempfile
+import uuid
+import subprocess
+import threading
+from ceph_volume import process, terminal
+from . import as_string
+
+# python2 has no FileNotFoundError
+try:
+ FileNotFoundError
+except NameError:
+ FileNotFoundError = OSError
+
+logger = logging.getLogger(__name__)
+mlogger = terminal.MultiLogger(__name__)
+
+# TODO: get these out of here and into a common area for others to consume
+if platform.system() == 'FreeBSD':
+ FREEBSD = True
+ DEFAULT_FS_TYPE = 'zfs'
+ PROCDIR = '/compat/linux/proc'
+ # FreeBSD does not have blockdevices any more
+ BLOCKDIR = '/dev'
+ ROOTGROUP = 'wheel'
+else:
+ FREEBSD = False
+ DEFAULT_FS_TYPE = 'xfs'
+ PROCDIR = '/proc'
+ BLOCKDIR = '/sys/block'
+ ROOTGROUP = 'root'
+
+host_rootfs = '/rootfs'
+run_host_cmd = [
+ 'nsenter',
+ '--mount={}/proc/1/ns/mnt'.format(host_rootfs),
+ '--ipc={}/proc/1/ns/ipc'.format(host_rootfs),
+ '--net={}/proc/1/ns/net'.format(host_rootfs),
+ '--uts={}/proc/1/ns/uts'.format(host_rootfs)
+]
+
+def generate_uuid():
+ return str(uuid.uuid4())
+
+def find_executable_on_host(locations=[], executable='', binary_check='/bin/ls'):
+ paths = ['{}/{}'.format(location, executable) for location in locations]
+ command = []
+ command.extend(run_host_cmd + [binary_check] + paths)
+ process = subprocess.Popen(
+ command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ close_fds=True
+ )
+ stdout = as_string(process.stdout.read())
+ if stdout:
+ executable_on_host = stdout.split('\n')[0]
+ logger.info('Executable {} found on the host, will use {}'.format(executable, executable_on_host))
+ return executable_on_host
+ else:
+ logger.warning('Executable {} not found on the host, will return {} as-is'.format(executable, executable))
+ return executable
+
+def which(executable, run_on_host=False):
+ """find the location of an executable"""
+ def _get_path(executable, locations):
+ for location in locations:
+ executable_path = os.path.join(location, executable)
+ if os.path.exists(executable_path) and os.path.isfile(executable_path):
+ return executable_path
+ return None
+
+ static_locations = (
+ '/usr/local/bin',
+ '/bin',
+ '/usr/bin',
+ '/usr/local/sbin',
+ '/usr/sbin',
+ '/sbin',
+ )
+
+ if not run_on_host:
+ path = os.getenv('PATH', '')
+ path_locations = path.split(':')
+ exec_in_path = _get_path(executable, path_locations)
+ if exec_in_path:
+ return exec_in_path
+ mlogger.warning('Executable {} not in PATH: {}'.format(executable, path))
+
+ exec_in_static_locations = _get_path(executable, static_locations)
+ if exec_in_static_locations:
+ mlogger.warning('Found executable under {}, please ensure $PATH is set correctly!'.format(exec_in_static_locations))
+ return exec_in_static_locations
+ else:
+ executable = find_executable_on_host(static_locations, executable)
+
+ # At this point, either `find_executable_on_host()` found an executable on the host
+ # or we fallback to just returning the argument as-is, to prevent a hard fail, and
+ # hoping that the system might have the executable somewhere custom
+ return executable
+
+def get_ceph_user_ids():
+ """
+ Return the id and gid of the ceph user
+ """
+ try:
+ user = pwd.getpwnam('ceph')
+ except KeyError:
+ # is this even possible?
+ raise RuntimeError('"ceph" user is not available in the current system')
+ return user[2], user[3]
+
+
+def get_file_contents(path, default=''):
+ contents = default
+ if not os.path.exists(path):
+ return contents
+ try:
+ with open(path, 'r') as open_file:
+ contents = open_file.read().strip()
+ except Exception:
+ logger.exception('Failed to read contents from: %s' % path)
+
+ return contents
+
+
+def mkdir_p(path, chown=True):
+ """
+ A `mkdir -p` that defaults to chown the path to the ceph user
+ """
+ try:
+ os.mkdir(path)
+ except OSError as e:
+ if e.errno == errno.EEXIST:
+ pass
+ else:
+ raise
+ if chown:
+ uid, gid = get_ceph_user_ids()
+ os.chown(path, uid, gid)
+
+
+def chown(path, recursive=True):
+ """
+ ``chown`` a path to the ceph user (uid and guid fetched at runtime)
+ """
+ uid, gid = get_ceph_user_ids()
+ if os.path.islink(path):
+ process.run(['chown', '-h', 'ceph:ceph', path])
+ path = os.path.realpath(path)
+ if recursive:
+ process.run(['chown', '-R', 'ceph:ceph', path])
+ else:
+ os.chown(path, uid, gid)
+
+
+def is_binary(path):
+ """
+ Detect if a file path is a binary or not. Will falsely report as binary
+ when utf-16 encoded. In the ceph universe there is no such risk (yet)
+ """
+ with open(path, 'rb') as fp:
+ contents = fp.read(8192)
+ if b'\x00' in contents: # a null byte may signal binary
+ return True
+ return False
+
+
+class tmp_mount(object):
+ """
+ Temporarily mount a device on a temporary directory,
+ and unmount it upon exit
+
+ When ``encrypted`` is set to ``True``, the exit method will call out to
+ close the device so that it doesn't remain open after mounting. It is
+ assumed that it will be open because otherwise it wouldn't be possible to
+ mount in the first place
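+
+ A minimal usage sketch (the device path is illustrative)::
+
+ with tmp_mount('/dev/sdb1') as path:
+ contents = os.listdir(path)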
+ """
+
+ def __init__(self, device, encrypted=False):
+ self.device = device
+ self.path = None
+ self.encrypted = encrypted
+
+ def __enter__(self):
+ self.path = tempfile.mkdtemp()
+ process.run([
+ 'mount',
+ '-v',
+ self.device,
+ self.path
+ ])
+ return self.path
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ process.run([
+ 'umount',
+ '-v',
+ self.path
+ ])
+ if self.encrypted:
+ # avoid a circular import from the encryption module
+ from ceph_volume.util import encryption
+ encryption.dmcrypt_close(self.device)
+
+
+def unmount_tmpfs(path):
+ """
+ Removes the mount at the given path iff the path is a tmpfs mount point.
+ Otherwise no action is taken.
+ """
+ _out, _err, rc = process.call(['findmnt', '-t', 'tmpfs', '-M', path])
+ if rc != 0:
+ logger.info('{} does not appear to be a tmpfs mount'.format(path))
+ else:
+ logger.info('Unmounting tmpfs path at {}'.format(path))
+ unmount(path)
+
+
+def unmount(path):
+ """
+ Removes mounts at the given path
+ """
+ process.run([
+ 'umount',
+ '-v',
+ path,
+ ])
+
+
+def path_is_mounted(path, destination=None):
+ """
+ Check if the given path is mounted
+ """
+ m = Mounts(paths=True)
+ mounts = m.get_mounts()
+ realpath = os.path.realpath(path)
+ mounted_locations = mounts.get(realpath, [])
+
+ if destination:
+ return destination in mounted_locations
+ return mounted_locations != []
+
+
+def device_is_mounted(dev, destination=None):
+ """
+ Check if the given device is mounted, optionally validating that a
+ destination exists
+ """
+ plain_mounts = Mounts(devices=True)
+ realpath_mounts = Mounts(devices=True, realpath=True)
+
+ realpath_dev = os.path.realpath(dev) if dev.startswith('/') else dev
+ destination = os.path.realpath(destination) if destination else None
+ # plain mounts
+ plain_dev_mounts = plain_mounts.get_mounts().get(dev, [])
+ realpath_dev_mounts = plain_mounts.get_mounts().get(realpath_dev, [])
+ # realpath mounts
+ plain_dev_real_mounts = realpath_mounts.get_mounts().get(dev, [])
+ realpath_dev_real_mounts = realpath_mounts.get_mounts().get(realpath_dev, [])
+
+ mount_locations = [
+ plain_dev_mounts,
+ realpath_dev_mounts,
+ plain_dev_real_mounts,
+ realpath_dev_real_mounts
+ ]
+
+ for mounts in mount_locations:
+ if mounts: # we have a matching mount
+ if destination:
+ if destination in mounts:
+ logger.info(
+ '%s detected as mounted, exists at destination: %s', dev, destination
+ )
+ return True
+ else:
+ logger.info('%s was found as mounted', dev)
+ return True
+ logger.info('%s was not found as mounted', dev)
+ return False
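+
+# A usage sketch (device and destination are hypothetical):
+#
+# device_is_mounted('/dev/sda1') # True if mounted anywhere
+# device_is_mounted('/dev/sda1', destination='/boot') # True only if mounted there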
+
+class Mounts(object):
+ excluded_paths = []
+
+ def __init__(self, devices=False, paths=False, realpath=False):
+ self.devices = devices
+ self.paths = paths
+ self.realpath = realpath
+
+ def safe_realpath(self, path, timeout=0.2):
+ def _realpath(path, result):
+ p = os.path.realpath(path)
+ result.append(p)
+
+ result = []
+ t = threading.Thread(target=_realpath, args=(path, result))
+ t.daemon = True
+ t.start()
+ t.join(timeout)
+ if t.is_alive():
+ return None
+ return result[0] if result else None
+
+ def get_mounts(self):
+ """
+ Create a mapping of all available system mounts so that other helpers
+ can easily detect what path or device is mounted
+
+ It ignores (most) non-existing devices, but since some setups might need
+ extra device information, it makes an exception for:
+
+ - tmpfs
+ - devtmpfs
+ - /dev/root
+
+ If ``devices`` is set to ``True`` the mapping will be a device-to-path(s),
+ if ``paths`` is set to ``True`` then the mapping will be
+ a path-to-device(s)
+
+ :param realpath: Resolve devices to use their realpaths. This is useful for
+ paths like LVM where more than one path can point to the same device
+ """
+ devices_mounted = {}
+ paths_mounted = {}
+ do_not_skip = ['tmpfs', 'devtmpfs', '/dev/root']
+ default_to_devices = self.devices is False and self.paths is False
+
+ with open(PROCDIR + '/mounts', 'rb') as mounts:
+ proc_mounts = mounts.readlines()
+
+ for line in proc_mounts:
+ fields = [as_string(f) for f in line.split()]
+ if len(fields) < 3:
+ continue
+ if fields[0] in Mounts.excluded_paths or \
+ fields[1] in Mounts.excluded_paths:
+ continue
+ if self.realpath:
+ if fields[0].startswith('/'):
+ device = self.safe_realpath(fields[0])
+ if device is None:
+ logger.warning(f"Can't get realpath on {fields[0]}, skipping.")
+ Mounts.excluded_paths.append(fields[0])
+ continue
+ else:
+ device = fields[0]
+ else:
+ device = fields[0]
+ path = self.safe_realpath(fields[1])
+ if path is None:
+ logger.warning(f"Can't get realpath on {fields[1]}, skipping.")
+ Mounts.excluded_paths.append(fields[1])
+ continue
+ # only care about actual existing devices
+ if not os.path.exists(device) or not device.startswith('/'):
+ if device not in do_not_skip:
+ continue
+ devices_mounted.setdefault(device, []).append(path)
+ paths_mounted.setdefault(path, []).append(device)
+
+ # default to returning device information if no mapping was
+ # explicitly requested
+ if self.devices is True or default_to_devices:
+ return devices_mounted
+ else:
+ return paths_mounted
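+
+# A sketch of the mapping ``get_mounts`` produces (devices and paths are
+# hypothetical):
+#
+# Mounts(devices=True).get_mounts()
+# # {'/dev/sda1': ['/boot'], 'tmpfs': ['/run', '/dev/shm']}
+# Mounts(paths=True).get_mounts()
+# # {'/boot': ['/dev/sda1'], '/run': ['tmpfs']}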
+
+
+def set_context(path, recursive=False):
+ """
+ Calls ``restorecon`` to set the proper SELinux context on a path. It is
+ only called if the ``restorecon`` executable is found on the system
+ ``PATH``.
+
+ If the ``CEPH_VOLUME_SKIP_RESTORECON`` environment variable is set to
+ any of: "1", "true", "yes" the call will be skipped as well.
+
+ Finally, if SELinux is not enabled, or not available in the system,
+ ``restorecon`` will not be called. This is checked by calling out to the
+ ``selinuxenabled`` executable. If that tool is not installed or returns
+ a non-zero exit status then no further action is taken and this function
+ will return.
+ """
+ skip = os.environ.get('CEPH_VOLUME_SKIP_RESTORECON', '')
+ if skip.lower() in ['1', 'true', 'yes']:
+ logger.info(
+ 'CEPH_VOLUME_SKIP_RESTORECON environ is set, will not call restorecon'
+ )
+ return
+
+ try:
+ stdout, stderr, code = process.call(['selinuxenabled'],
+ verbose_on_failure=False)
+ except FileNotFoundError:
+ logger.info('No SELinux found, skipping call to restorecon')
+ return
+
+ if code != 0:
+ logger.info('SELinux is not enabled, will not call restorecon')
+ return
+
+ # restore selinux context to default policy values
+ if which('restorecon').startswith('/'):
+ if recursive:
+ process.run(['restorecon', '-R', path])
+ else:
+ process.run(['restorecon', path])
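+
+# A usage sketch (the path is hypothetical):
+#
+# set_context('/var/lib/ceph/osd/ceph-0', recursive=True)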
diff --git a/src/ceph-volume/ceph_volume/util/templates.py b/src/ceph-volume/ceph_volume/util/templates.py
new file mode 100644
index 000000000..85a366d26
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/util/templates.py
@@ -0,0 +1,49 @@
+
+osd_header = """
+{:-^100}""".format('')
+
+
+osd_component_titles = """
+ Type Path LV Size % of device"""
+
+
+osd_reused_id = """
+ OSD id {id_: <55}"""
+
+
+osd_component = """
+ {_type: <15} {path: <55} {size: <15} {percent:.2%}"""
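+
+# A rendering sketch for ``osd_component`` (values are hypothetical):
+#
+# osd_component.format(_type='block', path='/dev/ceph-vg/osd-block-0',
+# size='446.00 GB', percent=1.0)
+# # yields a row like: "block /dev/ceph-vg/osd-block-0 446.00 GB 100.00%"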
+
+
+osd_encryption = """
+ encryption: {enc: <15}"""
+
+
+total_osds = """
+Total OSDs: {total_osds}
+"""
+
+
+def filtered_devices(devices):
+ string = """
+Filtered Devices:"""
+ for device, info in devices.items():
+ string += """
+ %s""" % device
+
+ for reason in info['reasons']:
+ string += """
+ %s""" % reason
+
+ string += "\n"
+ return string
+
+
+ssd_volume_group = """
+Solid State VG:
+ Targets: {target: <25} Total size: {total_lv_size: <25}
+ Total LVs: {total_lvs: <25} Size per LV: {lv_size: <25}
+ Devices: {block_db_devices}
+"""
+
+
diff --git a/src/ceph-volume/plugin/zfs/CMakeLists.txt b/src/ceph-volume/plugin/zfs/CMakeLists.txt
new file mode 100644
index 000000000..da10f46fd
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/CMakeLists.txt
@@ -0,0 +1,3 @@
+
+distutils_install_module(ceph_volume_zfs
+ INSTALL_SCRIPT ${CMAKE_INSTALL_FULL_SBINDIR})
diff --git a/src/ceph-volume/plugin/zfs/LICENSE b/src/ceph-volume/plugin/zfs/LICENSE
new file mode 100644
index 000000000..92cc048b8
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/LICENSE
@@ -0,0 +1,32 @@
+
+
+BSD License
+
+Copyright (c) 2018, Willem Jan Withagen
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/src/ceph-volume/plugin/zfs/MANIFEST.in b/src/ceph-volume/plugin/zfs/MANIFEST.in
new file mode 100644
index 000000000..ed96496e6
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/MANIFEST.in
@@ -0,0 +1,7 @@
+include LICENSE
+
+recursive-include ceph_volume_zfs *
+recursive-exclude * __pycache__
+recursive-exclude * *.py[co]
+
+recursive-include docs *.rst conf.py Makefile *.jpg *.png *.gif
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/__init__.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/__init__.py
new file mode 100755
index 000000000..0b0889f36
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/__init__.py
@@ -0,0 +1,13 @@
+# -*- coding: utf-8 -*-
+
+"""Top-level package for Ceph volume on ZFS."""
+
+__author__ = """Willem Jan Withagen"""
+__email__ = 'wjw@digiware.nl'
+
+import ceph_volume_zfs.zfs
+
+from collections import namedtuple
+
+sys_info = namedtuple('sys_info', ['devices'])
+sys_info.devices = dict()
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/api/__init__.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/api/__init__.py
new file mode 100644
index 000000000..ecc971299
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/api/__init__.py
@@ -0,0 +1,3 @@
+"""
+Device API that can be shared among other implementations.
+"""
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/__init__.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/__init__.py
new file mode 100755
index 000000000..c1a8fe656
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/__init__.py
@@ -0,0 +1,2 @@
+# -*- coding: utf-8 -*-
+from . import zfs
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/__init__.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/__init__.py
new file mode 100755
index 000000000..457418493
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/__init__.py
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 -*-
+
+import logging
+logger = logging.getLogger(__name__)
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/inventory.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/inventory.py
new file mode 100644
index 000000000..be65e39ac
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/inventory.py
@@ -0,0 +1,50 @@
+import argparse
+import json
+from textwrap import dedent
+
+# import ceph_volume.process
+
+from ceph_volume_zfs.util.disk import Disks
+
+class Inventory(object):
+
+ help = 'Generate a list of available devices'
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ def format_report(self, inventory):
+ if self.args.format == 'json':
+ print(json.dumps(inventory.json_report()))
+ elif self.args.format == 'json-pretty':
+ print(json.dumps(inventory.json_report(), indent=4, sort_keys=True))
+ else:
+ print(inventory.pretty_report())
+
+ def main(self):
+ sub_command_help = dedent("""
+ Generate an inventory of available devices
+ """)
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume zfs inventory',
+ description=sub_command_help,
+ )
+ parser.add_argument(
+ 'path',
+ nargs='?',
+ default=None,
+ help='Report on a specific disk',
+ )
+ parser.add_argument(
+ '--format',
+ choices=['plain', 'json', 'json-pretty'],
+ default='plain',
+ help='Output format',
+ )
+
+ self.args = parser.parse_args(self.argv)
+ if self.args.path:
+ self.format_report(Disks(self.args.path))
+ else:
+ self.format_report(Disks())
+
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/main.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/main.py
new file mode 100644
index 000000000..073be6467
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/main.py
@@ -0,0 +1,36 @@
+# vim: expandtab smarttab shiftwidth=4 softtabstop=4
+
+import argparse
+from textwrap import dedent
+from ceph_volume import terminal
+
+from . import inventory
+from . import prepare
+from . import zap
+
+class ZFSDEV(object):
+
+ help = 'Use ZFS to deploy OSDs'
+
+ _help = dedent("""
+ Use ZFS to deploy OSDs
+
+ {sub_help}
+ """)
+
+ # map each subcommand to its implementing class; ``main`` dispatches on
+ # ``self.mapper``, which would otherwise be undefined
+ mapper = {
+ 'inventory': inventory.Inventory,
+ 'prepare': prepare.Prepare,
+ 'zap': zap.Zap,
+ }
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ def print_help(self, sub_help):
+ return self._help.format(sub_help=sub_help)
+
+ def main(self):
+ terminal.dispatch(self.mapper, self.argv)
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume zfs',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=self.print_help(terminal.subhelp(self.mapper)),
+ )
+ parser.parse_args(self.argv)
+ if len(self.argv) <= 1:
+ return parser.print_help()
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/prepare.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/prepare.py
new file mode 100644
index 000000000..7c075e86a
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/prepare.py
@@ -0,0 +1,25 @@
+import argparse
+
+from textwrap import dedent
+# from ceph_volume.util import arg_validators
+
+class Prepare(object):
+
+ help = 'Prepare a device'
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ def main(self):
+ sub_command_help = dedent("""
+ Prepare a device
+ """)
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume zfs prepare',
+ description=sub_command_help,
+ )
+ # argument handling is not implemented yet (the parser above is a
+ # placeholder), so unconditionally print the help text
+ print("Prepare: Print Help")
+ print(sub_command_help)
+ return
+
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/zap.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/zap.py
new file mode 100644
index 000000000..f5177d5f2
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/zap.py
@@ -0,0 +1,34 @@
+import argparse
+
+from textwrap import dedent
+# from ceph_volume.util import arg_validators
+
+class Zap(object):
+
+ help = 'Zap a device'
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ def main(self):
+ sub_command_help = dedent("""
+ Zap a device
+ """)
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume zfs zap',
+ description=sub_command_help,
+ )
+ parser.add_argument(
+ 'devices',
+ metavar='DEVICES',
+ nargs='*',
+ # type=arg_validators.ValidDevice(gpt_ok=True),
+ default=[],
+ help='Path to one or more LVs (as vg/lv), partitions (as /dev/sda1), or devices (as /dev/sda)'
+ )
+
+ # argument handling is not implemented yet (the parser above is a
+ # placeholder), so unconditionally print the help text
+ print("Zap: Print Help")
+ print(sub_command_help)
+ return
+
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/util/__init__.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/util/__init__.py
new file mode 100644
index 000000000..40a96afc6
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/util/__init__.py
@@ -0,0 +1 @@
+# -*- coding: utf-8 -*-
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/util/disk.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/util/disk.py
new file mode 100644
index 000000000..b666aa7d5
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/util/disk.py
@@ -0,0 +1,148 @@
+import re
+
+from ceph_volume.util.disk import human_readable_size
+from ceph_volume import process
+from ceph_volume import sys_info
+
+report_template = """
+/dev/{geomname:<16} {mediasize:<16} {rotational!s:<7} {descr}"""
+# {geomname:<25} {mediasize:<12} {rotational!s:<7} {mode!s:<9} {descr}"""
+
+def geom_disk_parser(block):
+ """
+ Parses the block output of `geom disk list`.
+
+ Geom name: ada3
+ Providers:
+ 1. Name: ada3
+ Mediasize: 40018599936 (37G)
+ Sectorsize: 512
+ Stripesize: 4096
+ Stripeoffset: 0
+ Mode: r2w2e4
+ descr: Corsair CSSD-F40GB2
+ lunid: 5000000000000236
+ ident: 111465010000101800EC
+ rotationrate: 0
+ fwsectors: 63
+ fwheads: 16
+
+ :param block: A string with the full block from `geom disk list`
+ """
+ pairs = block.split(';')
+ parsed = {}
+ for pair in pairs:
+ if 'Providers' in pair:
+ continue
+ try:
+ column, value = pair.split(':')
+ except ValueError:
+ continue
+ # fixup: use raw strings so the regex escapes are not treated as
+ # (invalid) string escape sequences
+ column = re.sub(r"\s+", "", column)
+ column = re.sub(r"^[0-9]+\.", "", column)
+ value = value.strip()
+ value = re.sub(r'\([0-9A-Z]+\)', '', value)
+ parsed[column.lower()] = value
+ return parsed
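+
+# A sketch of the parsed result for the geom block quoted in the docstring
+# (values shown trimmed for readability):
+#
+# {'geomname': 'ada3', 'name': 'ada3', 'mediasize': '40018599936',
+# 'descr': 'Corsair CSSD-F40GB2', 'rotationrate': '0', ...}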
+
+def get_disk(diskname):
+ """
+ Captures all available info from geom
+ along with interesting metadata like sectors, size, vendor,
+ solid/rotational, etc...
+
+ Returns a dictionary, with all the geom fields as keys.
+ """
+
+ command = ['/sbin/geom', 'disk', 'list', re.sub('/dev/', '', diskname)]
+ out, err, rc = process.call(command)
+ geom_block = ""
+ for line in out:
+ geom_block += ";" + line.strip()
+ disk = geom_disk_parser(geom_block)
+ return disk
+
+def get_disks():
+ command = ['/sbin/geom', 'disk', 'status', '-s']
+ out, err, rc = process.call(command)
+ disks = {}
+ for path in out:
+ dsk, rest1, rest2 = path.split()
+ disk = get_disk(dsk)
+ disks['/dev/'+dsk] = disk
+ return disks
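+
+# A sketch of the mapping ``get_disks`` returns (the disk is hypothetical):
+#
+# {'/dev/ada3': {'geomname': 'ada3', 'mediasize': '40018599936', ...}}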
+
+class Disks(object):
+
+ def __init__(self, path=None):
+ if not sys_info.devices:
+ sys_info.devices = get_disks()
+ self.disks = {}
+ for k in sys_info.devices:
+ if path is not None:
+ if path in k:
+ self.disks[k] = Disk(k)
+ else:
+ self.disks[k] = Disk(k)
+
+ def pretty_report(self, all=True):
+ output = [
+ report_template.format(
+ geomname='Device Path',
+ mediasize='Size',
+ rotational='rotates',
+ descr='Model name',
+ mode='available',
+ )]
+ for disk in sorted(self.disks):
+ output.append(self.disks[disk].report())
+ return ''.join(output)
+
+ def json_report(self):
+ output = []
+ for disk in sorted(self.disks):
+ output.append(self.disks[disk].json_report())
+ return output
+
+
+class Disk(object):
+
+ report_fields = [
+ 'rejected_reasons',
+ 'available',
+ 'path',
+ 'sys_api',
+ ]
+ pretty_report_sys_fields = [
+ 'human_readable_size',
+ 'model',
+ 'removable',
+ 'ro',
+ 'rotational',
+ 'sas_address',
+ 'scheduler_mode',
+ 'vendor',
+ ]
+
+ def __init__(self, path):
+ self.abspath = path
+ self.path = path
+ self.reject_reasons = []
+ self.available = True
+ self.sys_api = sys_info.devices.get(path)
+
+ def report(self):
+ return report_template.format(
+ geomname=self.sys_api.get('geomname'),
+ mediasize=human_readable_size(int(self.sys_api.get('mediasize'))),
+ rotational=int(self.sys_api.get('rotationrate')) != 0,
+ mode=self.sys_api.get('mode'),
+ descr=self.sys_api.get('descr')
+ )
+
+ def json_report(self):
+ output = {k.strip('_'): v for k, v in vars(self).items()}
+ return output
+
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/zfs.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/zfs.py
new file mode 100755
index 000000000..e9911c75e
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/zfs.py
@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import print_function
+import argparse
+import os
+import sys
+import logging
+
+from textwrap import dedent
+from ceph_volume import log, conf, configuration
+from ceph_volume import exceptions
+from ceph_volume import terminal
+
+# The ceph-volume-zfs specific code
+from ceph_volume_zfs import devices
+# from ceph_volume_zfs.util import device
+from ceph_volume_zfs.devices import zfs
+
+# the supported actions
+from ceph_volume_zfs.devices.zfs import inventory
+from ceph_volume_zfs.devices.zfs import prepare
+from ceph_volume_zfs.devices.zfs import zap
+
+
+class ZFS(object):
+
+ # help info for subcommands
+ help = "Use ZFS as the underlying technology for OSDs"
+
+ # help info for the plugin
+ help_menu = "Deploy OSDs with ZFS"
+ _help = dedent("""
+ Use ZFS as the underlying technology for OSDs
+
+ {sub_zfshelp}
+ """)
+ name = 'zfs'
+
+ def __init__(self, argv=None, parse=True):
+ self.zfs_mapper = {
+ 'inventory': inventory.Inventory,
+ 'prepare': prepare.Prepare,
+ 'zap': zap.Zap,
+ }
+ if argv is None:
+ self.argv = sys.argv
+ else:
+ self.argv = argv
+ if parse:
+ self.main(self.argv)
+
+ def print_help(self, warning=False):
+ return self._help.format(
+ sub_zfshelp=terminal.subhelp(self.zfs_mapper)
+ )
+
+ def get_environ_vars(self):
+ environ_vars = []
+ for key, value in os.environ.items():
+ if key.startswith('CEPH_'):
+ environ_vars.append("%s=%s" % (key, value))
+ if not environ_vars:
+ return ''
+ else:
+ environ_vars.insert(0, '\nEnviron Variables:')
+ return '\n'.join(environ_vars)
+
+ def load_ceph_conf_path(self, cluster_name='ceph'):
+ abspath = '/etc/ceph/%s.conf' % cluster_name
+ conf.path = os.getenv('CEPH_CONF', abspath)
+ conf.cluster = cluster_name
+
+ def stat_ceph_conf(self):
+ try:
+ configuration.load(conf.path)
+ return terminal.green(conf.path)
+ except exceptions.ConfigurationError as error:
+ return terminal.red(error)
+
+ def load_log_path(self):
+ conf.log_path = os.getenv('CEPH_VOLUME_LOG_PATH', '/var/log/ceph')
+
+ def _get_split_args(self):
+ subcommands = self.zfs_mapper.keys()
+ slice_on_index = len(self.argv)
+ pruned_args = self.argv
+ for count, arg in enumerate(pruned_args):
+ if arg in subcommands:
+ slice_on_index = count
+ break
+ return pruned_args[:slice_on_index], pruned_args[slice_on_index:]
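+
+ # a split sketch (the arguments are hypothetical):
+ # ['ceph-volume-zfs', '--cluster', 'ceph', 'zap', '/dev/ada3']
+ # -> (['ceph-volume-zfs', '--cluster', 'ceph'], ['zap', '/dev/ada3'])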
+
+ def main(self, argv=None):
+ if argv is None:
+ return
+ # these need to be available for the help, which gets parsed super
+ # early
+ self.load_ceph_conf_path()
+ self.load_log_path()
+ main_args, subcommand_args = self._get_split_args()
+ # no flags were passed in, return the help menu instead of waiting for
+ # argparse which will end up complaining that there are no args
+ if len(argv) < 1:
+ print(self.print_help(warning=True))
+ return
+ parser = argparse.ArgumentParser(
+ prog='ceph-volume-zfs',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=self.print_help(),
+ )
+ parser.add_argument(
+ '--cluster',
+ default='ceph',
+ help='Cluster name (defaults to "ceph")',
+ )
+ parser.add_argument(
+ '--log-level',
+ default='debug',
+ help='Change the file log level (defaults to debug)',
+ )
+ parser.add_argument(
+ '--log-path',
+ default='/var/log/ceph/',
+ help='Change the log path (defaults to /var/log/ceph)',
+ )
+ args = parser.parse_args(main_args)
+ conf.log_path = args.log_path
+ if os.path.isdir(conf.log_path):
+ conf.log_path = os.path.join(args.log_path, 'ceph-volume-zfs.log')
+ log.setup()
+ logger = logging.getLogger(__name__)
+ logger.info("Running command: ceph-volume-zfs %s %s",
+ " ".join(main_args), " ".join(subcommand_args))
+ # set all variables from args and load everything needed according to
+ # them
+ self.load_ceph_conf_path(cluster_name=args.cluster)
+ try:
+ conf.ceph = configuration.load(conf.path)
+ except exceptions.ConfigurationError as error:
+ # we warn only here, because it is possible that the configuration
+ # file is not needed, or that it will be loaded by some other means
+ # (like reading from zfs tags)
+ logger.exception('ignoring inability to load ceph.conf')
+ terminal.red(error)
+ # dispatch to sub-commands
+ terminal.dispatch(self.zfs_mapper, subcommand_args)
+
+
+if __name__ == '__main__':
+ ZFS()
diff --git a/src/ceph-volume/plugin/zfs/requirements_dev.txt b/src/ceph-volume/plugin/zfs/requirements_dev.txt
new file mode 100644
index 000000000..7263a68fa
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/requirements_dev.txt
@@ -0,0 +1,5 @@
+pip==9.0.1
+wheel==0.30.0
+flake8==3.5.0
+tox==2.9.1
+coverage==4.5.1
diff --git a/src/ceph-volume/plugin/zfs/setup.py b/src/ceph-volume/plugin/zfs/setup.py
new file mode 100644
index 000000000..31f6998f9
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/setup.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""The setup script."""
+
+from setuptools import setup, find_packages
+
+requirements = [ ]
+
+setup_requirements = [ ]
+
+setup(
+ author="Willem Jan Withagen",
+ author_email='wjw@digiware.nl',
+ classifiers=[
+ 'Development Status :: 2 - Pre-Alpha',
+ 'Environment :: Console',
+ 'Intended Audience :: Information Technology',
+ 'Intended Audience :: System Administrators',
+ 'Operating System :: POSIX :: FreeBSD',
+ 'License :: OSI Approved :: BSD License',
+ 'Natural Language :: English',
+ "Programming Language :: Python :: 2",
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.6',
+ ],
+ description="Manage Ceph OSDs on ZFS pool/volume/filesystem",
+ install_requires=requirements,
+ license="BSD license",
+ include_package_data=True,
+ keywords='ceph-volume-zfs',
+ name='ceph-volume-zfs',
+ packages=find_packages(include=['ceph_volume_zfs']),
+ setup_requires=setup_requirements,
+ url='https://github.com/ceph/ceph/src/ceph-volume/plugin/zfs',
+ version='0.1.0',
+ zip_safe=False,
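+ # ceph-volume discovers external plugins through the
+ # 'ceph_volume_handlers' entry point group and dispatches to the class
+ # registered under the subcommand name (here 'zfs')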
+ entry_points = dict(
+ ceph_volume_handlers = [
+ 'zfs = ceph_volume_zfs.zfs:ZFS',
+ ],
+ ),
+)
diff --git a/src/ceph-volume/plugin/zfs/tox.ini b/src/ceph-volume/plugin/zfs/tox.ini
new file mode 100644
index 000000000..80e35439f
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/tox.ini
@@ -0,0 +1,21 @@
+[tox]
+envlist = py27, py34, py35, py36, flake8
+
+[travis]
+python =
+ 3.6: py36
+ 3.5: py35
+ 3.4: py34
+ 2.7: py27
+
+[testenv:flake8]
+basepython = python
+deps = flake8
+commands = flake8
+
+[testenv]
+setenv =
+ PYTHONPATH = {toxinidir}
+
+commands = python setup.py test
+
diff --git a/src/ceph-volume/setup.py b/src/ceph-volume/setup.py
new file mode 100644
index 000000000..44a0d0e46
--- /dev/null
+++ b/src/ceph-volume/setup.py
@@ -0,0 +1,42 @@
+from setuptools import setup, find_packages
+import os
+
+
+setup(
+ name='ceph-volume',
+ version='1.0.0',
+ packages=find_packages(),
+
+ author='',
+ author_email='contact@redhat.com',
+ description='Deploy Ceph OSDs using different device technologies like lvm or physical disks',
+ license='LGPLv2+',
+ keywords='ceph volume disk devices lvm',
+ url="https://github.com/ceph/ceph",
+ zip_safe = False,
+ install_requires='ceph',
+ dependency_links=[''.join(['file://', os.path.join(os.getcwd(), '../',
+ 'python-common#egg=ceph-1.0.0')])],
+ tests_require=[
+ 'pytest >=2.1.3',
+ 'tox',
+ 'ceph',
+ ],
+ entry_points = dict(
+ console_scripts = [
+ 'ceph-volume = ceph_volume.main:Volume',
+ 'ceph-volume-systemd = ceph_volume.systemd:main',
+ ],
+ ),
+ classifiers = [
+ 'Environment :: Console',
+ 'Intended Audience :: Information Technology',
+ 'Intended Audience :: System Administrators',
+ 'Operating System :: POSIX :: Linux',
+ 'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ ]
+)
diff --git a/src/ceph-volume/shell_tox.ini b/src/ceph-volume/shell_tox.ini
new file mode 100644
index 000000000..2950c8a09
--- /dev/null
+++ b/src/ceph-volume/shell_tox.ini
@@ -0,0 +1,11 @@
+[tox]
+envlist = py36, py3
+skip_missing_interpreters = true
+
+[testenv]
+passenv=*
+allowlist_externals=
+ bash
+ grep
+ mktemp
+commands=bash {posargs:ceph_volume/tests/functional/scripts/test_unicode.sh} {posargs:ceph_volume/tests/functional/scripts/output.py}
diff --git a/src/ceph-volume/tox.ini b/src/ceph-volume/tox.ini
new file mode 100644
index 000000000..2f4ba243f
--- /dev/null
+++ b/src/ceph-volume/tox.ini
@@ -0,0 +1,22 @@
+[tox]
+envlist = py36, py3, py3-flake8
+skip_missing_interpreters = true
+
+[testenv]
+deps=
+ pytest
+ pytest-xdist
+ mock
+ pyfakefs
+install_command=./tox_install_command.sh {opts} {packages}
+commands=py.test --numprocesses=auto -vv {posargs:ceph_volume/tests} --ignore=ceph_volume/tests/functional
+
+[testenv:py3-flake8]
+deps=flake8
+commands=flake8 {posargs:ceph_volume}
+
+[tool:pytest]
+norecursedirs = .* _* virtualenv
+
+[flake8]
+select=F,E9
diff --git a/src/ceph-volume/tox_install_command.sh b/src/ceph-volume/tox_install_command.sh
new file mode 100755
index 000000000..c13c95533
--- /dev/null
+++ b/src/ceph-volume/tox_install_command.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+# install the in-tree python-common dependency as editable first, then
+# hand the remaining arguments to pip
+python -m pip install --editable="file://$(pwd)/../python-common"
+python -m pip install "$@"